target/i386: unify masking of interrupts

Interrupt handling depends on various flags in env->hflags or env->hflags2,
and the exact details were not replicated consistently between
x86_cpu_has_work and x86_cpu_exec_interrupt.  Create a new function that
extracts the highest-priority non-masked interrupt, and use it in both
functions.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Author: Paolo Bonzini <pbonzini@redhat.com>
Date:   2018-08-21 15:31:24 +02:00
Commit: 92d5f1a414
Parent: 27e18b8952

3 changed files with 91 additions and 67 deletions
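
For orientation, here is a condensed sketch of the shape the patch gives the
two callers (illustrative only; the authoritative code is in the hunks
below).  Both now funnel through the new helper, which returns the single
highest-priority unmasked CPU_INTERRUPT_* bit, or 0 if nothing can be
serviced:

    /* Sketch of the call pattern introduced by this patch; see the diff
     * below for the real code. */
    static bool x86_cpu_has_work(CPUState *cs)
    {
        /* "Has work" now simply means "some unmasked interrupt is pending". */
        return x86_cpu_pending_interrupt(cs, cs->interrupt_request) != 0;
    }

    bool x86_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
    {
        /* Narrow the request mask to the one highest-priority unmasked
         * interrupt, then service exactly that one. */
        interrupt_request = x86_cpu_pending_interrupt(cs, interrupt_request);
        if (!interrupt_request) {
            return false;
        }
        switch (interrupt_request) {
        /* ... one case per CPU_INTERRUPT_* value, as in the diff ... */
        }
        return true;
    }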

--- a/target/i386/cpu.c
+++ b/target/i386/cpu.c

@@ -5429,20 +5429,51 @@ static void x86_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb)
     cpu->env.eip = tb->pc - tb->cs_base;
 }
 
-static bool x86_cpu_has_work(CPUState *cs)
+int x86_cpu_pending_interrupt(CPUState *cs, int interrupt_request)
 {
     X86CPU *cpu = X86_CPU(cs);
     CPUX86State *env = &cpu->env;
 
-    return ((cs->interrupt_request & (CPU_INTERRUPT_HARD |
-                                      CPU_INTERRUPT_POLL)) &&
-            (env->eflags & IF_MASK)) ||
-           (cs->interrupt_request & (CPU_INTERRUPT_NMI |
-                                     CPU_INTERRUPT_INIT |
-                                     CPU_INTERRUPT_SIPI |
-                                     CPU_INTERRUPT_MCE)) ||
-           ((cs->interrupt_request & CPU_INTERRUPT_SMI) &&
-            !(env->hflags & HF_SMM_MASK));
+#if !defined(CONFIG_USER_ONLY)
+    if (interrupt_request & CPU_INTERRUPT_POLL) {
+        return CPU_INTERRUPT_POLL;
+    }
+#endif
+    if (interrupt_request & CPU_INTERRUPT_SIPI) {
+        return CPU_INTERRUPT_SIPI;
+    }
+
+    if (env->hflags2 & HF2_GIF_MASK) {
+        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
+            !(env->hflags & HF_SMM_MASK)) {
+            return CPU_INTERRUPT_SMI;
+        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
+                   !(env->hflags2 & HF2_NMI_MASK)) {
+            return CPU_INTERRUPT_NMI;
+        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
+            return CPU_INTERRUPT_MCE;
+        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
+                   (((env->hflags2 & HF2_VINTR_MASK) &&
+                     (env->hflags2 & HF2_HIF_MASK)) ||
+                    (!(env->hflags2 & HF2_VINTR_MASK) &&
+                     (env->eflags & IF_MASK &&
+                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
+            return CPU_INTERRUPT_HARD;
+#if !defined(CONFIG_USER_ONLY)
+        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
+                   (env->eflags & IF_MASK) &&
+                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
+            return CPU_INTERRUPT_VIRQ;
+#endif
+        }
+    }
+
+    return 0;
+}
+
+static bool x86_cpu_has_work(CPUState *cs)
+{
+    return x86_cpu_pending_interrupt(cs, cs->interrupt_request) != 0;
 }
 
 static void x86_disas_set_info(CPUState *cs, disassemble_info *info)
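
The else-if chain above fixes the priority order: POLL and SIPI are checked
unconditionally, and only while GIF is set do SMI, NMI, MCE, hard, and
virtual interrupts follow, in that order.  For example (hypothetical check,
assuming a cs whose flags mask none of the pending interrupts):

    /* Hypothetical: with an NMI and a maskable IRQ both pending and
     * unmasked, the helper reports the NMI first, per the chain above. */
    int req = CPU_INTERRUPT_NMI | CPU_INTERRUPT_HARD;
    assert(x86_cpu_pending_interrupt(cs, req) == CPU_INTERRUPT_NMI);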

--- a/target/i386/cpu.h
+++ b/target/i386/cpu.h

@@ -1485,6 +1485,7 @@ extern struct VMStateDescription vmstate_x86_cpu;
  */
 void x86_cpu_do_interrupt(CPUState *cpu);
 bool x86_cpu_exec_interrupt(CPUState *cpu, int int_req);
+int x86_cpu_pending_interrupt(CPUState *cs, int interrupt_request);
 
 int x86_cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cpu,
                              int cpuid, void *opaque);

--- a/target/i386/seg_helper.c
+++ b/target/i386/seg_helper.c

@@ -1319,74 +1319,66 @@ bool x86_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
 {
     X86CPU *cpu = X86_CPU(cs);
     CPUX86State *env = &cpu->env;
-    bool ret = false;
-
-#if !defined(CONFIG_USER_ONLY)
-    if (interrupt_request & CPU_INTERRUPT_POLL) {
-        cs->interrupt_request &= ~CPU_INTERRUPT_POLL;
-        apic_poll_irq(cpu->apic_state);
-        /* Don't process multiple interrupt requests in a single call.
-           This is required to make icount-driven execution deterministic. */
-        return true;
-    }
-#endif
-    if (interrupt_request & CPU_INTERRUPT_SIPI) {
-        do_cpu_sipi(cpu);
-        ret = true;
-    } else if (env->hflags2 & HF2_GIF_MASK) {
-        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
-            !(env->hflags & HF_SMM_MASK)) {
-            cpu_svm_check_intercept_param(env, SVM_EXIT_SMI, 0, 0);
-            cs->interrupt_request &= ~CPU_INTERRUPT_SMI;
-            do_smm_enter(cpu);
-            ret = true;
-        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
-                   !(env->hflags2 & HF2_NMI_MASK)) {
-            cpu_svm_check_intercept_param(env, SVM_EXIT_NMI, 0, 0);
-            cs->interrupt_request &= ~CPU_INTERRUPT_NMI;
-            env->hflags2 |= HF2_NMI_MASK;
-            do_interrupt_x86_hardirq(env, EXCP02_NMI, 1);
-            ret = true;
-        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
-            cs->interrupt_request &= ~CPU_INTERRUPT_MCE;
-            do_interrupt_x86_hardirq(env, EXCP12_MCHK, 0);
-            ret = true;
-        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
-                   (((env->hflags2 & HF2_VINTR_MASK) &&
-                     (env->hflags2 & HF2_HIF_MASK)) ||
-                    (!(env->hflags2 & HF2_VINTR_MASK) &&
-                     (env->eflags & IF_MASK &&
-                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
-            int intno;
-            cpu_svm_check_intercept_param(env, SVM_EXIT_INTR, 0, 0);
-            cs->interrupt_request &= ~(CPU_INTERRUPT_HARD |
-                                       CPU_INTERRUPT_VIRQ);
-            intno = cpu_get_pic_interrupt(env);
-            qemu_log_mask(CPU_LOG_TB_IN_ASM,
-                          "Servicing hardware INT=0x%02x\n", intno);
-            do_interrupt_x86_hardirq(env, intno, 1);
-            /* ensure that no TB jump will be modified as
-               the program flow was changed */
-            ret = true;
-#if !defined(CONFIG_USER_ONLY)
-        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
-                   (env->eflags & IF_MASK) &&
-                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
-            int intno;
-            /* FIXME: this should respect TPR */
-            cpu_svm_check_intercept_param(env, SVM_EXIT_VINTR, 0, 0);
-            intno = x86_ldl_phys(cs, env->vm_vmcb
-                                 + offsetof(struct vmcb, control.int_vector));
-            qemu_log_mask(CPU_LOG_TB_IN_ASM,
-                          "Servicing virtual hardware INT=0x%02x\n", intno);
-            do_interrupt_x86_hardirq(env, intno, 1);
-            cs->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
-            ret = true;
-#endif
-        }
-    }
-
-    return ret;
+    int intno;
+
+    interrupt_request = x86_cpu_pending_interrupt(cs, interrupt_request);
+    if (!interrupt_request) {
+        return false;
+    }
+
+    /* Don't process multiple interrupt requests in a single call.
+     * This is required to make icount-driven execution deterministic.
+     */
+    switch (interrupt_request) {
+#if !defined(CONFIG_USER_ONLY)
+    case CPU_INTERRUPT_POLL:
+        cs->interrupt_request &= ~CPU_INTERRUPT_POLL;
+        apic_poll_irq(cpu->apic_state);
+        break;
+#endif
+    case CPU_INTERRUPT_SIPI:
+        do_cpu_sipi(cpu);
+        break;
+    case CPU_INTERRUPT_SMI:
+        cpu_svm_check_intercept_param(env, SVM_EXIT_SMI, 0, 0);
+        cs->interrupt_request &= ~CPU_INTERRUPT_SMI;
+        do_smm_enter(cpu);
+        break;
+    case CPU_INTERRUPT_NMI:
+        cpu_svm_check_intercept_param(env, SVM_EXIT_NMI, 0, 0);
+        cs->interrupt_request &= ~CPU_INTERRUPT_NMI;
+        env->hflags2 |= HF2_NMI_MASK;
+        do_interrupt_x86_hardirq(env, EXCP02_NMI, 1);
+        break;
+    case CPU_INTERRUPT_MCE:
+        cs->interrupt_request &= ~CPU_INTERRUPT_MCE;
+        do_interrupt_x86_hardirq(env, EXCP12_MCHK, 0);
+        break;
+    case CPU_INTERRUPT_HARD:
+        cpu_svm_check_intercept_param(env, SVM_EXIT_INTR, 0, 0);
+        cs->interrupt_request &= ~(CPU_INTERRUPT_HARD |
+                                   CPU_INTERRUPT_VIRQ);
+        intno = cpu_get_pic_interrupt(env);
+        qemu_log_mask(CPU_LOG_TB_IN_ASM,
+                      "Servicing hardware INT=0x%02x\n", intno);
+        do_interrupt_x86_hardirq(env, intno, 1);
+        break;
+#if !defined(CONFIG_USER_ONLY)
+    case CPU_INTERRUPT_VIRQ:
+        /* FIXME: this should respect TPR */
+        cpu_svm_check_intercept_param(env, SVM_EXIT_VINTR, 0, 0);
+        intno = x86_ldl_phys(cs, env->vm_vmcb
+                             + offsetof(struct vmcb, control.int_vector));
+        qemu_log_mask(CPU_LOG_TB_IN_ASM,
+                      "Servicing virtual hardware INT=0x%02x\n", intno);
+        do_interrupt_x86_hardirq(env, intno, 1);
+        cs->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
+        break;
+#endif
+    }
+
+    /* Ensure that no TB jump will be modified as the program flow was changed. */
+    return true;
 }
 
 void helper_lldt(CPUX86State *env, int selector)
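
Note that the rewritten x86_cpu_exec_interrupt still services at most one
interrupt per call, as the determinism comment requires; any remaining bits
stay set in cs->interrupt_request and are picked up on a later pass through
the main execution loop (cpu_handle_interrupt in accel/tcg/cpu-exec.c at the
time of this commit).  A hypothetical caller-side sketch, not QEMU code,
illustrating the contract:

    /* Hypothetical drain loop: each call handles exactly one pending
     * unmasked interrupt and returns true, or returns false when nothing
     * is serviceable. */
    while (x86_cpu_exec_interrupt(cs, cs->interrupt_request)) {
        /* loop until no unmasked interrupt remains */
    }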