QOM CPUState refactorings / X86CPU

Merge remote-tracking branch 'remotes/afaerber/tags/qom-cpu-for-2.0' into staging

QOM CPUState refactorings / X86CPU

* Deadlock fix for exit requests around CPU reset
* X86CPU x2apic for KVM
* X86CPU model subclasses
* SPARCCPU preparations for model subclasses
* -cpu arguments for arm, cris, lm32, moxie, openrisc, ppc, sh4, uc32
* m68k assertion cleanups
* CPUClass hooks for cpu.h inline functions
* Field movements from CPU_COMMON to CPUState and follow-up cleanups

# gpg: Signature made Thu 13 Mar 2014 19:06:56 GMT using RSA key ID 3E7E013F
# gpg: Good signature from "Andreas Färber <afaerber@suse.de>"
# gpg:                 aka "Andreas Färber <afaerber@suse.com>"

* remotes/afaerber/tags/qom-cpu-for-2.0: (58 commits)
  user-exec: Change exception_action() argument to CPUState
  cputlb: Change tlb_set_page() argument to CPUState
  cputlb: Change tlb_flush() argument to CPUState
  cputlb: Change tlb_flush_page() argument to CPUState
  target-microblaze: Replace DisasContext::env field with MicroBlazeCPU
  target-cris: Replace DisasContext::env field with CRISCPU
  exec: Change cpu_abort() argument to CPUState
  exec: Change memory_region_section_get_iotlb() argument to CPUState
  cputlb: Change tlb_unprotect_code_phys() argument to CPUState
  cpu-exec: Change cpu_resume_from_signal() argument to CPUState
  exec: Change cpu_breakpoint_{insert,remove{,_by_ref,_all}} argument
  exec: Change cpu_watchpoint_{insert,remove{,_by_ref,_all}} argument
  target-ppc: Use PowerPCCPU in PowerPCCPUClass::handle_mmu_fault hook
  translate-all: Change tb_flush_jmp_cache() argument to CPUState
  translate-all: Change tb_gen_code() argument to CPUState
  translate-all: Change cpu_io_recompile() argument to CPUState
  translate-all: Change tb_check_watchpoint() argument to CPUState
  translate-all: Change cpu_restore_state_from_tb() argument to CPUState
  translate-all: Change cpu_restore_state() argument to CPUState
  cpu-exec: Change cpu_loop_exit() argument to CPUState
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
commit bbbd67f0cc
Peter Maydell, 2014-03-13 19:13:33 +00:00
155 changed files with 2455 additions and 1887 deletions
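The common thread in the bullet list above and in the diff below is a calling-convention change: exec-layer helpers that used to take the per-target CPUArchState now take the common CPUState, and fields such as exception_index, jmp_env and the icount counters move out of the CPU_COMMON macro into struct CPUState. The following standalone C sketch illustrates the shape of the new-style call; the struct and helper here are stubs for illustration only (not QEMU's definitions), and only the EXCP_INTERRUPT value is taken from the cpu-defs.h constants visible in the diff.

/* Standalone sketch of the post-series calling convention: helpers take a
 * CPUState * and the fields they touch live directly in struct CPUState.
 * Stub types for illustration; not QEMU source. */
#include <setjmp.h>
#include <stdio.h>

typedef struct CPUState {
    int exception_index;   /* previously declared via CPU_COMMON in cpu-defs.h */
    sigjmp_buf jmp_env;    /* likewise */
} CPUState;

#define EXCP_INTERRUPT 0x10000   /* same value as the cpu-defs.h constant */

/* New-style helper: operates on CPUState instead of CPUArchState. */
static void cpu_loop_exit(CPUState *cpu)
{
    siglongjmp(cpu->jmp_env, 1);
}

int main(void)
{
    CPUState cpu = { .exception_index = -1 };

    if (sigsetjmp(cpu.jmp_env, 0) == 0) {
        /* Old code wrote env->exception_index and passed env here. */
        cpu.exception_index = EXCP_INTERRUPT;
        cpu_loop_exit(&cpu);
    }
    printf("left the execution loop, exception_index=%d\n",
           cpu.exception_index);
    return 0;
}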


@@ -1000,7 +1000,7 @@ int main(int argc, char **argv)
     memset(ts, 0, sizeof(TaskState));
     init_task_state(ts);
     ts->info = info;
-    env->opaque = ts;
+    cpu->opaque = ts;
 #if defined(TARGET_I386)
     cpu_x86_set_cpl(env, 3);


@@ -23,29 +23,22 @@
 #include "qemu/atomic.h"
 #include "sysemu/qtest.h"
-bool qemu_cpu_has_work(CPUState *cpu)
+void cpu_loop_exit(CPUState *cpu)
 {
-    return cpu_has_work(cpu);
-}
-void cpu_loop_exit(CPUArchState *env)
-{
-    CPUState *cpu = ENV_GET_CPU(env);
     cpu->current_tb = NULL;
-    siglongjmp(env->jmp_env, 1);
+    siglongjmp(cpu->jmp_env, 1);
 }
 /* exit the current TB from a signal handler. The host registers are
    restored in a state compatible with the CPU emulator
  */
 #if defined(CONFIG_SOFTMMU)
-void cpu_resume_from_signal(CPUArchState *env, void *puc)
+void cpu_resume_from_signal(CPUState *cpu, void *puc)
 {
     /* XXX: restore cpu registers saved in host registers */
-    env->exception_index = -1;
-    siglongjmp(env->jmp_env, 1);
+    cpu->exception_index = -1;
+    siglongjmp(cpu->jmp_env, 1);
 }
 #endif
@@ -108,7 +101,7 @@ static void cpu_exec_nocache(CPUArchState *env, int max_cycles,
     if (max_cycles > CF_COUNT_MASK)
         max_cycles = CF_COUNT_MASK;
-    tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
+    tb = tb_gen_code(cpu, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                      max_cycles);
     cpu->current_tb = tb;
     /* execute the generated code */
@@ -123,6 +116,7 @@ static TranslationBlock *tb_find_slow(CPUArchState *env,
                                       target_ulong cs_base,
                                       uint64_t flags)
 {
+    CPUState *cpu = ENV_GET_CPU(env);
     TranslationBlock *tb, **ptb1;
     unsigned int h;
     tb_page_addr_t phys_pc, phys_page1;
@@ -160,7 +154,7 @@ static TranslationBlock *tb_find_slow(CPUArchState *env,
     }
 not_found:
     /* if no translated code available, then translate it now */
-    tb = tb_gen_code(env, pc, cs_base, flags, 0);
+    tb = tb_gen_code(cpu, pc, cs_base, flags, 0);
 found:
     /* Move the last found TB to the head of the list */
@@ -170,12 +164,13 @@ static TranslationBlock *tb_find_slow(CPUArchState *env,
         tcg_ctx.tb_ctx.tb_phys_hash[h] = tb;
     }
     /* we add the TB in the virtual pc hash table */
-    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
+    cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
     return tb;
 }
 static inline TranslationBlock *tb_find_fast(CPUArchState *env)
 {
+    CPUState *cpu = ENV_GET_CPU(env);
     TranslationBlock *tb;
     target_ulong cs_base, pc;
     int flags;
@@ -184,7 +179,7 @@ static inline TranslationBlock *tb_find_fast(CPUArchState *env)
        always be the same before a given translated block
        is executed. */
     cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
-    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
+    tb = cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
     if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                  tb->flags != flags)) {
         tb = tb_find_slow(env, pc, cs_base, flags);
@@ -201,10 +196,11 @@ void cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
 static void cpu_handle_debug_exception(CPUArchState *env)
 {
+    CPUState *cpu = ENV_GET_CPU(env);
     CPUWatchpoint *wp;
-    if (!env->watchpoint_hit) {
-        QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
+    if (!cpu->watchpoint_hit) {
+        QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
             wp->flags &= ~BP_WATCHPOINT_HIT;
         }
     }
@@ -283,16 +279,16 @@ int cpu_exec(CPUArchState *env)
 #else
 #error unsupported target CPU
 #endif
-    env->exception_index = -1;
+    cpu->exception_index = -1;
     /* prepare setjmp context for exception handling */
     for(;;) {
-        if (sigsetjmp(env->jmp_env, 0) == 0) {
+        if (sigsetjmp(cpu->jmp_env, 0) == 0) {
             /* if an exception is pending, we execute it here */
-            if (env->exception_index >= 0) {
-                if (env->exception_index >= EXCP_INTERRUPT) {
+            if (cpu->exception_index >= 0) {
+                if (cpu->exception_index >= EXCP_INTERRUPT) {
                     /* exit request from the cpu execution loop */
-                    ret = env->exception_index;
+                    ret = cpu->exception_index;
                     if (ret == EXCP_DEBUG) {
                         cpu_handle_debug_exception(env);
                     }
@@ -305,11 +301,11 @@ int cpu_exec(CPUArchState *env)
 #if defined(TARGET_I386)
                     cc->do_interrupt(cpu);
 #endif
-                    ret = env->exception_index;
+                    ret = cpu->exception_index;
                     break;
 #else
                     cc->do_interrupt(cpu);
-                    env->exception_index = -1;
+                    cpu->exception_index = -1;
 #endif
                 }
             }
@@ -324,8 +320,8 @@ int cpu_exec(CPUArchState *env)
                     }
                     if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                         cpu->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
-                        env->exception_index = EXCP_DEBUG;
-                        cpu_loop_exit(env);
+                        cpu->exception_index = EXCP_DEBUG;
+                        cpu_loop_exit(cpu);
                     }
 #if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
     defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
@@ -333,8 +329,8 @@ int cpu_exec(CPUArchState *env)
                     if (interrupt_request & CPU_INTERRUPT_HALT) {
                         cpu->interrupt_request &= ~CPU_INTERRUPT_HALT;
                         cpu->halted = 1;
-                        env->exception_index = EXCP_HLT;
-                        cpu_loop_exit(env);
+                        cpu->exception_index = EXCP_HLT;
+                        cpu_loop_exit(cpu);
                     }
 #endif
 #if defined(TARGET_I386)
@@ -348,8 +344,8 @@ int cpu_exec(CPUArchState *env)
                         cpu_svm_check_intercept_param(env, SVM_EXIT_INIT,
                                                       0);
                         do_cpu_init(x86_cpu);
-                        env->exception_index = EXCP_HALTED;
-                        cpu_loop_exit(env);
+                        cpu->exception_index = EXCP_HALTED;
+                        cpu_loop_exit(cpu);
                     } else if (interrupt_request & CPU_INTERRUPT_SIPI) {
                         do_cpu_sipi(x86_cpu);
                     } else if (env->hflags2 & HF2_GIF_MASK) {
@@ -420,7 +416,7 @@ int cpu_exec(CPUArchState *env)
 #elif defined(TARGET_LM32)
                     if ((interrupt_request & CPU_INTERRUPT_HARD)
                         && (env->ie & IE_IE)) {
-                        env->exception_index = EXCP_IRQ;
+                        cpu->exception_index = EXCP_IRQ;
                         cc->do_interrupt(cpu);
                         next_tb = 0;
                     }
@@ -429,7 +425,7 @@ int cpu_exec(CPUArchState *env)
                         && (env->sregs[SR_MSR] & MSR_IE)
                         && !(env->sregs[SR_MSR] & (MSR_EIP | MSR_BIP))
                         && !(env->iflags & (D_FLAG | IMM_FLAG))) {
-                        env->exception_index = EXCP_IRQ;
+                        cpu->exception_index = EXCP_IRQ;
                         cc->do_interrupt(cpu);
                         next_tb = 0;
                     }
@@ -437,7 +433,7 @@ int cpu_exec(CPUArchState *env)
                     if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                         cpu_mips_hw_interrupts_pending(env)) {
                         /* Raise it */
-                        env->exception_index = EXCP_EXT_INTERRUPT;
+                        cpu->exception_index = EXCP_EXT_INTERRUPT;
                         env->error_code = 0;
                         cc->do_interrupt(cpu);
                         next_tb = 0;
@@ -454,7 +450,7 @@ int cpu_exec(CPUArchState *env)
                             idx = EXCP_TICK;
                         }
                         if (idx >= 0) {
-                            env->exception_index = idx;
+                            cpu->exception_index = idx;
                             cc->do_interrupt(cpu);
                             next_tb = 0;
                         }
@@ -469,7 +465,7 @@ int cpu_exec(CPUArchState *env)
                         if (((type == TT_EXTINT) &&
                               cpu_pil_allowed(env, pil)) ||
                               type != TT_EXTINT) {
-                            env->exception_index = env->interrupt_index;
+                            cpu->exception_index = env->interrupt_index;
                             cc->do_interrupt(cpu);
                             next_tb = 0;
                         }
@@ -478,7 +474,7 @@ int cpu_exec(CPUArchState *env)
 #elif defined(TARGET_ARM)
                     if (interrupt_request & CPU_INTERRUPT_FIQ
                         && !(env->daif & PSTATE_F)) {
-                        env->exception_index = EXCP_FIQ;
+                        cpu->exception_index = EXCP_FIQ;
                         cc->do_interrupt(cpu);
                         next_tb = 0;
                     }
@@ -494,14 +490,14 @@ int cpu_exec(CPUArchState *env)
                     if (interrupt_request & CPU_INTERRUPT_HARD
                         && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                             || !(env->daif & PSTATE_I))) {
-                        env->exception_index = EXCP_IRQ;
+                        cpu->exception_index = EXCP_IRQ;
                         cc->do_interrupt(cpu);
                         next_tb = 0;
                     }
 #elif defined(TARGET_UNICORE32)
                     if (interrupt_request & CPU_INTERRUPT_HARD
                         && !(env->uncached_asr & ASR_I)) {
-                        env->exception_index = UC32_EXCP_INTR;
+                        cpu->exception_index = UC32_EXCP_INTR;
                         cc->do_interrupt(cpu);
                         next_tb = 0;
                     }
@@ -536,7 +532,7 @@ int cpu_exec(CPUArchState *env)
                             }
                         }
                         if (idx >= 0) {
-                            env->exception_index = idx;
+                            cpu->exception_index = idx;
                             env->error_code = 0;
                             cc->do_interrupt(cpu);
                             next_tb = 0;
@@ -546,7 +542,7 @@ int cpu_exec(CPUArchState *env)
                     if (interrupt_request & CPU_INTERRUPT_HARD
                         && (env->pregs[PR_CCS] & I_FLAG)
                         && !env->locked_irq) {
-                        env->exception_index = EXCP_IRQ;
+                        cpu->exception_index = EXCP_IRQ;
                         cc->do_interrupt(cpu);
                         next_tb = 0;
                     }
@@ -558,7 +554,7 @@ int cpu_exec(CPUArchState *env)
                             m_flag_archval = M_FLAG_V32;
                         }
                         if ((env->pregs[PR_CCS] & m_flag_archval)) {
-                            env->exception_index = EXCP_NMI;
+                            cpu->exception_index = EXCP_NMI;
                             cc->do_interrupt(cpu);
                             next_tb = 0;
                         }
@@ -572,7 +568,7 @@ int cpu_exec(CPUArchState *env)
                            hardware doesn't rely on this, so we
                            provide/save the vector when the interrupt is
                            first signalled. */
-                        env->exception_index = env->pending_vector;
+                        cpu->exception_index = env->pending_vector;
                         do_interrupt_m68k_hardirq(env);
                         next_tb = 0;
                     }
@@ -584,7 +580,7 @@ int cpu_exec(CPUArchState *env)
                     }
 #elif defined(TARGET_XTENSA)
                     if (interrupt_request & CPU_INTERRUPT_HARD) {
-                        env->exception_index = EXC_IRQ;
+                        cpu->exception_index = EXC_IRQ;
                         cc->do_interrupt(cpu);
                         next_tb = 0;
                     }
@@ -600,8 +596,8 @@ int cpu_exec(CPUArchState *env)
             }
             if (unlikely(cpu->exit_request)) {
                 cpu->exit_request = 0;
-                env->exception_index = EXCP_INTERRUPT;
-                cpu_loop_exit(env);
+                cpu->exception_index = EXCP_INTERRUPT;
+                cpu_loop_exit(cpu);
             }
             spin_lock(&tcg_ctx.tb_ctx.tb_lock);
             tb = tb_find_fast(env);
@@ -654,25 +650,25 @@ int cpu_exec(CPUArchState *env)
                     /* Instruction counter expired. */
                     int insns_left;
                     tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
-                    insns_left = env->icount_decr.u32;
-                    if (env->icount_extra && insns_left >= 0) {
+                    insns_left = cpu->icount_decr.u32;
+                    if (cpu->icount_extra && insns_left >= 0) {
                         /* Refill decrementer and continue execution. */
-                        env->icount_extra += insns_left;
-                        if (env->icount_extra > 0xffff) {
+                        cpu->icount_extra += insns_left;
+                        if (cpu->icount_extra > 0xffff) {
                             insns_left = 0xffff;
                         } else {
-                            insns_left = env->icount_extra;
+                            insns_left = cpu->icount_extra;
                         }
-                        env->icount_extra -= insns_left;
-                        env->icount_decr.u16.low = insns_left;
+                        cpu->icount_extra -= insns_left;
+                        cpu->icount_decr.u16.low = insns_left;
                     } else {
                         if (insns_left > 0) {
                             /* Execute remaining instructions. */
                             cpu_exec_nocache(env, insns_left, tb);
                         }
-                        env->exception_index = EXCP_INTERRUPT;
+                        cpu->exception_index = EXCP_INTERRUPT;
                         next_tb = 0;
-                        cpu_loop_exit(env);
+                        cpu_loop_exit(cpu);
                     }
                     break;
                 }

cpus.c

@@ -76,7 +76,7 @@ static bool cpu_thread_is_idle(CPUState *cpu)
     if (cpu_is_stopped(cpu)) {
         return true;
     }
-    if (!cpu->halted || qemu_cpu_has_work(cpu) ||
+    if (!cpu->halted || cpu_has_work(cpu) ||
         kvm_halt_in_kernel()) {
         return false;
     }
@@ -139,11 +139,10 @@ static int64_t cpu_get_icount_locked(void)
     icount = qemu_icount;
     if (cpu) {
-        CPUArchState *env = cpu->env_ptr;
-        if (!can_do_io(env)) {
+        if (!cpu_can_do_io(cpu)) {
             fprintf(stderr, "Bad clock read\n");
         }
-        icount -= (env->icount_decr.u16.low + env->icount_extra);
+        icount -= (cpu->icount_decr.u16.low + cpu->icount_extra);
     }
     return qemu_icount_bias + (icount << icount_time_shift);
 }
@@ -1236,6 +1235,7 @@ int vm_stop_force_state(RunState state)
 static int tcg_cpu_exec(CPUArchState *env)
 {
+    CPUState *cpu = ENV_GET_CPU(env);
     int ret;
 #ifdef CONFIG_PROFILER
     int64_t ti;
@@ -1248,9 +1248,9 @@ static int tcg_cpu_exec(CPUArchState *env)
         int64_t count;
         int64_t deadline;
         int decr;
-        qemu_icount -= (env->icount_decr.u16.low + env->icount_extra);
-        env->icount_decr.u16.low = 0;
-        env->icount_extra = 0;
+        qemu_icount -= (cpu->icount_decr.u16.low + cpu->icount_extra);
+        cpu->icount_decr.u16.low = 0;
+        cpu->icount_extra = 0;
         deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);
         /* Maintain prior (possibly buggy) behaviour where if no deadline
@@ -1266,8 +1266,8 @@ static int tcg_cpu_exec(CPUArchState *env)
         qemu_icount += count;
         decr = (count > 0xffff) ? 0xffff : count;
         count -= decr;
-        env->icount_decr.u16.low = decr;
-        env->icount_extra = count;
+        cpu->icount_decr.u16.low = decr;
+        cpu->icount_extra = count;
     }
     ret = cpu_exec(env);
 #ifdef CONFIG_PROFILER
@@ -1276,10 +1276,9 @@ static int tcg_cpu_exec(CPUArchState *env)
     if (use_icount) {
         /* Fold pending instructions back into the
            instruction counter, and clear the interrupt flag. */
-        qemu_icount -= (env->icount_decr.u16.low
-                        + env->icount_extra);
-        env->icount_decr.u32 = 0;
-        env->icount_extra = 0;
+        qemu_icount -= (cpu->icount_decr.u16.low + cpu->icount_extra);
+        cpu->icount_decr.u32 = 0;
+        cpu->icount_extra = 0;
     }
     return ret;
 }


@@ -46,9 +46,9 @@ int tlb_flush_count;
  * entries from the TLB at any time, so flushing more entries than
  * required is only an efficiency issue, not a correctness issue.
  */
-void tlb_flush(CPUArchState *env, int flush_global)
+void tlb_flush(CPUState *cpu, int flush_global)
 {
-    CPUState *cpu = ENV_GET_CPU(env);
+    CPUArchState *env = cpu->env_ptr;
 #if defined(DEBUG_TLB)
     printf("tlb_flush:\n");
@@ -58,7 +58,7 @@ void tlb_flush(CPUArchState *env, int flush_global)
     cpu->current_tb = NULL;
     memset(env->tlb_table, -1, sizeof(env->tlb_table));
-    memset(env->tb_jmp_cache, 0, sizeof(env->tb_jmp_cache));
+    memset(cpu->tb_jmp_cache, 0, sizeof(cpu->tb_jmp_cache));
     env->tlb_flush_addr = -1;
     env->tlb_flush_mask = 0;
@@ -77,9 +77,9 @@ static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
     }
 }
-void tlb_flush_page(CPUArchState *env, target_ulong addr)
+void tlb_flush_page(CPUState *cpu, target_ulong addr)
 {
-    CPUState *cpu = ENV_GET_CPU(env);
+    CPUArchState *env = cpu->env_ptr;
     int i;
     int mmu_idx;
@@ -93,7 +93,7 @@ void tlb_flush_page(CPUArchState *env, target_ulong addr)
                TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
                env->tlb_flush_addr, env->tlb_flush_mask);
 #endif
-        tlb_flush(env, 1);
+        tlb_flush(cpu, 1);
         return;
     }
     /* must reset current TB so that interrupts cannot modify the
@@ -106,7 +106,7 @@ void tlb_flush_page(CPUArchState *env, target_ulong addr)
         tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);
     }
-    tb_flush_jmp_cache(env, addr);
+    tb_flush_jmp_cache(cpu, addr);
 }
 /* update the TLBs so that writes to code in the virtual page 'addr'
@@ -119,7 +119,7 @@ void tlb_protect_code(ram_addr_t ram_addr)
 /* update the TLB so that writes in physical page 'phys_addr' are no longer
    tested for self modifying code */
-void tlb_unprotect_code_phys(CPUArchState *env, ram_addr_t ram_addr,
+void tlb_unprotect_code_phys(CPUState *cpu, ram_addr_t ram_addr,
                              target_ulong vaddr)
 {
     cpu_physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_CODE);
@@ -221,10 +221,11 @@ static void tlb_add_large_page(CPUArchState *env, target_ulong vaddr,
 /* Add a new TLB entry. At most one entry for a given virtual address
    is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
    supplied size is only used by tlb_flush_page. */
-void tlb_set_page(CPUArchState *env, target_ulong vaddr,
+void tlb_set_page(CPUState *cpu, target_ulong vaddr,
                   hwaddr paddr, int prot,
                   int mmu_idx, target_ulong size)
 {
+    CPUArchState *env = cpu->env_ptr;
     MemoryRegionSection *section;
     unsigned int index;
     target_ulong address;
@@ -232,7 +233,6 @@ void tlb_set_page(CPUArchState *env, target_ulong vaddr,
     uintptr_t addend;
     CPUTLBEntry *te;
     hwaddr iotlb, xlat, sz;
-    CPUState *cpu = ENV_GET_CPU(env);
     assert(size >= TARGET_PAGE_SIZE);
     if (size != TARGET_PAGE_SIZE) {
@@ -261,7 +261,7 @@ void tlb_set_page(CPUArchState *env, target_ulong vaddr,
     }
     code_address = address;
-    iotlb = memory_region_section_get_iotlb(env, section, vaddr, paddr, xlat,
+    iotlb = memory_region_section_get_iotlb(cpu, section, vaddr, paddr, xlat,
                                             prot, &address);
     index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
@@ -322,7 +322,7 @@ tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr)
         if (cc->do_unassigned_access) {
             cc->do_unassigned_access(cpu, addr, false, true, 0, 4);
         } else {
-            cpu_abort(env1, "Trying to execute code outside RAM or ROM at 0x"
+            cpu_abort(cpu, "Trying to execute code outside RAM or ROM at 0x"
                       TARGET_FMT_lx "\n", addr);
         }
     }

exec.c

@@ -33,6 +33,7 @@
 #include "hw/xen/xen.h"
 #include "qemu/timer.h"
 #include "qemu/config-file.h"
+#include "qemu/error-report.h"
 #include "exec/memory.h"
 #include "sysemu/dma.h"
 #include "exec/address-spaces.h"
@@ -484,8 +485,8 @@ void cpu_exec_init(CPUArchState *env)
     }
     cpu->cpu_index = cpu_index;
     cpu->numa_node = 0;
-    QTAILQ_INIT(&env->breakpoints);
-    QTAILQ_INIT(&env->watchpoints);
+    QTAILQ_INIT(&cpu->breakpoints);
+    QTAILQ_INIT(&cpu->watchpoints);
 #ifndef CONFIG_USER_ONLY
     cpu->as = &address_space_memory;
     cpu->thread_id = qemu_get_thread_id();
@@ -527,29 +528,29 @@ static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
 #endif /* TARGET_HAS_ICE */
 #if defined(CONFIG_USER_ONLY)
-void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
+void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
 {
 }
-int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
+int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                           int flags, CPUWatchpoint **watchpoint)
 {
     return -ENOSYS;
 }
 #else
 /* Add a watchpoint. */
-int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
+int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                           int flags, CPUWatchpoint **watchpoint)
 {
-    target_ulong len_mask = ~(len - 1);
+    vaddr len_mask = ~(len - 1);
     CPUWatchpoint *wp;
     /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
     if ((len & (len - 1)) || (addr & ~len_mask) ||
             len == 0 || len > TARGET_PAGE_SIZE) {
-        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
-                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
+        error_report("tried to set invalid watchpoint at %"
+                     VADDR_PRIx ", len=%" VADDR_PRIu, addr, len);
         return -EINVAL;
     }
     wp = g_malloc(sizeof(*wp));
@@ -559,12 +560,13 @@ int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len
     wp->flags = flags;
     /* keep all GDB-injected watchpoints in front */
-    if (flags & BP_GDB)
-        QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
-    else
-        QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
+    if (flags & BP_GDB) {
+        QTAILQ_INSERT_HEAD(&cpu->watchpoints, wp, entry);
+    } else {
+        QTAILQ_INSERT_TAIL(&cpu->watchpoints, wp, entry);
+    }
-    tlb_flush_page(env, addr);
+    tlb_flush_page(cpu, addr);
     if (watchpoint)
         *watchpoint = wp;
@@ -572,16 +574,16 @@ int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len
 }
 /* Remove a specific watchpoint. */
-int cpu_watchpoint_remove(CPUArchState *env, target_ulong addr, target_ulong len,
+int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                           int flags)
 {
-    target_ulong len_mask = ~(len - 1);
+    vaddr len_mask = ~(len - 1);
     CPUWatchpoint *wp;
-    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
+    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
         if (addr == wp->vaddr && len_mask == wp->len_mask
                 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
-            cpu_watchpoint_remove_by_ref(env, wp);
+            cpu_watchpoint_remove_by_ref(cpu, wp);
             return 0;
         }
     }
@@ -589,29 +591,30 @@ int cpu_watchpoint_remove(CPUArchState *env, target_ulong addr, target_ulong len
 }
 /* Remove a specific watchpoint by reference. */
-void cpu_watchpoint_remove_by_ref(CPUArchState *env, CPUWatchpoint *watchpoint)
+void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
 {
-    QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
+    QTAILQ_REMOVE(&cpu->watchpoints, watchpoint, entry);
-    tlb_flush_page(env, watchpoint->vaddr);
+    tlb_flush_page(cpu, watchpoint->vaddr);
     g_free(watchpoint);
 }
 /* Remove all matching watchpoints. */
-void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
+void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
 {
     CPUWatchpoint *wp, *next;
-    QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
+    QTAILQ_FOREACH_SAFE(wp, &cpu->watchpoints, entry, next) {
-        if (wp->flags & mask)
-            cpu_watchpoint_remove_by_ref(env, wp);
+        if (wp->flags & mask) {
+            cpu_watchpoint_remove_by_ref(cpu, wp);
+        }
     }
 }
 #endif
 /* Add a breakpoint. */
-int cpu_breakpoint_insert(CPUArchState *env, target_ulong pc, int flags,
+int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
                           CPUBreakpoint **breakpoint)
 {
 #if defined(TARGET_HAS_ICE)
@@ -624,12 +627,12 @@ int cpu_breakpoint_insert(CPUArchState *env, target_ulong pc, int flags,
     /* keep all GDB-injected breakpoints in front */
     if (flags & BP_GDB) {
-        QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
+        QTAILQ_INSERT_HEAD(&cpu->breakpoints, bp, entry);
     } else {
-        QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
+        QTAILQ_INSERT_TAIL(&cpu->breakpoints, bp, entry);
     }
-    breakpoint_invalidate(ENV_GET_CPU(env), pc);
+    breakpoint_invalidate(cpu, pc);
     if (breakpoint) {
         *breakpoint = bp;
@@ -641,14 +644,14 @@ int cpu_breakpoint_insert(CPUArchState *env, target_ulong pc, int flags,
 }
 /* Remove a specific breakpoint. */
-int cpu_breakpoint_remove(CPUArchState *env, target_ulong pc, int flags)
+int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags)
 {
 #if defined(TARGET_HAS_ICE)
     CPUBreakpoint *bp;
-    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
+    QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
         if (bp->pc == pc && bp->flags == flags) {
-            cpu_breakpoint_remove_by_ref(env, bp);
+            cpu_breakpoint_remove_by_ref(cpu, bp);
             return 0;
         }
     }
@@ -659,26 +662,27 @@ int cpu_breakpoint_remove(CPUArchState *env, target_ulong pc, int flags)
 }
 /* Remove a specific breakpoint by reference. */
-void cpu_breakpoint_remove_by_ref(CPUArchState *env, CPUBreakpoint *breakpoint)
+void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint)
 {
 #if defined(TARGET_HAS_ICE)
-    QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
+    QTAILQ_REMOVE(&cpu->breakpoints, breakpoint, entry);
-    breakpoint_invalidate(ENV_GET_CPU(env), breakpoint->pc);
+    breakpoint_invalidate(cpu, breakpoint->pc);
     g_free(breakpoint);
 #endif
 }
 /* Remove all matching breakpoints. */
-void cpu_breakpoint_remove_all(CPUArchState *env, int mask)
+void cpu_breakpoint_remove_all(CPUState *cpu, int mask)
 {
 #if defined(TARGET_HAS_ICE)
     CPUBreakpoint *bp, *next;
-    QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
+    QTAILQ_FOREACH_SAFE(bp, &cpu->breakpoints, entry, next) {
-        if (bp->flags & mask)
-            cpu_breakpoint_remove_by_ref(env, bp);
+        if (bp->flags & mask) {
+            cpu_breakpoint_remove_by_ref(cpu, bp);
+        }
     }
 #endif
 }
@@ -702,9 +706,8 @@ void cpu_single_step(CPUState *cpu, int enabled)
 #endif
 }
-void cpu_abort(CPUArchState *env, const char *fmt, ...)
+void cpu_abort(CPUState *cpu, const char *fmt, ...)
 {
-    CPUState *cpu = ENV_GET_CPU(env);
     va_list ap;
     va_list ap2;
@@ -792,7 +795,7 @@ static void cpu_physical_memory_set_dirty_tracking(bool enable)
     in_migration = enable;
 }
-hwaddr memory_region_section_get_iotlb(CPUArchState *env,
+hwaddr memory_region_section_get_iotlb(CPUState *cpu,
                                        MemoryRegionSection *section,
                                        target_ulong vaddr,
                                        hwaddr paddr, hwaddr xlat,
@@ -818,7 +821,7 @@ hwaddr memory_region_section_get_iotlb(CPUArchState *env,
         /* Make accesses to pages with watchpoints go via the
            watchpoint trap routines. */
-        QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
+        QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
             if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
                 /* Avoid trapping reads of pages with a write breakpoint. */
                 if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
@@ -1553,7 +1556,7 @@ static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
        flushed */
     if (!cpu_physical_memory_is_clean(ram_addr)) {
         CPUArchState *env = current_cpu->env_ptr;
-        tlb_set_dirty(env, env->mem_io_vaddr);
+        tlb_set_dirty(env, current_cpu->mem_io_vaddr);
     }
 }
@@ -1572,34 +1575,35 @@ static const MemoryRegionOps notdirty_mem_ops = {
 /* Generate a debug exception if a watchpoint has been hit. */
 static void check_watchpoint(int offset, int len_mask, int flags)
 {
-    CPUArchState *env = current_cpu->env_ptr;
+    CPUState *cpu = current_cpu;
+    CPUArchState *env = cpu->env_ptr;
     target_ulong pc, cs_base;
     target_ulong vaddr;
     CPUWatchpoint *wp;
     int cpu_flags;
-    if (env->watchpoint_hit) {
+    if (cpu->watchpoint_hit) {
         /* We re-entered the check after replacing the TB. Now raise
          * the debug interrupt so that is will trigger after the
          * current instruction. */
-        cpu_interrupt(ENV_GET_CPU(env), CPU_INTERRUPT_DEBUG);
+        cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG);
         return;
     }
-    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
-    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
+    vaddr = (cpu->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
+    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
         if ((vaddr == (wp->vaddr & len_mask) ||
              (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
             wp->flags |= BP_WATCHPOINT_HIT;
-            if (!env->watchpoint_hit) {
-                env->watchpoint_hit = wp;
-                tb_check_watchpoint(env);
+            if (!cpu->watchpoint_hit) {
+                cpu->watchpoint_hit = wp;
+                tb_check_watchpoint(cpu);
                 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
-                    env->exception_index = EXCP_DEBUG;
-                    cpu_loop_exit(env);
+                    cpu->exception_index = EXCP_DEBUG;
+                    cpu_loop_exit(cpu);
                 } else {
                     cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
-                    tb_gen_code(env, pc, cs_base, cpu_flags, 1);
-                    cpu_resume_from_signal(env, NULL);
+                    tb_gen_code(cpu, pc, cs_base, cpu_flags, 1);
+                    cpu_resume_from_signal(cpu, NULL);
                 }
             }
         } else {
@@ -1830,14 +1834,12 @@ static void tcg_commit(MemoryListener *listener)
        reset the modified entries */
     /* XXX: slow ! */
     CPU_FOREACH(cpu) {
-        CPUArchState *env = cpu->env_ptr;
         /* FIXME: Disentangle the cpu.h circular files deps so we can
            directly get the right CPU from listener. */
         if (cpu->tcg_as_listener != listener) {
             continue;
         }
-        tlb_flush(env, 1);
+        tlb_flush(cpu, 1);
     }
 }


@@ -635,7 +635,6 @@ static const int xlat_gdb_type[] = {
 static int gdb_breakpoint_insert(target_ulong addr, target_ulong len, int type)
 {
     CPUState *cpu;
-    CPUArchState *env;
     int err = 0;
     if (kvm_enabled()) {
@@ -646,10 +645,10 @@ static int gdb_breakpoint_insert(target_ulong addr, target_ulong len, int type)
     case GDB_BREAKPOINT_SW:
     case GDB_BREAKPOINT_HW:
         CPU_FOREACH(cpu) {
-            env = cpu->env_ptr;
-            err = cpu_breakpoint_insert(env, addr, BP_GDB, NULL);
-            if (err)
+            err = cpu_breakpoint_insert(cpu, addr, BP_GDB, NULL);
+            if (err) {
                 break;
+            }
         }
         return err;
 #ifndef CONFIG_USER_ONLY
@@ -657,8 +656,7 @@ static int gdb_breakpoint_insert(target_ulong addr, target_ulong len, int type)
     case GDB_WATCHPOINT_READ:
     case GDB_WATCHPOINT_ACCESS:
         CPU_FOREACH(cpu) {
-            env = cpu->env_ptr;
-            err = cpu_watchpoint_insert(env, addr, len, xlat_gdb_type[type],
+            err = cpu_watchpoint_insert(cpu, addr, len, xlat_gdb_type[type],
                                         NULL);
             if (err)
                 break;
@@ -673,7 +671,6 @@ static int gdb_breakpoint_insert(target_ulong addr, target_ulong len, int type)
 static int gdb_breakpoint_remove(target_ulong addr, target_ulong len, int type)
 {
     CPUState *cpu;
-    CPUArchState *env;
     int err = 0;
     if (kvm_enabled()) {
@@ -684,10 +681,10 @@ static int gdb_breakpoint_remove(target_ulong addr, target_ulong len, int type)
     case GDB_BREAKPOINT_SW:
     case GDB_BREAKPOINT_HW:
         CPU_FOREACH(cpu) {
-            env = cpu->env_ptr;
-            err = cpu_breakpoint_remove(env, addr, BP_GDB);
-            if (err)
+            err = cpu_breakpoint_remove(cpu, addr, BP_GDB);
+            if (err) {
                 break;
+            }
         }
         return err;
 #ifndef CONFIG_USER_ONLY
@@ -695,8 +692,7 @@ static int gdb_breakpoint_remove(target_ulong addr, target_ulong len, int type)
     case GDB_WATCHPOINT_READ:
     case GDB_WATCHPOINT_ACCESS:
         CPU_FOREACH(cpu) {
-            env = cpu->env_ptr;
-            err = cpu_watchpoint_remove(env, addr, len, xlat_gdb_type[type]);
+            err = cpu_watchpoint_remove(cpu, addr, len, xlat_gdb_type[type]);
             if (err)
                 break;
         }
@@ -710,7 +706,6 @@ static int gdb_breakpoint_remove(target_ulong addr, target_ulong len, int type)
 static void gdb_breakpoint_remove_all(void)
 {
     CPUState *cpu;
-    CPUArchState *env;
     if (kvm_enabled()) {
         kvm_remove_all_breakpoints(gdbserver_state->c_cpu);
@@ -718,10 +713,9 @@ static void gdb_breakpoint_remove_all(void)
     }
     CPU_FOREACH(cpu) {
-        env = cpu->env_ptr;
-        cpu_breakpoint_remove_all(env, BP_GDB);
+        cpu_breakpoint_remove_all(cpu, BP_GDB);
 #ifndef CONFIG_USER_ONLY
-        cpu_watchpoint_remove_all(env, BP_GDB);
+        cpu_watchpoint_remove_all(cpu, BP_GDB);
 #endif
     }
 }
@@ -1086,8 +1080,7 @@ static int gdb_handle_packet(GDBState *s, const char *line_buf)
         }
 #ifdef CONFIG_USER_ONLY
         else if (strncmp(p, "Offsets", 7) == 0) {
-            CPUArchState *env = s->c_cpu->env_ptr;
-            TaskState *ts = env->opaque;
+            TaskState *ts = s->c_cpu->opaque;
             snprintf(buf, sizeof(buf),
                      "Text=" TARGET_ABI_FMT_lx ";Data=" TARGET_ABI_FMT_lx
@@ -1205,8 +1198,8 @@ static void gdb_vm_state_change(void *opaque, int running, RunState state)
     }
     switch (state) {
     case RUN_STATE_DEBUG:
-        if (env->watchpoint_hit) {
-            switch (env->watchpoint_hit->flags & BP_MEM_ACCESS) {
+        if (cpu->watchpoint_hit) {
+            switch (cpu->watchpoint_hit->flags & BP_MEM_ACCESS) {
             case BP_MEM_READ:
                 type = "r";
                 break;
@@ -1220,8 +1213,8 @@ static void gdb_vm_state_change(void *opaque, int running, RunState state)
             snprintf(buf, sizeof(buf),
                      "T%02xthread:%02x;%swatch:" TARGET_FMT_lx ";",
                      GDB_SIGNAL_TRAP, cpu_index(cpu), type,
-                     env->watchpoint_hit->vaddr);
-            env->watchpoint_hit = NULL;
+                     (target_ulong)cpu->watchpoint_hit->vaddr);
+            cpu->watchpoint_hit = NULL;
             goto send_packet;
         }
         tb_flush(env);
@@ -1594,13 +1587,16 @@ int gdbserver_start(int port)
 /* Disable gdb stub for child processes. */
 void gdbserver_fork(CPUArchState *env)
 {
+    CPUState *cpu = ENV_GET_CPU(env);
     GDBState *s = gdbserver_state;
-    if (gdbserver_fd < 0 || s->fd < 0)
-        return;
+    if (gdbserver_fd < 0 || s->fd < 0) {
+        return;
+    }
     close(s->fd);
     s->fd = -1;
-    cpu_breakpoint_remove_all(env, BP_GDB);
-    cpu_watchpoint_remove_all(env, BP_GDB);
+    cpu_breakpoint_remove_all(cpu, BP_GDB);
+    cpu_watchpoint_remove_all(cpu, BP_GDB);
 }
 #else
 static int gdb_chr_can_receive(void *opaque)


@@ -406,7 +406,7 @@ static void patch_instruction(VAPICROMState *s, X86CPU *cpu, target_ulong ip)
     }
     if (!kvm_enabled()) {
-        cpu_restore_state(env, env->mem_io_pc);
+        cpu_restore_state(cs, cs->mem_io_pc);
         cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                              &current_flags);
     }
@@ -448,8 +448,8 @@ static void patch_instruction(VAPICROMState *s, X86CPU *cpu, target_ulong ip)
     if (!kvm_enabled()) {
         cs->current_tb = NULL;
-        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
-        cpu_resume_from_signal(env, NULL);
+        tb_gen_code(cs, current_pc, current_cs_base, current_flags, 1);
+        cpu_resume_from_signal(cs, NULL);
     }
 }


@@ -267,6 +267,7 @@ static void pc_compat_1_7(QEMUMachineInitArgs *args)
     smbios_type1_defaults = false;
     gigabyte_align = false;
     option_rom_has_mr = true;
+    x86_cpu_compat_disable_kvm_features(FEAT_1_ECX, CPUID_EXT_X2APIC);
 }
 static void pc_compat_1_6(QEMUMachineInitArgs *args)
@@ -299,7 +300,7 @@ static void pc_compat_1_3(QEMUMachineInitArgs *args)
 static void pc_compat_1_2(QEMUMachineInitArgs *args)
 {
     pc_compat_1_3(args);
-    disable_kvm_pv_eoi();
+    x86_cpu_compat_disable_kvm_features(FEAT_KVM, KVM_FEATURE_PV_EOI);
 }
 static void pc_init_pci_1_7(QEMUMachineInitArgs *args)
@@ -345,7 +346,7 @@ static void pc_init_pci_no_kvmclock(QEMUMachineInitArgs *args)
     has_pci_info = false;
     has_acpi_build = false;
     smbios_type1_defaults = false;
-    disable_kvm_pv_eoi();
+    x86_cpu_compat_disable_kvm_features(FEAT_KVM, KVM_FEATURE_PV_EOI);
     enable_compat_apic_id_mode();
     pc_init1(args, 1, 0);
 }
@@ -358,7 +359,7 @@ static void pc_init_isa(QEMUMachineInitArgs *args)
     if (!args->cpu_model) {
         args->cpu_model = "486";
     }
-    disable_kvm_pv_eoi();
+    x86_cpu_compat_disable_kvm_features(FEAT_KVM, KVM_FEATURE_PV_EOI);
     enable_compat_apic_id_mode();
     pc_init1(args, 0, 1);
 }


@@ -245,6 +245,7 @@ static void pc_compat_1_7(QEMUMachineInitArgs *args)
     smbios_type1_defaults = false;
     gigabyte_align = false;
     option_rom_has_mr = true;
+    x86_cpu_compat_disable_kvm_features(FEAT_1_ECX, CPUID_EXT_X2APIC);
 }
 static void pc_compat_1_6(QEMUMachineInitArgs *args)


@@ -472,14 +472,13 @@ static void ppce500_cpu_reset_sec(void *opaque)
 {
     PowerPCCPU *cpu = opaque;
     CPUState *cs = CPU(cpu);
-    CPUPPCState *env = &cpu->env;
     cpu_reset(cs);
     /* Secondary CPU starts in halted state for now. Needs to change when
        implementing non-kernel boot. */
     cs->halted = 1;
-    env->exception_index = EXCP_HLT;
+    cs->exception_index = EXCP_HLT;
 }
 static void ppce500_cpu_reset(void *opaque)


@@ -44,7 +44,7 @@
 ram_addr_t ppc405_set_bootinfo (CPUPPCState *env, ppc4xx_bd_info_t *bd,
                                 uint32_t flags)
 {
-    CPUState *cs = ENV_GET_CPU(env);
+    CPUState *cs = CPU(ppc_env_get_cpu(env));
     ram_addr_t bdloc;
     int i, n;


@@ -117,7 +117,7 @@ static void spin_kick(void *data)
     mmubooke_create_initial_mapping(env, 0, map_start, map_size);
     cpu->halted = 0;
-    env->exception_index = -1;
+    cpu->exception_index = -1;
     cpu->stopped = false;
     qemu_cpu_kick(cpu);
 }


@@ -356,7 +356,7 @@ static target_ulong h_set_dabr(PowerPCCPU *cpu, sPAPREnvironment *spapr,
 static target_ulong register_vpa(CPUPPCState *env, target_ulong vpa)
 {
-    CPUState *cs = ENV_GET_CPU(env);
+    CPUState *cs = CPU(ppc_env_get_cpu(env));
     uint16_t size;
     uint8_t tmp;
@@ -406,7 +406,7 @@ static target_ulong deregister_vpa(CPUPPCState *env, target_ulong vpa)
 static target_ulong register_slb_shadow(CPUPPCState *env, target_ulong addr)
 {
-    CPUState *cs = ENV_GET_CPU(env);
+    CPUState *cs = CPU(ppc_env_get_cpu(env));
     uint32_t size;
     if (addr == 0) {
@@ -442,7 +442,7 @@ static target_ulong deregister_slb_shadow(CPUPPCState *env, target_ulong addr)
 static target_ulong register_dtl(CPUPPCState *env, target_ulong addr)
 {
-    CPUState *cs = ENV_GET_CPU(env);
+    CPUState *cs = CPU(ppc_env_get_cpu(env));
     uint32_t size;
     if (addr == 0) {
@@ -529,7 +529,7 @@ static target_ulong h_cede(PowerPCCPU *cpu, sPAPREnvironment *spapr,
     hreg_compute_hflags(env);
     if (!cpu_has_work(cs)) {
         cs->halted = 1;
-        env->exception_index = EXCP_HLT;
+        cs->exception_index = EXCP_HLT;
         cs->exit_request = 1;
     }
     return H_SUCCESS;


@@ -135,25 +135,23 @@ static unsigned s390_running_cpus;
 void s390_add_running_cpu(S390CPU *cpu)
 {
     CPUState *cs = CPU(cpu);
-    CPUS390XState *env = &cpu->env;
     if (cs->halted) {
         s390_running_cpus++;
         cs->halted = 0;
-        env->exception_index = -1;
+        cs->exception_index = -1;
     }
 }
 unsigned s390_del_running_cpu(S390CPU *cpu)
 {
     CPUState *cs = CPU(cpu);
-    CPUS390XState *env = &cpu->env;
     if (cs->halted == 0) {
         assert(s390_running_cpus >= 1);
         s390_running_cpus--;
         cs->halted = 1;
-        env->exception_index = EXCP_HLT;
+        cs->exception_index = EXCP_HLT;
     }
     return s390_running_cpus;
 }
@@ -196,7 +194,7 @@ void s390_init_cpus(const char *cpu_model, uint8_t *storage_keys)
         ipi_states[i] = cpu;
         cs->halted = 1;
-        cpu->env.exception_index = EXCP_HLT;
+        cs->exception_index = EXCP_HLT;
         cpu->env.storage_keys = storage_keys;
     }
 }


@@ -416,7 +416,7 @@ static void sh7750_mem_writel(void *opaque, hwaddr addr,
     case SH7750_PTEH_A7:
         /* If asid changes, clear all registered tlb entries. */
         if ((s->cpu->env.pteh & 0xff) != (mem_value & 0xff)) {
-            tlb_flush(&s->cpu->env, 1);
+            tlb_flush(CPU(s->cpu), 1);
         }
         s->cpu->env.pteh = mem_value;
         return;


@@ -360,9 +360,6 @@ int page_check_range(target_ulong start, target_ulong len, int flags);
 CPUArchState *cpu_copy(CPUArchState *env);
-void QEMU_NORETURN cpu_abort(CPUArchState *env, const char *fmt, ...)
-    GCC_FMT_ATTR(2, 3);
 /* Flags for use in ENV->INTERRUPT_PENDING.
    The numbers assigned here are non-sequential in order to preserve
@@ -413,27 +410,6 @@ void QEMU_NORETURN cpu_abort(CPUArchState *env, const char *fmt, ...)
                           | CPU_INTERRUPT_TGT_EXT_3 \
                           | CPU_INTERRUPT_TGT_EXT_4)
-/* Breakpoint/watchpoint flags */
-#define BP_MEM_READ 0x01
-#define BP_MEM_WRITE 0x02
-#define BP_MEM_ACCESS (BP_MEM_READ | BP_MEM_WRITE)
-#define BP_STOP_BEFORE_ACCESS 0x04
-#define BP_WATCHPOINT_HIT 0x08
-#define BP_GDB 0x10
-#define BP_CPU 0x20
-int cpu_breakpoint_insert(CPUArchState *env, target_ulong pc, int flags,
-                          CPUBreakpoint **breakpoint);
-int cpu_breakpoint_remove(CPUArchState *env, target_ulong pc, int flags);
-void cpu_breakpoint_remove_by_ref(CPUArchState *env, CPUBreakpoint *breakpoint);
-void cpu_breakpoint_remove_all(CPUArchState *env, int mask);
-int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
-                          int flags, CPUWatchpoint **watchpoint);
-int cpu_watchpoint_remove(CPUArchState *env, target_ulong addr,
-                          target_ulong len, int flags);
-void cpu_watchpoint_remove_by_ref(CPUArchState *env, CPUWatchpoint *watchpoint);
-void cpu_watchpoint_remove_all(CPUArchState *env, int mask);
 #if !defined(CONFIG_USER_ONLY)
 /* memory API */


@@ -24,7 +24,6 @@
 #endif
 #include "config.h"
-#include <setjmp.h>
 #include <inttypes.h>
 #include "qemu/osdep.h"
 #include "qemu/queue.h"
@@ -61,9 +60,6 @@ typedef uint64_t target_ulong;
 #define EXCP_HALTED 0x10003 /* cpu is halted (waiting for external event) */
 #define EXCP_YIELD 0x10004 /* cpu wants to yield timeslice to another */
-#define TB_JMP_CACHE_BITS 12
-#define TB_JMP_CACHE_SIZE (1 << TB_JMP_CACHE_BITS)
 /* Only the bottom TB_JMP_PAGE_BITS of the jump cache hash bits vary for
    addresses on the same page. The top bits are the same. This allows
    TLB invalidation to quickly clear a subset of the hash table. */
@@ -118,66 +114,9 @@ QEMU_BUILD_BUG_ON(sizeof(CPUTLBEntry) != (1 << CPU_TLB_ENTRY_BITS));
 #endif
-#ifdef HOST_WORDS_BIGENDIAN
-typedef struct icount_decr_u16 {
-    uint16_t high;
-    uint16_t low;
-} icount_decr_u16;
-#else
-typedef struct icount_decr_u16 {
-    uint16_t low;
-    uint16_t high;
-} icount_decr_u16;
-#endif
-typedef struct CPUBreakpoint {
-    target_ulong pc;
-    int flags; /* BP_* */
-    QTAILQ_ENTRY(CPUBreakpoint) entry;
-} CPUBreakpoint;
-typedef struct CPUWatchpoint {
-    target_ulong vaddr;
-    target_ulong len_mask;
-    int flags; /* BP_* */
-    QTAILQ_ENTRY(CPUWatchpoint) entry;
-} CPUWatchpoint;
 #define CPU_TEMP_BUF_NLONGS 128
 #define CPU_COMMON \
     /* soft mmu support */ \
-    /* in order to avoid passing too many arguments to the MMIO \
-       helpers, we store some rarely used information in the CPU \
-       context) */ \
-    uintptr_t mem_io_pc; /* host pc at which the memory was \
-                            accessed */ \
-    target_ulong mem_io_vaddr; /* target virtual addr at which the \
-                                  memory was accessed */ \
     CPU_COMMON_TLB \
-    struct TranslationBlock *tb_jmp_cache[TB_JMP_CACHE_SIZE]; \
-    \
-    int64_t icount_extra; /* Instructions until next timer event. */ \
-    /* Number of cycles left, with interrupt flag in high bit. \
-       This allows a single read-compare-cbranch-write sequence to test \
-       for both decrementer underflow and exceptions. */ \
-    union { \
-        uint32_t u32; \
-        icount_decr_u16 u16; \
-    } icount_decr; \
-    uint32_t can_do_io; /* nonzero if memory mapped IO is safe. */ \
-    \
-    /* from this point: preserved by CPU reset */ \
-    /* ice debug support */ \
-    QTAILQ_HEAD(breakpoints_head, CPUBreakpoint) breakpoints; \
-    \
-    QTAILQ_HEAD(watchpoints_head, CPUWatchpoint) watchpoints; \
-    CPUWatchpoint *watchpoint_hit; \
-    \
-    /* Core interrupt code */ \
-    sigjmp_buf jmp_env; \
-    int exception_index; \
-    \
-    /* user data */ \
-    void *opaque; \
 #endif


@@ -22,7 +22,7 @@
 #if !defined(CONFIG_USER_ONLY)
 /* cputlb.c */
 void tlb_protect_code(ram_addr_t ram_addr);
-void tlb_unprotect_code_phys(CPUArchState *env, ram_addr_t ram_addr,
+void tlb_unprotect_code_phys(CPUState *cpu, ram_addr_t ram_addr,
                              target_ulong vaddr);
 void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry, uintptr_t start,
                            uintptr_t length);
@@ -31,12 +31,12 @@ void tlb_set_dirty(CPUArchState *env, target_ulong vaddr);
 extern int tlb_flush_count;
 /* exec.c */
-void tb_flush_jmp_cache(CPUArchState *env, target_ulong addr);
+void tb_flush_jmp_cache(CPUState *cpu, target_ulong addr);
 MemoryRegionSection *
 address_space_translate_for_iotlb(AddressSpace *as, hwaddr addr, hwaddr *xlat,
                                   hwaddr *plen);
-hwaddr memory_region_section_get_iotlb(CPUArchState *env,
+hwaddr memory_region_section_get_iotlb(CPUState *cpu,
                                        MemoryRegionSection *section,
                                        target_ulong vaddr,
                                        hwaddr paddr, hwaddr xlat,


@ -80,16 +80,16 @@ void restore_state_to_opc(CPUArchState *env, struct TranslationBlock *tb,
void cpu_gen_init(void); void cpu_gen_init(void);
int cpu_gen_code(CPUArchState *env, struct TranslationBlock *tb, int cpu_gen_code(CPUArchState *env, struct TranslationBlock *tb,
int *gen_code_size_ptr); int *gen_code_size_ptr);
bool cpu_restore_state(CPUArchState *env, uintptr_t searched_pc); bool cpu_restore_state(CPUState *cpu, uintptr_t searched_pc);
void page_size_init(void); void page_size_init(void);
void QEMU_NORETURN cpu_resume_from_signal(CPUArchState *env1, void *puc); void QEMU_NORETURN cpu_resume_from_signal(CPUState *cpu, void *puc);
void QEMU_NORETURN cpu_io_recompile(CPUArchState *env, uintptr_t retaddr); void QEMU_NORETURN cpu_io_recompile(CPUState *cpu, uintptr_t retaddr);
TranslationBlock *tb_gen_code(CPUArchState *env, TranslationBlock *tb_gen_code(CPUState *cpu,
target_ulong pc, target_ulong cs_base, int flags, target_ulong pc, target_ulong cs_base, int flags,
int cflags); int cflags);
void cpu_exec_init(CPUArchState *env); void cpu_exec_init(CPUArchState *env);
void QEMU_NORETURN cpu_loop_exit(CPUArchState *env1); void QEMU_NORETURN cpu_loop_exit(CPUState *cpu);
int page_unprotect(target_ulong address, uintptr_t pc, void *puc); int page_unprotect(target_ulong address, uintptr_t pc, void *puc);
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end, void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
int is_cpu_write_access); int is_cpu_write_access);
@ -98,18 +98,18 @@ void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end,
#if !defined(CONFIG_USER_ONLY) #if !defined(CONFIG_USER_ONLY)
void tcg_cpu_address_space_init(CPUState *cpu, AddressSpace *as); void tcg_cpu_address_space_init(CPUState *cpu, AddressSpace *as);
/* cputlb.c */ /* cputlb.c */
void tlb_flush_page(CPUArchState *env, target_ulong addr); void tlb_flush_page(CPUState *cpu, target_ulong addr);
void tlb_flush(CPUArchState *env, int flush_global); void tlb_flush(CPUState *cpu, int flush_global);
void tlb_set_page(CPUArchState *env, target_ulong vaddr, void tlb_set_page(CPUState *cpu, target_ulong vaddr,
hwaddr paddr, int prot, hwaddr paddr, int prot,
int mmu_idx, target_ulong size); int mmu_idx, target_ulong size);
void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr); void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr);
#else #else
static inline void tlb_flush_page(CPUArchState *env, target_ulong addr) static inline void tlb_flush_page(CPUState *cpu, target_ulong addr)
{ {
} }
static inline void tlb_flush(CPUArchState *env, int flush_global) static inline void tlb_flush(CPUState *cpu, int flush_global)
{ {
} }
#endif #endif
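As a minimal sketch of what these signature changes mean for callers (FooCPU, CPUFooState and FOO_CPU() are placeholder names, modelled on the alpha initfn change later in this series):

    static void foo_cpu_initfn(Object *obj)
    {
        CPUState *cs = CPU(obj);
        FooCPU *cpu = FOO_CPU(obj);
        CPUFooState *env = &cpu->env;

        cs->env_ptr = env;
        cpu_exec_init(env);     /* still takes the per-target env */
        tlb_flush(cs, 1);       /* now takes the CPUState, was tlb_flush(env, 1) */
    }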
@ -332,7 +332,7 @@ bool io_mem_read(struct MemoryRegion *mr, hwaddr addr,
bool io_mem_write(struct MemoryRegion *mr, hwaddr addr, bool io_mem_write(struct MemoryRegion *mr, hwaddr addr,
uint64_t value, unsigned size); uint64_t value, unsigned size);
void tlb_fill(CPUArchState *env1, target_ulong addr, int is_write, int mmu_idx, void tlb_fill(CPUState *cpu, target_ulong addr, int is_write, int mmu_idx,
uintptr_t retaddr); uintptr_t retaddr);
uint8_t helper_ldb_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx); uint8_t helper_ldb_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx);
@ -380,20 +380,25 @@ extern int singlestep;
/* cpu-exec.c */ /* cpu-exec.c */
extern volatile sig_atomic_t exit_request; extern volatile sig_atomic_t exit_request;
/* Deterministic execution requires that IO only be performed on the last /**
instruction of a TB so that interrupts take effect immediately. */ * cpu_can_do_io:
static inline int can_do_io(CPUArchState *env) * @cpu: The CPU for which to check IO.
*
* Deterministic execution requires that IO only be performed on the last
* instruction of a TB so that interrupts take effect immediately.
*
* Returns: %true if memory-mapped IO is safe, %false otherwise.
*/
static inline bool cpu_can_do_io(CPUState *cpu)
{ {
CPUState *cpu = ENV_GET_CPU(env);
if (!use_icount) { if (!use_icount) {
return 1; return true;
} }
/* If not executing code then assume we are ok. */ /* If not executing code then assume we are ok. */
if (cpu->current_tb == NULL) { if (cpu->current_tb == NULL) {
return 1; return true;
} }
return env->can_do_io != 0; return cpu->can_do_io != 0;
} }
#endif #endif
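A short sketch of the guard that memory-mapped I/O paths apply with the renamed helper; example_guard_mmio() is a hypothetical wrapper, the real uses are in the softmmu load/store template hunks below:

    static void example_guard_mmio(CPUState *cpu, uintptr_t retaddr)
    {
        if (!cpu_can_do_io(cpu)) {
            /* Not the last instruction of the TB: retranslate so the access
             * becomes the final instruction, then restart it. */
            cpu_io_recompile(cpu, retaddr);     /* noreturn */
        }
    }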


@ -26,13 +26,15 @@ static inline void gen_tb_start(void)
icount_label = gen_new_label(); icount_label = gen_new_label();
count = tcg_temp_local_new_i32(); count = tcg_temp_local_new_i32();
tcg_gen_ld_i32(count, cpu_env, offsetof(CPUArchState, icount_decr.u32)); tcg_gen_ld_i32(count, cpu_env,
-ENV_OFFSET + offsetof(CPUState, icount_decr.u32));
/* This is a horrid hack to allow fixing up the value later. */ /* This is a horrid hack to allow fixing up the value later. */
icount_arg = tcg_ctx.gen_opparam_ptr + 1; icount_arg = tcg_ctx.gen_opparam_ptr + 1;
tcg_gen_subi_i32(count, count, 0xdeadbeef); tcg_gen_subi_i32(count, count, 0xdeadbeef);
tcg_gen_brcondi_i32(TCG_COND_LT, count, 0, icount_label); tcg_gen_brcondi_i32(TCG_COND_LT, count, 0, icount_label);
tcg_gen_st16_i32(count, cpu_env, offsetof(CPUArchState, icount_decr.u16.low)); tcg_gen_st16_i32(count, cpu_env,
-ENV_OFFSET + offsetof(CPUState, icount_decr.u16.low));
tcg_temp_free_i32(count); tcg_temp_free_i32(count);
} }
@ -51,14 +53,14 @@ static void gen_tb_end(TranslationBlock *tb, int num_insns)
static inline void gen_io_start(void) static inline void gen_io_start(void)
{ {
TCGv_i32 tmp = tcg_const_i32(1); TCGv_i32 tmp = tcg_const_i32(1);
tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUArchState, can_do_io)); tcg_gen_st_i32(tmp, cpu_env, -ENV_OFFSET + offsetof(CPUState, can_do_io));
tcg_temp_free_i32(tmp); tcg_temp_free_i32(tmp);
} }
static inline void gen_io_end(void) static inline void gen_io_end(void)
{ {
TCGv_i32 tmp = tcg_const_i32(0); TCGv_i32 tmp = tcg_const_i32(0);
tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUArchState, can_do_io)); tcg_gen_st_i32(tmp, cpu_env, -ENV_OFFSET + offsetof(CPUState, can_do_io));
tcg_temp_free_i32(tmp); tcg_temp_free_i32(tmp);
} }
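The -ENV_OFFSET arithmetic above assumes the usual QOM CPU layout, in which the per-target env sits inside the CPU object right after CPUState and ENV_OFFSET is the offset of that env field; a sketch with placeholder names:

    typedef struct FooCPU {
        CPUState parent_obj;    /* offset 0 */
        /* ... */
        CPUFooState env;        /* offset ENV_OFFSET == offsetof(FooCPU, env) */
    } FooCPU;

    /* cpu_env holds &foo_cpu->env, so:
     *   cpu_env - ENV_OFFSET
     *       points at the start of the FooCPU object, whose first field is
     *       the CPUState;
     *   cpu_env - ENV_OFFSET + offsetof(CPUState, can_do_io)
     *       therefore addresses CPUState::can_do_io, the slot that
     *       gen_io_start()/gen_io_end() store to. */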


@ -126,12 +126,12 @@ static inline DATA_TYPE glue(io_read, SUFFIX)(CPUArchState *env,
MemoryRegion *mr = iotlb_to_region(cpu->as, physaddr); MemoryRegion *mr = iotlb_to_region(cpu->as, physaddr);
physaddr = (physaddr & TARGET_PAGE_MASK) + addr; physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
env->mem_io_pc = retaddr; cpu->mem_io_pc = retaddr;
if (mr != &io_mem_rom && mr != &io_mem_notdirty && !can_do_io(env)) { if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu_can_do_io(cpu)) {
cpu_io_recompile(env, retaddr); cpu_io_recompile(cpu, retaddr);
} }
env->mem_io_vaddr = addr; cpu->mem_io_vaddr = addr;
io_mem_read(mr, physaddr, &val, 1 << SHIFT); io_mem_read(mr, physaddr, &val, 1 << SHIFT);
return val; return val;
} }
@ -158,7 +158,7 @@ WORD_TYPE helper_le_ld_name(CPUArchState *env, target_ulong addr, int mmu_idx,
do_unaligned_access(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr); do_unaligned_access(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
} }
#endif #endif
tlb_fill(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr); tlb_fill(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ; tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
} }
@ -240,7 +240,7 @@ WORD_TYPE helper_be_ld_name(CPUArchState *env, target_ulong addr, int mmu_idx,
do_unaligned_access(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr); do_unaligned_access(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
} }
#endif #endif
tlb_fill(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr); tlb_fill(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ; tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
} }
@ -333,12 +333,12 @@ static inline void glue(io_write, SUFFIX)(CPUArchState *env,
MemoryRegion *mr = iotlb_to_region(cpu->as, physaddr); MemoryRegion *mr = iotlb_to_region(cpu->as, physaddr);
physaddr = (physaddr & TARGET_PAGE_MASK) + addr; physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
if (mr != &io_mem_rom && mr != &io_mem_notdirty && !can_do_io(env)) { if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu_can_do_io(cpu)) {
cpu_io_recompile(env, retaddr); cpu_io_recompile(cpu, retaddr);
} }
env->mem_io_vaddr = addr; cpu->mem_io_vaddr = addr;
env->mem_io_pc = retaddr; cpu->mem_io_pc = retaddr;
io_mem_write(mr, physaddr, val, 1 << SHIFT); io_mem_write(mr, physaddr, val, 1 << SHIFT);
} }
@ -360,7 +360,7 @@ void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
do_unaligned_access(env, addr, 1, mmu_idx, retaddr); do_unaligned_access(env, addr, 1, mmu_idx, retaddr);
} }
#endif #endif
tlb_fill(env, addr, 1, mmu_idx, retaddr); tlb_fill(ENV_GET_CPU(env), addr, 1, mmu_idx, retaddr);
tlb_addr = env->tlb_table[mmu_idx][index].addr_write; tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
} }
@ -436,7 +436,7 @@ void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
do_unaligned_access(env, addr, 1, mmu_idx, retaddr); do_unaligned_access(env, addr, 1, mmu_idx, retaddr);
} }
#endif #endif
tlb_fill(env, addr, 1, mmu_idx, retaddr); tlb_fill(ENV_GET_CPU(env), addr, 1, mmu_idx, retaddr);
tlb_addr = env->tlb_table[mmu_idx][index].addr_write; tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
} }


@ -21,6 +21,7 @@
#define QEMU_CPU_H #define QEMU_CPU_H
#include <signal.h> #include <signal.h>
#include <setjmp.h>
#include "hw/qdev-core.h" #include "hw/qdev-core.h"
#include "exec/hwaddr.h" #include "exec/hwaddr.h"
#include "qemu/queue.h" #include "qemu/queue.h"
@ -68,8 +69,10 @@ struct TranslationBlock;
* CPUClass: * CPUClass:
* @class_by_name: Callback to map -cpu command line model name to an * @class_by_name: Callback to map -cpu command line model name to an
* instantiatable CPU type. * instantiatable CPU type.
* @parse_features: Callback to parse command line arguments.
* @reset: Callback to reset the #CPUState to its initial state. * @reset: Callback to reset the #CPUState to its initial state.
* @reset_dump_flags: #CPUDumpFlags to use for reset logging. * @reset_dump_flags: #CPUDumpFlags to use for reset logging.
* @has_work: Callback for checking if there is work to do.
* @do_interrupt: Callback for interrupt handling. * @do_interrupt: Callback for interrupt handling.
* @do_unassigned_access: Callback for unassigned access handling. * @do_unassigned_access: Callback for unassigned access handling.
* @memory_rw_debug: Callback for GDB memory access. * @memory_rw_debug: Callback for GDB memory access.
@ -81,6 +84,7 @@ struct TranslationBlock;
* @set_pc: Callback for setting the Program Counter register. * @set_pc: Callback for setting the Program Counter register.
* @synchronize_from_tb: Callback for synchronizing state from a TCG * @synchronize_from_tb: Callback for synchronizing state from a TCG
* #TranslationBlock. * #TranslationBlock.
* @handle_mmu_fault: Callback for handling an MMU fault.
* @get_phys_page_debug: Callback for obtaining a physical address. * @get_phys_page_debug: Callback for obtaining a physical address.
* @gdb_read_register: Callback for letting GDB read a register. * @gdb_read_register: Callback for letting GDB read a register.
* @gdb_write_register: Callback for letting GDB write a register. * @gdb_write_register: Callback for letting GDB write a register.
@ -96,9 +100,11 @@ typedef struct CPUClass {
/*< public >*/ /*< public >*/
ObjectClass *(*class_by_name)(const char *cpu_model); ObjectClass *(*class_by_name)(const char *cpu_model);
void (*parse_features)(CPUState *cpu, char *str, Error **errp);
void (*reset)(CPUState *cpu); void (*reset)(CPUState *cpu);
int reset_dump_flags; int reset_dump_flags;
bool (*has_work)(CPUState *cpu);
void (*do_interrupt)(CPUState *cpu); void (*do_interrupt)(CPUState *cpu);
CPUUnassignedAccess do_unassigned_access; CPUUnassignedAccess do_unassigned_access;
int (*memory_rw_debug)(CPUState *cpu, vaddr addr, int (*memory_rw_debug)(CPUState *cpu, vaddr addr,
@ -113,6 +119,8 @@ typedef struct CPUClass {
Error **errp); Error **errp);
void (*set_pc)(CPUState *cpu, vaddr value); void (*set_pc)(CPUState *cpu, vaddr value);
void (*synchronize_from_tb)(CPUState *cpu, struct TranslationBlock *tb); void (*synchronize_from_tb)(CPUState *cpu, struct TranslationBlock *tb);
int (*handle_mmu_fault)(CPUState *cpu, vaddr address, int rw,
int mmu_index);
hwaddr (*get_phys_page_debug)(CPUState *cpu, vaddr addr); hwaddr (*get_phys_page_debug)(CPUState *cpu, vaddr addr);
int (*gdb_read_register)(CPUState *cpu, uint8_t *buf, int reg); int (*gdb_read_register)(CPUState *cpu, uint8_t *buf, int reg);
int (*gdb_write_register)(CPUState *cpu, uint8_t *buf, int reg); int (*gdb_write_register)(CPUState *cpu, uint8_t *buf, int reg);
@ -131,9 +139,37 @@ typedef struct CPUClass {
const char *gdb_core_xml_file; const char *gdb_core_xml_file;
} CPUClass; } CPUClass;
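A sketch of how a target wires the new hooks in its class_init (the FooCPU names and FOO_EXCP_MMFAULT are placeholders; the pattern follows the alpha class_init changes later in this series, and parse_features can usually stay on the common default):

    static bool foo_cpu_has_work(CPUState *cs)
    {
        /* Placeholder policy: only hard interrupts wake this CPU. */
        return cs->interrupt_request & CPU_INTERRUPT_HARD;
    }

    static int foo_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int rw,
                                        int mmu_idx)
    {
        /* User-mode sketch: record the fault for the caller to raise. */
        cs->exception_index = FOO_EXCP_MMFAULT;
        return 1;
    }

    static void foo_cpu_class_init(ObjectClass *oc, void *data)
    {
        CPUClass *cc = CPU_CLASS(oc);

        cc->has_work = foo_cpu_has_work;
    #ifdef CONFIG_USER_ONLY
        cc->handle_mmu_fault = foo_cpu_handle_mmu_fault;
    #endif
    }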
#ifdef HOST_WORDS_BIGENDIAN
typedef struct icount_decr_u16 {
uint16_t high;
uint16_t low;
} icount_decr_u16;
#else
typedef struct icount_decr_u16 {
uint16_t low;
uint16_t high;
} icount_decr_u16;
#endif
typedef struct CPUBreakpoint {
vaddr pc;
int flags; /* BP_* */
QTAILQ_ENTRY(CPUBreakpoint) entry;
} CPUBreakpoint;
typedef struct CPUWatchpoint {
vaddr vaddr;
vaddr len_mask;
int flags; /* BP_* */
QTAILQ_ENTRY(CPUWatchpoint) entry;
} CPUWatchpoint;
struct KVMState; struct KVMState;
struct kvm_run; struct kvm_run;
#define TB_JMP_CACHE_BITS 12
#define TB_JMP_CACHE_SIZE (1 << TB_JMP_CACHE_BITS)
/** /**
* CPUState: * CPUState:
* @cpu_index: CPU index (informative). * @cpu_index: CPU index (informative).
@ -150,12 +186,20 @@ struct kvm_run;
* @tcg_exit_req: Set to force TCG to stop executing linked TBs for this * @tcg_exit_req: Set to force TCG to stop executing linked TBs for this
* CPU and return to its top level loop. * CPU and return to its top level loop.
* @singlestep_enabled: Flags for single-stepping. * @singlestep_enabled: Flags for single-stepping.
* @icount_extra: Instructions until next timer event.
* @icount_decr: Number of cycles left, with interrupt flag in high bit.
* This allows a single read-compare-cbranch-write sequence to test
* for both decrementer underflow and exceptions.
* @can_do_io: Nonzero if memory-mapped IO is safe.
* @env_ptr: Pointer to subclass-specific CPUArchState field. * @env_ptr: Pointer to subclass-specific CPUArchState field.
* @current_tb: Currently executing TB. * @current_tb: Currently executing TB.
* @gdb_regs: Additional GDB registers. * @gdb_regs: Additional GDB registers.
* @gdb_num_regs: Number of total registers accessible to GDB. * @gdb_num_regs: Number of total registers accessible to GDB.
* @gdb_num_g_regs: Number of registers in GDB 'g' packets. * @gdb_num_g_regs: Number of registers in GDB 'g' packets.
* @next_cpu: Next CPU sharing TB cache. * @next_cpu: Next CPU sharing TB cache.
* @opaque: User data.
* @mem_io_pc: Host Program Counter at which the memory was accessed.
* @mem_io_vaddr: Target virtual address at which the memory was accessed.
* @kvm_fd: vCPU file descriptor for KVM. * @kvm_fd: vCPU file descriptor for KVM.
* *
* State of one CPU core or thread. * State of one CPU core or thread.
@ -186,17 +230,34 @@ struct CPUState {
volatile sig_atomic_t tcg_exit_req; volatile sig_atomic_t tcg_exit_req;
uint32_t interrupt_request; uint32_t interrupt_request;
int singlestep_enabled; int singlestep_enabled;
int64_t icount_extra;
sigjmp_buf jmp_env;
AddressSpace *as; AddressSpace *as;
MemoryListener *tcg_as_listener; MemoryListener *tcg_as_listener;
void *env_ptr; /* CPUArchState */ void *env_ptr; /* CPUArchState */
struct TranslationBlock *current_tb; struct TranslationBlock *current_tb;
struct TranslationBlock *tb_jmp_cache[TB_JMP_CACHE_SIZE];
struct GDBRegisterState *gdb_regs; struct GDBRegisterState *gdb_regs;
int gdb_num_regs; int gdb_num_regs;
int gdb_num_g_regs; int gdb_num_g_regs;
QTAILQ_ENTRY(CPUState) node; QTAILQ_ENTRY(CPUState) node;
/* ice debug support */
QTAILQ_HEAD(breakpoints_head, CPUBreakpoint) breakpoints;
QTAILQ_HEAD(watchpoints_head, CPUWatchpoint) watchpoints;
CPUWatchpoint *watchpoint_hit;
void *opaque;
/* In order to avoid passing too many arguments to the MMIO helpers,
* we store some rarely used information in the CPU context.
*/
uintptr_t mem_io_pc;
vaddr mem_io_vaddr;
int kvm_fd; int kvm_fd;
bool kvm_vcpu_dirty; bool kvm_vcpu_dirty;
struct KVMState *kvm_state; struct KVMState *kvm_state;
@ -205,6 +266,12 @@ struct CPUState {
/* TODO Move common fields from CPUArchState here. */ /* TODO Move common fields from CPUArchState here. */
int cpu_index; /* used by alpha TCG */ int cpu_index; /* used by alpha TCG */
uint32_t halted; /* used by alpha, cris, ppc TCG */ uint32_t halted; /* used by alpha, cris, ppc TCG */
union {
uint32_t u32;
icount_decr_u16 u16;
} icount_decr;
uint32_t can_do_io;
int32_t exception_index; /* used by m68k TCG */
}; };
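A worked example of the icount_decr trick documented above (values are illustrative):

    /* icount_decr.u16.low  = 3   -> 3 instructions left in the TB budget
     * icount_decr.u16.high = 0
     * icount_decr.u32      = 3
     *
     * For a 5-instruction TB, gen_tb_start() emits, in effect:
     *
     *     count = icount_decr.u32 - 5;        // 3 - 5 = -2
     *     if (count < 0) goto exit_tb;        // budget exhausted: leave the TB
     *     icount_decr.u16.low = count;
     *
     * If an interrupt is flagged instead, the high half makes the 32-bit
     * value negative, and the same signed compare exits before any
     * instruction runs, so one test covers both conditions. */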
QTAILQ_HEAD(CPUTailQ, CPUState); QTAILQ_HEAD(CPUTailQ, CPUState);
@ -348,14 +415,31 @@ void cpu_reset(CPUState *cpu);
ObjectClass *cpu_class_by_name(const char *typename, const char *cpu_model); ObjectClass *cpu_class_by_name(const char *typename, const char *cpu_model);
/** /**
* qemu_cpu_has_work: * cpu_generic_init:
* @typename: The CPU base type.
* @cpu_model: The model string including optional parameters.
*
* Instantiates a CPU, processes optional parameters and realizes the CPU.
*
* Returns: A #CPUState or %NULL if an error occurred.
*/
CPUState *cpu_generic_init(const char *typename, const char *cpu_model);
/**
* cpu_has_work:
* @cpu: The vCPU to check. * @cpu: The vCPU to check.
* *
* Checks whether the CPU has work to do. * Checks whether the CPU has work to do.
* *
* Returns: %true if the CPU has work, %false otherwise. * Returns: %true if the CPU has work, %false otherwise.
*/ */
bool qemu_cpu_has_work(CPUState *cpu); static inline bool cpu_has_work(CPUState *cpu)
{
CPUClass *cc = CPU_GET_CLASS(cpu);
g_assert(cc->has_work);
return cc->has_work(cpu);
}
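For reference, a sketch of the typical call site, assuming the usual shape of the TCG execution loop: a halted vCPU only resumes once its CPUClass reports pending work.

    if (cpu->halted) {
        if (!cpu_has_work(cpu)) {
            return EXCP_HALTED;     /* stay halted, nothing to do */
        }
        cpu->halted = 0;            /* work pending: wake the vCPU */
    }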
/** /**
* qemu_cpu_is_self: * qemu_cpu_is_self:
@ -511,6 +595,31 @@ void qemu_init_vcpu(CPUState *cpu);
*/ */
void cpu_single_step(CPUState *cpu, int enabled); void cpu_single_step(CPUState *cpu, int enabled);
/* Breakpoint/watchpoint flags */
#define BP_MEM_READ 0x01
#define BP_MEM_WRITE 0x02
#define BP_MEM_ACCESS (BP_MEM_READ | BP_MEM_WRITE)
#define BP_STOP_BEFORE_ACCESS 0x04
#define BP_WATCHPOINT_HIT 0x08
#define BP_GDB 0x10
#define BP_CPU 0x20
int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
CPUBreakpoint **breakpoint);
int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags);
void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint);
void cpu_breakpoint_remove_all(CPUState *cpu, int mask);
int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
int flags, CPUWatchpoint **watchpoint);
int cpu_watchpoint_remove(CPUState *cpu, vaddr addr,
vaddr len, int flags);
void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint);
void cpu_watchpoint_remove_all(CPUState *cpu, int mask);
void QEMU_NORETURN cpu_abort(CPUState *cpu, const char *fmt, ...)
GCC_FMT_ATTR(2, 3);
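These debug helpers previously took a CPUArchState; a short usage sketch with the CPUState-based signatures (example_set_and_clear_bp() is a hypothetical helper, BP_GDB marks gdbstub-owned breakpoints):

    static int example_set_and_clear_bp(CPUState *cpu, vaddr pc)
    {
        CPUBreakpoint *bp;
        int err = cpu_breakpoint_insert(cpu, pc, BP_GDB, &bp);
        if (err) {
            return err;
        }
        /* ... run the guest, handle the breakpoint hit ... */
        cpu_breakpoint_remove_by_ref(cpu, bp);
        return 0;
    }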
#ifdef CONFIG_SOFTMMU #ifdef CONFIG_SOFTMMU
extern const struct VMStateDescription vmstate_cpu_common; extern const struct VMStateDescription vmstate_cpu_common;
#else #else


@ -2621,7 +2621,8 @@ static int write_note(struct memelfnote *men, int fd)
static void fill_thread_info(struct elf_note_info *info, const CPUArchState *env) static void fill_thread_info(struct elf_note_info *info, const CPUArchState *env)
{ {
TaskState *ts = (TaskState *)env->opaque; CPUState *cpu = ENV_GET_CPU((CPUArchState *)env);
TaskState *ts = (TaskState *)cpu->opaque;
struct elf_thread_status *ets; struct elf_thread_status *ets;
ets = g_malloc0(sizeof (*ets)); ets = g_malloc0(sizeof (*ets));
@ -2650,8 +2651,8 @@ static int fill_note_info(struct elf_note_info *info,
long signr, const CPUArchState *env) long signr, const CPUArchState *env)
{ {
#define NUMNOTES 3 #define NUMNOTES 3
CPUState *cpu = NULL; CPUState *cpu = ENV_GET_CPU((CPUArchState *)env);
TaskState *ts = (TaskState *)env->opaque; TaskState *ts = (TaskState *)cpu->opaque;
int i; int i;
info->notes = g_malloc0(NUMNOTES * sizeof (struct memelfnote)); info->notes = g_malloc0(NUMNOTES * sizeof (struct memelfnote));
@ -2775,7 +2776,8 @@ static int write_note_info(struct elf_note_info *info, int fd)
*/ */
static int elf_core_dump(int signr, const CPUArchState *env) static int elf_core_dump(int signr, const CPUArchState *env)
{ {
const TaskState *ts = (const TaskState *)env->opaque; const CPUState *cpu = ENV_GET_CPU((CPUArchState *)env);
const TaskState *ts = (const TaskState *)cpu->opaque;
struct vm_area_struct *vma = NULL; struct vm_area_struct *vma = NULL;
char corefile[PATH_MAX]; char corefile[PATH_MAX];
struct elf_note_info info; struct elf_note_info info;


@ -89,8 +89,7 @@ static int prepare_binprm(struct linux_binprm *bprm)
abi_ulong loader_build_argptr(int envc, int argc, abi_ulong sp, abi_ulong loader_build_argptr(int envc, int argc, abi_ulong sp,
abi_ulong stringp, int push_ptr) abi_ulong stringp, int push_ptr)
{ {
CPUArchState *env = thread_cpu->env_ptr; TaskState *ts = (TaskState *)thread_cpu->opaque;
TaskState *ts = (TaskState *)env->opaque;
int n = sizeof(abi_ulong); int n = sizeof(abi_ulong);
abi_ulong envp; abi_ulong envp;
abi_ulong argv; abi_ulong argv;


@ -98,6 +98,7 @@ static int translate_openflags(int flags)
#define ARG(x) tswap32(args[x]) #define ARG(x) tswap32(args[x])
void do_m68k_simcall(CPUM68KState *env, int nr) void do_m68k_simcall(CPUM68KState *env, int nr)
{ {
M68kCPU *cpu = m68k_env_get_cpu(env);
uint32_t *args; uint32_t *args;
args = (uint32_t *)(unsigned long)(env->aregs[7] + 4); args = (uint32_t *)(unsigned long)(env->aregs[7] + 4);
@ -165,6 +166,6 @@ void do_m68k_simcall(CPUM68KState *env, int nr)
check_err(env, lseek(ARG(0), (int32_t)ARG(1), ARG(2))); check_err(env, lseek(ARG(0), (int32_t)ARG(1), ARG(2)));
break; break;
default: default:
cpu_abort(env, "Unsupported m68k sim syscall %d\n", nr); cpu_abort(CPU(cpu), "Unsupported m68k sim syscall %d\n", nr);
} }
} }


@ -31,7 +31,9 @@ static inline void cpu_clone_regs(CPUM68KState *env, target_ulong newsp)
static inline void cpu_set_tls(CPUM68KState *env, target_ulong newtls) static inline void cpu_set_tls(CPUM68KState *env, target_ulong newtls)
{ {
TaskState *ts = env->opaque; CPUState *cs = CPU(m68k_env_get_cpu(env));
TaskState *ts = cs->opaque;
ts->tp_value = newtls; ts->tp_value = newtls;
} }
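The same movement repeats throughout the linux-user changes below: TaskState now hangs off CPUState::opaque instead of CPUArchState::opaque, so code that only holds an env pointer goes through ENV_GET_CPU(). The pattern, in sketch form:

    /* old: TaskState *ts = (TaskState *)env->opaque; */
    CPUState *cpu = ENV_GET_CPU(env);
    TaskState *ts = (TaskState *)cpu->opaque;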


@ -685,7 +685,7 @@ void cpu_loop(CPUARMState *env)
switch(trapnr) { switch(trapnr) {
case EXCP_UDEF: case EXCP_UDEF:
{ {
TaskState *ts = env->opaque; TaskState *ts = cs->opaque;
uint32_t opcode; uint32_t opcode;
int rc; int rc;
@ -1577,11 +1577,11 @@ void cpu_loop(CPUPPCState *env)
/* Just go on */ /* Just go on */
break; break;
case POWERPC_EXCP_CRITICAL: /* Critical input */ case POWERPC_EXCP_CRITICAL: /* Critical input */
cpu_abort(env, "Critical interrupt while in user mode. " cpu_abort(cs, "Critical interrupt while in user mode. "
"Aborting\n"); "Aborting\n");
break; break;
case POWERPC_EXCP_MCHECK: /* Machine check exception */ case POWERPC_EXCP_MCHECK: /* Machine check exception */
cpu_abort(env, "Machine check exception while in user mode. " cpu_abort(cs, "Machine check exception while in user mode. "
"Aborting\n"); "Aborting\n");
break; break;
case POWERPC_EXCP_DSI: /* Data storage exception */ case POWERPC_EXCP_DSI: /* Data storage exception */
@ -1645,7 +1645,7 @@ void cpu_loop(CPUPPCState *env)
queue_signal(env, info.si_signo, &info); queue_signal(env, info.si_signo, &info);
break; break;
case POWERPC_EXCP_EXTERNAL: /* External input */ case POWERPC_EXCP_EXTERNAL: /* External input */
cpu_abort(env, "External interrupt while in user mode. " cpu_abort(cs, "External interrupt while in user mode. "
"Aborting\n"); "Aborting\n");
break; break;
case POWERPC_EXCP_ALIGN: /* Alignment exception */ case POWERPC_EXCP_ALIGN: /* Alignment exception */
@ -1739,11 +1739,11 @@ void cpu_loop(CPUPPCState *env)
} }
break; break;
case POWERPC_EXCP_TRAP: case POWERPC_EXCP_TRAP:
cpu_abort(env, "Tried to call a TRAP\n"); cpu_abort(cs, "Tried to call a TRAP\n");
break; break;
default: default:
/* Should not happen ! */ /* Should not happen ! */
cpu_abort(env, "Unknown program exception (%02x)\n", cpu_abort(cs, "Unknown program exception (%02x)\n",
env->error_code); env->error_code);
break; break;
} }
@ -1759,7 +1759,7 @@ void cpu_loop(CPUPPCState *env)
queue_signal(env, info.si_signo, &info); queue_signal(env, info.si_signo, &info);
break; break;
case POWERPC_EXCP_SYSCALL: /* System call exception */ case POWERPC_EXCP_SYSCALL: /* System call exception */
cpu_abort(env, "Syscall exception while in user mode. " cpu_abort(cs, "Syscall exception while in user mode. "
"Aborting\n"); "Aborting\n");
break; break;
case POWERPC_EXCP_APU: /* Auxiliary processor unavailable */ case POWERPC_EXCP_APU: /* Auxiliary processor unavailable */
@ -1771,23 +1771,23 @@ void cpu_loop(CPUPPCState *env)
queue_signal(env, info.si_signo, &info); queue_signal(env, info.si_signo, &info);
break; break;
case POWERPC_EXCP_DECR: /* Decrementer exception */ case POWERPC_EXCP_DECR: /* Decrementer exception */
cpu_abort(env, "Decrementer interrupt while in user mode. " cpu_abort(cs, "Decrementer interrupt while in user mode. "
"Aborting\n"); "Aborting\n");
break; break;
case POWERPC_EXCP_FIT: /* Fixed-interval timer interrupt */ case POWERPC_EXCP_FIT: /* Fixed-interval timer interrupt */
cpu_abort(env, "Fix interval timer interrupt while in user mode. " cpu_abort(cs, "Fix interval timer interrupt while in user mode. "
"Aborting\n"); "Aborting\n");
break; break;
case POWERPC_EXCP_WDT: /* Watchdog timer interrupt */ case POWERPC_EXCP_WDT: /* Watchdog timer interrupt */
cpu_abort(env, "Watchdog timer interrupt while in user mode. " cpu_abort(cs, "Watchdog timer interrupt while in user mode. "
"Aborting\n"); "Aborting\n");
break; break;
case POWERPC_EXCP_DTLB: /* Data TLB error */ case POWERPC_EXCP_DTLB: /* Data TLB error */
cpu_abort(env, "Data TLB exception while in user mode. " cpu_abort(cs, "Data TLB exception while in user mode. "
"Aborting\n"); "Aborting\n");
break; break;
case POWERPC_EXCP_ITLB: /* Instruction TLB error */ case POWERPC_EXCP_ITLB: /* Instruction TLB error */
cpu_abort(env, "Instruction TLB exception while in user mode. " cpu_abort(cs, "Instruction TLB exception while in user mode. "
"Aborting\n"); "Aborting\n");
break; break;
case POWERPC_EXCP_SPEU: /* SPE/embedded floating-point unavail. */ case POWERPC_EXCP_SPEU: /* SPE/embedded floating-point unavail. */
@ -1799,37 +1799,37 @@ void cpu_loop(CPUPPCState *env)
queue_signal(env, info.si_signo, &info); queue_signal(env, info.si_signo, &info);
break; break;
case POWERPC_EXCP_EFPDI: /* Embedded floating-point data IRQ */ case POWERPC_EXCP_EFPDI: /* Embedded floating-point data IRQ */
cpu_abort(env, "Embedded floating-point data IRQ not handled\n"); cpu_abort(cs, "Embedded floating-point data IRQ not handled\n");
break; break;
case POWERPC_EXCP_EFPRI: /* Embedded floating-point round IRQ */ case POWERPC_EXCP_EFPRI: /* Embedded floating-point round IRQ */
cpu_abort(env, "Embedded floating-point round IRQ not handled\n"); cpu_abort(cs, "Embedded floating-point round IRQ not handled\n");
break; break;
case POWERPC_EXCP_EPERFM: /* Embedded performance monitor IRQ */ case POWERPC_EXCP_EPERFM: /* Embedded performance monitor IRQ */
cpu_abort(env, "Performance monitor exception not handled\n"); cpu_abort(cs, "Performance monitor exception not handled\n");
break; break;
case POWERPC_EXCP_DOORI: /* Embedded doorbell interrupt */ case POWERPC_EXCP_DOORI: /* Embedded doorbell interrupt */
cpu_abort(env, "Doorbell interrupt while in user mode. " cpu_abort(cs, "Doorbell interrupt while in user mode. "
"Aborting\n"); "Aborting\n");
break; break;
case POWERPC_EXCP_DOORCI: /* Embedded doorbell critical interrupt */ case POWERPC_EXCP_DOORCI: /* Embedded doorbell critical interrupt */
cpu_abort(env, "Doorbell critical interrupt while in user mode. " cpu_abort(cs, "Doorbell critical interrupt while in user mode. "
"Aborting\n"); "Aborting\n");
break; break;
case POWERPC_EXCP_RESET: /* System reset exception */ case POWERPC_EXCP_RESET: /* System reset exception */
cpu_abort(env, "Reset interrupt while in user mode. " cpu_abort(cs, "Reset interrupt while in user mode. "
"Aborting\n"); "Aborting\n");
break; break;
case POWERPC_EXCP_DSEG: /* Data segment exception */ case POWERPC_EXCP_DSEG: /* Data segment exception */
cpu_abort(env, "Data segment exception while in user mode. " cpu_abort(cs, "Data segment exception while in user mode. "
"Aborting\n"); "Aborting\n");
break; break;
case POWERPC_EXCP_ISEG: /* Instruction segment exception */ case POWERPC_EXCP_ISEG: /* Instruction segment exception */
cpu_abort(env, "Instruction segment exception " cpu_abort(cs, "Instruction segment exception "
"while in user mode. Aborting\n"); "while in user mode. Aborting\n");
break; break;
/* PowerPC 64 with hypervisor mode support */ /* PowerPC 64 with hypervisor mode support */
case POWERPC_EXCP_HDECR: /* Hypervisor decrementer exception */ case POWERPC_EXCP_HDECR: /* Hypervisor decrementer exception */
cpu_abort(env, "Hypervisor decrementer interrupt " cpu_abort(cs, "Hypervisor decrementer interrupt "
"while in user mode. Aborting\n"); "while in user mode. Aborting\n");
break; break;
case POWERPC_EXCP_TRACE: /* Trace exception */ case POWERPC_EXCP_TRACE: /* Trace exception */
@ -1839,19 +1839,19 @@ void cpu_loop(CPUPPCState *env)
break; break;
/* PowerPC 64 with hypervisor mode support */ /* PowerPC 64 with hypervisor mode support */
case POWERPC_EXCP_HDSI: /* Hypervisor data storage exception */ case POWERPC_EXCP_HDSI: /* Hypervisor data storage exception */
cpu_abort(env, "Hypervisor data storage exception " cpu_abort(cs, "Hypervisor data storage exception "
"while in user mode. Aborting\n"); "while in user mode. Aborting\n");
break; break;
case POWERPC_EXCP_HISI: /* Hypervisor instruction storage excp */ case POWERPC_EXCP_HISI: /* Hypervisor instruction storage excp */
cpu_abort(env, "Hypervisor instruction storage exception " cpu_abort(cs, "Hypervisor instruction storage exception "
"while in user mode. Aborting\n"); "while in user mode. Aborting\n");
break; break;
case POWERPC_EXCP_HDSEG: /* Hypervisor data segment exception */ case POWERPC_EXCP_HDSEG: /* Hypervisor data segment exception */
cpu_abort(env, "Hypervisor data segment exception " cpu_abort(cs, "Hypervisor data segment exception "
"while in user mode. Aborting\n"); "while in user mode. Aborting\n");
break; break;
case POWERPC_EXCP_HISEG: /* Hypervisor instruction segment excp */ case POWERPC_EXCP_HISEG: /* Hypervisor instruction segment excp */
cpu_abort(env, "Hypervisor instruction segment exception " cpu_abort(cs, "Hypervisor instruction segment exception "
"while in user mode. Aborting\n"); "while in user mode. Aborting\n");
break; break;
case POWERPC_EXCP_VPU: /* Vector unavailable exception */ case POWERPC_EXCP_VPU: /* Vector unavailable exception */
@ -1863,58 +1863,58 @@ void cpu_loop(CPUPPCState *env)
queue_signal(env, info.si_signo, &info); queue_signal(env, info.si_signo, &info);
break; break;
case POWERPC_EXCP_PIT: /* Programmable interval timer IRQ */ case POWERPC_EXCP_PIT: /* Programmable interval timer IRQ */
cpu_abort(env, "Programmable interval timer interrupt " cpu_abort(cs, "Programmable interval timer interrupt "
"while in user mode. Aborting\n"); "while in user mode. Aborting\n");
break; break;
case POWERPC_EXCP_IO: /* IO error exception */ case POWERPC_EXCP_IO: /* IO error exception */
cpu_abort(env, "IO error exception while in user mode. " cpu_abort(cs, "IO error exception while in user mode. "
"Aborting\n"); "Aborting\n");
break; break;
case POWERPC_EXCP_RUNM: /* Run mode exception */ case POWERPC_EXCP_RUNM: /* Run mode exception */
cpu_abort(env, "Run mode exception while in user mode. " cpu_abort(cs, "Run mode exception while in user mode. "
"Aborting\n"); "Aborting\n");
break; break;
case POWERPC_EXCP_EMUL: /* Emulation trap exception */ case POWERPC_EXCP_EMUL: /* Emulation trap exception */
cpu_abort(env, "Emulation trap exception not handled\n"); cpu_abort(cs, "Emulation trap exception not handled\n");
break; break;
case POWERPC_EXCP_IFTLB: /* Instruction fetch TLB error */ case POWERPC_EXCP_IFTLB: /* Instruction fetch TLB error */
cpu_abort(env, "Instruction fetch TLB exception " cpu_abort(cs, "Instruction fetch TLB exception "
"while in user-mode. Aborting"); "while in user-mode. Aborting");
break; break;
case POWERPC_EXCP_DLTLB: /* Data load TLB miss */ case POWERPC_EXCP_DLTLB: /* Data load TLB miss */
cpu_abort(env, "Data load TLB exception while in user-mode. " cpu_abort(cs, "Data load TLB exception while in user-mode. "
"Aborting"); "Aborting");
break; break;
case POWERPC_EXCP_DSTLB: /* Data store TLB miss */ case POWERPC_EXCP_DSTLB: /* Data store TLB miss */
cpu_abort(env, "Data store TLB exception while in user-mode. " cpu_abort(cs, "Data store TLB exception while in user-mode. "
"Aborting"); "Aborting");
break; break;
case POWERPC_EXCP_FPA: /* Floating-point assist exception */ case POWERPC_EXCP_FPA: /* Floating-point assist exception */
cpu_abort(env, "Floating-point assist exception not handled\n"); cpu_abort(cs, "Floating-point assist exception not handled\n");
break; break;
case POWERPC_EXCP_IABR: /* Instruction address breakpoint */ case POWERPC_EXCP_IABR: /* Instruction address breakpoint */
cpu_abort(env, "Instruction address breakpoint exception " cpu_abort(cs, "Instruction address breakpoint exception "
"not handled\n"); "not handled\n");
break; break;
case POWERPC_EXCP_SMI: /* System management interrupt */ case POWERPC_EXCP_SMI: /* System management interrupt */
cpu_abort(env, "System management interrupt while in user mode. " cpu_abort(cs, "System management interrupt while in user mode. "
"Aborting\n"); "Aborting\n");
break; break;
case POWERPC_EXCP_THERM: /* Thermal interrupt */ case POWERPC_EXCP_THERM: /* Thermal interrupt */
cpu_abort(env, "Thermal interrupt interrupt while in user mode. " cpu_abort(cs, "Thermal interrupt interrupt while in user mode. "
"Aborting\n"); "Aborting\n");
break; break;
case POWERPC_EXCP_PERFM: /* Embedded performance monitor IRQ */ case POWERPC_EXCP_PERFM: /* Embedded performance monitor IRQ */
cpu_abort(env, "Performance monitor exception not handled\n"); cpu_abort(cs, "Performance monitor exception not handled\n");
break; break;
case POWERPC_EXCP_VPUA: /* Vector assist exception */ case POWERPC_EXCP_VPUA: /* Vector assist exception */
cpu_abort(env, "Vector assist exception not handled\n"); cpu_abort(cs, "Vector assist exception not handled\n");
break; break;
case POWERPC_EXCP_SOFTP: /* Soft patch exception */ case POWERPC_EXCP_SOFTP: /* Soft patch exception */
cpu_abort(env, "Soft patch exception not handled\n"); cpu_abort(cs, "Soft patch exception not handled\n");
break; break;
case POWERPC_EXCP_MAINT: /* Maintenance exception */ case POWERPC_EXCP_MAINT: /* Maintenance exception */
cpu_abort(env, "Maintenance exception while in user mode. " cpu_abort(cs, "Maintenance exception while in user mode. "
"Aborting\n"); "Aborting\n");
break; break;
case POWERPC_EXCP_STOP: /* stop translation */ case POWERPC_EXCP_STOP: /* stop translation */
@ -1970,7 +1970,7 @@ void cpu_loop(CPUPPCState *env)
/* just indicate that signals should be handled asap */ /* just indicate that signals should be handled asap */
break; break;
default: default:
cpu_abort(env, "Unknown exception 0x%d. Aborting\n", trapnr); cpu_abort(cs, "Unknown exception 0x%d. Aborting\n", trapnr);
break; break;
} }
process_pending_signals(env); process_pending_signals(env);
@ -2965,7 +2965,7 @@ void cpu_loop(CPUM68KState *env)
int trapnr; int trapnr;
unsigned int n; unsigned int n;
target_siginfo_t info; target_siginfo_t info;
TaskState *ts = env->opaque; TaskState *ts = cs->opaque;
for(;;) { for(;;) {
trapnr = cpu_m68k_exec(env); trapnr = cpu_m68k_exec(env);
@ -3435,28 +3435,30 @@ void init_task_state(TaskState *ts)
CPUArchState *cpu_copy(CPUArchState *env) CPUArchState *cpu_copy(CPUArchState *env)
{ {
CPUState *cpu = ENV_GET_CPU(env);
CPUArchState *new_env = cpu_init(cpu_model); CPUArchState *new_env = cpu_init(cpu_model);
CPUState *new_cpu = ENV_GET_CPU(new_env);
#if defined(TARGET_HAS_ICE) #if defined(TARGET_HAS_ICE)
CPUBreakpoint *bp; CPUBreakpoint *bp;
CPUWatchpoint *wp; CPUWatchpoint *wp;
#endif #endif
/* Reset non arch specific state */ /* Reset non arch specific state */
cpu_reset(ENV_GET_CPU(new_env)); cpu_reset(new_cpu);
memcpy(new_env, env, sizeof(CPUArchState)); memcpy(new_env, env, sizeof(CPUArchState));
/* Clone all break/watchpoints. /* Clone all break/watchpoints.
Note: Once we support ptrace with hw-debug register access, make sure Note: Once we support ptrace with hw-debug register access, make sure
BP_CPU break/watchpoints are handled correctly on clone. */ BP_CPU break/watchpoints are handled correctly on clone. */
QTAILQ_INIT(&env->breakpoints); QTAILQ_INIT(&cpu->breakpoints);
QTAILQ_INIT(&env->watchpoints); QTAILQ_INIT(&cpu->watchpoints);
#if defined(TARGET_HAS_ICE) #if defined(TARGET_HAS_ICE)
QTAILQ_FOREACH(bp, &env->breakpoints, entry) { QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL); cpu_breakpoint_insert(new_cpu, bp->pc, bp->flags, NULL);
} }
QTAILQ_FOREACH(wp, &env->watchpoints, entry) { QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1, cpu_watchpoint_insert(new_cpu, wp->vaddr, (~wp->len_mask) + 1,
wp->flags, NULL); wp->flags, NULL);
} }
#endif #endif
@ -4001,7 +4003,7 @@ int main(int argc, char **argv, char **envp)
/* build Task State */ /* build Task State */
ts->info = info; ts->info = info;
ts->bprm = &bprm; ts->bprm = &bprm;
env->opaque = ts; cpu->opaque = ts;
task_settid(ts); task_settid(ts);
execfd = qemu_getauxval(AT_EXECFD); execfd = qemu_getauxval(AT_EXECFD);


@ -370,7 +370,8 @@ void signal_init(void)
static inline struct sigqueue *alloc_sigqueue(CPUArchState *env) static inline struct sigqueue *alloc_sigqueue(CPUArchState *env)
{ {
TaskState *ts = env->opaque; CPUState *cpu = ENV_GET_CPU(env);
TaskState *ts = cpu->opaque;
struct sigqueue *q = ts->first_free; struct sigqueue *q = ts->first_free;
if (!q) if (!q)
return NULL; return NULL;
@ -380,7 +381,9 @@ static inline struct sigqueue *alloc_sigqueue(CPUArchState *env)
static inline void free_sigqueue(CPUArchState *env, struct sigqueue *q) static inline void free_sigqueue(CPUArchState *env, struct sigqueue *q)
{ {
TaskState *ts = env->opaque; CPUState *cpu = ENV_GET_CPU(env);
TaskState *ts = cpu->opaque;
q->next = ts->first_free; q->next = ts->first_free;
ts->first_free = q; ts->first_free = q;
} }
@ -388,8 +391,9 @@ static inline void free_sigqueue(CPUArchState *env, struct sigqueue *q)
/* abort execution with signal */ /* abort execution with signal */
static void QEMU_NORETURN force_sig(int target_sig) static void QEMU_NORETURN force_sig(int target_sig)
{ {
CPUArchState *env = thread_cpu->env_ptr; CPUState *cpu = thread_cpu;
TaskState *ts = (TaskState *)env->opaque; CPUArchState *env = cpu->env_ptr;
TaskState *ts = (TaskState *)cpu->opaque;
int host_sig, core_dumped = 0; int host_sig, core_dumped = 0;
struct sigaction act; struct sigaction act;
host_sig = target_to_host_signal(target_sig); host_sig = target_to_host_signal(target_sig);
@ -440,7 +444,8 @@ static void QEMU_NORETURN force_sig(int target_sig)
as possible */ as possible */
int queue_signal(CPUArchState *env, int sig, target_siginfo_t *info) int queue_signal(CPUArchState *env, int sig, target_siginfo_t *info)
{ {
TaskState *ts = env->opaque; CPUState *cpu = ENV_GET_CPU(env);
TaskState *ts = cpu->opaque;
struct emulated_sigtable *k; struct emulated_sigtable *k;
struct sigqueue *q, **pq; struct sigqueue *q, **pq;
abi_ulong handler; abi_ulong handler;
@ -774,8 +779,9 @@ static int
setup_sigcontext(struct target_sigcontext *sc, struct target_fpstate *fpstate, setup_sigcontext(struct target_sigcontext *sc, struct target_fpstate *fpstate,
CPUX86State *env, abi_ulong mask, abi_ulong fpstate_addr) CPUX86State *env, abi_ulong mask, abi_ulong fpstate_addr)
{ {
int err = 0; CPUState *cs = CPU(x86_env_get_cpu(env));
uint16_t magic; int err = 0;
uint16_t magic;
/* already locked in setup_frame() */ /* already locked in setup_frame() */
err |= __put_user(env->segs[R_GS].selector, (unsigned int *)&sc->gs); err |= __put_user(env->segs[R_GS].selector, (unsigned int *)&sc->gs);
@ -790,7 +796,7 @@ setup_sigcontext(struct target_sigcontext *sc, struct target_fpstate *fpstate,
err |= __put_user(env->regs[R_EDX], &sc->edx); err |= __put_user(env->regs[R_EDX], &sc->edx);
err |= __put_user(env->regs[R_ECX], &sc->ecx); err |= __put_user(env->regs[R_ECX], &sc->ecx);
err |= __put_user(env->regs[R_EAX], &sc->eax); err |= __put_user(env->regs[R_EAX], &sc->eax);
err |= __put_user(env->exception_index, &sc->trapno); err |= __put_user(cs->exception_index, &sc->trapno);
err |= __put_user(env->error_code, &sc->err); err |= __put_user(env->error_code, &sc->err);
err |= __put_user(env->eip, &sc->eip); err |= __put_user(env->eip, &sc->eip);
err |= __put_user(env->segs[R_CS].selector, (unsigned int *)&sc->cs); err |= __put_user(env->segs[R_CS].selector, (unsigned int *)&sc->cs);
@ -5675,7 +5681,7 @@ void process_pending_signals(CPUArchState *cpu_env)
struct emulated_sigtable *k; struct emulated_sigtable *k;
struct target_sigaction *sa; struct target_sigaction *sa;
struct sigqueue *q; struct sigqueue *q;
TaskState *ts = cpu_env->opaque; TaskState *ts = cpu->opaque;
if (!ts->signal_pending) if (!ts->signal_pending)
return; return;


@ -4243,7 +4243,7 @@ static void *clone_func(void *arg)
env = info->env; env = info->env;
cpu = ENV_GET_CPU(env); cpu = ENV_GET_CPU(env);
thread_cpu = cpu; thread_cpu = cpu;
ts = (TaskState *)env->opaque; ts = (TaskState *)cpu->opaque;
info->tid = gettid(); info->tid = gettid();
cpu->host_tid = info->tid; cpu->host_tid = info->tid;
task_settid(ts); task_settid(ts);
@ -4271,8 +4271,10 @@ static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
abi_ulong parent_tidptr, target_ulong newtls, abi_ulong parent_tidptr, target_ulong newtls,
abi_ulong child_tidptr) abi_ulong child_tidptr)
{ {
CPUState *cpu = ENV_GET_CPU(env);
int ret; int ret;
TaskState *ts; TaskState *ts;
CPUState *new_cpu;
CPUArchState *new_env; CPUArchState *new_env;
unsigned int nptl_flags; unsigned int nptl_flags;
sigset_t sigmask; sigset_t sigmask;
@ -4282,7 +4284,7 @@ static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
flags &= ~(CLONE_VFORK | CLONE_VM); flags &= ~(CLONE_VFORK | CLONE_VM);
if (flags & CLONE_VM) { if (flags & CLONE_VM) {
TaskState *parent_ts = (TaskState *)env->opaque; TaskState *parent_ts = (TaskState *)cpu->opaque;
new_thread_info info; new_thread_info info;
pthread_attr_t attr; pthread_attr_t attr;
@ -4292,7 +4294,8 @@ static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
new_env = cpu_copy(env); new_env = cpu_copy(env);
/* Init regs that differ from the parent. */ /* Init regs that differ from the parent. */
cpu_clone_regs(new_env, newsp); cpu_clone_regs(new_env, newsp);
new_env->opaque = ts; new_cpu = ENV_GET_CPU(new_env);
new_cpu->opaque = ts;
ts->bprm = parent_ts->bprm; ts->bprm = parent_ts->bprm;
ts->info = parent_ts->info; ts->info = parent_ts->info;
nptl_flags = flags; nptl_flags = flags;
@ -4364,7 +4367,7 @@ static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
put_user_u32(gettid(), child_tidptr); put_user_u32(gettid(), child_tidptr);
if (flags & CLONE_PARENT_SETTID) if (flags & CLONE_PARENT_SETTID)
put_user_u32(gettid(), parent_tidptr); put_user_u32(gettid(), parent_tidptr);
ts = (TaskState *)env->opaque; ts = (TaskState *)cpu->opaque;
if (flags & CLONE_SETTLS) if (flags & CLONE_SETTLS)
cpu_set_tls (env, newtls); cpu_set_tls (env, newtls);
if (flags & CLONE_CHILD_CLEARTID) if (flags & CLONE_CHILD_CLEARTID)
@ -4974,7 +4977,8 @@ void init_qemu_uname_release(void)
static int open_self_maps(void *cpu_env, int fd) static int open_self_maps(void *cpu_env, int fd)
{ {
#if defined(TARGET_ARM) || defined(TARGET_M68K) || defined(TARGET_UNICORE32) #if defined(TARGET_ARM) || defined(TARGET_M68K) || defined(TARGET_UNICORE32)
TaskState *ts = ((CPUArchState *)cpu_env)->opaque; CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
TaskState *ts = cpu->opaque;
#endif #endif
FILE *fp; FILE *fp;
char *line = NULL; char *line = NULL;
@ -5026,7 +5030,8 @@ static int open_self_maps(void *cpu_env, int fd)
static int open_self_stat(void *cpu_env, int fd) static int open_self_stat(void *cpu_env, int fd)
{ {
TaskState *ts = ((CPUArchState *)cpu_env)->opaque; CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
TaskState *ts = cpu->opaque;
abi_ulong start_stack = ts->info->start_stack; abi_ulong start_stack = ts->info->start_stack;
int i; int i;
@ -5062,7 +5067,8 @@ static int open_self_stat(void *cpu_env, int fd)
static int open_self_auxv(void *cpu_env, int fd) static int open_self_auxv(void *cpu_env, int fd)
{ {
TaskState *ts = ((CPUArchState *)cpu_env)->opaque; CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
TaskState *ts = cpu->opaque;
abi_ulong auxv = ts->info->saved_auxv; abi_ulong auxv = ts->info->saved_auxv;
abi_ulong len = ts->info->auxv_len; abi_ulong len = ts->info->auxv_len;
char *ptr; char *ptr;
@ -5244,14 +5250,14 @@ abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
/* Remove the CPU from the list. */ /* Remove the CPU from the list. */
QTAILQ_REMOVE(&cpus, cpu, node); QTAILQ_REMOVE(&cpus, cpu, node);
cpu_list_unlock(); cpu_list_unlock();
ts = ((CPUArchState *)cpu_env)->opaque; ts = cpu->opaque;
if (ts->child_tidptr) { if (ts->child_tidptr) {
put_user_u32(0, ts->child_tidptr); put_user_u32(0, ts->child_tidptr);
sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX, sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
NULL, NULL, 0); NULL, NULL, 0);
} }
thread_cpu = NULL; thread_cpu = NULL;
object_unref(OBJECT(ENV_GET_CPU(cpu_env))); object_unref(OBJECT(cpu));
g_free(ts); g_free(ts);
pthread_exit(NULL); pthread_exit(NULL);
} }
@ -6555,7 +6561,7 @@ abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
break; break;
case TARGET_NR_mprotect: case TARGET_NR_mprotect:
{ {
TaskState *ts = ((CPUArchState *)cpu_env)->opaque; TaskState *ts = cpu->opaque;
/* Special hack to detect libc making the stack executable. */ /* Special hack to detect libc making the stack executable. */
if ((arg3 & PROT_GROWSDOWN) if ((arg3 & PROT_GROWSDOWN)
&& arg1 >= ts->info->stack_limit && arg1 >= ts->info->stack_limit
@ -8647,7 +8653,7 @@ abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
break; break;
#elif defined(TARGET_M68K) #elif defined(TARGET_M68K)
{ {
TaskState *ts = ((CPUArchState *)cpu_env)->opaque; TaskState *ts = cpu->opaque;
ts->tp_value = arg1; ts->tp_value = arg1;
ret = 0; ret = 0;
break; break;
@ -8663,7 +8669,7 @@ abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
break; break;
#elif defined(TARGET_M68K) #elif defined(TARGET_M68K)
{ {
TaskState *ts = ((CPUArchState *)cpu_env)->opaque; TaskState *ts = cpu->opaque;
ret = ts->tp_value; ret = ts->tp_value;
break; break;
} }


@ -72,7 +72,8 @@ static inline unsigned int vm_getl(uint32_t segptr, unsigned int reg16)
void save_v86_state(CPUX86State *env) void save_v86_state(CPUX86State *env)
{ {
TaskState *ts = env->opaque; CPUState *cs = CPU(x86_env_get_cpu(env));
TaskState *ts = cs->opaque;
struct target_vm86plus_struct * target_v86; struct target_vm86plus_struct * target_v86;
if (!lock_user_struct(VERIFY_WRITE, target_v86, ts->target_v86, 0)) if (!lock_user_struct(VERIFY_WRITE, target_v86, ts->target_v86, 0))
@ -131,7 +132,8 @@ static inline void return_to_32bit(CPUX86State *env, int retval)
static inline int set_IF(CPUX86State *env) static inline int set_IF(CPUX86State *env)
{ {
TaskState *ts = env->opaque; CPUState *cs = CPU(x86_env_get_cpu(env));
TaskState *ts = cs->opaque;
ts->v86flags |= VIF_MASK; ts->v86flags |= VIF_MASK;
if (ts->v86flags & VIP_MASK) { if (ts->v86flags & VIP_MASK) {
@ -143,7 +145,8 @@ static inline int set_IF(CPUX86State *env)
static inline void clear_IF(CPUX86State *env) static inline void clear_IF(CPUX86State *env)
{ {
TaskState *ts = env->opaque; CPUState *cs = CPU(x86_env_get_cpu(env));
TaskState *ts = cs->opaque;
ts->v86flags &= ~VIF_MASK; ts->v86flags &= ~VIF_MASK;
} }
@ -160,7 +163,8 @@ static inline void clear_AC(CPUX86State *env)
static inline int set_vflags_long(unsigned long eflags, CPUX86State *env) static inline int set_vflags_long(unsigned long eflags, CPUX86State *env)
{ {
TaskState *ts = env->opaque; CPUState *cs = CPU(x86_env_get_cpu(env));
TaskState *ts = cs->opaque;
set_flags(ts->v86flags, eflags, ts->v86mask); set_flags(ts->v86flags, eflags, ts->v86mask);
set_flags(env->eflags, eflags, SAFE_MASK); set_flags(env->eflags, eflags, SAFE_MASK);
@ -173,7 +177,8 @@ static inline int set_vflags_long(unsigned long eflags, CPUX86State *env)
static inline int set_vflags_short(unsigned short flags, CPUX86State *env) static inline int set_vflags_short(unsigned short flags, CPUX86State *env)
{ {
TaskState *ts = env->opaque; CPUState *cs = CPU(x86_env_get_cpu(env));
TaskState *ts = cs->opaque;
set_flags(ts->v86flags, flags, ts->v86mask & 0xffff); set_flags(ts->v86flags, flags, ts->v86mask & 0xffff);
set_flags(env->eflags, flags, SAFE_MASK); set_flags(env->eflags, flags, SAFE_MASK);
@ -186,7 +191,8 @@ static inline int set_vflags_short(unsigned short flags, CPUX86State *env)
static inline unsigned int get_vflags(CPUX86State *env) static inline unsigned int get_vflags(CPUX86State *env)
{ {
TaskState *ts = env->opaque; CPUState *cs = CPU(x86_env_get_cpu(env));
TaskState *ts = cs->opaque;
unsigned int flags; unsigned int flags;
flags = env->eflags & RETURN_MASK; flags = env->eflags & RETURN_MASK;
@ -202,7 +208,8 @@ static inline unsigned int get_vflags(CPUX86State *env)
support TSS interrupt revectoring, so this code is always executed) */ support TSS interrupt revectoring, so this code is always executed) */
static void do_int(CPUX86State *env, int intno) static void do_int(CPUX86State *env, int intno)
{ {
TaskState *ts = env->opaque; CPUState *cs = CPU(x86_env_get_cpu(env));
TaskState *ts = cs->opaque;
uint32_t int_addr, segoffs, ssp; uint32_t int_addr, segoffs, ssp;
unsigned int sp; unsigned int sp;
@ -260,7 +267,8 @@ void handle_vm86_trap(CPUX86State *env, int trapno)
void handle_vm86_fault(CPUX86State *env) void handle_vm86_fault(CPUX86State *env)
{ {
TaskState *ts = env->opaque; CPUState *cs = CPU(x86_env_get_cpu(env));
TaskState *ts = cs->opaque;
uint32_t csp, ssp; uint32_t csp, ssp;
unsigned int ip, sp, newflags, newip, newcs, opcode, intno; unsigned int ip, sp, newflags, newip, newcs, opcode, intno;
int data32, pref_done; int data32, pref_done;
@ -384,7 +392,8 @@ void handle_vm86_fault(CPUX86State *env)
int do_vm86(CPUX86State *env, long subfunction, abi_ulong vm86_addr) int do_vm86(CPUX86State *env, long subfunction, abi_ulong vm86_addr)
{ {
TaskState *ts = env->opaque; CPUState *cs = CPU(x86_env_get_cpu(env));
TaskState *ts = cs->opaque;
struct target_vm86plus_struct * target_v86; struct target_vm86plus_struct * target_v86;
int ret; int ret;


@ -1,7 +1,7 @@
/* /*
* QEMU CPU model * QEMU CPU model
* *
* Copyright (c) 2012 SUSE LINUX Products GmbH * Copyright (c) 2012-2014 SUSE LINUX Products GmbH
* *
* This program is free software; you can redistribute it and/or * This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License * modify it under the terms of the GNU General Public License
@ -23,6 +23,7 @@
#include "sysemu/kvm.h" #include "sysemu/kvm.h"
#include "qemu/notify.h" #include "qemu/notify.h"
#include "qemu/log.h" #include "qemu/log.h"
#include "qemu/error-report.h"
#include "sysemu/sysemu.h" #include "sysemu/sysemu.h"
bool cpu_exists(int64_t id) bool cpu_exists(int64_t id)
@ -39,6 +40,46 @@ bool cpu_exists(int64_t id)
return false; return false;
} }
CPUState *cpu_generic_init(const char *typename, const char *cpu_model)
{
char *str, *name, *featurestr;
CPUState *cpu;
ObjectClass *oc;
CPUClass *cc;
Error *err = NULL;
str = g_strdup(cpu_model);
name = strtok(str, ",");
oc = cpu_class_by_name(typename, name);
if (oc == NULL) {
g_free(str);
return NULL;
}
cpu = CPU(object_new(object_class_get_name(oc)));
cc = CPU_GET_CLASS(cpu);
featurestr = strtok(NULL, ",");
cc->parse_features(cpu, featurestr, &err);
g_free(str);
if (err != NULL) {
goto out;
}
object_property_set_bool(OBJECT(cpu), true, "realized", &err);
out:
if (err != NULL) {
error_report("%s", error_get_pretty(err));
error_free(err);
object_unref(OBJECT(cpu));
return NULL;
}
return cpu;
}
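A sketch of what this enables on the target side, with placeholder names: a target's cpu_init() wrapper can now reduce to a single cpu_generic_init() call.

    FooCPU *cpu_foo_init(const char *cpu_model)
    {
        return FOO_CPU(cpu_generic_init(TYPE_FOO_CPU, cpu_model));
    }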
bool cpu_paging_enabled(const CPUState *cpu) bool cpu_paging_enabled(const CPUState *cpu)
{ {
CPUClass *cc = CPU_GET_CLASS(cpu); CPUClass *cc = CPU_GET_CLASS(cpu);
@ -195,10 +236,20 @@ static void cpu_common_reset(CPUState *cpu)
log_cpu_state(cpu, cc->reset_dump_flags); log_cpu_state(cpu, cc->reset_dump_flags);
} }
cpu->exit_request = 0;
cpu->interrupt_request = 0; cpu->interrupt_request = 0;
cpu->current_tb = NULL; cpu->current_tb = NULL;
cpu->halted = 0; cpu->halted = 0;
cpu->mem_io_pc = 0;
cpu->mem_io_vaddr = 0;
cpu->icount_extra = 0;
cpu->icount_decr.u32 = 0;
cpu->can_do_io = 0;
memset(cpu->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof(void *));
}
static bool cpu_common_has_work(CPUState *cs)
{
return false;
} }
ObjectClass *cpu_class_by_name(const char *typename, const char *cpu_model) ObjectClass *cpu_class_by_name(const char *typename, const char *cpu_model)
@ -213,6 +264,34 @@ static ObjectClass *cpu_common_class_by_name(const char *cpu_model)
return NULL; return NULL;
} }
static void cpu_common_parse_features(CPUState *cpu, char *features,
Error **errp)
{
char *featurestr; /* Single "key=value" string being parsed */
char *val;
Error *err = NULL;
featurestr = features ? strtok(features, ",") : NULL;
while (featurestr) {
val = strchr(featurestr, '=');
if (val) {
*val = 0;
val++;
object_property_parse(OBJECT(cpu), val, featurestr, &err);
if (err) {
error_propagate(errp, err);
return;
}
} else {
error_setg(errp, "Expected key=value format, found %s.",
featurestr);
return;
}
featurestr = strtok(NULL, ",");
}
}
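An illustration with assumed feature names: for "-cpu mymodel,feat1=on,count=2", cpu_generic_init() passes "feat1=on,count=2" to this hook, and the loop above performs the equivalent of:

    object_property_parse(OBJECT(cpu), "on", "feat1", &err);
    object_property_parse(OBJECT(cpu), "2",  "count", &err);

so every key=value argument becomes a QOM property assignment on the freshly created CPU object.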
static void cpu_common_realizefn(DeviceState *dev, Error **errp) static void cpu_common_realizefn(DeviceState *dev, Error **errp)
{ {
CPUState *cpu = CPU(dev); CPUState *cpu = CPU(dev);
@ -243,8 +322,10 @@ static void cpu_class_init(ObjectClass *klass, void *data)
CPUClass *k = CPU_CLASS(klass); CPUClass *k = CPU_CLASS(klass);
k->class_by_name = cpu_common_class_by_name; k->class_by_name = cpu_common_class_by_name;
k->parse_features = cpu_common_parse_features;
k->reset = cpu_common_reset; k->reset = cpu_common_reset;
k->get_arch_id = cpu_common_get_arch_id; k->get_arch_id = cpu_common_get_arch_id;
k->has_work = cpu_common_has_work;
k->get_paging_enabled = cpu_common_get_paging_enabled; k->get_paging_enabled = cpu_common_get_paging_enabled;
k->get_memory_mapping = cpu_common_get_memory_mapping; k->get_memory_mapping = cpu_common_get_memory_mapping;
k->write_elf32_qemunote = cpu_common_write_elf32_qemunote; k->write_elf32_qemunote = cpu_common_write_elf32_qemunote;


@ -31,6 +31,21 @@ static void alpha_cpu_set_pc(CPUState *cs, vaddr value)
cpu->env.pc = value; cpu->env.pc = value;
} }
static bool alpha_cpu_has_work(CPUState *cs)
{
/* Here we are checking to see if the CPU should wake up from HALT.
We will have gotten into this state only for WTINT from PALmode. */
/* ??? I'm not sure how the IPL state works with WTINT to keep a CPU
asleep even if (some) interrupts have been asserted. For now,
assume that if a CPU really wants to stay asleep, it will mask
interrupts at the chipset level, which will prevent these bits
from being set in the first place. */
return cs->interrupt_request & (CPU_INTERRUPT_HARD
| CPU_INTERRUPT_TIMER
| CPU_INTERRUPT_SMP
| CPU_INTERRUPT_MCHK);
}
static void alpha_cpu_realizefn(DeviceState *dev, Error **errp) static void alpha_cpu_realizefn(DeviceState *dev, Error **errp)
{ {
CPUState *cs = CPU(dev); CPUState *cs = CPU(dev);
@ -243,7 +258,7 @@ static void alpha_cpu_initfn(Object *obj)
cs->env_ptr = env; cs->env_ptr = env;
cpu_exec_init(env); cpu_exec_init(env);
tlb_flush(env, 1); tlb_flush(cs, 1);
alpha_translate_init(); alpha_translate_init();
@ -267,12 +282,15 @@ static void alpha_cpu_class_init(ObjectClass *oc, void *data)
dc->realize = alpha_cpu_realizefn; dc->realize = alpha_cpu_realizefn;
cc->class_by_name = alpha_cpu_class_by_name; cc->class_by_name = alpha_cpu_class_by_name;
cc->has_work = alpha_cpu_has_work;
cc->do_interrupt = alpha_cpu_do_interrupt; cc->do_interrupt = alpha_cpu_do_interrupt;
cc->dump_state = alpha_cpu_dump_state; cc->dump_state = alpha_cpu_dump_state;
cc->set_pc = alpha_cpu_set_pc; cc->set_pc = alpha_cpu_set_pc;
cc->gdb_read_register = alpha_cpu_gdb_read_register; cc->gdb_read_register = alpha_cpu_gdb_read_register;
cc->gdb_write_register = alpha_cpu_gdb_write_register; cc->gdb_write_register = alpha_cpu_gdb_write_register;
#ifndef CONFIG_USER_ONLY #ifdef CONFIG_USER_ONLY
cc->handle_mmu_fault = alpha_cpu_handle_mmu_fault;
#else
cc->do_unassigned_access = alpha_cpu_unassigned_access; cc->do_unassigned_access = alpha_cpu_unassigned_access;
cc->get_phys_page_debug = alpha_cpu_get_phys_page_debug; cc->get_phys_page_debug = alpha_cpu_get_phys_page_debug;
dc->vmsd = &vmstate_alpha_cpu; dc->vmsd = &vmstate_alpha_cpu;


@ -446,9 +446,8 @@ int cpu_alpha_exec(CPUAlphaState *s);
is returned if the signal was handled by the virtual CPU. */ is returned if the signal was handled by the virtual CPU. */
int cpu_alpha_signal_handler(int host_signum, void *pinfo, int cpu_alpha_signal_handler(int host_signum, void *pinfo,
void *puc); void *puc);
int cpu_alpha_handle_mmu_fault (CPUAlphaState *env, uint64_t address, int rw, int alpha_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int rw,
int mmu_idx); int mmu_idx);
#define cpu_handle_mmu_fault cpu_alpha_handle_mmu_fault
void do_restore_state(CPUAlphaState *, uintptr_t retaddr); void do_restore_state(CPUAlphaState *, uintptr_t retaddr);
void QEMU_NORETURN dynamic_excp(CPUAlphaState *, uintptr_t, int, int); void QEMU_NORETURN dynamic_excp(CPUAlphaState *, uintptr_t, int, int);
void QEMU_NORETURN arith_excp(CPUAlphaState *, uintptr_t, int, uint64_t); void QEMU_NORETURN arith_excp(CPUAlphaState *, uintptr_t, int, uint64_t);
@ -498,21 +497,6 @@ static inline void cpu_get_tb_cpu_state(CPUAlphaState *env, target_ulong *pc,
*pflags = flags; *pflags = flags;
} }
static inline bool cpu_has_work(CPUState *cpu)
{
/* Here we are checking to see if the CPU should wake up from HALT.
We will have gotten into this state only for WTINT from PALmode. */
/* ??? I'm not sure how the IPL state works with WTINT to keep a CPU
asleep even if (some) interrupts have been asserted. For now,
assume that if a CPU really wants to stay asleep, it will mask
interrupts at the chipset level, which will prevent these bits
from being set in the first place. */
return cpu->interrupt_request & (CPU_INTERRUPT_HARD
| CPU_INTERRUPT_TIMER
| CPU_INTERRUPT_SMP
| CPU_INTERRUPT_MCHK);
}
#include "exec/exec-all.h" #include "exec/exec-all.h"
#endif /* !defined (__CPU_ALPHA_H__) */ #endif /* !defined (__CPU_ALPHA_H__) */


@ -168,11 +168,13 @@ void helper_store_fpcr(CPUAlphaState *env, uint64_t val)
} }
#if defined(CONFIG_USER_ONLY) #if defined(CONFIG_USER_ONLY)
int cpu_alpha_handle_mmu_fault(CPUAlphaState *env, target_ulong address, int alpha_cpu_handle_mmu_fault(CPUState *cs, vaddr address,
int rw, int mmu_idx) int rw, int mmu_idx)
{ {
env->exception_index = EXCP_MMFAULT; AlphaCPU *cpu = ALPHA_CPU(cs);
env->trap_arg0 = address;
cs->exception_index = EXCP_MMFAULT;
cpu->env.trap_arg0 = address;
return 1; return 1;
} }
#else #else
@ -213,7 +215,7 @@ static int get_physical_address(CPUAlphaState *env, target_ulong addr,
int prot_need, int mmu_idx, int prot_need, int mmu_idx,
target_ulong *pphys, int *pprot) target_ulong *pphys, int *pprot)
{ {
CPUState *cs = ENV_GET_CPU(env); CPUState *cs = CPU(alpha_env_get_cpu(env));
target_long saddr = addr; target_long saddr = addr;
target_ulong phys = 0; target_ulong phys = 0;
target_ulong L1pte, L2pte, L3pte; target_ulong L1pte, L2pte, L3pte;
@ -326,22 +328,24 @@ hwaddr alpha_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
return (fail >= 0 ? -1 : phys); return (fail >= 0 ? -1 : phys);
} }
int cpu_alpha_handle_mmu_fault(CPUAlphaState *env, target_ulong addr, int rw, int alpha_cpu_handle_mmu_fault(CPUState *cs, vaddr addr, int rw,
int mmu_idx) int mmu_idx)
{ {
AlphaCPU *cpu = ALPHA_CPU(cs);
CPUAlphaState *env = &cpu->env;
target_ulong phys; target_ulong phys;
int prot, fail; int prot, fail;
fail = get_physical_address(env, addr, 1 << rw, mmu_idx, &phys, &prot); fail = get_physical_address(env, addr, 1 << rw, mmu_idx, &phys, &prot);
if (unlikely(fail >= 0)) { if (unlikely(fail >= 0)) {
env->exception_index = EXCP_MMFAULT; cs->exception_index = EXCP_MMFAULT;
env->trap_arg0 = addr; env->trap_arg0 = addr;
env->trap_arg1 = fail; env->trap_arg1 = fail;
env->trap_arg2 = (rw == 2 ? -1 : rw); env->trap_arg2 = (rw == 2 ? -1 : rw);
return 1; return 1;
} }
tlb_set_page(env, addr & TARGET_PAGE_MASK, phys & TARGET_PAGE_MASK, tlb_set_page(cs, addr & TARGET_PAGE_MASK, phys & TARGET_PAGE_MASK,
prot, mmu_idx, TARGET_PAGE_SIZE); prot, mmu_idx, TARGET_PAGE_SIZE);
return 0; return 0;
} }
@ -351,7 +355,7 @@ void alpha_cpu_do_interrupt(CPUState *cs)
{ {
AlphaCPU *cpu = ALPHA_CPU(cs); AlphaCPU *cpu = ALPHA_CPU(cs);
CPUAlphaState *env = &cpu->env; CPUAlphaState *env = &cpu->env;
int i = env->exception_index; int i = cs->exception_index;
if (qemu_loglevel_mask(CPU_LOG_INT)) { if (qemu_loglevel_mask(CPU_LOG_INT)) {
static int count; static int count;
@ -402,7 +406,7 @@ void alpha_cpu_do_interrupt(CPUState *cs)
++count, name, env->error_code, env->pc, env->ir[IR_SP]); ++count, name, env->error_code, env->pc, env->ir[IR_SP]);
} }
env->exception_index = -1; cs->exception_index = -1;
#if !defined(CONFIG_USER_ONLY) #if !defined(CONFIG_USER_ONLY)
switch (i) { switch (i) {
@ -448,7 +452,7 @@ void alpha_cpu_do_interrupt(CPUState *cs)
} }
break; break;
default: default:
cpu_abort(env, "Unhandled CPU exception"); cpu_abort(cs, "Unhandled CPU exception");
} }
/* Remember where the exception happened. Emulate real hardware in /* Remember where the exception happened. Emulate real hardware in
@ -504,21 +508,27 @@ void alpha_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
We expect that ENV->PC has already been updated. */ We expect that ENV->PC has already been updated. */
void QEMU_NORETURN helper_excp(CPUAlphaState *env, int excp, int error) void QEMU_NORETURN helper_excp(CPUAlphaState *env, int excp, int error)
{ {
env->exception_index = excp; AlphaCPU *cpu = alpha_env_get_cpu(env);
CPUState *cs = CPU(cpu);
cs->exception_index = excp;
env->error_code = error; env->error_code = error;
cpu_loop_exit(env); cpu_loop_exit(cs);
} }
/* This may be called from any of the helpers to set up EXCEPTION_INDEX. */ /* This may be called from any of the helpers to set up EXCEPTION_INDEX. */
void QEMU_NORETURN dynamic_excp(CPUAlphaState *env, uintptr_t retaddr, void QEMU_NORETURN dynamic_excp(CPUAlphaState *env, uintptr_t retaddr,
int excp, int error) int excp, int error)
{ {
env->exception_index = excp; AlphaCPU *cpu = alpha_env_get_cpu(env);
CPUState *cs = CPU(cpu);
cs->exception_index = excp;
env->error_code = error; env->error_code = error;
if (retaddr) { if (retaddr) {
cpu_restore_state(env, retaddr); cpu_restore_state(cs, retaddr);
} }
cpu_loop_exit(env); cpu_loop_exit(cs);
} }
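helper_excp() and dynamic_excp() show the shape almost every converted helper in this series takes: exception_index moves from env to CPUState, and cpu_loop_exit()/cpu_restore_state() now take the CPUState. A condensed sketch of the pattern, with FooCPU/CPUFooState standing in for any of the targets touched here:

/* Pattern sketch only; FooCPU/CPUFooState stand for any converted target. */
static void QEMU_NORETURN raise_exception(CPUFooState *env, int excp,
                                          uintptr_t retaddr)
{
    CPUState *cs = CPU(foo_env_get_cpu(env));

    cs->exception_index = excp;         /* was env->exception_index */
    if (retaddr) {
        cpu_restore_state(cs, retaddr); /* was cpu_restore_state(env, ...) */
    }
    cpu_loop_exit(cs);                  /* was cpu_loop_exit(env) */
}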
void QEMU_NORETURN arith_excp(CPUAlphaState *env, uintptr_t retaddr, void QEMU_NORETURN arith_excp(CPUAlphaState *env, uintptr_t retaddr,


@ -26,45 +26,45 @@
uint64_t helper_ldl_phys(CPUAlphaState *env, uint64_t p) uint64_t helper_ldl_phys(CPUAlphaState *env, uint64_t p)
{ {
CPUState *cs = ENV_GET_CPU(env); CPUState *cs = CPU(alpha_env_get_cpu(env));
return (int32_t)ldl_phys(cs->as, p); return (int32_t)ldl_phys(cs->as, p);
} }
uint64_t helper_ldq_phys(CPUAlphaState *env, uint64_t p) uint64_t helper_ldq_phys(CPUAlphaState *env, uint64_t p)
{ {
CPUState *cs = ENV_GET_CPU(env); CPUState *cs = CPU(alpha_env_get_cpu(env));
return ldq_phys(cs->as, p); return ldq_phys(cs->as, p);
} }
uint64_t helper_ldl_l_phys(CPUAlphaState *env, uint64_t p) uint64_t helper_ldl_l_phys(CPUAlphaState *env, uint64_t p)
{ {
CPUState *cs = ENV_GET_CPU(env); CPUState *cs = CPU(alpha_env_get_cpu(env));
env->lock_addr = p; env->lock_addr = p;
return env->lock_value = (int32_t)ldl_phys(cs->as, p); return env->lock_value = (int32_t)ldl_phys(cs->as, p);
} }
uint64_t helper_ldq_l_phys(CPUAlphaState *env, uint64_t p) uint64_t helper_ldq_l_phys(CPUAlphaState *env, uint64_t p)
{ {
CPUState *cs = ENV_GET_CPU(env); CPUState *cs = CPU(alpha_env_get_cpu(env));
env->lock_addr = p; env->lock_addr = p;
return env->lock_value = ldq_phys(cs->as, p); return env->lock_value = ldq_phys(cs->as, p);
} }
void helper_stl_phys(CPUAlphaState *env, uint64_t p, uint64_t v) void helper_stl_phys(CPUAlphaState *env, uint64_t p, uint64_t v)
{ {
CPUState *cs = ENV_GET_CPU(env); CPUState *cs = CPU(alpha_env_get_cpu(env));
stl_phys(cs->as, p, v); stl_phys(cs->as, p, v);
} }
void helper_stq_phys(CPUAlphaState *env, uint64_t p, uint64_t v) void helper_stq_phys(CPUAlphaState *env, uint64_t p, uint64_t v)
{ {
CPUState *cs = ENV_GET_CPU(env); CPUState *cs = CPU(alpha_env_get_cpu(env));
stq_phys(cs->as, p, v); stq_phys(cs->as, p, v);
} }
uint64_t helper_stl_c_phys(CPUAlphaState *env, uint64_t p, uint64_t v) uint64_t helper_stl_c_phys(CPUAlphaState *env, uint64_t p, uint64_t v)
{ {
CPUState *cs = ENV_GET_CPU(env); CPUState *cs = CPU(alpha_env_get_cpu(env));
uint64_t ret = 0; uint64_t ret = 0;
if (p == env->lock_addr) { if (p == env->lock_addr) {
@ -81,7 +81,7 @@ uint64_t helper_stl_c_phys(CPUAlphaState *env, uint64_t p, uint64_t v)
uint64_t helper_stq_c_phys(CPUAlphaState *env, uint64_t p, uint64_t v) uint64_t helper_stq_c_phys(CPUAlphaState *env, uint64_t p, uint64_t v)
{ {
CPUState *cs = ENV_GET_CPU(env); CPUState *cs = CPU(alpha_env_get_cpu(env));
uint64_t ret = 0; uint64_t ret = 0;
if (p == env->lock_addr) { if (p == env->lock_addr) {
@ -99,11 +99,13 @@ uint64_t helper_stq_c_phys(CPUAlphaState *env, uint64_t p, uint64_t v)
static void do_unaligned_access(CPUAlphaState *env, target_ulong addr, static void do_unaligned_access(CPUAlphaState *env, target_ulong addr,
int is_write, int is_user, uintptr_t retaddr) int is_write, int is_user, uintptr_t retaddr)
{ {
AlphaCPU *cpu = alpha_env_get_cpu(env);
CPUState *cs = CPU(cpu);
uint64_t pc; uint64_t pc;
uint32_t insn; uint32_t insn;
if (retaddr) { if (retaddr) {
cpu_restore_state(env, retaddr); cpu_restore_state(cs, retaddr);
} }
pc = env->pc; pc = env->pc;
@ -112,9 +114,9 @@ static void do_unaligned_access(CPUAlphaState *env, target_ulong addr,
env->trap_arg0 = addr; env->trap_arg0 = addr;
env->trap_arg1 = insn >> 26; /* opcode */ env->trap_arg1 = insn >> 26; /* opcode */
env->trap_arg2 = (insn >> 21) & 31; /* dest regno */ env->trap_arg2 = (insn >> 21) & 31; /* dest regno */
env->exception_index = EXCP_UNALIGN; cs->exception_index = EXCP_UNALIGN;
env->error_code = 0; env->error_code = 0;
cpu_loop_exit(env); cpu_loop_exit(cs);
} }
void alpha_cpu_unassigned_access(CPUState *cs, hwaddr addr, void alpha_cpu_unassigned_access(CPUState *cs, hwaddr addr,
@ -150,18 +152,18 @@ void alpha_cpu_unassigned_access(CPUState *cs, hwaddr addr,
NULL, it means that the function was called in C code (i.e. not NULL, it means that the function was called in C code (i.e. not
from generated code or from helper.c) */ from generated code or from helper.c) */
/* XXX: fix it to restore all registers */ /* XXX: fix it to restore all registers */
void tlb_fill(CPUAlphaState *env, target_ulong addr, int is_write, void tlb_fill(CPUState *cs, target_ulong addr, int is_write,
int mmu_idx, uintptr_t retaddr) int mmu_idx, uintptr_t retaddr)
{ {
int ret; int ret;
ret = cpu_alpha_handle_mmu_fault(env, addr, is_write, mmu_idx); ret = alpha_cpu_handle_mmu_fault(cs, addr, is_write, mmu_idx);
if (unlikely(ret != 0)) { if (unlikely(ret != 0)) {
if (retaddr) { if (retaddr) {
cpu_restore_state(env, retaddr); cpu_restore_state(cs, retaddr);
} }
/* Exception index and error code are already set */ /* Exception index and error code are already set */
cpu_loop_exit(env); cpu_loop_exit(cs);
} }
} }
#endif /* CONFIG_USER_ONLY */ #endif /* CONFIG_USER_ONLY */
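The tlb_fill() conversion above is repeated almost verbatim for every softmmu target in this series: the function now receives the CPUState, forwards the fault to the renamed <target>_cpu_handle_mmu_fault(), and only digs out env when it actually needs target state. The shared shape, with foo as a placeholder target name:

/* Pattern sketch; 'foo' is a placeholder, not a real target in this tree. */
void tlb_fill(CPUState *cs, target_ulong addr, int is_write,
              int mmu_idx, uintptr_t retaddr)
{
    int ret = foo_cpu_handle_mmu_fault(cs, addr, is_write, mmu_idx);

    if (unlikely(ret)) {
        if (retaddr) {
            /* Re-sync guest state from the TB before raising the fault. */
            cpu_restore_state(cs, retaddr);
        }
        /* exception_index and error code were set by the fault handler. */
        cpu_loop_exit(cs);
    }
}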


@ -64,12 +64,12 @@ void helper_call_pal(CPUAlphaState *env, uint64_t pc, uint64_t entry_ofs)
void helper_tbia(CPUAlphaState *env) void helper_tbia(CPUAlphaState *env)
{ {
tlb_flush(env, 1); tlb_flush(CPU(alpha_env_get_cpu(env)), 1);
} }
void helper_tbis(CPUAlphaState *env, uint64_t p) void helper_tbis(CPUAlphaState *env, uint64_t p)
{ {
tlb_flush_page(env, p); tlb_flush_page(CPU(alpha_env_get_cpu(env)), p);
} }
void helper_tb_flush(CPUAlphaState *env) void helper_tb_flush(CPUAlphaState *env)


@ -3463,8 +3463,8 @@ static inline void gen_intermediate_code_internal(AlphaCPU *cpu,
gen_tb_start(); gen_tb_start();
do { do {
if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) { if (unlikely(!QTAILQ_EMPTY(&cs->breakpoints))) {
QTAILQ_FOREACH(bp, &env->breakpoints, entry) { QTAILQ_FOREACH(bp, &cs->breakpoints, entry) {
if (bp->pc == ctx.pc) { if (bp->pc == ctx.pc) {
gen_excp(&ctx, EXCP_DEBUG, 0); gen_excp(&ctx, EXCP_DEBUG, 0);
break; break;


@ -127,7 +127,7 @@ static void arm_semi_cb(CPUState *cs, target_ulong ret, target_ulong err)
ARMCPU *cpu = ARM_CPU(cs); ARMCPU *cpu = ARM_CPU(cs);
CPUARMState *env = &cpu->env; CPUARMState *env = &cpu->env;
#ifdef CONFIG_USER_ONLY #ifdef CONFIG_USER_ONLY
TaskState *ts = env->opaque; TaskState *ts = cs->opaque;
#endif #endif
if (ret == (target_ulong)-1) { if (ret == (target_ulong)-1) {
@ -164,7 +164,7 @@ static void arm_semi_flen_cb(CPUState *cs, target_ulong ret, target_ulong err)
cpu_memory_rw_debug(cs, env->regs[13]-64+32, (uint8_t *)&size, 4, 0); cpu_memory_rw_debug(cs, env->regs[13]-64+32, (uint8_t *)&size, 4, 0);
env->regs[0] = be32_to_cpu(size); env->regs[0] = be32_to_cpu(size);
#ifdef CONFIG_USER_ONLY #ifdef CONFIG_USER_ONLY
((TaskState *)env->opaque)->swi_errno = err; ((TaskState *)cs->opaque)->swi_errno = err;
#else #else
syscall_err = err; syscall_err = err;
#endif #endif
@ -183,6 +183,7 @@ static void arm_semi_flen_cb(CPUState *cs, target_ulong ret, target_ulong err)
uint32_t do_arm_semihosting(CPUARMState *env) uint32_t do_arm_semihosting(CPUARMState *env)
{ {
ARMCPU *cpu = arm_env_get_cpu(env); ARMCPU *cpu = arm_env_get_cpu(env);
CPUState *cs = CPU(cpu);
target_ulong args; target_ulong args;
target_ulong arg0, arg1, arg2, arg3; target_ulong arg0, arg1, arg2, arg3;
char * s; char * s;
@ -190,7 +191,7 @@ uint32_t do_arm_semihosting(CPUARMState *env)
uint32_t ret; uint32_t ret;
uint32_t len; uint32_t len;
#ifdef CONFIG_USER_ONLY #ifdef CONFIG_USER_ONLY
TaskState *ts = env->opaque; TaskState *ts = cs->opaque;
#else #else
CPUARMState *ts = env; CPUARMState *ts = env;
#endif #endif
@ -554,7 +555,7 @@ uint32_t do_arm_semihosting(CPUARMState *env)
exit(0); exit(0);
default: default:
fprintf(stderr, "qemu: Unsupported SemiHosting SWI 0x%02x\n", nr); fprintf(stderr, "qemu: Unsupported SemiHosting SWI 0x%02x\n", nr);
cpu_dump_state(CPU(cpu), stderr, fprintf, 0); cpu_dump_state(cs, stderr, fprintf, 0);
abort(); abort();
} }
} }


@ -36,6 +36,12 @@ static void arm_cpu_set_pc(CPUState *cs, vaddr value)
cpu->env.regs[15] = value; cpu->env.regs[15] = value;
} }
static bool arm_cpu_has_work(CPUState *cs)
{
return cs->interrupt_request &
(CPU_INTERRUPT_FIQ | CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXITTB);
}
static void cp_reg_reset(gpointer key, gpointer value, gpointer opaque) static void cp_reg_reset(gpointer key, gpointer value, gpointer opaque)
{ {
/* Reset a single ARMCPRegInfo register */ /* Reset a single ARMCPRegInfo register */
@ -76,7 +82,7 @@ static void arm_cpu_reset(CPUState *s)
acc->parent_reset(s); acc->parent_reset(s);
memset(env, 0, offsetof(CPUARMState, breakpoints)); memset(env, 0, offsetof(CPUARMState, features));
g_hash_table_foreach(cpu->cp_regs, cp_reg_reset, cpu); g_hash_table_foreach(cpu->cp_regs, cp_reg_reset, cpu);
env->vfp.xregs[ARM_VFP_FPSID] = cpu->reset_fpsid; env->vfp.xregs[ARM_VFP_FPSID] = cpu->reset_fpsid;
env->vfp.xregs[ARM_VFP_MVFR0] = cpu->mvfr0; env->vfp.xregs[ARM_VFP_MVFR0] = cpu->mvfr0;
@ -143,7 +149,7 @@ static void arm_cpu_reset(CPUState *s)
&env->vfp.fp_status); &env->vfp.fp_status);
set_float_detect_tininess(float_tininess_before_rounding, set_float_detect_tininess(float_tininess_before_rounding,
&env->vfp.standard_fp_status); &env->vfp.standard_fp_status);
tlb_flush(env, 1); tlb_flush(s, 1);
/* Reset is a state change for some CPUARMState fields which we /* Reset is a state change for some CPUARMState fields which we
* bake assumptions about into translated code, so we need to * bake assumptions about into translated code, so we need to
* tb_flush(). * tb_flush().
@ -1001,12 +1007,15 @@ static void arm_cpu_class_init(ObjectClass *oc, void *data)
cc->reset = arm_cpu_reset; cc->reset = arm_cpu_reset;
cc->class_by_name = arm_cpu_class_by_name; cc->class_by_name = arm_cpu_class_by_name;
cc->has_work = arm_cpu_has_work;
cc->do_interrupt = arm_cpu_do_interrupt; cc->do_interrupt = arm_cpu_do_interrupt;
cc->dump_state = arm_cpu_dump_state; cc->dump_state = arm_cpu_dump_state;
cc->set_pc = arm_cpu_set_pc; cc->set_pc = arm_cpu_set_pc;
cc->gdb_read_register = arm_cpu_gdb_read_register; cc->gdb_read_register = arm_cpu_gdb_read_register;
cc->gdb_write_register = arm_cpu_gdb_write_register; cc->gdb_write_register = arm_cpu_gdb_write_register;
#ifndef CONFIG_USER_ONLY #ifdef CONFIG_USER_ONLY
cc->handle_mmu_fault = arm_cpu_handle_mmu_fault;
#else
cc->get_phys_page_debug = arm_cpu_get_phys_page_debug; cc->get_phys_page_debug = arm_cpu_get_phys_page_debug;
cc->vmsd = &vmstate_arm_cpu; cc->vmsd = &vmstate_arm_cpu;
#endif #endif


@ -339,9 +339,8 @@ static inline bool is_a64(CPUARMState *env)
is returned if the signal was handled by the virtual CPU. */ is returned if the signal was handled by the virtual CPU. */
int cpu_arm_signal_handler(int host_signum, void *pinfo, int cpu_arm_signal_handler(int host_signum, void *pinfo,
void *puc); void *puc);
int cpu_arm_handle_mmu_fault (CPUARMState *env, target_ulong address, int rw, int arm_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int rw,
int mmu_idx); int mmu_idx);
#define cpu_handle_mmu_fault cpu_arm_handle_mmu_fault
/* SCTLR bit meanings. Several bits have been reused in newer /* SCTLR bit meanings. Several bits have been reused in newer
* versions of the architecture; in that case we define constants * versions of the architecture; in that case we define constants
@ -1166,12 +1165,6 @@ static inline void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
*cs_base = 0; *cs_base = 0;
} }
static inline bool cpu_has_work(CPUState *cpu)
{
return cpu->interrupt_request &
(CPU_INTERRUPT_FIQ | CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXITTB);
}
#include "exec/exec-all.h" #include "exec/exec-all.h"
static inline void cpu_pc_from_tb(CPUARMState *env, TranslationBlock *tb) static inline void cpu_pc_from_tb(CPUARMState *env, TranslationBlock *tb)


@ -303,17 +303,21 @@ void init_cpreg_list(ARMCPU *cpu)
static void dacr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) static void dacr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{ {
ARMCPU *cpu = arm_env_get_cpu(env);
env->cp15.c3 = value; env->cp15.c3 = value;
tlb_flush(env, 1); /* Flush TLB as domain not tracked in TLB */ tlb_flush(CPU(cpu), 1); /* Flush TLB as domain not tracked in TLB */
} }
static void fcse_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) static void fcse_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{ {
ARMCPU *cpu = arm_env_get_cpu(env);
if (env->cp15.c13_fcse != value) { if (env->cp15.c13_fcse != value) {
/* Unlike real hardware the qemu TLB uses virtual addresses, /* Unlike real hardware the qemu TLB uses virtual addresses,
* not modified virtual addresses, so this causes a TLB flush. * not modified virtual addresses, so this causes a TLB flush.
*/ */
tlb_flush(env, 1); tlb_flush(CPU(cpu), 1);
env->cp15.c13_fcse = value; env->cp15.c13_fcse = value;
} }
} }
@ -321,12 +325,14 @@ static void fcse_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
static void contextidr_write(CPUARMState *env, const ARMCPRegInfo *ri, static void contextidr_write(CPUARMState *env, const ARMCPRegInfo *ri,
uint64_t value) uint64_t value)
{ {
ARMCPU *cpu = arm_env_get_cpu(env);
if (env->cp15.c13_context != value && !arm_feature(env, ARM_FEATURE_MPU)) { if (env->cp15.c13_context != value && !arm_feature(env, ARM_FEATURE_MPU)) {
/* For VMSA (when not using the LPAE long descriptor page table /* For VMSA (when not using the LPAE long descriptor page table
* format) this register includes the ASID, so do a TLB flush. * format) this register includes the ASID, so do a TLB flush.
* For PMSA it is purely a process ID and no action is needed. * For PMSA it is purely a process ID and no action is needed.
*/ */
tlb_flush(env, 1); tlb_flush(CPU(cpu), 1);
} }
env->cp15.c13_context = value; env->cp15.c13_context = value;
} }
@ -335,28 +341,36 @@ static void tlbiall_write(CPUARMState *env, const ARMCPRegInfo *ri,
uint64_t value) uint64_t value)
{ {
/* Invalidate all (TLBIALL) */ /* Invalidate all (TLBIALL) */
tlb_flush(env, 1); ARMCPU *cpu = arm_env_get_cpu(env);
tlb_flush(CPU(cpu), 1);
} }
static void tlbimva_write(CPUARMState *env, const ARMCPRegInfo *ri, static void tlbimva_write(CPUARMState *env, const ARMCPRegInfo *ri,
uint64_t value) uint64_t value)
{ {
/* Invalidate single TLB entry by MVA and ASID (TLBIMVA) */ /* Invalidate single TLB entry by MVA and ASID (TLBIMVA) */
tlb_flush_page(env, value & TARGET_PAGE_MASK); ARMCPU *cpu = arm_env_get_cpu(env);
tlb_flush_page(CPU(cpu), value & TARGET_PAGE_MASK);
} }
static void tlbiasid_write(CPUARMState *env, const ARMCPRegInfo *ri, static void tlbiasid_write(CPUARMState *env, const ARMCPRegInfo *ri,
uint64_t value) uint64_t value)
{ {
/* Invalidate by ASID (TLBIASID) */ /* Invalidate by ASID (TLBIASID) */
tlb_flush(env, value == 0); ARMCPU *cpu = arm_env_get_cpu(env);
tlb_flush(CPU(cpu), value == 0);
} }
static void tlbimvaa_write(CPUARMState *env, const ARMCPRegInfo *ri, static void tlbimvaa_write(CPUARMState *env, const ARMCPRegInfo *ri,
uint64_t value) uint64_t value)
{ {
/* Invalidate single entry by MVA, all ASIDs (TLBIMVAA) */ /* Invalidate single entry by MVA, all ASIDs (TLBIMVAA) */
tlb_flush_page(env, value & TARGET_PAGE_MASK); ARMCPU *cpu = arm_env_get_cpu(env);
tlb_flush_page(CPU(cpu), value & TARGET_PAGE_MASK);
} }
static const ARMCPRegInfo cp_reginfo[] = { static const ARMCPRegInfo cp_reginfo[] = {
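Each of these write hooks only receives the CPUARMState, so the series inserts arm_env_get_cpu() to recover the owning ARMCPU before calling the CPUState-based tlb_flush()/tlb_flush_page(). The env-to-CPU accessors are just container_of() over the embedded env field; a standalone toy version of the idiom (all names invented for illustration):

#include <stdio.h>
#include <stddef.h>

/* container_of(): recover the enclosing struct from a pointer to one member. */
#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

typedef struct ToyEnv { int pc; } ToyEnv;

typedef struct ToyCPU {
    int cpu_index;
    ToyEnv env;          /* embedded "env", like CPUARMState inside ARMCPU */
} ToyCPU;

static ToyCPU *toy_env_get_cpu(ToyEnv *env)
{
    return container_of(env, ToyCPU, env);
}

int main(void)
{
    ToyCPU cpu = { .cpu_index = 3, .env = { .pc = 0x100 } };
    ToyEnv *env = &cpu.env;

    /* A helper that only has env can still reach the CPU object. */
    printf("cpu_index=%d pc=%#x\n", toy_env_get_cpu(env)->cpu_index, env->pc);
    return 0;
}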
@ -1348,11 +1362,13 @@ static void vmsa_ttbcr_raw_write(CPUARMState *env, const ARMCPRegInfo *ri,
static void vmsa_ttbcr_write(CPUARMState *env, const ARMCPRegInfo *ri, static void vmsa_ttbcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
uint64_t value) uint64_t value)
{ {
ARMCPU *cpu = arm_env_get_cpu(env);
if (arm_feature(env, ARM_FEATURE_LPAE)) { if (arm_feature(env, ARM_FEATURE_LPAE)) {
/* With LPAE the TTBCR could result in a change of ASID /* With LPAE the TTBCR could result in a change of ASID
* via the TTBCR.A1 bit, so do a TLB flush. * via the TTBCR.A1 bit, so do a TLB flush.
*/ */
tlb_flush(env, 1); tlb_flush(CPU(cpu), 1);
} }
vmsa_ttbcr_raw_write(env, ri, value); vmsa_ttbcr_raw_write(env, ri, value);
} }
@ -1367,8 +1383,10 @@ static void vmsa_ttbcr_reset(CPUARMState *env, const ARMCPRegInfo *ri)
static void vmsa_tcr_el1_write(CPUARMState *env, const ARMCPRegInfo *ri, static void vmsa_tcr_el1_write(CPUARMState *env, const ARMCPRegInfo *ri,
uint64_t value) uint64_t value)
{ {
ARMCPU *cpu = arm_env_get_cpu(env);
/* For AArch64 the A1 bit could result in a change of ASID, so TLB flush. */ /* For AArch64 the A1 bit could result in a change of ASID, so TLB flush. */
tlb_flush(env, 1); tlb_flush(CPU(cpu), 1);
env->cp15.c2_control = value; env->cp15.c2_control = value;
} }
@ -1379,7 +1397,9 @@ static void vmsa_ttbr_write(CPUARMState *env, const ARMCPRegInfo *ri,
* must flush the TLB. * must flush the TLB.
*/ */
if (cpreg_field_is_64bit(ri)) { if (cpreg_field_is_64bit(ri)) {
tlb_flush(env, 1); ARMCPU *cpu = arm_env_get_cpu(env);
tlb_flush(CPU(cpu), 1);
} }
raw_write(env, ri, value); raw_write(env, ri, value);
} }
@ -1686,24 +1706,27 @@ static void tlbi_aa64_va_write(CPUARMState *env, const ARMCPRegInfo *ri,
uint64_t value) uint64_t value)
{ {
/* Invalidate by VA (AArch64 version) */ /* Invalidate by VA (AArch64 version) */
ARMCPU *cpu = arm_env_get_cpu(env);
uint64_t pageaddr = value << 12; uint64_t pageaddr = value << 12;
tlb_flush_page(env, pageaddr); tlb_flush_page(CPU(cpu), pageaddr);
} }
static void tlbi_aa64_vaa_write(CPUARMState *env, const ARMCPRegInfo *ri, static void tlbi_aa64_vaa_write(CPUARMState *env, const ARMCPRegInfo *ri,
uint64_t value) uint64_t value)
{ {
/* Invalidate by VA, all ASIDs (AArch64 version) */ /* Invalidate by VA, all ASIDs (AArch64 version) */
ARMCPU *cpu = arm_env_get_cpu(env);
uint64_t pageaddr = value << 12; uint64_t pageaddr = value << 12;
tlb_flush_page(env, pageaddr); tlb_flush_page(CPU(cpu), pageaddr);
} }
static void tlbi_aa64_asid_write(CPUARMState *env, const ARMCPRegInfo *ri, static void tlbi_aa64_asid_write(CPUARMState *env, const ARMCPRegInfo *ri,
uint64_t value) uint64_t value)
{ {
/* Invalidate by ASID (AArch64 version) */ /* Invalidate by ASID (AArch64 version) */
ARMCPU *cpu = arm_env_get_cpu(env);
int asid = extract64(value, 48, 16); int asid = extract64(value, 48, 16);
tlb_flush(env, asid == 0); tlb_flush(CPU(cpu), asid == 0);
} }
static const ARMCPRegInfo v8_cp_reginfo[] = { static const ARMCPRegInfo v8_cp_reginfo[] = {
@ -1829,10 +1852,12 @@ static const ARMCPRegInfo v8_cp_reginfo[] = {
static void sctlr_write(CPUARMState *env, const ARMCPRegInfo *ri, static void sctlr_write(CPUARMState *env, const ARMCPRegInfo *ri,
uint64_t value) uint64_t value)
{ {
ARMCPU *cpu = arm_env_get_cpu(env);
env->cp15.c1_sys = value; env->cp15.c1_sys = value;
/* ??? Lots of these bits are not implemented. */ /* ??? Lots of these bits are not implemented. */
/* This may enable/disable the MMU, so do a TLB flush. */ /* This may enable/disable the MMU, so do a TLB flush. */
tlb_flush(env, 1); tlb_flush(CPU(cpu), 1);
} }
static CPAccessResult ctr_el0_access(CPUARMState *env, const ARMCPRegInfo *ri) static CPAccessResult ctr_el0_access(CPUARMState *env, const ARMCPRegInfo *ri)
@ -2186,19 +2211,7 @@ void register_cp_regs_for_features(ARMCPU *cpu)
ARMCPU *cpu_arm_init(const char *cpu_model) ARMCPU *cpu_arm_init(const char *cpu_model)
{ {
ARMCPU *cpu; return ARM_CPU(cpu_generic_init(TYPE_ARM_CPU, cpu_model));
ObjectClass *oc;
oc = cpu_class_by_name(TYPE_ARM_CPU, cpu_model);
if (!oc) {
return NULL;
}
cpu = ARM_CPU(object_new(object_class_get_name(oc)));
/* TODO this should be set centrally, once possible */
object_property_set_bool(OBJECT(cpu), true, "realized", NULL);
return cpu;
} }
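cpu_arm_init() now delegates to cpu_generic_init(), which bundles the class lookup, feature parsing and realize step that the deleted code open-coded (cpu_cris_init() below gets the same treatment). The helper itself lies outside the hunks shown here; a hedged sketch of what it plausibly looks like, built only from the pieces visible in this series:

/* Sketch only -- the real helper lives in qom/cpu.c, outside these hunks. */
CPUState *cpu_generic_init(const char *typename, const char *cpu_model)
{
    Error *err = NULL;
    char *str = g_strdup(cpu_model);
    char *name = strtok(str, ",");        /* model name up to the first ',' */
    char *featurestr = strtok(NULL, ","); /* start of the feature list, if any */
    ObjectClass *oc = cpu_class_by_name(typename, name);
    CPUState *cpu = NULL;

    if (oc != NULL) {
        cpu = CPU(object_new(object_class_get_name(oc)));
        CPU_GET_CLASS(cpu)->parse_features(cpu, featurestr, &err);
        if (err == NULL) {
            object_property_set_bool(OBJECT(cpu), true, "realized", &err);
        }
        if (err != NULL) {
            error_report("%s", error_get_pretty(err));
            error_free(err);
            object_unref(OBJECT(cpu));
            cpu = NULL;
        }
    }
    g_free(str);
    return cpu;
}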
void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu) void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu)
@ -2660,21 +2673,21 @@ uint32_t HELPER(rbit)(uint32_t x)
#if defined(CONFIG_USER_ONLY) #if defined(CONFIG_USER_ONLY)
void arm_cpu_do_interrupt(CPUState *cs) void arm_cpu_do_interrupt(CPUState *cs)
{
cs->exception_index = -1;
}
int arm_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int rw,
int mmu_idx)
{ {
ARMCPU *cpu = ARM_CPU(cs); ARMCPU *cpu = ARM_CPU(cs);
CPUARMState *env = &cpu->env; CPUARMState *env = &cpu->env;
env->exception_index = -1;
}
int cpu_arm_handle_mmu_fault (CPUARMState *env, target_ulong address, int rw,
int mmu_idx)
{
if (rw == 2) { if (rw == 2) {
env->exception_index = EXCP_PREFETCH_ABORT; cs->exception_index = EXCP_PREFETCH_ABORT;
env->cp15.c6_insn = address; env->cp15.c6_insn = address;
} else { } else {
env->exception_index = EXCP_DATA_ABORT; cs->exception_index = EXCP_DATA_ABORT;
env->cp15.c6_data = address; env->cp15.c6_data = address;
} }
return 1; return 1;
@ -2683,29 +2696,40 @@ int cpu_arm_handle_mmu_fault (CPUARMState *env, target_ulong address, int rw,
/* These should probably raise undefined insn exceptions. */ /* These should probably raise undefined insn exceptions. */
void HELPER(v7m_msr)(CPUARMState *env, uint32_t reg, uint32_t val) void HELPER(v7m_msr)(CPUARMState *env, uint32_t reg, uint32_t val)
{ {
cpu_abort(env, "v7m_mrs %d\n", reg); ARMCPU *cpu = arm_env_get_cpu(env);
cpu_abort(CPU(cpu), "v7m_msr %d\n", reg);
} }
uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg) uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg)
{ {
cpu_abort(env, "v7m_mrs %d\n", reg); ARMCPU *cpu = arm_env_get_cpu(env);
cpu_abort(CPU(cpu), "v7m_mrs %d\n", reg);
return 0; return 0;
} }
void switch_mode(CPUARMState *env, int mode) void switch_mode(CPUARMState *env, int mode)
{ {
if (mode != ARM_CPU_MODE_USR) ARMCPU *cpu = arm_env_get_cpu(env);
cpu_abort(env, "Tried to switch out of user mode\n");
if (mode != ARM_CPU_MODE_USR) {
cpu_abort(CPU(cpu), "Tried to switch out of user mode\n");
}
} }
void HELPER(set_r13_banked)(CPUARMState *env, uint32_t mode, uint32_t val) void HELPER(set_r13_banked)(CPUARMState *env, uint32_t mode, uint32_t val)
{ {
cpu_abort(env, "banked r13 write\n"); ARMCPU *cpu = arm_env_get_cpu(env);
cpu_abort(CPU(cpu), "banked r13 write\n");
} }
uint32_t HELPER(get_r13_banked)(CPUARMState *env, uint32_t mode) uint32_t HELPER(get_r13_banked)(CPUARMState *env, uint32_t mode)
{ {
cpu_abort(env, "banked r13 read\n"); ARMCPU *cpu = arm_env_get_cpu(env);
cpu_abort(CPU(cpu), "banked r13 read\n");
return 0; return 0;
} }
@ -2762,15 +2786,17 @@ void switch_mode(CPUARMState *env, int mode)
static void v7m_push(CPUARMState *env, uint32_t val) static void v7m_push(CPUARMState *env, uint32_t val)
{ {
CPUState *cs = ENV_GET_CPU(env); CPUState *cs = CPU(arm_env_get_cpu(env));
env->regs[13] -= 4; env->regs[13] -= 4;
stl_phys(cs->as, env->regs[13], val); stl_phys(cs->as, env->regs[13], val);
} }
static uint32_t v7m_pop(CPUARMState *env) static uint32_t v7m_pop(CPUARMState *env)
{ {
CPUState *cs = ENV_GET_CPU(env); CPUState *cs = CPU(arm_env_get_cpu(env));
uint32_t val; uint32_t val;
val = ldl_phys(cs->as, env->regs[13]); val = ldl_phys(cs->as, env->regs[13]);
env->regs[13] += 4; env->regs[13] += 4;
return val; return val;
@ -2858,7 +2884,7 @@ void arm_v7m_cpu_do_interrupt(CPUState *cs)
uint32_t lr; uint32_t lr;
uint32_t addr; uint32_t addr;
arm_log_exception(env->exception_index); arm_log_exception(cs->exception_index);
lr = 0xfffffff1; lr = 0xfffffff1;
if (env->v7m.current_sp) if (env->v7m.current_sp)
@ -2870,7 +2896,7 @@ void arm_v7m_cpu_do_interrupt(CPUState *cs)
handle it. */ handle it. */
/* TODO: Need to escalate if the current priority is higher than the /* TODO: Need to escalate if the current priority is higher than the
one we're raising. */ one we're raising. */
switch (env->exception_index) { switch (cs->exception_index) {
case EXCP_UDEF: case EXCP_UDEF:
armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE); armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE);
return; return;
@ -2902,7 +2928,7 @@ void arm_v7m_cpu_do_interrupt(CPUState *cs)
do_v7m_exception_exit(env); do_v7m_exception_exit(env);
return; return;
default: default:
cpu_abort(env, "Unhandled exception 0x%x\n", env->exception_index); cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
return; /* Never happens. Keep compiler happy. */ return; /* Never happens. Keep compiler happy. */
} }
@ -2943,10 +2969,10 @@ void arm_cpu_do_interrupt(CPUState *cs)
assert(!IS_M(env)); assert(!IS_M(env));
arm_log_exception(env->exception_index); arm_log_exception(cs->exception_index);
/* TODO: Vectored interrupt controller. */ /* TODO: Vectored interrupt controller. */
switch (env->exception_index) { switch (cs->exception_index) {
case EXCP_UDEF: case EXCP_UDEF:
new_mode = ARM_CPU_MODE_UND; new_mode = ARM_CPU_MODE_UND;
addr = 0x04; addr = 0x04;
@ -3027,7 +3053,7 @@ void arm_cpu_do_interrupt(CPUState *cs)
offset = 4; offset = 4;
break; break;
default: default:
cpu_abort(env, "Unhandled exception 0x%x\n", env->exception_index); cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
return; /* Never happens. Keep compiler happy. */ return; /* Never happens. Keep compiler happy. */
} }
/* High vectors. */ /* High vectors. */
@ -3134,7 +3160,7 @@ static int get_phys_addr_v5(CPUARMState *env, uint32_t address, int access_type,
int is_user, hwaddr *phys_ptr, int is_user, hwaddr *phys_ptr,
int *prot, target_ulong *page_size) int *prot, target_ulong *page_size)
{ {
CPUState *cs = ENV_GET_CPU(env); CPUState *cs = CPU(arm_env_get_cpu(env));
int code; int code;
uint32_t table; uint32_t table;
uint32_t desc; uint32_t desc;
@ -3230,7 +3256,7 @@ static int get_phys_addr_v6(CPUARMState *env, uint32_t address, int access_type,
int is_user, hwaddr *phys_ptr, int is_user, hwaddr *phys_ptr,
int *prot, target_ulong *page_size) int *prot, target_ulong *page_size)
{ {
CPUState *cs = ENV_GET_CPU(env); CPUState *cs = CPU(arm_env_get_cpu(env));
int code; int code;
uint32_t table; uint32_t table;
uint32_t desc; uint32_t desc;
@ -3353,7 +3379,7 @@ static int get_phys_addr_lpae(CPUARMState *env, uint32_t address,
hwaddr *phys_ptr, int *prot, hwaddr *phys_ptr, int *prot,
target_ulong *page_size_ptr) target_ulong *page_size_ptr)
{ {
CPUState *cs = ENV_GET_CPU(env); CPUState *cs = CPU(arm_env_get_cpu(env));
/* Read an LPAE long-descriptor translation table. */ /* Read an LPAE long-descriptor translation table. */
MMUFaultType fault_type = translation_fault; MMUFaultType fault_type = translation_fault;
uint32_t level = 1; uint32_t level = 1;
@ -3633,9 +3659,11 @@ static inline int get_phys_addr(CPUARMState *env, uint32_t address,
} }
} }
int cpu_arm_handle_mmu_fault (CPUARMState *env, target_ulong address, int arm_cpu_handle_mmu_fault(CPUState *cs, vaddr address,
int access_type, int mmu_idx) int access_type, int mmu_idx)
{ {
ARMCPU *cpu = ARM_CPU(cs);
CPUARMState *env = &cpu->env;
hwaddr phys_addr; hwaddr phys_addr;
target_ulong page_size; target_ulong page_size;
int prot; int prot;
@ -3648,20 +3676,20 @@ int cpu_arm_handle_mmu_fault (CPUARMState *env, target_ulong address,
/* Map a single [sub]page. */ /* Map a single [sub]page. */
phys_addr &= ~(hwaddr)0x3ff; phys_addr &= ~(hwaddr)0x3ff;
address &= ~(uint32_t)0x3ff; address &= ~(uint32_t)0x3ff;
tlb_set_page (env, address, phys_addr, prot, mmu_idx, page_size); tlb_set_page(cs, address, phys_addr, prot, mmu_idx, page_size);
return 0; return 0;
} }
if (access_type == 2) { if (access_type == 2) {
env->cp15.c5_insn = ret; env->cp15.c5_insn = ret;
env->cp15.c6_insn = address; env->cp15.c6_insn = address;
env->exception_index = EXCP_PREFETCH_ABORT; cs->exception_index = EXCP_PREFETCH_ABORT;
} else { } else {
env->cp15.c5_data = ret; env->cp15.c5_data = ret;
if (access_type == 1 && arm_feature(env, ARM_FEATURE_V6)) if (access_type == 1 && arm_feature(env, ARM_FEATURE_V6))
env->cp15.c5_data |= (1 << 11); env->cp15.c5_data |= (1 << 11);
env->cp15.c6_data = address; env->cp15.c6_data = address;
env->exception_index = EXCP_DATA_ABORT; cs->exception_index = EXCP_DATA_ABORT;
} }
return 1; return 1;
} }
@ -3703,6 +3731,8 @@ uint32_t HELPER(get_r13_banked)(CPUARMState *env, uint32_t mode)
uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg) uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg)
{ {
ARMCPU *cpu = arm_env_get_cpu(env);
switch (reg) { switch (reg) {
case 0: /* APSR */ case 0: /* APSR */
return xpsr_read(env) & 0xf8000000; return xpsr_read(env) & 0xf8000000;
@ -3733,13 +3763,15 @@ uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg)
return env->v7m.control; return env->v7m.control;
default: default:
/* ??? For debugging only. */ /* ??? For debugging only. */
cpu_abort(env, "Unimplemented system register read (%d)\n", reg); cpu_abort(CPU(cpu), "Unimplemented system register read (%d)\n", reg);
return 0; return 0;
} }
} }
void HELPER(v7m_msr)(CPUARMState *env, uint32_t reg, uint32_t val) void HELPER(v7m_msr)(CPUARMState *env, uint32_t reg, uint32_t val)
{ {
ARMCPU *cpu = arm_env_get_cpu(env);
switch (reg) { switch (reg) {
case 0: /* APSR */ case 0: /* APSR */
xpsr_write(env, val, 0xf8000000); xpsr_write(env, val, 0xf8000000);
@ -3802,7 +3834,7 @@ void HELPER(v7m_msr)(CPUARMState *env, uint32_t reg, uint32_t val)
break; break;
default: default:
/* ??? For debugging only. */ /* ??? For debugging only. */
cpu_abort(env, "Unimplemented system register write (%d)\n", reg); cpu_abort(CPU(cpu), "Unimplemented system register write (%d)\n", reg);
return; return;
} }
} }


@ -24,8 +24,11 @@
static void raise_exception(CPUARMState *env, int tt) static void raise_exception(CPUARMState *env, int tt)
{ {
env->exception_index = tt; ARMCPU *cpu = arm_env_get_cpu(env);
cpu_loop_exit(env); CPUState *cs = CPU(cpu);
cs->exception_index = tt;
cpu_loop_exit(cs);
} }
uint32_t HELPER(neon_tbl)(CPUARMState *env, uint32_t ireg, uint32_t def, uint32_t HELPER(neon_tbl)(CPUARMState *env, uint32_t ireg, uint32_t def,
@ -69,20 +72,24 @@ uint32_t HELPER(neon_tbl)(CPUARMState *env, uint32_t ireg, uint32_t def,
#include "exec/softmmu_template.h" #include "exec/softmmu_template.h"
/* try to fill the TLB and return an exception if error. If retaddr is /* try to fill the TLB and return an exception if error. If retaddr is
NULL, it means that the function was called in C code (i.e. not * NULL, it means that the function was called in C code (i.e. not
from generated code or from helper.c) */ * from generated code or from helper.c)
void tlb_fill(CPUARMState *env, target_ulong addr, int is_write, int mmu_idx, */
void tlb_fill(CPUState *cs, target_ulong addr, int is_write, int mmu_idx,
uintptr_t retaddr) uintptr_t retaddr)
{ {
int ret; int ret;
ret = cpu_arm_handle_mmu_fault(env, addr, is_write, mmu_idx); ret = arm_cpu_handle_mmu_fault(cs, addr, is_write, mmu_idx);
if (unlikely(ret)) { if (unlikely(ret)) {
ARMCPU *cpu = ARM_CPU(cs);
CPUARMState *env = &cpu->env;
if (retaddr) { if (retaddr) {
/* now we have a real cpu fault */ /* now we have a real cpu fault */
cpu_restore_state(env, retaddr); cpu_restore_state(cs, retaddr);
} }
raise_exception(env, env->exception_index); raise_exception(env, cs->exception_index);
} }
} }
#endif #endif
@ -220,24 +227,28 @@ void HELPER(wfi)(CPUARMState *env)
{ {
CPUState *cs = CPU(arm_env_get_cpu(env)); CPUState *cs = CPU(arm_env_get_cpu(env));
env->exception_index = EXCP_HLT; cs->exception_index = EXCP_HLT;
cs->halted = 1; cs->halted = 1;
cpu_loop_exit(env); cpu_loop_exit(cs);
} }
void HELPER(wfe)(CPUARMState *env) void HELPER(wfe)(CPUARMState *env)
{ {
CPUState *cs = CPU(arm_env_get_cpu(env));
/* Don't actually halt the CPU, just yield back to top /* Don't actually halt the CPU, just yield back to top
* level loop * level loop
*/ */
env->exception_index = EXCP_YIELD; cs->exception_index = EXCP_YIELD;
cpu_loop_exit(env); cpu_loop_exit(cs);
} }
void HELPER(exception)(CPUARMState *env, uint32_t excp) void HELPER(exception)(CPUARMState *env, uint32_t excp)
{ {
env->exception_index = excp; CPUState *cs = CPU(arm_env_get_cpu(env));
cpu_loop_exit(env);
cs->exception_index = excp;
cpu_loop_exit(cs);
} }
uint32_t HELPER(cpsr_read)(CPUARMState *env) uint32_t HELPER(cpsr_read)(CPUARMState *env)


@ -9061,8 +9061,8 @@ void gen_intermediate_code_internal_a64(ARMCPU *cpu,
tcg_clear_temp_count(); tcg_clear_temp_count();
do { do {
if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) { if (unlikely(!QTAILQ_EMPTY(&cs->breakpoints))) {
QTAILQ_FOREACH(bp, &env->breakpoints, entry) { QTAILQ_FOREACH(bp, &cs->breakpoints, entry) {
if (bp->pc == dc->pc) { if (bp->pc == dc->pc) {
gen_exception_insn(dc, 0, EXCP_DEBUG); gen_exception_insn(dc, 0, EXCP_DEBUG);
/* Advance PC so that clearing the breakpoint will /* Advance PC so that clearing the breakpoint will


@ -10733,8 +10733,8 @@ static inline void gen_intermediate_code_internal(ARMCPU *cpu,
} }
#endif #endif
if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) { if (unlikely(!QTAILQ_EMPTY(&cs->breakpoints))) {
QTAILQ_FOREACH(bp, &env->breakpoints, entry) { QTAILQ_FOREACH(bp, &cs->breakpoints, entry) {
if (bp->pc == dc->pc) { if (bp->pc == dc->pc) {
gen_exception_insn(dc, 0, EXCP_DEBUG); gen_exception_insn(dc, 0, EXCP_DEBUG);
/* Advance PC so that clearing the breakpoint will /* Advance PC so that clearing the breakpoint will
@ -10803,7 +10803,7 @@ static inline void gen_intermediate_code_internal(ARMCPU *cpu,
if (dc->condjmp) { if (dc->condjmp) {
/* FIXME: This can theoretically happen with self-modifying /* FIXME: This can theoretically happen with self-modifying
code. */ code. */
cpu_abort(env, "IO on conditional branch instruction"); cpu_abort(cs, "IO on conditional branch instruction");
} }
gen_io_end(); gen_io_end();
} }


@ -33,6 +33,11 @@ static void cris_cpu_set_pc(CPUState *cs, vaddr value)
cpu->env.pc = value; cpu->env.pc = value;
} }
static bool cris_cpu_has_work(CPUState *cs)
{
return cs->interrupt_request & (CPU_INTERRUPT_HARD | CPU_INTERRUPT_NMI);
}
/* CPUClass::reset() */ /* CPUClass::reset() */
static void cris_cpu_reset(CPUState *s) static void cris_cpu_reset(CPUState *s)
{ {
@ -44,9 +49,9 @@ static void cris_cpu_reset(CPUState *s)
ccc->parent_reset(s); ccc->parent_reset(s);
vr = env->pregs[PR_VR]; vr = env->pregs[PR_VR];
memset(env, 0, offsetof(CPUCRISState, breakpoints)); memset(env, 0, offsetof(CPUCRISState, load_info));
env->pregs[PR_VR] = vr; env->pregs[PR_VR] = vr;
tlb_flush(env, 1); tlb_flush(s, 1);
#if defined(CONFIG_USER_ONLY) #if defined(CONFIG_USER_ONLY)
/* start in user mode with interrupts enabled. */ /* start in user mode with interrupts enabled. */
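cris_cpu_reset() (like arm_cpu_reset() earlier) now clears only the bytes in front of a marker field, so everything from load_info onwards survives the reset; the marker moved because the breakpoint/watchpoint queues left env. A standalone toy of the memset()/offsetof() trick (struct and field names invented):

#include <stdio.h>
#include <string.h>
#include <stddef.h>

typedef struct ToyState {
    /* Everything before 'preserved_marker' is wiped on reset. */
    int pc;
    int regs[4];
    /* Fields from here on survive a reset. */
    int preserved_marker;
    void *load_info;
} ToyState;

static void toy_reset(ToyState *s)
{
    /* Clear only the bytes that come before the marker field. */
    memset(s, 0, offsetof(ToyState, preserved_marker));
}

int main(void)
{
    ToyState s = { .pc = 0x40, .preserved_marker = 7 };
    s.load_info = &s;

    toy_reset(&s);
    printf("pc=%d marker=%d load_info kept=%s\n",
           s.pc, s.preserved_marker, s.load_info ? "yes" : "no");
    return 0;
}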
@ -84,18 +89,7 @@ static ObjectClass *cris_cpu_class_by_name(const char *cpu_model)
CRISCPU *cpu_cris_init(const char *cpu_model) CRISCPU *cpu_cris_init(const char *cpu_model)
{ {
CRISCPU *cpu; return CRIS_CPU(cpu_generic_init(TYPE_CRIS_CPU, cpu_model));
ObjectClass *oc;
oc = cris_cpu_class_by_name(cpu_model);
if (oc == NULL) {
return NULL;
}
cpu = CRIS_CPU(object_new(object_class_get_name(oc)));
object_property_set_bool(OBJECT(cpu), true, "realized", NULL);
return cpu;
} }
/* Sort alphabetically by VR. */ /* Sort alphabetically by VR. */
@ -283,12 +277,15 @@ static void cris_cpu_class_init(ObjectClass *oc, void *data)
cc->reset = cris_cpu_reset; cc->reset = cris_cpu_reset;
cc->class_by_name = cris_cpu_class_by_name; cc->class_by_name = cris_cpu_class_by_name;
cc->has_work = cris_cpu_has_work;
cc->do_interrupt = cris_cpu_do_interrupt; cc->do_interrupt = cris_cpu_do_interrupt;
cc->dump_state = cris_cpu_dump_state; cc->dump_state = cris_cpu_dump_state;
cc->set_pc = cris_cpu_set_pc; cc->set_pc = cris_cpu_set_pc;
cc->gdb_read_register = cris_cpu_gdb_read_register; cc->gdb_read_register = cris_cpu_gdb_read_register;
cc->gdb_write_register = cris_cpu_gdb_write_register; cc->gdb_write_register = cris_cpu_gdb_write_register;
#ifndef CONFIG_USER_ONLY #ifdef CONFIG_USER_ONLY
cc->handle_mmu_fault = cris_cpu_handle_mmu_fault;
#else
cc->get_phys_page_debug = cris_cpu_get_phys_page_debug; cc->get_phys_page_debug = cris_cpu_get_phys_page_debug;
#endif #endif


@ -171,8 +171,8 @@ typedef struct CPUCRISState {
CPU_COMMON CPU_COMMON
/* Members after CPU_COMMON are preserved across resets. */ /* Members from load_info on are preserved across resets. */
void *load_info; void *load_info;
} CPUCRISState; } CPUCRISState;
#include "cpu-qom.h" #include "cpu-qom.h"
@ -247,9 +247,8 @@ static inline int cpu_mmu_index (CPUCRISState *env)
return !!(env->pregs[PR_CCS] & U_FLAG); return !!(env->pregs[PR_CCS] & U_FLAG);
} }
int cpu_cris_handle_mmu_fault(CPUCRISState *env, target_ulong address, int rw, int cris_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int rw,
int mmu_idx); int mmu_idx);
#define cpu_handle_mmu_fault cpu_cris_handle_mmu_fault
/* Support function regs. */ /* Support function regs. */
#define SFR_RW_GC_CFG 0][0 #define SFR_RW_GC_CFG 0][0
@ -276,11 +275,6 @@ static inline void cpu_get_tb_cpu_state(CPUCRISState *env, target_ulong *pc,
#define cpu_list cris_cpu_list #define cpu_list cris_cpu_list
void cris_cpu_list(FILE *f, fprintf_function cpu_fprintf); void cris_cpu_list(FILE *f, fprintf_function cpu_fprintf);
static inline bool cpu_has_work(CPUState *cpu)
{
return cpu->interrupt_request & (CPU_INTERRUPT_HARD | CPU_INTERRUPT_NMI);
}
#include "exec/exec-all.h" #include "exec/exec-all.h"
#endif #endif


@ -41,7 +41,7 @@ void cris_cpu_do_interrupt(CPUState *cs)
CRISCPU *cpu = CRIS_CPU(cs); CRISCPU *cpu = CRIS_CPU(cs);
CPUCRISState *env = &cpu->env; CPUCRISState *env = &cpu->env;
env->exception_index = -1; cs->exception_index = -1;
env->pregs[PR_ERP] = env->pc; env->pregs[PR_ERP] = env->pc;
} }
@ -50,14 +50,14 @@ void crisv10_cpu_do_interrupt(CPUState *cs)
cris_cpu_do_interrupt(cs); cris_cpu_do_interrupt(cs);
} }
int cpu_cris_handle_mmu_fault(CPUCRISState * env, target_ulong address, int rw, int cris_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int rw,
int mmu_idx) int mmu_idx)
{ {
CRISCPU *cpu = cris_env_get_cpu(env); CRISCPU *cpu = CRIS_CPU(cs);
env->exception_index = 0xaa; cs->exception_index = 0xaa;
env->pregs[PR_EDA] = address; cpu->env.pregs[PR_EDA] = address;
cpu_dump_state(CPU(cpu), stderr, fprintf, 0); cpu_dump_state(cs, stderr, fprintf, 0);
return 1; return 1;
} }
@ -73,28 +73,30 @@ static void cris_shift_ccs(CPUCRISState *env)
env->pregs[PR_CCS] = ccs; env->pregs[PR_CCS] = ccs;
} }
int cpu_cris_handle_mmu_fault(CPUCRISState *env, target_ulong address, int rw, int cris_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int rw,
int mmu_idx) int mmu_idx)
{ {
D(CPUState *cpu = CPU(cris_env_get_cpu(env))); CRISCPU *cpu = CRIS_CPU(cs);
CPUCRISState *env = &cpu->env;
struct cris_mmu_result res; struct cris_mmu_result res;
int prot, miss; int prot, miss;
int r = -1; int r = -1;
target_ulong phy; target_ulong phy;
D(printf("%s addr=%x pc=%x rw=%x\n", __func__, address, env->pc, rw)); D(printf("%s addr=%" VADDR_PRIx " pc=%x rw=%x\n",
__func__, address, env->pc, rw));
miss = cris_mmu_translate(&res, env, address & TARGET_PAGE_MASK, miss = cris_mmu_translate(&res, env, address & TARGET_PAGE_MASK,
rw, mmu_idx, 0); rw, mmu_idx, 0);
if (miss) { if (miss) {
if (env->exception_index == EXCP_BUSFAULT) { if (cs->exception_index == EXCP_BUSFAULT) {
cpu_abort(env, cpu_abort(cs,
"CRIS: Illegal recursive bus fault." "CRIS: Illegal recursive bus fault."
"addr=%x rw=%d\n", "addr=%" VADDR_PRIx " rw=%d\n",
address, rw); address, rw);
} }
env->pregs[PR_EDA] = address; env->pregs[PR_EDA] = address;
env->exception_index = EXCP_BUSFAULT; cs->exception_index = EXCP_BUSFAULT;
env->fault_vector = res.bf_vec; env->fault_vector = res.bf_vec;
r = 1; r = 1;
} else { } else {
@ -104,13 +106,13 @@ int cpu_cris_handle_mmu_fault(CPUCRISState *env, target_ulong address, int rw,
*/ */
phy = res.phy & ~0x80000000; phy = res.phy & ~0x80000000;
prot = res.prot; prot = res.prot;
tlb_set_page(env, address & TARGET_PAGE_MASK, phy, tlb_set_page(cs, address & TARGET_PAGE_MASK, phy,
prot, mmu_idx, TARGET_PAGE_SIZE); prot, mmu_idx, TARGET_PAGE_SIZE);
r = 0; r = 0;
} }
if (r > 0) { if (r > 0) {
D_LOG("%s returns %d irqreq=%x addr=%x phy=%x vec=%x pc=%x\n", D_LOG("%s returns %d irqreq=%x addr=%" VADDR_PRIx " phy=%x vec=%x"
__func__, r, cpu->interrupt_request, address, res.phy, " pc=%x\n", __func__, r, cs->interrupt_request, address, res.phy,
res.bf_vec, env->pc); res.bf_vec, env->pc);
} }
return r; return r;
@ -123,16 +125,16 @@ void crisv10_cpu_do_interrupt(CPUState *cs)
int ex_vec = -1; int ex_vec = -1;
D_LOG("exception index=%d interrupt_req=%d\n", D_LOG("exception index=%d interrupt_req=%d\n",
env->exception_index, cs->exception_index,
cs->interrupt_request); cs->interrupt_request);
if (env->dslot) { if (env->dslot) {
/* CRISv10 never takes interrupts while in a delay-slot. */ /* CRISv10 never takes interrupts while in a delay-slot. */
cpu_abort(env, "CRIS: Interrupt on delay-slot\n"); cpu_abort(cs, "CRIS: Interrupt on delay-slot\n");
} }
assert(!(env->pregs[PR_CCS] & PFIX_FLAG)); assert(!(env->pregs[PR_CCS] & PFIX_FLAG));
switch (env->exception_index) { switch (cs->exception_index) {
case EXCP_BREAK: case EXCP_BREAK:
/* These exceptions are genereated by the core itself. /* These exceptions are genereated by the core itself.
ERP should point to the insn following the brk. */ ERP should point to the insn following the brk. */
@ -148,7 +150,7 @@ void crisv10_cpu_do_interrupt(CPUState *cs)
break; break;
case EXCP_BUSFAULT: case EXCP_BUSFAULT:
cpu_abort(env, "Unhandled busfault"); cpu_abort(cs, "Unhandled busfault");
break; break;
default: default:
@ -185,10 +187,10 @@ void cris_cpu_do_interrupt(CPUState *cs)
int ex_vec = -1; int ex_vec = -1;
D_LOG("exception index=%d interrupt_req=%d\n", D_LOG("exception index=%d interrupt_req=%d\n",
env->exception_index, cs->exception_index,
cs->interrupt_request); cs->interrupt_request);
switch (env->exception_index) { switch (cs->exception_index) {
case EXCP_BREAK: case EXCP_BREAK:
/* These exceptions are genereated by the core itself. /* These exceptions are genereated by the core itself.
ERP should point to the insn following the brk. */ ERP should point to the insn following the brk. */
@ -251,7 +253,7 @@ void cris_cpu_do_interrupt(CPUState *cs)
/* Clear the excption_index to avoid spurios hw_aborts for recursive /* Clear the excption_index to avoid spurios hw_aborts for recursive
bus faults. */ bus faults. */
env->exception_index = -1; cs->exception_index = -1;
D_LOG("%s isr=%x vec=%x ccs=%x pid=%d erp=%x\n", D_LOG("%s isr=%x vec=%x ccs=%x pid=%d erp=%x\n",
__func__, env->pc, ex_vec, __func__, env->pc, ex_vec,


@ -290,6 +290,7 @@ static int cris_mmu_translate_page(struct cris_mmu_result *res,
void cris_mmu_flush_pid(CPUCRISState *env, uint32_t pid) void cris_mmu_flush_pid(CPUCRISState *env, uint32_t pid)
{ {
CRISCPU *cpu = cris_env_get_cpu(env);
target_ulong vaddr; target_ulong vaddr;
unsigned int idx; unsigned int idx;
uint32_t lo, hi; uint32_t lo, hi;
@ -315,7 +316,7 @@ void cris_mmu_flush_pid(CPUCRISState *env, uint32_t pid)
vaddr = tlb_vpn << TARGET_PAGE_BITS; vaddr = tlb_vpn << TARGET_PAGE_BITS;
D_LOG("flush pid=%x vaddr=%x\n", D_LOG("flush pid=%x vaddr=%x\n",
pid, vaddr); pid, vaddr);
tlb_flush_page(env, vaddr); tlb_flush_page(CPU(cpu), vaddr);
} }
} }
} }

View file

@ -54,23 +54,25 @@
/* Try to fill the TLB and return an exception if error. If retaddr is /* Try to fill the TLB and return an exception if error. If retaddr is
NULL, it means that the function was called in C code (i.e. not NULL, it means that the function was called in C code (i.e. not
from generated code or from helper.c) */ from generated code or from helper.c) */
void tlb_fill(CPUCRISState *env, target_ulong addr, int is_write, int mmu_idx, void tlb_fill(CPUState *cs, target_ulong addr, int is_write, int mmu_idx,
uintptr_t retaddr) uintptr_t retaddr)
{ {
CRISCPU *cpu = CRIS_CPU(cs);
CPUCRISState *env = &cpu->env;
int ret; int ret;
D_LOG("%s pc=%x tpc=%x ra=%p\n", __func__, D_LOG("%s pc=%x tpc=%x ra=%p\n", __func__,
env->pc, env->pregs[PR_EDA], (void *)retaddr); env->pc, env->pregs[PR_EDA], (void *)retaddr);
ret = cpu_cris_handle_mmu_fault(env, addr, is_write, mmu_idx); ret = cris_cpu_handle_mmu_fault(cs, addr, is_write, mmu_idx);
if (unlikely(ret)) { if (unlikely(ret)) {
if (retaddr) { if (retaddr) {
/* now we have a real cpu fault */ /* now we have a real cpu fault */
if (cpu_restore_state(env, retaddr)) { if (cpu_restore_state(cs, retaddr)) {
/* Evaluate flags after retranslation. */ /* Evaluate flags after retranslation. */
helper_top_evaluate_flags(env); helper_top_evaluate_flags(env);
} }
} }
cpu_loop_exit(env); cpu_loop_exit(cs);
} }
} }
@ -78,8 +80,10 @@ void tlb_fill(CPUCRISState *env, target_ulong addr, int is_write, int mmu_idx,
void helper_raise_exception(CPUCRISState *env, uint32_t index) void helper_raise_exception(CPUCRISState *env, uint32_t index)
{ {
env->exception_index = index; CPUState *cs = CPU(cris_env_get_cpu(env));
cpu_loop_exit(env);
cs->exception_index = index;
cpu_loop_exit(cs);
} }
void helper_tlb_flush_pid(CPUCRISState *env, uint32_t pid) void helper_tlb_flush_pid(CPUCRISState *env, uint32_t pid)
@ -94,8 +98,11 @@ void helper_tlb_flush_pid(CPUCRISState *env, uint32_t pid)
void helper_spc_write(CPUCRISState *env, uint32_t new_spc) void helper_spc_write(CPUCRISState *env, uint32_t new_spc)
{ {
#if !defined(CONFIG_USER_ONLY) #if !defined(CONFIG_USER_ONLY)
tlb_flush_page(env, env->pregs[PR_SPC]); CRISCPU *cpu = cris_env_get_cpu(env);
tlb_flush_page(env, new_spc); CPUState *cs = CPU(cpu);
tlb_flush_page(cs, env->pregs[PR_SPC]);
tlb_flush_page(cs, new_spc);
#endif #endif
} }
@ -110,6 +117,9 @@ void helper_dump(uint32_t a0, uint32_t a1, uint32_t a2)
void helper_movl_sreg_reg(CPUCRISState *env, uint32_t sreg, uint32_t reg) void helper_movl_sreg_reg(CPUCRISState *env, uint32_t sreg, uint32_t reg)
{ {
#if !defined(CONFIG_USER_ONLY)
CRISCPU *cpu = cris_env_get_cpu(env);
#endif
uint32_t srs; uint32_t srs;
srs = env->pregs[PR_SRS]; srs = env->pregs[PR_SRS];
srs &= 3; srs &= 3;
@ -151,7 +161,7 @@ void helper_movl_sreg_reg(CPUCRISState *env, uint32_t sreg, uint32_t reg)
D_LOG("tlb flush vaddr=%x v=%d pc=%x\n", D_LOG("tlb flush vaddr=%x v=%d pc=%x\n",
vaddr, tlb_v, env->pc); vaddr, tlb_v, env->pc);
if (tlb_v) { if (tlb_v) {
tlb_flush_page(env, vaddr); tlb_flush_page(CPU(cpu), vaddr);
} }
} }
} }


@ -74,7 +74,7 @@ static TCGv env_pc;
/* This is the state at translation time. */ /* This is the state at translation time. */
typedef struct DisasContext { typedef struct DisasContext {
CPUCRISState *env; CRISCPU *cpu;
target_ulong pc, ppc; target_ulong pc, ppc;
/* Decoder. */ /* Decoder. */
@ -129,7 +129,7 @@ static void gen_BUG(DisasContext *dc, const char *file, int line)
{ {
printf("BUG: pc=%x %s %d\n", dc->pc, file, line); printf("BUG: pc=%x %s %d\n", dc->pc, file, line);
qemu_log("BUG: pc=%x %s %d\n", dc->pc, file, line); qemu_log("BUG: pc=%x %s %d\n", dc->pc, file, line);
cpu_abort(dc->env, "%s:%d\n", file, line); cpu_abort(CPU(dc->cpu), "%s:%d\n", file, line);
} }
static const char *regnames[] = static const char *regnames[] =
@ -272,7 +272,7 @@ static int cris_fetch(CPUCRISState *env, DisasContext *dc, uint32_t addr,
break; break;
} }
default: default:
cpu_abort(dc->env, "Invalid fetch size %d\n", size); cpu_abort(CPU(dc->cpu), "Invalid fetch size %d\n", size);
break; break;
} }
return r; return r;
@ -1125,7 +1125,7 @@ static inline void cris_prepare_jmp (DisasContext *dc, unsigned int type)
static void gen_load64(DisasContext *dc, TCGv_i64 dst, TCGv addr) static void gen_load64(DisasContext *dc, TCGv_i64 dst, TCGv addr)
{ {
int mem_index = cpu_mmu_index(dc->env); int mem_index = cpu_mmu_index(&dc->cpu->env);
/* If we get a fault on a delayslot we must keep the jmp state in /* If we get a fault on a delayslot we must keep the jmp state in
the cpu-state to be able to re-execute the jmp. */ the cpu-state to be able to re-execute the jmp. */
@ -1139,7 +1139,7 @@ static void gen_load64(DisasContext *dc, TCGv_i64 dst, TCGv addr)
static void gen_load(DisasContext *dc, TCGv dst, TCGv addr, static void gen_load(DisasContext *dc, TCGv dst, TCGv addr,
unsigned int size, int sign) unsigned int size, int sign)
{ {
int mem_index = cpu_mmu_index(dc->env); int mem_index = cpu_mmu_index(&dc->cpu->env);
/* If we get a fault on a delayslot we must keep the jmp state in /* If we get a fault on a delayslot we must keep the jmp state in
the cpu-state to be able to re-execute the jmp. */ the cpu-state to be able to re-execute the jmp. */
@ -1154,7 +1154,7 @@ static void gen_load(DisasContext *dc, TCGv dst, TCGv addr,
static void gen_store (DisasContext *dc, TCGv addr, TCGv val, static void gen_store (DisasContext *dc, TCGv addr, TCGv val,
unsigned int size) unsigned int size)
{ {
int mem_index = cpu_mmu_index(dc->env); int mem_index = cpu_mmu_index(&dc->cpu->env);
/* If we get a fault on a delayslot we must keep the jmp state in /* If we get a fault on a delayslot we must keep the jmp state in
the cpu-state to be able to re-execute the jmp. */ the cpu-state to be able to re-execute the jmp. */
@ -3089,10 +3089,11 @@ static unsigned int crisv32_decoder(CPUCRISState *env, DisasContext *dc)
static void check_breakpoint(CPUCRISState *env, DisasContext *dc) static void check_breakpoint(CPUCRISState *env, DisasContext *dc)
{ {
CPUState *cs = CPU(cris_env_get_cpu(env));
CPUBreakpoint *bp; CPUBreakpoint *bp;
if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) { if (unlikely(!QTAILQ_EMPTY(&cs->breakpoints))) {
QTAILQ_FOREACH(bp, &env->breakpoints, entry) { QTAILQ_FOREACH(bp, &cs->breakpoints, entry) {
if (bp->pc == dc->pc) { if (bp->pc == dc->pc) {
cris_evaluate_flags(dc); cris_evaluate_flags(dc);
tcg_gen_movi_tl(env_pc, dc->pc); tcg_gen_movi_tl(env_pc, dc->pc);
@ -3169,7 +3170,7 @@ gen_intermediate_code_internal(CRISCPU *cpu, TranslationBlock *tb,
* delayslot, like in real hw. * delayslot, like in real hw.
*/ */
pc_start = tb->pc & ~1; pc_start = tb->pc & ~1;
dc->env = env; dc->cpu = cpu;
dc->tb = tb; dc->tb = tb;
gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE; gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;
@ -3390,7 +3391,7 @@ gen_intermediate_code_internal(CRISCPU *cpu, TranslationBlock *tb,
#if !DISAS_CRIS #if !DISAS_CRIS
if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) { if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
log_target_disas(env, pc_start, dc->pc - pc_start, log_target_disas(env, pc_start, dc->pc - pc_start,
dc->env->pregs[PR_VR]); env->pregs[PR_VR]);
qemu_log("\nisize=%d osize=%td\n", qemu_log("\nisize=%d osize=%td\n",
dc->pc - pc_start, tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf); dc->pc - pc_start, tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf);
} }


@ -96,7 +96,7 @@ static void gen_store_v10_conditional(DisasContext *dc, TCGv addr, TCGv val,
static void gen_store_v10(DisasContext *dc, TCGv addr, TCGv val, static void gen_store_v10(DisasContext *dc, TCGv addr, TCGv val,
unsigned int size) unsigned int size)
{ {
int mem_index = cpu_mmu_index(dc->env); int mem_index = cpu_mmu_index(&dc->cpu->env);
/* If we get a fault on a delayslot we must keep the jmp state in /* If we get a fault on a delayslot we must keep the jmp state in
the cpu-state to be able to re-execute the jmp. */ the cpu-state to be able to re-execute the jmp. */
@ -340,7 +340,7 @@ static unsigned int dec10_quick_imm(DisasContext *dc)
default: default:
LOG_DIS("pc=%x mode=%x quickimm %d r%d r%d\n", LOG_DIS("pc=%x mode=%x quickimm %d r%d r%d\n",
dc->pc, dc->mode, dc->opcode, dc->src, dc->dst); dc->pc, dc->mode, dc->opcode, dc->src, dc->dst);
cpu_abort(dc->env, "Unhandled quickimm\n"); cpu_abort(CPU(dc->cpu), "Unhandled quickimm\n");
break; break;
} }
return 2; return 2;
@ -651,7 +651,7 @@ static unsigned int dec10_reg(DisasContext *dc)
case 2: tmp = 1; break; case 2: tmp = 1; break;
case 1: tmp = 0; break; case 1: tmp = 0; break;
default: default:
cpu_abort(dc->env, "Unhandled BIAP"); cpu_abort(CPU(dc->cpu), "Unhandled BIAP");
break; break;
} }
@ -669,7 +669,7 @@ static unsigned int dec10_reg(DisasContext *dc)
default: default:
LOG_DIS("pc=%x reg %d r%d r%d\n", dc->pc, LOG_DIS("pc=%x reg %d r%d r%d\n", dc->pc,
dc->opcode, dc->src, dc->dst); dc->opcode, dc->src, dc->dst);
cpu_abort(dc->env, "Unhandled opcode"); cpu_abort(CPU(dc->cpu), "Unhandled opcode");
break; break;
} }
} else { } else {
@ -745,7 +745,7 @@ static unsigned int dec10_reg(DisasContext *dc)
default: default:
LOG_DIS("pc=%x reg %d r%d r%d\n", dc->pc, LOG_DIS("pc=%x reg %d r%d r%d\n", dc->pc,
dc->opcode, dc->src, dc->dst); dc->opcode, dc->src, dc->dst);
cpu_abort(dc->env, "Unhandled opcode"); cpu_abort(CPU(dc->cpu), "Unhandled opcode");
break; break;
} }
} }
@ -1006,7 +1006,7 @@ static int dec10_bdap_m(CPUCRISState *env, DisasContext *dc, int size)
if (!dc->postinc && (dc->ir & (1 << 11))) { if (!dc->postinc && (dc->ir & (1 << 11))) {
int simm = dc->ir & 0xff; int simm = dc->ir & 0xff;
/* cpu_abort(dc->env, "Unhandled opcode"); */ /* cpu_abort(CPU(dc->cpu), "Unhandled opcode"); */
/* sign extended. */ /* sign extended. */
simm = (int8_t)simm; simm = (int8_t)simm;
@ -1105,7 +1105,7 @@ static unsigned int dec10_ind(CPUCRISState *env, DisasContext *dc)
default: default:
LOG_DIS("pc=%x var-ind.%d %d r%d r%d\n", LOG_DIS("pc=%x var-ind.%d %d r%d r%d\n",
dc->pc, size, dc->opcode, dc->src, dc->dst); dc->pc, size, dc->opcode, dc->src, dc->dst);
cpu_abort(dc->env, "Unhandled opcode"); cpu_abort(CPU(dc->cpu), "Unhandled opcode");
break; break;
} }
return insn_len; return insn_len;
@ -1198,7 +1198,7 @@ static unsigned int dec10_ind(CPUCRISState *env, DisasContext *dc)
break; break;
default: default:
LOG_DIS("ERROR pc=%x opcode=%d\n", dc->pc, dc->opcode); LOG_DIS("ERROR pc=%x opcode=%d\n", dc->pc, dc->opcode);
cpu_abort(dc->env, "Unhandled opcode"); cpu_abort(CPU(dc->cpu), "Unhandled opcode");
break; break;
} }


@ -37,8 +37,18 @@
#define X86_CPU_GET_CLASS(obj) \ #define X86_CPU_GET_CLASS(obj) \
OBJECT_GET_CLASS(X86CPUClass, (obj), TYPE_X86_CPU) OBJECT_GET_CLASS(X86CPUClass, (obj), TYPE_X86_CPU)
/**
* X86CPUDefinition:
*
* CPU model definition data that was not converted to QOM per-subclass
* property defaults yet.
*/
typedef struct X86CPUDefinition X86CPUDefinition;
/** /**
* X86CPUClass: * X86CPUClass:
* @cpu_def: CPU model definition
* @kvm_required: Whether CPU model requires KVM to be enabled.
* @parent_realize: The parent class' realize handler. * @parent_realize: The parent class' realize handler.
* @parent_reset: The parent class' reset handler. * @parent_reset: The parent class' reset handler.
* *
@ -49,6 +59,11 @@ typedef struct X86CPUClass {
CPUClass parent_class; CPUClass parent_class;
/*< public >*/ /*< public >*/
/* Should be eventually replaced by subclass-specific property defaults. */
X86CPUDefinition *cpu_def;
bool kvm_required;
DeviceRealize parent_realize; DeviceRealize parent_realize;
void (*parent_reset)(CPUState *cpu); void (*parent_reset)(CPUState *cpu);
} X86CPUClass; } X86CPUClass;
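The two new class fields are meant to be filled in per model subclass. As a minimal sketch (not part of the patch; "my_model_def" is an invented X86CPUDefinition), a subclass class_init would look roughly like:

    /* Illustrative sketch only -- the real per-model class_init is
     * x86_cpu_cpudef_class_init() further down in cpu.c. */
    static void my_model_cpu_class_init(ObjectClass *oc, void *data)
    {
        X86CPUClass *xcc = X86_CPU_CLASS(oc);

        xcc->cpu_def = &my_model_def;  /* property defaults come from this definition */
        xcc->kvm_required = false;     /* model also usable under TCG */
    }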


@ -358,17 +358,23 @@ typedef struct model_features_t {
FeatureWord feat_word; FeatureWord feat_word;
} model_features_t; } model_features_t;
static uint32_t kvm_default_features = (1 << KVM_FEATURE_CLOCKSOURCE) | /* KVM-specific features that are automatically added to all CPU models
* when KVM is enabled.
*/
static uint32_t kvm_default_features[FEATURE_WORDS] = {
[FEAT_KVM] = (1 << KVM_FEATURE_CLOCKSOURCE) |
(1 << KVM_FEATURE_NOP_IO_DELAY) | (1 << KVM_FEATURE_NOP_IO_DELAY) |
(1 << KVM_FEATURE_CLOCKSOURCE2) | (1 << KVM_FEATURE_CLOCKSOURCE2) |
(1 << KVM_FEATURE_ASYNC_PF) | (1 << KVM_FEATURE_ASYNC_PF) |
(1 << KVM_FEATURE_STEAL_TIME) | (1 << KVM_FEATURE_STEAL_TIME) |
(1 << KVM_FEATURE_PV_EOI) | (1 << KVM_FEATURE_PV_EOI) |
(1 << KVM_FEATURE_CLOCKSOURCE_STABLE_BIT); (1 << KVM_FEATURE_CLOCKSOURCE_STABLE_BIT),
[FEAT_1_ECX] = CPUID_EXT_X2APIC,
};
void disable_kvm_pv_eoi(void) void x86_cpu_compat_disable_kvm_features(FeatureWord w, uint32_t features)
{ {
kvm_default_features &= ~(1UL << KVM_FEATURE_PV_EOI); kvm_default_features[w] &= ~features;
} }
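x86_cpu_compat_disable_kvm_features() generalizes the old disable_kvm_pv_eoi() to any feature word. A hedged sketch of the intended call pattern (the actual machine-compat call sites are not part of this hunk):

    /* Illustrative caller, e.g. from a machine compat hook: stop PV_EOI from
     * being auto-enabled when KVM is in use. */
    x86_cpu_compat_disable_kvm_features(FEAT_KVM, 1 << KVM_FEATURE_PV_EOI);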
void host_cpuid(uint32_t function, uint32_t count, void host_cpuid(uint32_t function, uint32_t count,
@ -484,7 +490,35 @@ static void add_flagname_to_bitmaps(const char *flagname,
} }
} }
typedef struct x86_def_t { /* CPU class name definitions: */
#define X86_CPU_TYPE_SUFFIX "-" TYPE_X86_CPU
#define X86_CPU_TYPE_NAME(name) (name X86_CPU_TYPE_SUFFIX)
/* Return type name for a given CPU model name
* Caller is responsible for freeing the returned string.
*/
static char *x86_cpu_type_name(const char *model_name)
{
return g_strdup_printf(X86_CPU_TYPE_NAME("%s"), model_name);
}
static ObjectClass *x86_cpu_class_by_name(const char *cpu_model)
{
ObjectClass *oc;
char *typename;
if (cpu_model == NULL) {
return NULL;
}
typename = x86_cpu_type_name(cpu_model);
oc = object_class_by_name(typename);
g_free(typename);
return oc;
}
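The helper above just maps a -cpu model name onto a QOM type name. For example (assuming TYPE_X86_CPU expands to the per-target CPU type string):

    char *tn = x86_cpu_type_name("qemu64");   /* "qemu64-" TYPE_X86_CPU */
    ObjectClass *oc = object_class_by_name(tn);
    g_free(tn);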
struct X86CPUDefinition {
const char *name; const char *name;
uint32_t level; uint32_t level;
uint32_t xlevel; uint32_t xlevel;
@ -497,7 +531,7 @@ typedef struct x86_def_t {
FeatureWordArray features; FeatureWordArray features;
char model_id[48]; char model_id[48];
bool cache_info_passthrough; bool cache_info_passthrough;
} x86_def_t; };
#define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE) #define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
#define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \ #define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
@ -547,9 +581,7 @@ typedef struct x86_def_t {
CPUID_7_0_EBX_ERMS, CPUID_7_0_EBX_INVPCID, CPUID_7_0_EBX_RTM, CPUID_7_0_EBX_ERMS, CPUID_7_0_EBX_INVPCID, CPUID_7_0_EBX_RTM,
CPUID_7_0_EBX_RDSEED */ CPUID_7_0_EBX_RDSEED */
/* built-in CPU model definitions static X86CPUDefinition builtin_x86_defs[] = {
*/
static x86_def_t builtin_x86_defs[] = {
{ {
.name = "qemu64", .name = "qemu64",
.level = 4, .level = 4,
@ -1108,7 +1140,7 @@ static x86_def_t builtin_x86_defs[] = {
void x86_cpu_compat_set_features(const char *cpu_model, FeatureWord w, void x86_cpu_compat_set_features(const char *cpu_model, FeatureWord w,
uint32_t feat_add, uint32_t feat_remove) uint32_t feat_add, uint32_t feat_remove)
{ {
x86_def_t *def; X86CPUDefinition *def;
int i; int i;
for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) { for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
def = &builtin_x86_defs[i]; def = &builtin_x86_defs[i];
@ -1119,6 +1151,8 @@ void x86_cpu_compat_set_features(const char *cpu_model, FeatureWord w,
} }
} }
#ifdef CONFIG_KVM
static int cpu_x86_fill_model_id(char *str) static int cpu_x86_fill_model_id(char *str)
{ {
uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0; uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
@ -1134,44 +1168,68 @@ static int cpu_x86_fill_model_id(char *str)
return 0; return 0;
} }
/* Fill a x86_def_t struct with information about the host CPU, and static X86CPUDefinition host_cpudef;
* the CPU features supported by the host hardware + host kernel
/* class_init for the "host" CPU model
* *
* This function may be called only if KVM is enabled. * This function may be called before KVM is initialized.
*/ */
static void kvm_cpu_fill_host(x86_def_t *x86_cpu_def) static void host_x86_cpu_class_init(ObjectClass *oc, void *data)
{ {
KVMState *s = kvm_state; X86CPUClass *xcc = X86_CPU_CLASS(oc);
uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0; uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
xcc->kvm_required = true;
host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
x86_cpu_vendor_words2str(host_cpudef.vendor, ebx, edx, ecx);
host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx);
host_cpudef.family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF);
host_cpudef.model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12);
host_cpudef.stepping = eax & 0x0F;
cpu_x86_fill_model_id(host_cpudef.model_id);
xcc->cpu_def = &host_cpudef;
host_cpudef.cache_info_passthrough = true;
/* level, xlevel, xlevel2, and the feature words are initialized on
* instance_init, because they require KVM to be initialized.
*/
}
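The family/model/stepping decode above follows the standard CPUID leaf 1 EAX layout. A worked example with a hypothetical EAX value of 0x000306C3:

    /* Hypothetical CPUID.1 EAX = 0x000306C3:
     *   family   = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF) = 0x6 + 0x00 = 6
     *   model    = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12) = 0xC | 0x30 = 0x3C
     *   stepping = eax & 0x0F = 3
     */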
static void host_x86_cpu_initfn(Object *obj)
{
X86CPU *cpu = X86_CPU(obj);
CPUX86State *env = &cpu->env;
KVMState *s = kvm_state;
FeatureWord w;
assert(kvm_enabled()); assert(kvm_enabled());
x86_cpu_def->name = "host"; env->cpuid_level = kvm_arch_get_supported_cpuid(s, 0x0, 0, R_EAX);
x86_cpu_def->cache_info_passthrough = true; env->cpuid_xlevel = kvm_arch_get_supported_cpuid(s, 0x80000000, 0, R_EAX);
host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx); env->cpuid_xlevel2 = kvm_arch_get_supported_cpuid(s, 0xC0000000, 0, R_EAX);
x86_cpu_vendor_words2str(x86_cpu_def->vendor, ebx, edx, ecx);
host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx);
x86_cpu_def->family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF);
x86_cpu_def->model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12);
x86_cpu_def->stepping = eax & 0x0F;
x86_cpu_def->level = kvm_arch_get_supported_cpuid(s, 0x0, 0, R_EAX);
x86_cpu_def->xlevel = kvm_arch_get_supported_cpuid(s, 0x80000000, 0, R_EAX);
x86_cpu_def->xlevel2 =
kvm_arch_get_supported_cpuid(s, 0xC0000000, 0, R_EAX);
cpu_x86_fill_model_id(x86_cpu_def->model_id);
FeatureWord w;
for (w = 0; w < FEATURE_WORDS; w++) { for (w = 0; w < FEATURE_WORDS; w++) {
FeatureWordInfo *wi = &feature_word_info[w]; FeatureWordInfo *wi = &feature_word_info[w];
x86_cpu_def->features[w] = env->features[w] =
kvm_arch_get_supported_cpuid(s, wi->cpuid_eax, wi->cpuid_ecx, kvm_arch_get_supported_cpuid(s, wi->cpuid_eax, wi->cpuid_ecx,
wi->cpuid_reg); wi->cpuid_reg);
} }
object_property_set_bool(OBJECT(cpu), true, "pmu", &error_abort);
} }
static const TypeInfo host_x86_cpu_type_info = {
.name = X86_CPU_TYPE_NAME("host"),
.parent = TYPE_X86_CPU,
.instance_init = host_x86_cpu_initfn,
.class_init = host_x86_cpu_class_init,
};
#endif
static int unavailable_host_feature(FeatureWordInfo *f, uint32_t mask) static int unavailable_host_feature(FeatureWordInfo *f, uint32_t mask)
{ {
int i; int i;
@ -1582,32 +1640,6 @@ static PropertyInfo qdev_prop_spinlocks = {
.set = x86_set_hv_spinlocks, .set = x86_set_hv_spinlocks,
}; };
static int cpu_x86_find_by_name(X86CPU *cpu, x86_def_t *x86_cpu_def,
const char *name)
{
x86_def_t *def;
int i;
if (name == NULL) {
return -1;
}
if (kvm_enabled() && strcmp(name, "host") == 0) {
kvm_cpu_fill_host(x86_cpu_def);
object_property_set_bool(OBJECT(cpu), true, "pmu", &error_abort);
return 0;
}
for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
def = &builtin_x86_defs[i];
if (strcmp(name, def->name) == 0) {
memcpy(x86_cpu_def, def, sizeof(*def));
return 0;
}
}
return -1;
}
/* Convert all '_' in a feature string option name to '-', to make feature /* Convert all '_' in a feature string option name to '-', to make feature
* name conform to QOM property naming rule, which uses '-' instead of '_'. * name conform to QOM property naming rule, which uses '-' instead of '_'.
*/ */
@ -1620,8 +1652,10 @@ static inline void feat2prop(char *s)
/* Parse "+feature,-feature,feature=foo" CPU feature string /* Parse "+feature,-feature,feature=foo" CPU feature string
*/ */
static void cpu_x86_parse_featurestr(X86CPU *cpu, char *features, Error **errp) static void x86_cpu_parse_featurestr(CPUState *cs, char *features,
Error **errp)
{ {
X86CPU *cpu = X86_CPU(cs);
char *featurestr; /* Single 'key=value" string being parsed */ char *featurestr; /* Single 'key=value" string being parsed */
/* Features to be added */ /* Features to be added */
FeatureWordArray plus_features = { 0 }; FeatureWordArray plus_features = { 0 };
@ -1629,6 +1663,7 @@ static void cpu_x86_parse_featurestr(X86CPU *cpu, char *features, Error **errp)
FeatureWordArray minus_features = { 0 }; FeatureWordArray minus_features = { 0 };
uint32_t numvalue; uint32_t numvalue;
CPUX86State *env = &cpu->env; CPUX86State *env = &cpu->env;
Error *local_err = NULL;
featurestr = features ? strtok(features, ",") : NULL; featurestr = features ? strtok(features, ",") : NULL;
@ -1647,16 +1682,16 @@ static void cpu_x86_parse_featurestr(X86CPU *cpu, char *features, Error **errp)
numvalue = strtoul(val, &err, 0); numvalue = strtoul(val, &err, 0);
if (!*val || *err) { if (!*val || *err) {
error_setg(errp, "bad numerical value %s", val); error_setg(&local_err, "bad numerical value %s", val);
goto out; goto out;
} }
if (numvalue < 0x80000000) { if (numvalue < 0x80000000) {
fprintf(stderr, "xlevel value shall always be >= 0x80000000" error_report("xlevel value shall always be >= 0x80000000"
", fixup will be removed in future versions\n"); ", fixup will be removed in future versions");
numvalue += 0x80000000; numvalue += 0x80000000;
} }
snprintf(num, sizeof(num), "%" PRIu32, numvalue); snprintf(num, sizeof(num), "%" PRIu32, numvalue);
object_property_parse(OBJECT(cpu), num, featurestr, errp); object_property_parse(OBJECT(cpu), num, featurestr, &local_err);
} else if (!strcmp(featurestr, "tsc-freq")) { } else if (!strcmp(featurestr, "tsc-freq")) {
int64_t tsc_freq; int64_t tsc_freq;
char *err; char *err;
@ -1665,36 +1700,38 @@ static void cpu_x86_parse_featurestr(X86CPU *cpu, char *features, Error **errp)
tsc_freq = strtosz_suffix_unit(val, &err, tsc_freq = strtosz_suffix_unit(val, &err,
STRTOSZ_DEFSUFFIX_B, 1000); STRTOSZ_DEFSUFFIX_B, 1000);
if (tsc_freq < 0 || *err) { if (tsc_freq < 0 || *err) {
error_setg(errp, "bad numerical value %s", val); error_setg(&local_err, "bad numerical value %s", val);
goto out; goto out;
} }
snprintf(num, sizeof(num), "%" PRId64, tsc_freq); snprintf(num, sizeof(num), "%" PRId64, tsc_freq);
object_property_parse(OBJECT(cpu), num, "tsc-frequency", errp); object_property_parse(OBJECT(cpu), num, "tsc-frequency",
&local_err);
} else if (!strcmp(featurestr, "hv-spinlocks")) { } else if (!strcmp(featurestr, "hv-spinlocks")) {
char *err; char *err;
const int min = 0xFFF; const int min = 0xFFF;
char num[32]; char num[32];
numvalue = strtoul(val, &err, 0); numvalue = strtoul(val, &err, 0);
if (!*val || *err) { if (!*val || *err) {
error_setg(errp, "bad numerical value %s", val); error_setg(&local_err, "bad numerical value %s", val);
goto out; goto out;
} }
if (numvalue < min) { if (numvalue < min) {
fprintf(stderr, "hv-spinlocks value shall always be >= 0x%x" error_report("hv-spinlocks value shall always be >= 0x%x"
", fixup will be removed in future versions\n", ", fixup will be removed in future versions",
min); min);
numvalue = min; numvalue = min;
} }
snprintf(num, sizeof(num), "%" PRId32, numvalue); snprintf(num, sizeof(num), "%" PRId32, numvalue);
object_property_parse(OBJECT(cpu), num, featurestr, errp); object_property_parse(OBJECT(cpu), num, featurestr, &local_err);
} else { } else {
object_property_parse(OBJECT(cpu), val, featurestr, errp); object_property_parse(OBJECT(cpu), val, featurestr, &local_err);
} }
} else { } else {
feat2prop(featurestr); feat2prop(featurestr);
object_property_parse(OBJECT(cpu), "on", featurestr, errp); object_property_parse(OBJECT(cpu), "on", featurestr, &local_err);
} }
if (error_is_set(errp)) { if (local_err) {
error_propagate(errp, local_err);
goto out; goto out;
} }
featurestr = strtok(NULL, ","); featurestr = strtok(NULL, ",");
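The renamed parser keeps the established "+feature,-feature,feature=value" syntax. A hedged usage sketch (model and features are arbitrary; the string must be writable because strtok() is used, and "cpu" stands for an already-created X86CPU):

    char *fs = g_strdup("+avx,-sse3,xlevel=0x8000000A,hv-spinlocks=0x1fff");
    Error *err = NULL;
    x86_cpu_parse_featurestr(CPU(cpu), fs, &err);   /* err set on bad input */
    g_free(fs);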
@ -1753,7 +1790,7 @@ static void listflags(char *buf, int bufsize, uint32_t fbits,
/* generate CPU information. */ /* generate CPU information. */
void x86_cpu_list(FILE *f, fprintf_function cpu_fprintf) void x86_cpu_list(FILE *f, fprintf_function cpu_fprintf)
{ {
x86_def_t *def; X86CPUDefinition *def;
char buf[256]; char buf[256];
int i; int i;
@ -1780,7 +1817,7 @@ void x86_cpu_list(FILE *f, fprintf_function cpu_fprintf)
CpuDefinitionInfoList *arch_query_cpu_definitions(Error **errp) CpuDefinitionInfoList *arch_query_cpu_definitions(Error **errp)
{ {
CpuDefinitionInfoList *cpu_list = NULL; CpuDefinitionInfoList *cpu_list = NULL;
x86_def_t *def; X86CPUDefinition *def;
int i; int i;
for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) { for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
@ -1817,17 +1854,13 @@ static void filter_features_for_kvm(X86CPU *cpu)
} }
} }
static void cpu_x86_register(X86CPU *cpu, const char *name, Error **errp) /* Load data from X86CPUDefinition
*/
static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp)
{ {
CPUX86State *env = &cpu->env; CPUX86State *env = &cpu->env;
x86_def_t def1, *def = &def1; const char *vendor;
char host_vendor[CPUID_VENDOR_SZ + 1];
memset(def, 0, sizeof(*def));
if (cpu_x86_find_by_name(cpu, def, name) < 0) {
error_setg(errp, "Unable to find CPU definition: %s", name);
return;
}
object_property_set_int(OBJECT(cpu), def->level, "level", errp); object_property_set_int(OBJECT(cpu), def->level, "level", errp);
object_property_set_int(OBJECT(cpu), def->family, "family", errp); object_property_set_int(OBJECT(cpu), def->family, "family", errp);
@ -1847,10 +1880,14 @@ static void cpu_x86_register(X86CPU *cpu, const char *name, Error **errp)
object_property_set_str(OBJECT(cpu), def->model_id, "model-id", errp); object_property_set_str(OBJECT(cpu), def->model_id, "model-id", errp);
/* Special cases not set in the x86_def_t structs: */ /* Special cases not set in the X86CPUDefinition structs: */
if (kvm_enabled()) { if (kvm_enabled()) {
env->features[FEAT_KVM] |= kvm_default_features; FeatureWord w;
for (w = 0; w < FEATURE_WORDS; w++) {
env->features[w] |= kvm_default_features[w];
}
} }
env->features[FEAT_1_ECX] |= CPUID_EXT_HYPERVISOR; env->features[FEAT_1_ECX] |= CPUID_EXT_HYPERVISOR;
/* sysenter isn't supported in compatibility mode on AMD, /* sysenter isn't supported in compatibility mode on AMD,
@ -1860,8 +1897,7 @@ static void cpu_x86_register(X86CPU *cpu, const char *name, Error **errp)
* KVM's sysenter/syscall emulation in compatibility mode and * KVM's sysenter/syscall emulation in compatibility mode and
* when doing cross vendor migration * when doing cross vendor migration
*/ */
const char *vendor = def->vendor; vendor = def->vendor;
char host_vendor[CPUID_VENDOR_SZ + 1];
if (kvm_enabled()) { if (kvm_enabled()) {
uint32_t ebx = 0, ecx = 0, edx = 0; uint32_t ebx = 0, ecx = 0, edx = 0;
host_cpuid(0, 0, NULL, &ebx, &ecx, &edx); host_cpuid(0, 0, NULL, &ebx, &ecx, &edx);
@ -1877,9 +1913,10 @@ X86CPU *cpu_x86_create(const char *cpu_model, DeviceState *icc_bridge,
Error **errp) Error **errp)
{ {
X86CPU *cpu = NULL; X86CPU *cpu = NULL;
X86CPUClass *xcc;
ObjectClass *oc;
gchar **model_pieces; gchar **model_pieces;
char *name, *features; char *name, *features;
char *typename;
Error *error = NULL; Error *error = NULL;
model_pieces = g_strsplit(cpu_model, ",", 2); model_pieces = g_strsplit(cpu_model, ",", 2);
@ -1890,7 +1927,20 @@ X86CPU *cpu_x86_create(const char *cpu_model, DeviceState *icc_bridge,
name = model_pieces[0]; name = model_pieces[0];
features = model_pieces[1]; features = model_pieces[1];
cpu = X86_CPU(object_new(TYPE_X86_CPU)); oc = x86_cpu_class_by_name(name);
if (oc == NULL) {
error_setg(&error, "Unable to find CPU definition: %s", name);
goto out;
}
xcc = X86_CPU_CLASS(oc);
if (xcc->kvm_required && !kvm_enabled()) {
error_setg(&error, "CPU model '%s' requires KVM", name);
goto out;
}
cpu = X86_CPU(object_new(object_class_get_name(oc)));
#ifndef CONFIG_USER_ONLY #ifndef CONFIG_USER_ONLY
if (icc_bridge == NULL) { if (icc_bridge == NULL) {
error_setg(&error, "Invalid icc-bridge value"); error_setg(&error, "Invalid icc-bridge value");
@ -1900,20 +1950,7 @@ X86CPU *cpu_x86_create(const char *cpu_model, DeviceState *icc_bridge,
object_unref(OBJECT(cpu)); object_unref(OBJECT(cpu));
#endif #endif
cpu_x86_register(cpu, name, &error); x86_cpu_parse_featurestr(CPU(cpu), features, &error);
if (error) {
goto out;
}
/* Emulate per-model subclasses for global properties */
typename = g_strdup_printf("%s-" TYPE_X86_CPU, name);
qdev_prop_set_globals_for_type(DEVICE(cpu), typename, &error);
g_free(typename);
if (error) {
goto out;
}
cpu_x86_parse_featurestr(cpu, features, &error);
if (error) { if (error) {
goto out; goto out;
} }
@ -1921,8 +1958,10 @@ X86CPU *cpu_x86_create(const char *cpu_model, DeviceState *icc_bridge,
out: out:
if (error != NULL) { if (error != NULL) {
error_propagate(errp, error); error_propagate(errp, error);
object_unref(OBJECT(cpu)); if (cpu) {
cpu = NULL; object_unref(OBJECT(cpu));
cpu = NULL;
}
} }
g_strfreev(model_pieces); g_strfreev(model_pieces);
return cpu; return cpu;
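With subclass lookup in place, an unknown or KVM-only model now fails before any CPU object exists. A sketch of the caller-visible behaviour (illustrative only; assumes a KVM-capable build running under TCG, with icc_bridge set up as usual):

    Error *err = NULL;
    X86CPU *cpu = cpu_x86_create("host", icc_bridge, &err);
    if (!cpu) {
        /* reports "CPU model 'host' requires KVM" */
        error_report("%s", error_get_pretty(err));
        error_free(err);
    }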
@ -1952,6 +1991,28 @@ out:
return cpu; return cpu;
} }
static void x86_cpu_cpudef_class_init(ObjectClass *oc, void *data)
{
X86CPUDefinition *cpudef = data;
X86CPUClass *xcc = X86_CPU_CLASS(oc);
xcc->cpu_def = cpudef;
}
static void x86_register_cpudef_type(X86CPUDefinition *def)
{
char *typename = x86_cpu_type_name(def->name);
TypeInfo ti = {
.name = typename,
.parent = TYPE_X86_CPU,
.class_init = x86_cpu_cpudef_class_init,
.class_data = def,
};
type_register(&ti);
g_free(typename);
}
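Each builtin definition thereby becomes its own QOM type, with the X86CPUDefinition handed to class_init through TypeInfo::class_data. For instance (type name shown assuming an x86_64 target, where TYPE_X86_CPU is "x86_64-cpu"):

    x86_register_cpudef_type(&builtin_x86_defs[0]);   /* registers "qemu64-x86_64-cpu" */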
#if !defined(CONFIG_USER_ONLY) #if !defined(CONFIG_USER_ONLY)
void cpu_clear_apic_feature(CPUX86State *env) void cpu_clear_apic_feature(CPUX86State *env)
@ -1969,7 +2030,7 @@ void x86_cpudef_setup(void)
static const char *model_with_versions[] = { "qemu32", "qemu64", "athlon" }; static const char *model_with_versions[] = { "qemu32", "qemu64", "athlon" };
for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); ++i) { for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); ++i) {
x86_def_t *def = &builtin_x86_defs[i]; X86CPUDefinition *def = &builtin_x86_defs[i];
/* Look for specific "cpudef" models that */ /* Look for specific "cpudef" models that */
/* have the QEMU version in .model_id */ /* have the QEMU version in .model_id */
@ -2349,9 +2410,9 @@ static void x86_cpu_reset(CPUState *s)
xcc->parent_reset(s); xcc->parent_reset(s);
memset(env, 0, offsetof(CPUX86State, breakpoints)); memset(env, 0, offsetof(CPUX86State, pat));
tlb_flush(env, 1); tlb_flush(s, 1);
env->old_exception = -1; env->old_exception = -1;
@ -2412,8 +2473,8 @@ static void x86_cpu_reset(CPUState *s)
memset(env->dr, 0, sizeof(env->dr)); memset(env->dr, 0, sizeof(env->dr));
env->dr[6] = DR6_FIXED_1; env->dr[6] = DR6_FIXED_1;
env->dr[7] = DR7_FIXED_1; env->dr[7] = DR7_FIXED_1;
cpu_breakpoint_remove_all(env, BP_CPU); cpu_breakpoint_remove_all(s, BP_CPU);
cpu_watchpoint_remove_all(env, BP_CPU); cpu_watchpoint_remove_all(s, BP_CPU);
env->tsc_adjust = 0; env->tsc_adjust = 0;
env->tsc = 0; env->tsc = 0;
@ -2613,6 +2674,7 @@ static void x86_cpu_initfn(Object *obj)
{ {
CPUState *cs = CPU(obj); CPUState *cs = CPU(obj);
X86CPU *cpu = X86_CPU(obj); X86CPU *cpu = X86_CPU(obj);
X86CPUClass *xcc = X86_CPU_GET_CLASS(obj);
CPUX86State *env = &cpu->env; CPUX86State *env = &cpu->env;
static int inited; static int inited;
@ -2656,6 +2718,8 @@ static void x86_cpu_initfn(Object *obj)
cpu->hyperv_spinlock_attempts = HYPERV_SPINLOCK_NEVER_RETRY; cpu->hyperv_spinlock_attempts = HYPERV_SPINLOCK_NEVER_RETRY;
env->cpuid_apic_id = x86_cpu_apic_id_from_index(cs->cpu_index); env->cpuid_apic_id = x86_cpu_apic_id_from_index(cs->cpu_index);
x86_cpu_load_def(cpu, xcc->cpu_def, &error_abort);
/* init various static tables used in TCG mode */ /* init various static tables used in TCG mode */
if (tcg_enabled() && !inited) { if (tcg_enabled() && !inited) {
inited = 1; inited = 1;
@ -2695,6 +2759,20 @@ static void x86_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb)
cpu->env.eip = tb->pc - tb->cs_base; cpu->env.eip = tb->pc - tb->cs_base;
} }
static bool x86_cpu_has_work(CPUState *cs)
{
X86CPU *cpu = X86_CPU(cs);
CPUX86State *env = &cpu->env;
return ((cs->interrupt_request & (CPU_INTERRUPT_HARD |
CPU_INTERRUPT_POLL)) &&
(env->eflags & IF_MASK)) ||
(cs->interrupt_request & (CPU_INTERRUPT_NMI |
CPU_INTERRUPT_INIT |
CPU_INTERRUPT_SIPI |
CPU_INTERRUPT_MCE));
}
static Property x86_cpu_properties[] = { static Property x86_cpu_properties[] = {
DEFINE_PROP_BOOL("pmu", X86CPU, enable_pmu, false), DEFINE_PROP_BOOL("pmu", X86CPU, enable_pmu, false),
{ .name = "hv-spinlocks", .info = &qdev_prop_spinlocks }, { .name = "hv-spinlocks", .info = &qdev_prop_spinlocks },
@ -2721,6 +2799,9 @@ static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
cc->reset = x86_cpu_reset; cc->reset = x86_cpu_reset;
cc->reset_dump_flags = CPU_DUMP_FPU | CPU_DUMP_CCOP; cc->reset_dump_flags = CPU_DUMP_FPU | CPU_DUMP_CCOP;
cc->class_by_name = x86_cpu_class_by_name;
cc->parse_features = x86_cpu_parse_featurestr;
cc->has_work = x86_cpu_has_work;
cc->do_interrupt = x86_cpu_do_interrupt; cc->do_interrupt = x86_cpu_do_interrupt;
cc->dump_state = x86_cpu_dump_state; cc->dump_state = x86_cpu_dump_state;
cc->set_pc = x86_cpu_set_pc; cc->set_pc = x86_cpu_set_pc;
@ -2729,7 +2810,9 @@ static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
cc->gdb_write_register = x86_cpu_gdb_write_register; cc->gdb_write_register = x86_cpu_gdb_write_register;
cc->get_arch_id = x86_cpu_get_arch_id; cc->get_arch_id = x86_cpu_get_arch_id;
cc->get_paging_enabled = x86_cpu_get_paging_enabled; cc->get_paging_enabled = x86_cpu_get_paging_enabled;
#ifndef CONFIG_USER_ONLY #ifdef CONFIG_USER_ONLY
cc->handle_mmu_fault = x86_cpu_handle_mmu_fault;
#else
cc->get_memory_mapping = x86_cpu_get_memory_mapping; cc->get_memory_mapping = x86_cpu_get_memory_mapping;
cc->get_phys_page_debug = x86_cpu_get_phys_page_debug; cc->get_phys_page_debug = x86_cpu_get_phys_page_debug;
cc->write_elf64_note = x86_cpu_write_elf64_note; cc->write_elf64_note = x86_cpu_write_elf64_note;
@ -2746,14 +2829,22 @@ static const TypeInfo x86_cpu_type_info = {
.parent = TYPE_CPU, .parent = TYPE_CPU,
.instance_size = sizeof(X86CPU), .instance_size = sizeof(X86CPU),
.instance_init = x86_cpu_initfn, .instance_init = x86_cpu_initfn,
.abstract = false, .abstract = true,
.class_size = sizeof(X86CPUClass), .class_size = sizeof(X86CPUClass),
.class_init = x86_cpu_common_class_init, .class_init = x86_cpu_common_class_init,
}; };
static void x86_cpu_register_types(void) static void x86_cpu_register_types(void)
{ {
int i;
type_register_static(&x86_cpu_type_info); type_register_static(&x86_cpu_type_info);
for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
x86_register_cpudef_type(&builtin_x86_defs[i]);
}
#ifdef CONFIG_KVM
type_register_static(&host_x86_cpu_type_info);
#endif
} }
type_init(x86_cpu_register_types) type_init(x86_cpu_register_types)

View file

@ -875,8 +875,8 @@ typedef struct CPUX86State {
target_ulong exception_next_eip; target_ulong exception_next_eip;
target_ulong dr[8]; /* debug registers */ target_ulong dr[8]; /* debug registers */
union { union {
CPUBreakpoint *cpu_breakpoint[4]; struct CPUBreakpoint *cpu_breakpoint[4];
CPUWatchpoint *cpu_watchpoint[4]; struct CPUWatchpoint *cpu_watchpoint[4];
}; /* break/watchpoints for dr[0..3] */ }; /* break/watchpoints for dr[0..3] */
uint32_t smbase; uint32_t smbase;
int old_exception; /* exception in flight */ int old_exception; /* exception in flight */
@ -887,6 +887,7 @@ typedef struct CPUX86State {
CPU_COMMON CPU_COMMON
/* Fields from here on are preserved across CPU reset. */
uint64_t pat; uint64_t pat;
/* processor features (e.g. for CPUID insn) */ /* processor features (e.g. for CPUID insn) */
@ -1067,9 +1068,8 @@ void host_cpuid(uint32_t function, uint32_t count,
uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx); uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx);
/* helper.c */ /* helper.c */
int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr, int x86_cpu_handle_mmu_fault(CPUState *cpu, vaddr addr,
int is_write, int mmu_idx); int is_write, int mmu_idx);
#define cpu_handle_mmu_fault cpu_x86_handle_mmu_fault
void x86_cpu_set_a20(X86CPU *cpu, int a20_state); void x86_cpu_set_a20(X86CPU *cpu, int a20_state);
static inline bool hw_local_breakpoint_enabled(unsigned long dr7, int index) static inline bool hw_local_breakpoint_enabled(unsigned long dr7, int index)
@ -1186,20 +1186,6 @@ void optimize_flags_init(void);
#include "hw/i386/apic.h" #include "hw/i386/apic.h"
#endif #endif
static inline bool cpu_has_work(CPUState *cs)
{
X86CPU *cpu = X86_CPU(cs);
CPUX86State *env = &cpu->env;
return ((cs->interrupt_request & (CPU_INTERRUPT_HARD |
CPU_INTERRUPT_POLL)) &&
(env->eflags & IF_MASK)) ||
(cs->interrupt_request & (CPU_INTERRUPT_NMI |
CPU_INTERRUPT_INIT |
CPU_INTERRUPT_SIPI |
CPU_INTERRUPT_MCE));
}
#include "exec/exec-all.h" #include "exec/exec-all.h"
static inline void cpu_get_tb_cpu_state(CPUX86State *env, target_ulong *pc, static inline void cpu_get_tb_cpu_state(CPUX86State *env, target_ulong *pc,
@ -1276,11 +1262,11 @@ void do_smm_enter(X86CPU *cpu);
void cpu_report_tpr_access(CPUX86State *env, TPRAccess access); void cpu_report_tpr_access(CPUX86State *env, TPRAccess access);
void disable_kvm_pv_eoi(void);
void x86_cpu_compat_set_features(const char *cpu_model, FeatureWord w, void x86_cpu_compat_set_features(const char *cpu_model, FeatureWord w,
uint32_t feat_add, uint32_t feat_remove); uint32_t feat_add, uint32_t feat_remove);
void x86_cpu_compat_disable_kvm_features(FeatureWord w, uint32_t features);
/* Return name of 32-bit register, from a R_* constant */ /* Return name of 32-bit register, from a R_* constant */
const char *get_register_name_32(unsigned int reg); const char *get_register_name_32(unsigned int reg);


@ -94,6 +94,8 @@ static void QEMU_NORETURN raise_interrupt2(CPUX86State *env, int intno,
int is_int, int error_code, int is_int, int error_code,
int next_eip_addend) int next_eip_addend)
{ {
CPUState *cs = CPU(x86_env_get_cpu(env));
if (!is_int) { if (!is_int) {
cpu_svm_check_intercept_param(env, SVM_EXIT_EXCP_BASE + intno, cpu_svm_check_intercept_param(env, SVM_EXIT_EXCP_BASE + intno,
error_code); error_code);
@ -102,11 +104,11 @@ static void QEMU_NORETURN raise_interrupt2(CPUX86State *env, int intno,
cpu_svm_check_intercept_param(env, SVM_EXIT_SWINT, 0); cpu_svm_check_intercept_param(env, SVM_EXIT_SWINT, 0);
} }
env->exception_index = intno; cs->exception_index = intno;
env->error_code = error_code; env->error_code = error_code;
env->exception_is_int = is_int; env->exception_is_int = is_int;
env->exception_next_eip = env->eip + next_eip_addend; env->exception_next_eip = env->eip + next_eip_addend;
cpu_loop_exit(env); cpu_loop_exit(cs);
} }
/* shortcuts to generate exceptions */ /* shortcuts to generate exceptions */


@ -385,22 +385,25 @@ void x86_cpu_set_a20(X86CPU *cpu, int a20_state)
a20_state = (a20_state != 0); a20_state = (a20_state != 0);
if (a20_state != ((env->a20_mask >> 20) & 1)) { if (a20_state != ((env->a20_mask >> 20) & 1)) {
CPUState *cs = CPU(cpu);
#if defined(DEBUG_MMU) #if defined(DEBUG_MMU)
printf("A20 update: a20=%d\n", a20_state); printf("A20 update: a20=%d\n", a20_state);
#endif #endif
/* if the cpu is currently executing code, we must unlink it and /* if the cpu is currently executing code, we must unlink it and
all the potentially executing TB */ all the potentially executing TB */
cpu_interrupt(CPU(cpu), CPU_INTERRUPT_EXITTB); cpu_interrupt(cs, CPU_INTERRUPT_EXITTB);
/* when a20 is changed, all the MMU mappings are invalid, so /* when a20 is changed, all the MMU mappings are invalid, so
we must flush everything */ we must flush everything */
tlb_flush(env, 1); tlb_flush(cs, 1);
env->a20_mask = ~(1 << 20) | (a20_state << 20); env->a20_mask = ~(1 << 20) | (a20_state << 20);
} }
} }
void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0) void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0)
{ {
X86CPU *cpu = x86_env_get_cpu(env);
int pe_state; int pe_state;
#if defined(DEBUG_MMU) #if defined(DEBUG_MMU)
@ -408,7 +411,7 @@ void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0)
#endif #endif
if ((new_cr0 & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK)) != if ((new_cr0 & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK)) !=
(env->cr[0] & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK))) { (env->cr[0] & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK))) {
tlb_flush(env, 1); tlb_flush(CPU(cpu), 1);
} }
#ifdef TARGET_X86_64 #ifdef TARGET_X86_64
@ -444,24 +447,28 @@ void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0)
the PDPT */ the PDPT */
void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3) void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3)
{ {
X86CPU *cpu = x86_env_get_cpu(env);
env->cr[3] = new_cr3; env->cr[3] = new_cr3;
if (env->cr[0] & CR0_PG_MASK) { if (env->cr[0] & CR0_PG_MASK) {
#if defined(DEBUG_MMU) #if defined(DEBUG_MMU)
printf("CR3 update: CR3=" TARGET_FMT_lx "\n", new_cr3); printf("CR3 update: CR3=" TARGET_FMT_lx "\n", new_cr3);
#endif #endif
tlb_flush(env, 0); tlb_flush(CPU(cpu), 0);
} }
} }
void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4) void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4)
{ {
X86CPU *cpu = x86_env_get_cpu(env);
#if defined(DEBUG_MMU) #if defined(DEBUG_MMU)
printf("CR4 update: CR4=%08x\n", (uint32_t)env->cr[4]); printf("CR4 update: CR4=%08x\n", (uint32_t)env->cr[4]);
#endif #endif
if ((new_cr4 ^ env->cr[4]) & if ((new_cr4 ^ env->cr[4]) &
(CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK | (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK |
CR4_SMEP_MASK | CR4_SMAP_MASK)) { CR4_SMEP_MASK | CR4_SMAP_MASK)) {
tlb_flush(env, 1); tlb_flush(CPU(cpu), 1);
} }
/* SSE handling */ /* SSE handling */
if (!(env->features[FEAT_1_EDX] & CPUID_SSE)) { if (!(env->features[FEAT_1_EDX] & CPUID_SSE)) {
@ -485,15 +492,18 @@ void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4)
#if defined(CONFIG_USER_ONLY) #if defined(CONFIG_USER_ONLY)
int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr, int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr,
int is_write, int mmu_idx) int is_write, int mmu_idx)
{ {
X86CPU *cpu = X86_CPU(cs);
CPUX86State *env = &cpu->env;
/* user mode only emulation */ /* user mode only emulation */
is_write &= 1; is_write &= 1;
env->cr[2] = addr; env->cr[2] = addr;
env->error_code = (is_write << PG_ERROR_W_BIT); env->error_code = (is_write << PG_ERROR_W_BIT);
env->error_code |= PG_ERROR_U_MASK; env->error_code |= PG_ERROR_U_MASK;
env->exception_index = EXCP0E_PAGE; cs->exception_index = EXCP0E_PAGE;
return 1; return 1;
} }
@ -508,14 +518,15 @@ int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
# endif # endif
/* return value: /* return value:
-1 = cannot handle fault * -1 = cannot handle fault
0 = nothing more to do * 0 = nothing more to do
1 = generate PF fault * 1 = generate PF fault
*/ */
int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr, int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr,
int is_write1, int mmu_idx) int is_write1, int mmu_idx)
{ {
CPUState *cs = ENV_GET_CPU(env); X86CPU *cpu = X86_CPU(cs);
CPUX86State *env = &cpu->env;
uint64_t ptep, pte; uint64_t ptep, pte;
target_ulong pde_addr, pte_addr; target_ulong pde_addr, pte_addr;
int error_code, is_dirty, prot, page_size, is_write, is_user; int error_code, is_dirty, prot, page_size, is_write, is_user;
@ -525,7 +536,7 @@ int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
is_user = mmu_idx == MMU_USER_IDX; is_user = mmu_idx == MMU_USER_IDX;
#if defined(DEBUG_MMU) #if defined(DEBUG_MMU)
printf("MMU fault: addr=" TARGET_FMT_lx " w=%d u=%d eip=" TARGET_FMT_lx "\n", printf("MMU fault: addr=%" VADDR_PRIx " w=%d u=%d eip=" TARGET_FMT_lx "\n",
addr, is_write1, is_user, env->eip); addr, is_write1, is_user, env->eip);
#endif #endif
is_write = is_write1 & 1; is_write = is_write1 & 1;
@ -557,7 +568,7 @@ int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
sext = (int64_t)addr >> 47; sext = (int64_t)addr >> 47;
if (sext != 0 && sext != -1) { if (sext != 0 && sext != -1) {
env->error_code = 0; env->error_code = 0;
env->exception_index = EXCP0D_GPF; cs->exception_index = EXCP0D_GPF;
return 1; return 1;
} }
@ -866,7 +877,7 @@ int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
paddr = (pte & TARGET_PAGE_MASK) + page_offset; paddr = (pte & TARGET_PAGE_MASK) + page_offset;
vaddr = virt_addr + page_offset; vaddr = virt_addr + page_offset;
tlb_set_page(env, vaddr, paddr, prot, mmu_idx, page_size); tlb_set_page(cs, vaddr, paddr, prot, mmu_idx, page_size);
return 0; return 0;
do_fault_protect: do_fault_protect:
error_code = PG_ERROR_P_MASK; error_code = PG_ERROR_P_MASK;
@ -888,7 +899,7 @@ int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
env->cr[2] = addr; env->cr[2] = addr;
} }
env->error_code = error_code; env->error_code = error_code;
env->exception_index = EXCP0E_PAGE; cs->exception_index = EXCP0E_PAGE;
return 1; return 1;
} }
@ -989,12 +1000,13 @@ hwaddr x86_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
void hw_breakpoint_insert(CPUX86State *env, int index) void hw_breakpoint_insert(CPUX86State *env, int index)
{ {
CPUState *cs = CPU(x86_env_get_cpu(env));
int type = 0, err = 0; int type = 0, err = 0;
switch (hw_breakpoint_type(env->dr[7], index)) { switch (hw_breakpoint_type(env->dr[7], index)) {
case DR7_TYPE_BP_INST: case DR7_TYPE_BP_INST:
if (hw_breakpoint_enabled(env->dr[7], index)) { if (hw_breakpoint_enabled(env->dr[7], index)) {
err = cpu_breakpoint_insert(env, env->dr[index], BP_CPU, err = cpu_breakpoint_insert(cs, env->dr[index], BP_CPU,
&env->cpu_breakpoint[index]); &env->cpu_breakpoint[index]);
} }
break; break;
@ -1010,7 +1022,7 @@ void hw_breakpoint_insert(CPUX86State *env, int index)
} }
if (type != 0) { if (type != 0) {
err = cpu_watchpoint_insert(env, env->dr[index], err = cpu_watchpoint_insert(cs, env->dr[index],
hw_breakpoint_len(env->dr[7], index), hw_breakpoint_len(env->dr[7], index),
type, &env->cpu_watchpoint[index]); type, &env->cpu_watchpoint[index]);
} }
@ -1022,17 +1034,21 @@ void hw_breakpoint_insert(CPUX86State *env, int index)
void hw_breakpoint_remove(CPUX86State *env, int index) void hw_breakpoint_remove(CPUX86State *env, int index)
{ {
if (!env->cpu_breakpoint[index]) CPUState *cs;
if (!env->cpu_breakpoint[index]) {
return; return;
}
cs = CPU(x86_env_get_cpu(env));
switch (hw_breakpoint_type(env->dr[7], index)) { switch (hw_breakpoint_type(env->dr[7], index)) {
case DR7_TYPE_BP_INST: case DR7_TYPE_BP_INST:
if (hw_breakpoint_enabled(env->dr[7], index)) { if (hw_breakpoint_enabled(env->dr[7], index)) {
cpu_breakpoint_remove_by_ref(env, env->cpu_breakpoint[index]); cpu_breakpoint_remove_by_ref(cs, env->cpu_breakpoint[index]);
} }
break; break;
case DR7_TYPE_DATA_WR: case DR7_TYPE_DATA_WR:
case DR7_TYPE_DATA_RW: case DR7_TYPE_DATA_RW:
cpu_watchpoint_remove_by_ref(env, env->cpu_watchpoint[index]); cpu_watchpoint_remove_by_ref(cs, env->cpu_watchpoint[index]);
break; break;
case DR7_TYPE_IO_RW: case DR7_TYPE_IO_RW:
/* No support for I/O watchpoints yet */ /* No support for I/O watchpoints yet */
@ -1084,19 +1100,20 @@ bool check_hw_breakpoints(CPUX86State *env, bool force_dr6_update)
void breakpoint_handler(CPUX86State *env) void breakpoint_handler(CPUX86State *env)
{ {
CPUState *cs = CPU(x86_env_get_cpu(env));
CPUBreakpoint *bp; CPUBreakpoint *bp;
if (env->watchpoint_hit) { if (cs->watchpoint_hit) {
if (env->watchpoint_hit->flags & BP_CPU) { if (cs->watchpoint_hit->flags & BP_CPU) {
env->watchpoint_hit = NULL; cs->watchpoint_hit = NULL;
if (check_hw_breakpoints(env, false)) { if (check_hw_breakpoints(env, false)) {
raise_exception(env, EXCP01_DB); raise_exception(env, EXCP01_DB);
} else { } else {
cpu_resume_from_signal(env, NULL); cpu_resume_from_signal(cs, NULL);
} }
} }
} else { } else {
QTAILQ_FOREACH(bp, &env->breakpoints, entry) QTAILQ_FOREACH(bp, &cs->breakpoints, entry) {
if (bp->pc == env->eip) { if (bp->pc == env->eip) {
if (bp->flags & BP_CPU) { if (bp->flags & BP_CPU) {
check_hw_breakpoints(env, true); check_hw_breakpoints(env, true);
@ -1104,6 +1121,7 @@ void breakpoint_handler(CPUX86State *env)
} }
break; break;
} }
}
} }
} }
@ -1250,13 +1268,14 @@ void cpu_x86_inject_mce(Monitor *mon, X86CPU *cpu, int bank,
void cpu_report_tpr_access(CPUX86State *env, TPRAccess access) void cpu_report_tpr_access(CPUX86State *env, TPRAccess access)
{ {
X86CPU *cpu = x86_env_get_cpu(env); X86CPU *cpu = x86_env_get_cpu(env);
CPUState *cs = CPU(cpu);
if (kvm_enabled()) { if (kvm_enabled()) {
env->tpr_access_type = access; env->tpr_access_type = access;
cpu_interrupt(CPU(cpu), CPU_INTERRUPT_TPR); cpu_interrupt(cs, CPU_INTERRUPT_TPR);
} else { } else {
cpu_restore_state(env, env->mem_io_pc); cpu_restore_state(cs, cs->mem_io_pc);
apic_handle_tpr_access_report(cpu->apic_state, env->eip, access); apic_handle_tpr_access_report(cpu->apic_state, env->eip, access);
} }


@ -2277,13 +2277,13 @@ static int kvm_handle_debug(X86CPU *cpu,
break; break;
case 0x1: case 0x1:
ret = EXCP_DEBUG; ret = EXCP_DEBUG;
env->watchpoint_hit = &hw_watchpoint; cs->watchpoint_hit = &hw_watchpoint;
hw_watchpoint.vaddr = hw_breakpoint[n].addr; hw_watchpoint.vaddr = hw_breakpoint[n].addr;
hw_watchpoint.flags = BP_MEM_WRITE; hw_watchpoint.flags = BP_MEM_WRITE;
break; break;
case 0x3: case 0x3:
ret = EXCP_DEBUG; ret = EXCP_DEBUG;
env->watchpoint_hit = &hw_watchpoint; cs->watchpoint_hit = &hw_watchpoint;
hw_watchpoint.vaddr = hw_breakpoint[n].addr; hw_watchpoint.vaddr = hw_breakpoint[n].addr;
hw_watchpoint.flags = BP_MEM_ACCESS; hw_watchpoint.flags = BP_MEM_ACCESS;
break; break;
@ -2291,11 +2291,11 @@ static int kvm_handle_debug(X86CPU *cpu,
} }
} }
} }
} else if (kvm_find_sw_breakpoint(CPU(cpu), arch_info->pc)) { } else if (kvm_find_sw_breakpoint(cs, arch_info->pc)) {
ret = EXCP_DEBUG; ret = EXCP_DEBUG;
} }
if (ret == 0) { if (ret == 0) {
cpu_synchronize_state(CPU(cpu)); cpu_synchronize_state(cs);
assert(env->exception_injected == -1); assert(env->exception_injected == -1);
/* pass to guest */ /* pass to guest */


@ -290,6 +290,7 @@ static void cpu_pre_save(void *opaque)
static int cpu_post_load(void *opaque, int version_id) static int cpu_post_load(void *opaque, int version_id)
{ {
X86CPU *cpu = opaque; X86CPU *cpu = opaque;
CPUState *cs = CPU(cpu);
CPUX86State *env = &cpu->env; CPUX86State *env = &cpu->env;
int i; int i;
@ -319,12 +320,12 @@ static int cpu_post_load(void *opaque, int version_id)
env->fptags[i] = (env->fptag_vmstate >> i) & 1; env->fptags[i] = (env->fptag_vmstate >> i) & 1;
} }
cpu_breakpoint_remove_all(env, BP_CPU); cpu_breakpoint_remove_all(cs, BP_CPU);
cpu_watchpoint_remove_all(env, BP_CPU); cpu_watchpoint_remove_all(cs, BP_CPU);
for (i = 0; i < DR7_MAX_BP; i++) { for (i = 0; i < DR7_MAX_BP; i++) {
hw_breakpoint_insert(env, i); hw_breakpoint_insert(env, i);
} }
tlb_flush(env, 1); tlb_flush(cs, 1);
return 0; return 0;
} }


@ -129,21 +129,25 @@ void helper_boundl(CPUX86State *env, target_ulong a0, int v)
#if !defined(CONFIG_USER_ONLY) #if !defined(CONFIG_USER_ONLY)
/* try to fill the TLB and return an exception if error. If retaddr is /* try to fill the TLB and return an exception if error. If retaddr is
NULL, it means that the function was called in C code (i.e. not * NULL, it means that the function was called in C code (i.e. not
from generated code or from helper.c) */ * from generated code or from helper.c)
*/
/* XXX: fix it to restore all registers */ /* XXX: fix it to restore all registers */
void tlb_fill(CPUX86State *env, target_ulong addr, int is_write, int mmu_idx, void tlb_fill(CPUState *cs, target_ulong addr, int is_write, int mmu_idx,
uintptr_t retaddr) uintptr_t retaddr)
{ {
int ret; int ret;
ret = cpu_x86_handle_mmu_fault(env, addr, is_write, mmu_idx); ret = x86_cpu_handle_mmu_fault(cs, addr, is_write, mmu_idx);
if (ret) { if (ret) {
X86CPU *cpu = X86_CPU(cs);
CPUX86State *env = &cpu->env;
if (retaddr) { if (retaddr) {
/* now we have a real cpu fault */ /* now we have a real cpu fault */
cpu_restore_state(env, retaddr); cpu_restore_state(cs, retaddr);
} }
raise_exception_err(env, env->exception_index, env->error_code); raise_exception_err(env, cs->exception_index, env->error_code);
} }
} }
#endif #endif


@ -221,8 +221,10 @@ void helper_lmsw(CPUX86State *env, target_ulong t0)
void helper_invlpg(CPUX86State *env, target_ulong addr) void helper_invlpg(CPUX86State *env, target_ulong addr)
{ {
X86CPU *cpu = x86_env_get_cpu(env);
cpu_svm_check_intercept_param(env, SVM_EXIT_INVLPG, 0); cpu_svm_check_intercept_param(env, SVM_EXIT_INVLPG, 0);
tlb_flush_page(env, addr); tlb_flush_page(CPU(cpu), addr);
} }
void helper_rdtsc(CPUX86State *env) void helper_rdtsc(CPUX86State *env)
@ -568,11 +570,11 @@ void helper_rdmsr(CPUX86State *env)
static void do_pause(X86CPU *cpu) static void do_pause(X86CPU *cpu)
{ {
CPUX86State *env = &cpu->env; CPUState *cs = CPU(cpu);
/* Just let another CPU run. */ /* Just let another CPU run. */
env->exception_index = EXCP_INTERRUPT; cs->exception_index = EXCP_INTERRUPT;
cpu_loop_exit(env); cpu_loop_exit(cs);
} }
static void do_hlt(X86CPU *cpu) static void do_hlt(X86CPU *cpu)
@ -582,8 +584,8 @@ static void do_hlt(X86CPU *cpu)
env->hflags &= ~HF_INHIBIT_IRQ_MASK; /* needed if sti is just before */ env->hflags &= ~HF_INHIBIT_IRQ_MASK; /* needed if sti is just before */
cs->halted = 1; cs->halted = 1;
env->exception_index = EXCP_HLT; cs->exception_index = EXCP_HLT;
cpu_loop_exit(env); cpu_loop_exit(cs);
} }
void helper_hlt(CPUX86State *env, int next_eip_addend) void helper_hlt(CPUX86State *env, int next_eip_addend)
@ -638,6 +640,8 @@ void helper_pause(CPUX86State *env, int next_eip_addend)
void helper_debug(CPUX86State *env) void helper_debug(CPUX86State *env)
{ {
env->exception_index = EXCP_DEBUG; CPUState *cs = CPU(x86_env_get_cpu(env));
cpu_loop_exit(env);
cs->exception_index = EXCP_DEBUG;
cpu_loop_exit(cs);
} }


@ -95,6 +95,7 @@ static inline void load_seg_vm(CPUX86State *env, int seg, int selector)
static inline void get_ss_esp_from_tss(CPUX86State *env, uint32_t *ss_ptr, static inline void get_ss_esp_from_tss(CPUX86State *env, uint32_t *ss_ptr,
uint32_t *esp_ptr, int dpl) uint32_t *esp_ptr, int dpl)
{ {
X86CPU *cpu = x86_env_get_cpu(env);
int type, index, shift; int type, index, shift;
#if 0 #if 0
@ -112,11 +113,11 @@ static inline void get_ss_esp_from_tss(CPUX86State *env, uint32_t *ss_ptr,
#endif #endif
if (!(env->tr.flags & DESC_P_MASK)) { if (!(env->tr.flags & DESC_P_MASK)) {
cpu_abort(env, "invalid tss"); cpu_abort(CPU(cpu), "invalid tss");
} }
type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf; type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
if ((type & 7) != 1) { if ((type & 7) != 1) {
cpu_abort(env, "invalid tss type"); cpu_abort(CPU(cpu), "invalid tss type");
} }
shift = type >> 3; shift = type >> 3;
index = (dpl * 4 + 2) << shift; index = (dpl * 4 + 2) << shift;
@ -782,6 +783,7 @@ static void do_interrupt_protected(CPUX86State *env, int intno, int is_int,
static inline target_ulong get_rsp_from_tss(CPUX86State *env, int level) static inline target_ulong get_rsp_from_tss(CPUX86State *env, int level)
{ {
X86CPU *cpu = x86_env_get_cpu(env);
int index; int index;
#if 0 #if 0
@ -790,7 +792,7 @@ static inline target_ulong get_rsp_from_tss(CPUX86State *env, int level)
#endif #endif
if (!(env->tr.flags & DESC_P_MASK)) { if (!(env->tr.flags & DESC_P_MASK)) {
cpu_abort(env, "invalid tss"); cpu_abort(CPU(cpu), "invalid tss");
} }
index = 8 * level + 4; index = 8 * level + 4;
if ((index + 7) > env->tr.limit) { if ((index + 7) > env->tr.limit) {
@ -935,9 +937,11 @@ static void do_interrupt64(CPUX86State *env, int intno, int is_int,
#if defined(CONFIG_USER_ONLY) #if defined(CONFIG_USER_ONLY)
void helper_syscall(CPUX86State *env, int next_eip_addend) void helper_syscall(CPUX86State *env, int next_eip_addend)
{ {
env->exception_index = EXCP_SYSCALL; CPUState *cs = CPU(x86_env_get_cpu(env));
cs->exception_index = EXCP_SYSCALL;
env->exception_next_eip = env->eip + next_eip_addend; env->exception_next_eip = env->eip + next_eip_addend;
cpu_loop_exit(env); cpu_loop_exit(cs);
} }
#else #else
void helper_syscall(CPUX86State *env, int next_eip_addend) void helper_syscall(CPUX86State *env, int next_eip_addend)
@ -1131,7 +1135,7 @@ static void do_interrupt_user(CPUX86State *env, int intno, int is_int,
static void handle_even_inj(CPUX86State *env, int intno, int is_int, static void handle_even_inj(CPUX86State *env, int intno, int is_int,
int error_code, int is_hw, int rm) int error_code, int is_hw, int rm)
{ {
CPUState *cs = ENV_GET_CPU(env); CPUState *cs = CPU(x86_env_get_cpu(env));
uint32_t event_inj = ldl_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, uint32_t event_inj = ldl_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb,
control.event_inj)); control.event_inj));
@ -1248,7 +1252,7 @@ void x86_cpu_do_interrupt(CPUState *cs)
/* if user mode only, we simulate a fake exception /* if user mode only, we simulate a fake exception
which will be handled outside the cpu execution which will be handled outside the cpu execution
loop */ loop */
do_interrupt_user(env, env->exception_index, do_interrupt_user(env, cs->exception_index,
env->exception_is_int, env->exception_is_int,
env->error_code, env->error_code,
env->exception_next_eip); env->exception_next_eip);
@ -1258,7 +1262,7 @@ void x86_cpu_do_interrupt(CPUState *cs)
/* simulate a real cpu exception. On i386, it can /* simulate a real cpu exception. On i386, it can
trigger new exceptions, but we do not handle trigger new exceptions, but we do not handle
double or triple faults yet. */ double or triple faults yet. */
do_interrupt_all(cpu, env->exception_index, do_interrupt_all(cpu, cs->exception_index,
env->exception_is_int, env->exception_is_int,
env->error_code, env->error_code,
env->exception_next_eip, 0); env->exception_next_eip, 0);


@ -181,8 +181,8 @@ void do_smm_enter(X86CPU *cpu)
void helper_rsm(CPUX86State *env) void helper_rsm(CPUX86State *env)
{ {
CPUState *cs = ENV_GET_CPU(env);
X86CPU *cpu = x86_env_get_cpu(env); X86CPU *cpu = x86_env_get_cpu(env);
CPUState *cs = CPU(cpu);
target_ulong sm_state; target_ulong sm_state;
int i, offset; int i, offset;
uint32_t val; uint32_t val;


@ -88,7 +88,8 @@ void helper_svm_check_io(CPUX86State *env, uint32_t port, uint32_t param,
static inline void svm_save_seg(CPUX86State *env, hwaddr addr, static inline void svm_save_seg(CPUX86State *env, hwaddr addr,
const SegmentCache *sc) const SegmentCache *sc)
{ {
CPUState *cs = ENV_GET_CPU(env); CPUState *cs = CPU(x86_env_get_cpu(env));
stw_phys(cs->as, addr + offsetof(struct vmcb_seg, selector), stw_phys(cs->as, addr + offsetof(struct vmcb_seg, selector),
sc->selector); sc->selector);
stq_phys(cs->as, addr + offsetof(struct vmcb_seg, base), stq_phys(cs->as, addr + offsetof(struct vmcb_seg, base),
@ -102,7 +103,7 @@ static inline void svm_save_seg(CPUX86State *env, hwaddr addr,
static inline void svm_load_seg(CPUX86State *env, hwaddr addr, static inline void svm_load_seg(CPUX86State *env, hwaddr addr,
SegmentCache *sc) SegmentCache *sc)
{ {
CPUState *cs = ENV_GET_CPU(env); CPUState *cs = CPU(x86_env_get_cpu(env));
unsigned int flags; unsigned int flags;
sc->selector = lduw_phys(cs->as, sc->selector = lduw_phys(cs->as,
@ -125,7 +126,7 @@ static inline void svm_load_seg_cache(CPUX86State *env, hwaddr addr,
void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend) void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
{ {
CPUState *cs = ENV_GET_CPU(env); CPUState *cs = CPU(x86_env_get_cpu(env));
target_ulong addr; target_ulong addr;
uint32_t event_inj; uint32_t event_inj;
uint32_t int_ctl; uint32_t int_ctl;
@ -293,7 +294,7 @@ void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
break; break;
case TLB_CONTROL_FLUSH_ALL_ASID: case TLB_CONTROL_FLUSH_ALL_ASID:
/* FIXME: this is not 100% correct but should work for now */ /* FIXME: this is not 100% correct but should work for now */
tlb_flush(env, 1); tlb_flush(cs, 1);
break; break;
} }
@ -319,7 +320,7 @@ void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
/* FIXME: need to implement valid_err */ /* FIXME: need to implement valid_err */
switch (event_inj & SVM_EVTINJ_TYPE_MASK) { switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
case SVM_EVTINJ_TYPE_INTR: case SVM_EVTINJ_TYPE_INTR:
env->exception_index = vector; cs->exception_index = vector;
env->error_code = event_inj_err; env->error_code = event_inj_err;
env->exception_is_int = 0; env->exception_is_int = 0;
env->exception_next_eip = -1; env->exception_next_eip = -1;
@ -328,31 +329,31 @@ void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
do_interrupt_x86_hardirq(env, vector, 1); do_interrupt_x86_hardirq(env, vector, 1);
break; break;
case SVM_EVTINJ_TYPE_NMI: case SVM_EVTINJ_TYPE_NMI:
env->exception_index = EXCP02_NMI; cs->exception_index = EXCP02_NMI;
env->error_code = event_inj_err; env->error_code = event_inj_err;
env->exception_is_int = 0; env->exception_is_int = 0;
env->exception_next_eip = env->eip; env->exception_next_eip = env->eip;
qemu_log_mask(CPU_LOG_TB_IN_ASM, "NMI"); qemu_log_mask(CPU_LOG_TB_IN_ASM, "NMI");
cpu_loop_exit(env); cpu_loop_exit(cs);
break; break;
case SVM_EVTINJ_TYPE_EXEPT: case SVM_EVTINJ_TYPE_EXEPT:
env->exception_index = vector; cs->exception_index = vector;
env->error_code = event_inj_err; env->error_code = event_inj_err;
env->exception_is_int = 0; env->exception_is_int = 0;
env->exception_next_eip = -1; env->exception_next_eip = -1;
qemu_log_mask(CPU_LOG_TB_IN_ASM, "EXEPT"); qemu_log_mask(CPU_LOG_TB_IN_ASM, "EXEPT");
cpu_loop_exit(env); cpu_loop_exit(cs);
break; break;
case SVM_EVTINJ_TYPE_SOFT: case SVM_EVTINJ_TYPE_SOFT:
env->exception_index = vector; cs->exception_index = vector;
env->error_code = event_inj_err; env->error_code = event_inj_err;
env->exception_is_int = 1; env->exception_is_int = 1;
env->exception_next_eip = env->eip; env->exception_next_eip = env->eip;
qemu_log_mask(CPU_LOG_TB_IN_ASM, "SOFT"); qemu_log_mask(CPU_LOG_TB_IN_ASM, "SOFT");
cpu_loop_exit(env); cpu_loop_exit(cs);
break; break;
} }
qemu_log_mask(CPU_LOG_TB_IN_ASM, " %#x %#x\n", env->exception_index, qemu_log_mask(CPU_LOG_TB_IN_ASM, " %#x %#x\n", cs->exception_index,
env->error_code); env->error_code);
} }
} }
@ -365,7 +366,7 @@ void helper_vmmcall(CPUX86State *env)
void helper_vmload(CPUX86State *env, int aflag) void helper_vmload(CPUX86State *env, int aflag)
{ {
CPUState *cs = ENV_GET_CPU(env); CPUState *cs = CPU(x86_env_get_cpu(env));
target_ulong addr; target_ulong addr;
cpu_svm_check_intercept_param(env, SVM_EXIT_VMLOAD, 0); cpu_svm_check_intercept_param(env, SVM_EXIT_VMLOAD, 0);
@ -405,7 +406,7 @@ void helper_vmload(CPUX86State *env, int aflag)
void helper_vmsave(CPUX86State *env, int aflag) void helper_vmsave(CPUX86State *env, int aflag)
{ {
CPUState *cs = ENV_GET_CPU(env); CPUState *cs = CPU(x86_env_get_cpu(env));
target_ulong addr; target_ulong addr;
cpu_svm_check_intercept_param(env, SVM_EXIT_VMSAVE, 0); cpu_svm_check_intercept_param(env, SVM_EXIT_VMSAVE, 0);
@ -468,6 +469,7 @@ void helper_skinit(CPUX86State *env)
void helper_invlpga(CPUX86State *env, int aflag) void helper_invlpga(CPUX86State *env, int aflag)
{ {
X86CPU *cpu = x86_env_get_cpu(env);
target_ulong addr; target_ulong addr;
cpu_svm_check_intercept_param(env, SVM_EXIT_INVLPGA, 0); cpu_svm_check_intercept_param(env, SVM_EXIT_INVLPGA, 0);
@ -480,13 +482,13 @@ void helper_invlpga(CPUX86State *env, int aflag)
/* XXX: could use the ASID to see if it is needed to do the /* XXX: could use the ASID to see if it is needed to do the
flush */ flush */
tlb_flush_page(env, addr); tlb_flush_page(CPU(cpu), addr);
} }
void helper_svm_check_intercept_param(CPUX86State *env, uint32_t type, void helper_svm_check_intercept_param(CPUX86State *env, uint32_t type,
uint64_t param) uint64_t param)
{ {
CPUState *cs = ENV_GET_CPU(env); CPUState *cs = CPU(x86_env_get_cpu(env));
if (likely(!(env->hflags & HF_SVMI_MASK))) { if (likely(!(env->hflags & HF_SVMI_MASK))) {
return; return;
@ -568,7 +570,8 @@ void cpu_svm_check_intercept_param(CPUX86State *env, uint32_t type,
void helper_svm_check_io(CPUX86State *env, uint32_t port, uint32_t param, void helper_svm_check_io(CPUX86State *env, uint32_t port, uint32_t param,
uint32_t next_eip_addend) uint32_t next_eip_addend)
{ {
CPUState *cs = ENV_GET_CPU(env); CPUState *cs = CPU(x86_env_get_cpu(env));
if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) { if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) {
/* FIXME: this should be read in at vmrun (faster this way?) */ /* FIXME: this should be read in at vmrun (faster this way?) */
uint64_t addr = ldq_phys(cs->as, env->vm_vmcb + uint64_t addr = ldq_phys(cs->as, env->vm_vmcb +
@ -766,11 +769,11 @@ void helper_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1)
#GP fault is delivered inside the host. */ #GP fault is delivered inside the host. */
/* remove any pending exception */ /* remove any pending exception */
env->exception_index = -1; cs->exception_index = -1;
env->error_code = 0; env->error_code = 0;
env->old_exception = -1; env->old_exception = -1;
cpu_loop_exit(env); cpu_loop_exit(cs);
} }
void cpu_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1) void cpu_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1)
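The recurring conversion in the SVM helpers above, condensed into one sketch. The helper name sketch_raise_event is illustrative only; the calls and fields are the ones introduced in the hunks above, assuming QEMU's target-i386 "cpu.h" is in scope.

/* Helpers that used to poke env->exception_index and call
 * cpu_loop_exit(env) now derive the CPUState once and use it for the
 * fields that moved out of the per-target env and for the loop exit.
 */
static void sketch_raise_event(CPUX86State *env, int vector, int error_code)
{
    CPUState *cs = CPU(x86_env_get_cpu(env)); /* replaces ENV_GET_CPU(env) */

    cs->exception_index = vector;             /* was env->exception_index */
    env->error_code = error_code;             /* still lives in env */
    env->exception_is_int = 0;
    cpu_loop_exit(cs);                        /* was cpu_loop_exit(env) */
}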

@ -7965,8 +7965,8 @@ static inline void gen_intermediate_code_internal(X86CPU *cpu,
gen_tb_start(); gen_tb_start();
for(;;) { for(;;) {
if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) { if (unlikely(!QTAILQ_EMPTY(&cs->breakpoints))) {
QTAILQ_FOREACH(bp, &env->breakpoints, entry) { QTAILQ_FOREACH(bp, &cs->breakpoints, entry) {
if (bp->pc == pc_ptr && if (bp->pc == pc_ptr &&
!((bp->flags & BP_CPU) && (tb->flags & HF_RF_MASK))) { !((bp->flags & BP_CPU) && (tb->flags & HF_RF_MASK))) {
gen_debug(dc, pc_ptr - dc->cs_base); gen_debug(dc, pc_ptr - dc->cs_base);

@ -110,6 +110,11 @@ static void lm32_cpu_init_cfg_reg(LM32CPU *cpu)
env->cfg = cfg; env->cfg = cfg;
} }
static bool lm32_cpu_has_work(CPUState *cs)
{
return cs->interrupt_request & CPU_INTERRUPT_HARD;
}
/* CPUClass::reset() */ /* CPUClass::reset() */
static void lm32_cpu_reset(CPUState *s) static void lm32_cpu_reset(CPUState *s)
{ {
@ -120,10 +125,10 @@ static void lm32_cpu_reset(CPUState *s)
lcc->parent_reset(s); lcc->parent_reset(s);
/* reset cpu state */ /* reset cpu state */
memset(env, 0, offsetof(CPULM32State, breakpoints)); memset(env, 0, offsetof(CPULM32State, eba));
lm32_cpu_init_cfg_reg(cpu); lm32_cpu_init_cfg_reg(cpu);
tlb_flush(env, 1); tlb_flush(s, 1);
} }
static void lm32_cpu_realizefn(DeviceState *dev, Error **errp) static void lm32_cpu_realizefn(DeviceState *dev, Error **errp)
@ -255,12 +260,15 @@ static void lm32_cpu_class_init(ObjectClass *oc, void *data)
cc->reset = lm32_cpu_reset; cc->reset = lm32_cpu_reset;
cc->class_by_name = lm32_cpu_class_by_name; cc->class_by_name = lm32_cpu_class_by_name;
cc->has_work = lm32_cpu_has_work;
cc->do_interrupt = lm32_cpu_do_interrupt; cc->do_interrupt = lm32_cpu_do_interrupt;
cc->dump_state = lm32_cpu_dump_state; cc->dump_state = lm32_cpu_dump_state;
cc->set_pc = lm32_cpu_set_pc; cc->set_pc = lm32_cpu_set_pc;
cc->gdb_read_register = lm32_cpu_gdb_read_register; cc->gdb_read_register = lm32_cpu_gdb_read_register;
cc->gdb_write_register = lm32_cpu_gdb_write_register; cc->gdb_write_register = lm32_cpu_gdb_write_register;
#ifndef CONFIG_USER_ONLY #ifdef CONFIG_USER_ONLY
cc->handle_mmu_fault = lm32_cpu_handle_mmu_fault;
#else
cc->get_phys_page_debug = lm32_cpu_get_phys_page_debug; cc->get_phys_page_debug = lm32_cpu_get_phys_page_debug;
cc->vmsd = &vmstate_lm32_cpu; cc->vmsd = &vmstate_lm32_cpu;
#endif #endif

@ -166,11 +166,12 @@ struct CPULM32State {
uint32_t bp[4]; /* breakpoints */ uint32_t bp[4]; /* breakpoints */
uint32_t wp[4]; /* watchpoints */ uint32_t wp[4]; /* watchpoints */
CPUBreakpoint * cpu_breakpoint[4]; struct CPUBreakpoint *cpu_breakpoint[4];
CPUWatchpoint * cpu_watchpoint[4]; struct CPUWatchpoint *cpu_watchpoint[4];
CPU_COMMON CPU_COMMON
/* Fields from here on are preserved across CPU reset. */
uint32_t eba; /* exception base address */ uint32_t eba; /* exception base address */
uint32_t deba; /* debug exception base address */ uint32_t deba; /* debug exception base address */
@ -231,9 +232,8 @@ static inline CPULM32State *cpu_init(const char *cpu_model)
#define cpu_gen_code cpu_lm32_gen_code #define cpu_gen_code cpu_lm32_gen_code
#define cpu_signal_handler cpu_lm32_signal_handler #define cpu_signal_handler cpu_lm32_signal_handler
int cpu_lm32_handle_mmu_fault(CPULM32State *env, target_ulong address, int rw, int lm32_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int rw,
int mmu_idx); int mmu_idx);
#define cpu_handle_mmu_fault cpu_lm32_handle_mmu_fault
#include "exec/cpu-all.h" #include "exec/cpu-all.h"
@ -245,11 +245,6 @@ static inline void cpu_get_tb_cpu_state(CPULM32State *env, target_ulong *pc,
*flags = 0; *flags = 0;
} }
static inline bool cpu_has_work(CPUState *cpu)
{
return cpu->interrupt_request & CPU_INTERRUPT_HARD;
}
#include "exec/exec-all.h" #include "exec/exec-all.h"
#endif #endif
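The inline removed here does not disappear: as the cpu.c hunks above show, the same predicate becomes the CPUClass::has_work hook. A condensed before/after sketch, using the lm32 code from this series:

/* Before: each target's cpu.h supplied a static inline cpu_has_work()
 * that common code had to reach through target-specific headers.
 * After: the predicate lives in cpu.c and is registered on the class,
 * so common code calls it through CPUClass without per-target inlines.
 */
static bool lm32_cpu_has_work(CPUState *cs)
{
    return cs->interrupt_request & CPU_INTERRUPT_HARD;
}

/* ...wired up in lm32_cpu_class_init(): cc->has_work = lm32_cpu_has_work; */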

@ -20,18 +20,20 @@
#include "cpu.h" #include "cpu.h"
#include "qemu/host-utils.h" #include "qemu/host-utils.h"
int cpu_lm32_handle_mmu_fault(CPULM32State *env, target_ulong address, int rw, int lm32_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int rw,
int mmu_idx) int mmu_idx)
{ {
LM32CPU *cpu = LM32_CPU(cs);
CPULM32State *env = &cpu->env;
int prot; int prot;
address &= TARGET_PAGE_MASK; address &= TARGET_PAGE_MASK;
prot = PAGE_BITS; prot = PAGE_BITS;
if (env->flags & LM32_FLAG_IGNORE_MSB) { if (env->flags & LM32_FLAG_IGNORE_MSB) {
tlb_set_page(env, address, address & 0x7fffffff, prot, mmu_idx, tlb_set_page(cs, address, address & 0x7fffffff, prot, mmu_idx,
TARGET_PAGE_SIZE); TARGET_PAGE_SIZE);
} else { } else {
tlb_set_page(env, address, address, prot, mmu_idx, TARGET_PAGE_SIZE); tlb_set_page(cs, address, address, prot, mmu_idx, TARGET_PAGE_SIZE);
} }
return 0; return 0;
@ -51,22 +53,28 @@ hwaddr lm32_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
void lm32_breakpoint_insert(CPULM32State *env, int idx, target_ulong address) void lm32_breakpoint_insert(CPULM32State *env, int idx, target_ulong address)
{ {
cpu_breakpoint_insert(env, address, BP_CPU, &env->cpu_breakpoint[idx]); LM32CPU *cpu = lm32_env_get_cpu(env);
cpu_breakpoint_insert(CPU(cpu), address, BP_CPU,
&env->cpu_breakpoint[idx]);
} }
void lm32_breakpoint_remove(CPULM32State *env, int idx) void lm32_breakpoint_remove(CPULM32State *env, int idx)
{ {
LM32CPU *cpu = lm32_env_get_cpu(env);
if (!env->cpu_breakpoint[idx]) { if (!env->cpu_breakpoint[idx]) {
return; return;
} }
cpu_breakpoint_remove_by_ref(env, env->cpu_breakpoint[idx]); cpu_breakpoint_remove_by_ref(CPU(cpu), env->cpu_breakpoint[idx]);
env->cpu_breakpoint[idx] = NULL; env->cpu_breakpoint[idx] = NULL;
} }
void lm32_watchpoint_insert(CPULM32State *env, int idx, target_ulong address, void lm32_watchpoint_insert(CPULM32State *env, int idx, target_ulong address,
lm32_wp_t wp_type) lm32_wp_t wp_type)
{ {
LM32CPU *cpu = lm32_env_get_cpu(env);
int flags = 0; int flags = 0;
switch (wp_type) { switch (wp_type) {
@ -85,18 +93,20 @@ void lm32_watchpoint_insert(CPULM32State *env, int idx, target_ulong address,
} }
if (flags != 0) { if (flags != 0) {
cpu_watchpoint_insert(env, address, 1, flags, cpu_watchpoint_insert(CPU(cpu), address, 1, flags,
&env->cpu_watchpoint[idx]); &env->cpu_watchpoint[idx]);
} }
} }
void lm32_watchpoint_remove(CPULM32State *env, int idx) void lm32_watchpoint_remove(CPULM32State *env, int idx)
{ {
LM32CPU *cpu = lm32_env_get_cpu(env);
if (!env->cpu_watchpoint[idx]) { if (!env->cpu_watchpoint[idx]) {
return; return;
} }
cpu_watchpoint_remove_by_ref(env, env->cpu_watchpoint[idx]); cpu_watchpoint_remove_by_ref(CPU(cpu), env->cpu_watchpoint[idx]);
env->cpu_watchpoint[idx] = NULL; env->cpu_watchpoint[idx] = NULL;
} }
@ -116,19 +126,20 @@ static bool check_watchpoints(CPULM32State *env)
void lm32_debug_excp_handler(CPULM32State *env) void lm32_debug_excp_handler(CPULM32State *env)
{ {
CPUState *cs = CPU(lm32_env_get_cpu(env));
CPUBreakpoint *bp; CPUBreakpoint *bp;
if (env->watchpoint_hit) { if (cs->watchpoint_hit) {
if (env->watchpoint_hit->flags & BP_CPU) { if (cs->watchpoint_hit->flags & BP_CPU) {
env->watchpoint_hit = NULL; cs->watchpoint_hit = NULL;
if (check_watchpoints(env)) { if (check_watchpoints(env)) {
raise_exception(env, EXCP_WATCHPOINT); raise_exception(env, EXCP_WATCHPOINT);
} else { } else {
cpu_resume_from_signal(env, NULL); cpu_resume_from_signal(cs, NULL);
} }
} }
} else { } else {
QTAILQ_FOREACH(bp, &env->breakpoints, entry) { QTAILQ_FOREACH(bp, &cs->breakpoints, entry) {
if (bp->pc == env->pc) { if (bp->pc == env->pc) {
if (bp->flags & BP_CPU) { if (bp->flags & BP_CPU) {
raise_exception(env, EXCP_BREAKPOINT); raise_exception(env, EXCP_BREAKPOINT);
@ -145,9 +156,9 @@ void lm32_cpu_do_interrupt(CPUState *cs)
CPULM32State *env = &cpu->env; CPULM32State *env = &cpu->env;
qemu_log_mask(CPU_LOG_INT, qemu_log_mask(CPU_LOG_INT,
"exception at pc=%x type=%x\n", env->pc, env->exception_index); "exception at pc=%x type=%x\n", env->pc, cs->exception_index);
switch (env->exception_index) { switch (cs->exception_index) {
case EXCP_INSN_BUS_ERROR: case EXCP_INSN_BUS_ERROR:
case EXCP_DATA_BUS_ERROR: case EXCP_DATA_BUS_ERROR:
case EXCP_DIVIDE_BY_ZERO: case EXCP_DIVIDE_BY_ZERO:
@ -158,9 +169,9 @@ void lm32_cpu_do_interrupt(CPUState *cs)
env->ie |= (env->ie & IE_IE) ? IE_EIE : 0; env->ie |= (env->ie & IE_IE) ? IE_EIE : 0;
env->ie &= ~IE_IE; env->ie &= ~IE_IE;
if (env->dc & DC_RE) { if (env->dc & DC_RE) {
env->pc = env->deba + (env->exception_index * 32); env->pc = env->deba + (cs->exception_index * 32);
} else { } else {
env->pc = env->eba + (env->exception_index * 32); env->pc = env->eba + (cs->exception_index * 32);
} }
log_cpu_state_mask(CPU_LOG_INT, cs, 0); log_cpu_state_mask(CPU_LOG_INT, cs, 0);
break; break;
@ -170,30 +181,19 @@ void lm32_cpu_do_interrupt(CPUState *cs)
env->regs[R_BA] = env->pc; env->regs[R_BA] = env->pc;
env->ie |= (env->ie & IE_IE) ? IE_BIE : 0; env->ie |= (env->ie & IE_IE) ? IE_BIE : 0;
env->ie &= ~IE_IE; env->ie &= ~IE_IE;
env->pc = env->deba + (env->exception_index * 32); env->pc = env->deba + (cs->exception_index * 32);
log_cpu_state_mask(CPU_LOG_INT, cs, 0); log_cpu_state_mask(CPU_LOG_INT, cs, 0);
break; break;
default: default:
cpu_abort(env, "unhandled exception type=%d\n", cpu_abort(cs, "unhandled exception type=%d\n",
env->exception_index); cs->exception_index);
break; break;
} }
} }
LM32CPU *cpu_lm32_init(const char *cpu_model) LM32CPU *cpu_lm32_init(const char *cpu_model)
{ {
LM32CPU *cpu; return LM32_CPU(cpu_generic_init(TYPE_LM32_CPU, cpu_model));
ObjectClass *oc;
oc = cpu_class_by_name(TYPE_LM32_CPU, cpu_model);
if (oc == NULL) {
return NULL;
}
cpu = LM32_CPU(object_new(object_class_get_name(oc)));
object_property_set_bool(OBJECT(cpu), true, "realized", NULL);
return cpu;
} }
/* Some soc ignores the MSB on the address bus. Thus creating a shadow memory /* Some soc ignores the MSB on the address bus. Thus creating a shadow memory

@ -25,8 +25,10 @@
void raise_exception(CPULM32State *env, int index) void raise_exception(CPULM32State *env, int index)
{ {
env->exception_index = index; CPUState *cs = CPU(lm32_env_get_cpu(env));
cpu_loop_exit(env);
cs->exception_index = index;
cpu_loop_exit(cs);
} }
void HELPER(raise_exception)(CPULM32State *env, uint32_t index) void HELPER(raise_exception)(CPULM32State *env, uint32_t index)
@ -39,8 +41,8 @@ void HELPER(hlt)(CPULM32State *env)
CPUState *cs = CPU(lm32_env_get_cpu(env)); CPUState *cs = CPU(lm32_env_get_cpu(env));
cs->halted = 1; cs->halted = 1;
env->exception_index = EXCP_HLT; cs->exception_index = EXCP_HLT;
cpu_loop_exit(env); cpu_loop_exit(cs);
} }
void HELPER(ill)(CPULM32State *env) void HELPER(ill)(CPULM32State *env)
@ -148,20 +150,21 @@ uint32_t HELPER(rcsr_jrx)(CPULM32State *env)
} }
/* Try to fill the TLB and return an exception if error. If retaddr is /* Try to fill the TLB and return an exception if error. If retaddr is
NULL, it means that the function was called in C code (i.e. not * NULL, it means that the function was called in C code (i.e. not
from generated code or from helper.c) */ * from generated code or from helper.c)
void tlb_fill(CPULM32State *env, target_ulong addr, int is_write, int mmu_idx, */
void tlb_fill(CPUState *cs, target_ulong addr, int is_write, int mmu_idx,
uintptr_t retaddr) uintptr_t retaddr)
{ {
int ret; int ret;
ret = cpu_lm32_handle_mmu_fault(env, addr, is_write, mmu_idx); ret = lm32_cpu_handle_mmu_fault(cs, addr, is_write, mmu_idx);
if (unlikely(ret)) { if (unlikely(ret)) {
if (retaddr) { if (retaddr) {
/* now we have a real cpu fault */ /* now we have a real cpu fault */
cpu_restore_state(env, retaddr); cpu_restore_state(cs, retaddr);
} }
cpu_loop_exit(env); cpu_loop_exit(cs);
} }
} }
#endif #endif
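The tlb_fill() conversion above repeats with the same shape for each target in this series; a generic sketch follows, where xxx_cpu_handle_mmu_fault is a placeholder for the per-target handler rather than a real symbol:

void tlb_fill(CPUState *cs, target_ulong addr, int is_write, int mmu_idx,
              uintptr_t retaddr)
{
    /* The per-target MMU fault handler now takes the CPUState directly. */
    int ret = xxx_cpu_handle_mmu_fault(cs, addr, is_write, mmu_idx);

    if (unlikely(ret)) {
        if (retaddr) {
            /* A real CPU fault: resynchronize guest state, then unwind. */
            cpu_restore_state(cs, retaddr);
        }
        cpu_loop_exit(cs);
    }
}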

@ -1037,10 +1037,11 @@ static inline void decode(DisasContext *dc, uint32_t ir)
static void check_breakpoint(CPULM32State *env, DisasContext *dc) static void check_breakpoint(CPULM32State *env, DisasContext *dc)
{ {
CPUState *cs = CPU(lm32_env_get_cpu(env));
CPUBreakpoint *bp; CPUBreakpoint *bp;
if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) { if (unlikely(!QTAILQ_EMPTY(&cs->breakpoints))) {
QTAILQ_FOREACH(bp, &env->breakpoints, entry) { QTAILQ_FOREACH(bp, &cs->breakpoints, entry) {
if (bp->pc == dc->pc) { if (bp->pc == dc->pc) {
tcg_gen_movi_tl(cpu_pc, dc->pc); tcg_gen_movi_tl(cpu_pc, dc->pc);
t_gen_raise_exception(dc, EXCP_DEBUG); t_gen_raise_exception(dc, EXCP_DEBUG);

@ -30,6 +30,11 @@ static void m68k_cpu_set_pc(CPUState *cs, vaddr value)
cpu->env.pc = value; cpu->env.pc = value;
} }
static bool m68k_cpu_has_work(CPUState *cs)
{
return cs->interrupt_request & CPU_INTERRUPT_HARD;
}
static void m68k_set_feature(CPUM68KState *env, int feature) static void m68k_set_feature(CPUM68KState *env, int feature)
{ {
env->features |= (1u << feature); env->features |= (1u << feature);
@ -44,7 +49,7 @@ static void m68k_cpu_reset(CPUState *s)
mcc->parent_reset(s); mcc->parent_reset(s);
memset(env, 0, offsetof(CPUM68KState, breakpoints)); memset(env, 0, offsetof(CPUM68KState, features));
#if !defined(CONFIG_USER_ONLY) #if !defined(CONFIG_USER_ONLY)
env->sr = 0x2700; env->sr = 0x2700;
#endif #endif
@ -53,7 +58,7 @@ static void m68k_cpu_reset(CPUState *s)
env->cc_op = CC_OP_FLAGS; env->cc_op = CC_OP_FLAGS;
/* TODO: We should set PC from the interrupt vector. */ /* TODO: We should set PC from the interrupt vector. */
env->pc = 0; env->pc = 0;
tlb_flush(env, 1); tlb_flush(s, 1);
} }
/* CPU models */ /* CPU models */
@ -189,12 +194,15 @@ static void m68k_cpu_class_init(ObjectClass *c, void *data)
cc->reset = m68k_cpu_reset; cc->reset = m68k_cpu_reset;
cc->class_by_name = m68k_cpu_class_by_name; cc->class_by_name = m68k_cpu_class_by_name;
cc->has_work = m68k_cpu_has_work;
cc->do_interrupt = m68k_cpu_do_interrupt; cc->do_interrupt = m68k_cpu_do_interrupt;
cc->dump_state = m68k_cpu_dump_state; cc->dump_state = m68k_cpu_dump_state;
cc->set_pc = m68k_cpu_set_pc; cc->set_pc = m68k_cpu_set_pc;
cc->gdb_read_register = m68k_cpu_gdb_read_register; cc->gdb_read_register = m68k_cpu_gdb_read_register;
cc->gdb_write_register = m68k_cpu_gdb_write_register; cc->gdb_write_register = m68k_cpu_gdb_write_register;
#ifndef CONFIG_USER_ONLY #ifdef CONFIG_USER_ONLY
cc->handle_mmu_fault = m68k_cpu_handle_mmu_fault;
#else
cc->get_phys_page_debug = m68k_cpu_get_phys_page_debug; cc->get_phys_page_debug = m68k_cpu_get_phys_page_debug;
#endif #endif
dc->vmsd = &vmstate_m68k_cpu; dc->vmsd = &vmstate_m68k_cpu;

@ -110,6 +110,7 @@ typedef struct CPUM68KState {
CPU_COMMON CPU_COMMON
/* Fields from here on are preserved across CPU reset. */
uint32_t features; uint32_t features;
} CPUM68KState; } CPUM68KState;
@ -237,9 +238,8 @@ static inline int cpu_mmu_index (CPUM68KState *env)
return (env->sr & SR_S) == 0 ? 1 : 0; return (env->sr & SR_S) == 0 ? 1 : 0;
} }
int cpu_m68k_handle_mmu_fault(CPUM68KState *env, target_ulong address, int rw, int m68k_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int rw,
int mmu_idx); int mmu_idx);
#define cpu_handle_mmu_fault cpu_m68k_handle_mmu_fault
#include "exec/cpu-all.h" #include "exec/cpu-all.h"
@ -253,11 +253,6 @@ static inline void cpu_get_tb_cpu_state(CPUM68KState *env, target_ulong *pc,
| ((env->macsr >> 4) & 0xf); /* Bits 0-3 */ | ((env->macsr >> 4) & 0xf); /* Bits 0-3 */
} }
static inline bool cpu_has_work(CPUState *cpu)
{
return cpu->interrupt_request & CPU_INTERRUPT_HARD;
}
#include "exec/exec-all.h" #include "exec/exec-all.h"
#endif #endif

@ -132,6 +132,7 @@ void m68k_cpu_init_gdb(M68kCPU *cpu)
void cpu_m68k_flush_flags(CPUM68KState *env, int cc_op) void cpu_m68k_flush_flags(CPUM68KState *env, int cc_op)
{ {
M68kCPU *cpu = m68k_env_get_cpu(env);
int flags; int flags;
uint32_t src; uint32_t src;
uint32_t dest; uint32_t dest;
@ -204,7 +205,7 @@ void cpu_m68k_flush_flags(CPUM68KState *env, int cc_op)
flags |= CCF_C; flags |= CCF_C;
break; break;
default: default:
cpu_abort(env, "Bad CC_OP %d", cc_op); cpu_abort(CPU(cpu), "Bad CC_OP %d", cc_op);
} }
env->cc_op = CC_OP_FLAGS; env->cc_op = CC_OP_FLAGS;
env->cc_dest = flags; env->cc_dest = flags;
@ -212,6 +213,8 @@ void cpu_m68k_flush_flags(CPUM68KState *env, int cc_op)
void HELPER(movec)(CPUM68KState *env, uint32_t reg, uint32_t val) void HELPER(movec)(CPUM68KState *env, uint32_t reg, uint32_t val)
{ {
M68kCPU *cpu = m68k_env_get_cpu(env);
switch (reg) { switch (reg) {
case 0x02: /* CACR */ case 0x02: /* CACR */
env->cacr = val; env->cacr = val;
@ -225,7 +228,7 @@ void HELPER(movec)(CPUM68KState *env, uint32_t reg, uint32_t val)
break; break;
/* TODO: Implement control registers. */ /* TODO: Implement control registers. */
default: default:
cpu_abort(env, "Unimplemented control register write 0x%x = 0x%x\n", cpu_abort(CPU(cpu), "Unimplemented control register write 0x%x = 0x%x\n",
reg, val); reg, val);
} }
} }
@ -277,11 +280,13 @@ void m68k_switch_sp(CPUM68KState *env)
#if defined(CONFIG_USER_ONLY) #if defined(CONFIG_USER_ONLY)
int cpu_m68k_handle_mmu_fault (CPUM68KState *env, target_ulong address, int rw, int m68k_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int rw,
int mmu_idx) int mmu_idx)
{ {
env->exception_index = EXCP_ACCESS; M68kCPU *cpu = M68K_CPU(cs);
env->mmu.ar = address;
cs->exception_index = EXCP_ACCESS;
cpu->env.mmu.ar = address;
return 1; return 1;
} }
@ -295,14 +300,14 @@ hwaddr m68k_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
return addr; return addr;
} }
int cpu_m68k_handle_mmu_fault (CPUM68KState *env, target_ulong address, int rw, int m68k_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int rw,
int mmu_idx) int mmu_idx)
{ {
int prot; int prot;
address &= TARGET_PAGE_MASK; address &= TARGET_PAGE_MASK;
prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
tlb_set_page(env, address, address, prot, mmu_idx, TARGET_PAGE_SIZE); tlb_set_page(cs, address, address, prot, mmu_idx, TARGET_PAGE_SIZE);
return 0; return 0;
} }

@ -428,7 +428,8 @@ void do_m68k_semihosting(CPUM68KState *env, int nr)
case HOSTED_INIT_SIM: case HOSTED_INIT_SIM:
#if defined(CONFIG_USER_ONLY) #if defined(CONFIG_USER_ONLY)
{ {
TaskState *ts = env->opaque; CPUState *cs = CPU(m68k_env_get_cpu(env));
TaskState *ts = cs->opaque;
/* Allocate the heap using sbrk. */ /* Allocate the heap using sbrk. */
if (!ts->heap_limit) { if (!ts->heap_limit) {
abi_ulong ret; abi_ulong ret;
@ -460,7 +461,7 @@ void do_m68k_semihosting(CPUM68KState *env, int nr)
#endif #endif
return; return;
default: default:
cpu_abort(env, "Unsupported semihosting syscall %d\n", nr); cpu_abort(CPU(m68k_env_get_cpu(env)), "Unsupported semihosting syscall %d\n", nr);
result = 0; result = 0;
} }
failed: failed:

@ -23,10 +23,7 @@
void m68k_cpu_do_interrupt(CPUState *cs) void m68k_cpu_do_interrupt(CPUState *cs)
{ {
M68kCPU *cpu = M68K_CPU(cs); cs->exception_index = -1;
CPUM68KState *env = &cpu->env;
env->exception_index = -1;
} }
void do_interrupt_m68k_hardirq(CPUM68KState *env) void do_interrupt_m68k_hardirq(CPUM68KState *env)
@ -56,18 +53,18 @@ extern int semihosting_enabled;
/* Try to fill the TLB and return an exception if error. If retaddr is /* Try to fill the TLB and return an exception if error. If retaddr is
NULL, it means that the function was called in C code (i.e. not NULL, it means that the function was called in C code (i.e. not
from generated code or from helper.c) */ from generated code or from helper.c) */
void tlb_fill(CPUM68KState *env, target_ulong addr, int is_write, int mmu_idx, void tlb_fill(CPUState *cs, target_ulong addr, int is_write, int mmu_idx,
uintptr_t retaddr) uintptr_t retaddr)
{ {
int ret; int ret;
ret = cpu_m68k_handle_mmu_fault(env, addr, is_write, mmu_idx); ret = m68k_cpu_handle_mmu_fault(cs, addr, is_write, mmu_idx);
if (unlikely(ret)) { if (unlikely(ret)) {
if (retaddr) { if (retaddr) {
/* now we have a real cpu fault */ /* now we have a real cpu fault */
cpu_restore_state(env, retaddr); cpu_restore_state(cs, retaddr);
} }
cpu_loop_exit(env); cpu_loop_exit(cs);
} }
} }
@ -87,7 +84,7 @@ static void do_rte(CPUM68KState *env)
static void do_interrupt_all(CPUM68KState *env, int is_hw) static void do_interrupt_all(CPUM68KState *env, int is_hw)
{ {
CPUState *cs; CPUState *cs = CPU(m68k_env_get_cpu(env));
uint32_t sp; uint32_t sp;
uint32_t fmt; uint32_t fmt;
uint32_t retaddr; uint32_t retaddr;
@ -97,7 +94,7 @@ static void do_interrupt_all(CPUM68KState *env, int is_hw)
retaddr = env->pc; retaddr = env->pc;
if (!is_hw) { if (!is_hw) {
switch (env->exception_index) { switch (cs->exception_index) {
case EXCP_RTE: case EXCP_RTE:
/* Return from an exception. */ /* Return from an exception. */
do_rte(env); do_rte(env);
@ -112,20 +109,19 @@ static void do_interrupt_all(CPUM68KState *env, int is_hw)
do_m68k_semihosting(env, env->dregs[0]); do_m68k_semihosting(env, env->dregs[0]);
return; return;
} }
cs = CPU(m68k_env_get_cpu(env));
cs->halted = 1; cs->halted = 1;
env->exception_index = EXCP_HLT; cs->exception_index = EXCP_HLT;
cpu_loop_exit(env); cpu_loop_exit(cs);
return; return;
} }
if (env->exception_index >= EXCP_TRAP0 if (cs->exception_index >= EXCP_TRAP0
&& env->exception_index <= EXCP_TRAP15) { && cs->exception_index <= EXCP_TRAP15) {
/* Move the PC after the trap instruction. */ /* Move the PC after the trap instruction. */
retaddr += 2; retaddr += 2;
} }
} }
vector = env->exception_index << 2; vector = cs->exception_index << 2;
sp = env->aregs[7]; sp = env->aregs[7];
@ -168,8 +164,10 @@ void do_interrupt_m68k_hardirq(CPUM68KState *env)
static void raise_exception(CPUM68KState *env, int tt) static void raise_exception(CPUM68KState *env, int tt)
{ {
env->exception_index = tt; CPUState *cs = CPU(m68k_env_get_cpu(env));
cpu_loop_exit(env);
cs->exception_index = tt;
cpu_loop_exit(cs);
} }
void HELPER(raise_exception)(CPUM68KState *env, uint32_t tt) void HELPER(raise_exception)(CPUM68KState *env, uint32_t tt)

@ -7,6 +7,5 @@ DEFO32(CC_SRC, cc_src)
DEFO32(CC_X, cc_x) DEFO32(CC_X, cc_x)
DEFO32(DIV1, div1) DEFO32(DIV1, div1)
DEFO32(DIV2, div2) DEFO32(DIV2, div2)
DEFO32(EXCEPTION, exception_index)
DEFO32(MACSR, macsr) DEFO32(MACSR, macsr)
DEFO32(MAC_MASK, mac_mask) DEFO32(MAC_MASK, mac_mask)

@ -43,6 +43,7 @@
#undef DEFF64 #undef DEFF64
static TCGv_i32 cpu_halted; static TCGv_i32 cpu_halted;
static TCGv_i32 cpu_exception_index;
static TCGv_ptr cpu_env; static TCGv_ptr cpu_env;
@ -81,6 +82,10 @@ void m68k_tcg_init(void)
cpu_halted = tcg_global_mem_new_i32(TCG_AREG0, cpu_halted = tcg_global_mem_new_i32(TCG_AREG0,
-offsetof(M68kCPU, env) + -offsetof(M68kCPU, env) +
offsetof(CPUState, halted), "HALTED"); offsetof(CPUState, halted), "HALTED");
cpu_exception_index = tcg_global_mem_new_i32(TCG_AREG0,
-offsetof(M68kCPU, env) +
offsetof(CPUState, exception_index),
"EXCEPTION");
cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env"); cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
@ -110,14 +115,6 @@ void m68k_tcg_init(void)
store_dummy = tcg_global_mem_new(TCG_AREG0, -8, "NULL"); store_dummy = tcg_global_mem_new(TCG_AREG0, -8, "NULL");
} }
static inline void qemu_assert(int cond, const char *msg)
{
if (!cond) {
fprintf (stderr, "badness: %s\n", msg);
abort();
}
}
/* internal defines */ /* internal defines */
typedef struct DisasContext { typedef struct DisasContext {
CPUM68KState *env; CPUM68KState *env;
@ -199,7 +196,7 @@ static inline TCGv gen_load(DisasContext * s, int opsize, TCGv addr, int sign)
tcg_gen_qemu_ld32u(tmp, addr, index); tcg_gen_qemu_ld32u(tmp, addr, index);
break; break;
default: default:
qemu_assert(0, "bad load size"); g_assert_not_reached();
} }
gen_throws_exception = gen_last_qop; gen_throws_exception = gen_last_qop;
return tmp; return tmp;
@ -233,7 +230,7 @@ static inline void gen_store(DisasContext *s, int opsize, TCGv addr, TCGv val)
tcg_gen_qemu_st32(val, addr, index); tcg_gen_qemu_st32(val, addr, index);
break; break;
default: default:
qemu_assert(0, "bad store size"); g_assert_not_reached();
} }
gen_throws_exception = gen_last_qop; gen_throws_exception = gen_last_qop;
} }
@ -437,8 +434,7 @@ static inline int opsize_bytes(int opsize)
case OS_SINGLE: return 4; case OS_SINGLE: return 4;
case OS_DOUBLE: return 8; case OS_DOUBLE: return 8;
default: default:
qemu_assert(0, "bad operand size"); g_assert_not_reached();
return 0;
} }
} }
@ -465,8 +461,7 @@ static void gen_partset_reg(int opsize, TCGv reg, TCGv val)
tcg_gen_mov_i32(reg, val); tcg_gen_mov_i32(reg, val);
break; break;
default: default:
qemu_assert(0, "Bad operand size"); g_assert_not_reached();
break;
} }
} }
@ -495,7 +490,7 @@ static inline TCGv gen_extend(TCGv val, int opsize, int sign)
tmp = val; tmp = val;
break; break;
default: default:
qemu_assert(0, "Bad operand size"); g_assert_not_reached();
} }
return tmp; return tmp;
} }
@ -669,7 +664,7 @@ static TCGv gen_ea(CPUM68KState *env, DisasContext *s, uint16_t insn,
offset = read_im32(env, s); offset = read_im32(env, s);
break; break;
default: default:
qemu_assert(0, "Bad immediate operand"); g_assert_not_reached();
} }
return tcg_const_i32(offset); return tcg_const_i32(offset);
default: default:
@ -886,8 +881,10 @@ DISAS_INSN(undef_fpu)
DISAS_INSN(undef) DISAS_INSN(undef)
{ {
M68kCPU *cpu = m68k_env_get_cpu(env);
gen_exception(s, s->pc - 2, EXCP_UNSUPPORTED); gen_exception(s, s->pc - 2, EXCP_UNSUPPORTED);
cpu_abort(env, "Illegal instruction: %04x @ %08x", insn, s->pc - 2); cpu_abort(CPU(cpu), "Illegal instruction: %04x @ %08x", insn, s->pc - 2);
} }
DISAS_INSN(mulw) DISAS_INSN(mulw)
@ -2087,12 +2084,14 @@ DISAS_INSN(wddata)
DISAS_INSN(wdebug) DISAS_INSN(wdebug)
{ {
M68kCPU *cpu = m68k_env_get_cpu(env);
if (IS_USER(s)) { if (IS_USER(s)) {
gen_exception(s, s->pc - 2, EXCP_PRIVILEGE); gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
return; return;
} }
/* TODO: Implement wdebug. */ /* TODO: Implement wdebug. */
qemu_assert(0, "WDEBUG not implemented"); cpu_abort(CPU(cpu), "WDEBUG not implemented");
} }
DISAS_INSN(trap) DISAS_INSN(trap)
@ -2466,14 +2465,18 @@ DISAS_INSN(fbcc)
DISAS_INSN(frestore) DISAS_INSN(frestore)
{ {
M68kCPU *cpu = m68k_env_get_cpu(env);
/* TODO: Implement frestore. */ /* TODO: Implement frestore. */
qemu_assert(0, "FRESTORE not implemented"); cpu_abort(CPU(cpu), "FRESTORE not implemented");
} }
DISAS_INSN(fsave) DISAS_INSN(fsave)
{ {
M68kCPU *cpu = m68k_env_get_cpu(env);
/* TODO: Implement fsave. */ /* TODO: Implement fsave. */
qemu_assert(0, "FSAVE not implemented"); cpu_abort(CPU(cpu), "FSAVE not implemented");
} }
static inline TCGv gen_mac_extract_word(DisasContext *s, TCGv val, int upper) static inline TCGv gen_mac_extract_word(DisasContext *s, TCGv val, int upper)
@ -3008,8 +3011,8 @@ gen_intermediate_code_internal(M68kCPU *cpu, TranslationBlock *tb,
do { do {
pc_offset = dc->pc - pc_start; pc_offset = dc->pc - pc_start;
gen_throws_exception = NULL; gen_throws_exception = NULL;
if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) { if (unlikely(!QTAILQ_EMPTY(&cs->breakpoints))) {
QTAILQ_FOREACH(bp, &env->breakpoints, entry) { QTAILQ_FOREACH(bp, &cs->breakpoints, entry) {
if (bp->pc == dc->pc) { if (bp->pc == dc->pc) {
gen_exception(dc, dc->pc, EXCP_DEBUG); gen_exception(dc, dc->pc, EXCP_DEBUG);
dc->is_jmp = DISAS_JUMP; dc->is_jmp = DISAS_JUMP;
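Two idioms replace the removed qemu_assert() in the translator above, depending on whether the condition is reachable; a brief sketch, with sketch_opsize_bytes as an illustrative name:

static int sketch_opsize_bytes(int opsize)
{
    switch (opsize) {
    case OS_SINGLE:
        return 4;
    case OS_DOUBLE:
        return 8;
    default:
        /* Impossible decoder state: g_assert_not_reached() replaces
         * qemu_assert(0, "bad operand size") and needs no return value. */
        g_assert_not_reached();
    }
}

/* Reachable but unimplemented guest features instead abort through the
 * CPU object, so the report is tied to a specific vCPU:
 *     cpu_abort(CPU(m68k_env_get_cpu(env)), "FSAVE not implemented");
 */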

@ -34,6 +34,11 @@ static void mb_cpu_set_pc(CPUState *cs, vaddr value)
cpu->env.sregs[SR_PC] = value; cpu->env.sregs[SR_PC] = value;
} }
static bool mb_cpu_has_work(CPUState *cs)
{
return cs->interrupt_request & (CPU_INTERRUPT_HARD | CPU_INTERRUPT_NMI);
}
#ifndef CONFIG_USER_ONLY #ifndef CONFIG_USER_ONLY
static void microblaze_cpu_set_irq(void *opaque, int irq, int level) static void microblaze_cpu_set_irq(void *opaque, int irq, int level)
{ {
@ -58,9 +63,9 @@ static void mb_cpu_reset(CPUState *s)
mcc->parent_reset(s); mcc->parent_reset(s);
memset(env, 0, offsetof(CPUMBState, breakpoints)); memset(env, 0, sizeof(CPUMBState));
env->res_addr = RES_ADDR_NONE; env->res_addr = RES_ADDR_NONE;
tlb_flush(env, 1); tlb_flush(s, 1);
/* Disable stack protector. */ /* Disable stack protector. */
env->shr = ~0; env->shr = ~0;
@ -160,12 +165,15 @@ static void mb_cpu_class_init(ObjectClass *oc, void *data)
mcc->parent_reset = cc->reset; mcc->parent_reset = cc->reset;
cc->reset = mb_cpu_reset; cc->reset = mb_cpu_reset;
cc->has_work = mb_cpu_has_work;
cc->do_interrupt = mb_cpu_do_interrupt; cc->do_interrupt = mb_cpu_do_interrupt;
cc->dump_state = mb_cpu_dump_state; cc->dump_state = mb_cpu_dump_state;
cc->set_pc = mb_cpu_set_pc; cc->set_pc = mb_cpu_set_pc;
cc->gdb_read_register = mb_cpu_gdb_read_register; cc->gdb_read_register = mb_cpu_gdb_read_register;
cc->gdb_write_register = mb_cpu_gdb_write_register; cc->gdb_write_register = mb_cpu_gdb_write_register;
#ifndef CONFIG_USER_ONLY #ifdef CONFIG_USER_ONLY
cc->handle_mmu_fault = mb_cpu_handle_mmu_fault;
#else
cc->do_unassigned_access = mb_cpu_unassigned_access; cc->do_unassigned_access = mb_cpu_unassigned_access;
cc->get_phys_page_debug = mb_cpu_get_phys_page_debug; cc->get_phys_page_debug = mb_cpu_get_phys_page_debug;
#endif #endif

@ -332,9 +332,8 @@ static inline int cpu_mmu_index (CPUMBState *env)
return MMU_KERNEL_IDX; return MMU_KERNEL_IDX;
} }
int cpu_mb_handle_mmu_fault(CPUMBState *env, target_ulong address, int rw, int mb_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int rw,
int mmu_idx); int mmu_idx);
#define cpu_handle_mmu_fault cpu_mb_handle_mmu_fault
static inline int cpu_interrupts_enabled(CPUMBState *env) static inline int cpu_interrupts_enabled(CPUMBState *env)
{ {
@ -363,11 +362,6 @@ void mb_cpu_unassigned_access(CPUState *cpu, hwaddr addr,
unsigned size); unsigned size);
#endif #endif
static inline bool cpu_has_work(CPUState *cpu)
{
return cpu->interrupt_request & (CPU_INTERRUPT_HARD | CPU_INTERRUPT_NMI);
}
#include "exec/exec-all.h" #include "exec/exec-all.h"
#endif #endif

@ -31,26 +31,26 @@ void mb_cpu_do_interrupt(CPUState *cs)
MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs); MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
CPUMBState *env = &cpu->env; CPUMBState *env = &cpu->env;
env->exception_index = -1; cs->exception_index = -1;
env->res_addr = RES_ADDR_NONE; env->res_addr = RES_ADDR_NONE;
env->regs[14] = env->sregs[SR_PC]; env->regs[14] = env->sregs[SR_PC];
} }
int cpu_mb_handle_mmu_fault(CPUMBState * env, target_ulong address, int rw, int mb_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int rw,
int mmu_idx) int mmu_idx)
{ {
MicroBlazeCPU *cpu = mb_env_get_cpu(env); cs->exception_index = 0xaa;
cpu_dump_state(cs, stderr, fprintf, 0);
env->exception_index = 0xaa;
cpu_dump_state(CPU(cpu), stderr, fprintf, 0);
return 1; return 1;
} }
#else /* !CONFIG_USER_ONLY */ #else /* !CONFIG_USER_ONLY */
int cpu_mb_handle_mmu_fault (CPUMBState *env, target_ulong address, int rw, int mb_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int rw,
int mmu_idx) int mmu_idx)
{ {
MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
CPUMBState *env = &cpu->env;
unsigned int hit; unsigned int hit;
unsigned int mmu_available; unsigned int mmu_available;
int r = 1; int r = 1;
@ -77,7 +77,7 @@ int cpu_mb_handle_mmu_fault (CPUMBState *env, target_ulong address, int rw,
DMMU(qemu_log("MMU map mmu=%d v=%x p=%x prot=%x\n", DMMU(qemu_log("MMU map mmu=%d v=%x p=%x prot=%x\n",
mmu_idx, vaddr, paddr, lu.prot)); mmu_idx, vaddr, paddr, lu.prot));
tlb_set_page(env, vaddr, paddr, lu.prot, mmu_idx, TARGET_PAGE_SIZE); tlb_set_page(cs, vaddr, paddr, lu.prot, mmu_idx, TARGET_PAGE_SIZE);
r = 0; r = 0;
} else { } else {
env->sregs[SR_EAR] = address; env->sregs[SR_EAR] = address;
@ -97,18 +97,18 @@ int cpu_mb_handle_mmu_fault (CPUMBState *env, target_ulong address, int rw,
break; break;
} }
if (env->exception_index == EXCP_MMU) { if (cs->exception_index == EXCP_MMU) {
cpu_abort(env, "recursive faults\n"); cpu_abort(cs, "recursive faults\n");
} }
/* TLB miss. */ /* TLB miss. */
env->exception_index = EXCP_MMU; cs->exception_index = EXCP_MMU;
} }
} else { } else {
/* MMU disabled or not available. */ /* MMU disabled or not available. */
address &= TARGET_PAGE_MASK; address &= TARGET_PAGE_MASK;
prot = PAGE_BITS; prot = PAGE_BITS;
tlb_set_page(env, address, address, prot, mmu_idx, TARGET_PAGE_SIZE); tlb_set_page(cs, address, address, prot, mmu_idx, TARGET_PAGE_SIZE);
r = 0; r = 0;
} }
return r; return r;
@ -125,7 +125,7 @@ void mb_cpu_do_interrupt(CPUState *cs)
assert(!(env->iflags & (DRTI_FLAG | DRTE_FLAG | DRTB_FLAG))); assert(!(env->iflags & (DRTI_FLAG | DRTE_FLAG | DRTB_FLAG)));
/* assert(env->sregs[SR_MSR] & (MSR_EE)); Only for HW exceptions. */ /* assert(env->sregs[SR_MSR] & (MSR_EE)); Only for HW exceptions. */
env->res_addr = RES_ADDR_NONE; env->res_addr = RES_ADDR_NONE;
switch (env->exception_index) { switch (cs->exception_index) {
case EXCP_HW_EXCP: case EXCP_HW_EXCP:
if (!(env->pvr.regs[0] & PVR0_USE_EXC_MASK)) { if (!(env->pvr.regs[0] & PVR0_USE_EXC_MASK)) {
qemu_log("Exception raised on system without exceptions!\n"); qemu_log("Exception raised on system without exceptions!\n");
@ -251,7 +251,7 @@ void mb_cpu_do_interrupt(CPUState *cs)
env->sregs[SR_MSR] &= ~(MSR_VMS | MSR_UMS | MSR_VM | MSR_UM); env->sregs[SR_MSR] &= ~(MSR_VMS | MSR_UMS | MSR_VM | MSR_UM);
env->sregs[SR_MSR] |= t; env->sregs[SR_MSR] |= t;
env->sregs[SR_MSR] |= MSR_BIP; env->sregs[SR_MSR] |= MSR_BIP;
if (env->exception_index == EXCP_HW_BREAK) { if (cs->exception_index == EXCP_HW_BREAK) {
env->regs[16] = env->sregs[SR_PC]; env->regs[16] = env->sregs[SR_PC];
env->sregs[SR_MSR] |= MSR_BIP; env->sregs[SR_MSR] |= MSR_BIP;
env->sregs[SR_PC] = cpu->base_vectors + 0x18; env->sregs[SR_PC] = cpu->base_vectors + 0x18;
@ -259,8 +259,8 @@ void mb_cpu_do_interrupt(CPUState *cs)
env->sregs[SR_PC] = env->btarget; env->sregs[SR_PC] = env->btarget;
break; break;
default: default:
cpu_abort(env, "unhandled exception type=%d\n", cpu_abort(cs, "unhandled exception type=%d\n",
env->exception_index); cs->exception_index);
break; break;
} }
} }

@ -34,6 +34,7 @@ static unsigned int tlb_decode_size(unsigned int f)
static void mmu_flush_idx(CPUMBState *env, unsigned int idx) static void mmu_flush_idx(CPUMBState *env, unsigned int idx)
{ {
CPUState *cs = CPU(mb_env_get_cpu(env));
struct microblaze_mmu *mmu = &env->mmu; struct microblaze_mmu *mmu = &env->mmu;
unsigned int tlb_size; unsigned int tlb_size;
uint32_t tlb_tag, end, t; uint32_t tlb_tag, end, t;
@ -47,7 +48,7 @@ static void mmu_flush_idx(CPUMBState *env, unsigned int idx)
end = tlb_tag + tlb_size; end = tlb_tag + tlb_size;
while (tlb_tag < end) { while (tlb_tag < end) {
tlb_flush_page(env, tlb_tag); tlb_flush_page(cs, tlb_tag);
tlb_tag += TARGET_PAGE_SIZE; tlb_tag += TARGET_PAGE_SIZE;
} }
} }
@ -218,6 +219,7 @@ uint32_t mmu_read(CPUMBState *env, uint32_t rn)
void mmu_write(CPUMBState *env, uint32_t rn, uint32_t v) void mmu_write(CPUMBState *env, uint32_t rn, uint32_t v)
{ {
MicroBlazeCPU *cpu = mb_env_get_cpu(env);
unsigned int i; unsigned int i;
D(qemu_log("%s rn=%d=%x old=%x\n", __func__, rn, v, env->mmu.regs[rn])); D(qemu_log("%s rn=%d=%x old=%x\n", __func__, rn, v, env->mmu.regs[rn]));
@ -251,7 +253,7 @@ void mmu_write(CPUMBState *env, uint32_t rn, uint32_t v)
/* Changes to the zone protection reg flush the QEMU TLB. /* Changes to the zone protection reg flush the QEMU TLB.
Fortunately, these are very uncommon. */ Fortunately, these are very uncommon. */
if (v != env->mmu.regs[rn]) { if (v != env->mmu.regs[rn]) {
tlb_flush(env, 1); tlb_flush(CPU(cpu), 1);
} }
env->mmu.regs[rn] = v; env->mmu.regs[rn] = v;
break; break;

@ -39,20 +39,21 @@
#include "exec/softmmu_template.h" #include "exec/softmmu_template.h"
/* Try to fill the TLB and return an exception if error. If retaddr is /* Try to fill the TLB and return an exception if error. If retaddr is
NULL, it means that the function was called in C code (i.e. not * NULL, it means that the function was called in C code (i.e. not
from generated code or from helper.c) */ * from generated code or from helper.c)
void tlb_fill(CPUMBState *env, target_ulong addr, int is_write, int mmu_idx, */
void tlb_fill(CPUState *cs, target_ulong addr, int is_write, int mmu_idx,
uintptr_t retaddr) uintptr_t retaddr)
{ {
int ret; int ret;
ret = cpu_mb_handle_mmu_fault(env, addr, is_write, mmu_idx); ret = mb_cpu_handle_mmu_fault(cs, addr, is_write, mmu_idx);
if (unlikely(ret)) { if (unlikely(ret)) {
if (retaddr) { if (retaddr) {
/* now we have a real cpu fault */ /* now we have a real cpu fault */
cpu_restore_state(env, retaddr); cpu_restore_state(cs, retaddr);
} }
cpu_loop_exit(env); cpu_loop_exit(cs);
} }
} }
#endif #endif
@ -94,8 +95,10 @@ uint32_t helper_get(uint32_t id, uint32_t ctrl)
void helper_raise_exception(CPUMBState *env, uint32_t index) void helper_raise_exception(CPUMBState *env, uint32_t index)
{ {
env->exception_index = index; CPUState *cs = CPU(mb_env_get_cpu(env));
cpu_loop_exit(env);
cs->exception_index = index;
cpu_loop_exit(cs);
} }
void helper_debug(CPUMBState *env) void helper_debug(CPUMBState *env)

@ -56,7 +56,7 @@ static TCGv env_res_val;
/* This is the state at translation time. */ /* This is the state at translation time. */
typedef struct DisasContext { typedef struct DisasContext {
CPUMBState *env; MicroBlazeCPU *cpu;
target_ulong pc; target_ulong pc;
/* Decoder. */ /* Decoder. */
@ -327,8 +327,8 @@ static void dec_pattern(DisasContext *dc)
int l1; int l1;
if ((dc->tb_flags & MSR_EE_FLAG) if ((dc->tb_flags & MSR_EE_FLAG)
&& (dc->env->pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK) && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
&& !((dc->env->pvr.regs[2] & PVR2_USE_PCMP_INSTR))) { && !((dc->cpu->env.pvr.regs[2] & PVR2_USE_PCMP_INSTR))) {
tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP); tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
t_gen_raise_exception(dc, EXCP_HW_EXCP); t_gen_raise_exception(dc, EXCP_HW_EXCP);
} }
@ -370,7 +370,7 @@ static void dec_pattern(DisasContext *dc)
} }
break; break;
default: default:
cpu_abort(dc->env, cpu_abort(CPU(dc->cpu),
"unsupported pattern insn opcode=%x\n", dc->opcode); "unsupported pattern insn opcode=%x\n", dc->opcode);
break; break;
} }
@ -441,9 +441,10 @@ static inline void msr_write(DisasContext *dc, TCGv v)
static void dec_msr(DisasContext *dc) static void dec_msr(DisasContext *dc)
{ {
CPUState *cs = CPU(dc->cpu);
TCGv t0, t1; TCGv t0, t1;
unsigned int sr, to, rn; unsigned int sr, to, rn;
int mem_index = cpu_mmu_index(dc->env); int mem_index = cpu_mmu_index(&dc->cpu->env);
sr = dc->imm & ((1 << 14) - 1); sr = dc->imm & ((1 << 14) - 1);
to = dc->imm & (1 << 14); to = dc->imm & (1 << 14);
@ -458,7 +459,7 @@ static void dec_msr(DisasContext *dc)
LOG_DIS("msr%s r%d imm=%x\n", clr ? "clr" : "set", LOG_DIS("msr%s r%d imm=%x\n", clr ? "clr" : "set",
dc->rd, dc->imm); dc->rd, dc->imm);
if (!(dc->env->pvr.regs[2] & PVR2_USE_MSR_INSTR)) { if (!(dc->cpu->env.pvr.regs[2] & PVR2_USE_MSR_INSTR)) {
/* nop??? */ /* nop??? */
return; return;
} }
@ -537,7 +538,7 @@ static void dec_msr(DisasContext *dc)
tcg_gen_st_tl(cpu_R[dc->ra], cpu_env, offsetof(CPUMBState, shr)); tcg_gen_st_tl(cpu_R[dc->ra], cpu_env, offsetof(CPUMBState, shr));
break; break;
default: default:
cpu_abort(dc->env, "unknown mts reg %x\n", sr); cpu_abort(CPU(dc->cpu), "unknown mts reg %x\n", sr);
break; break;
} }
} else { } else {
@ -586,7 +587,7 @@ static void dec_msr(DisasContext *dc)
cpu_env, offsetof(CPUMBState, pvr.regs[rn])); cpu_env, offsetof(CPUMBState, pvr.regs[rn]));
break; break;
default: default:
cpu_abort(dc->env, "unknown mfs reg %x\n", sr); cpu_abort(cs, "unknown mfs reg %x\n", sr);
break; break;
} }
} }
@ -643,8 +644,8 @@ static void dec_mul(DisasContext *dc)
unsigned int subcode; unsigned int subcode;
if ((dc->tb_flags & MSR_EE_FLAG) if ((dc->tb_flags & MSR_EE_FLAG)
&& (dc->env->pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK) && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
&& !(dc->env->pvr.regs[0] & PVR0_USE_HW_MUL_MASK)) { && !(dc->cpu->env.pvr.regs[0] & PVR0_USE_HW_MUL_MASK)) {
tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP); tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
t_gen_raise_exception(dc, EXCP_HW_EXCP); t_gen_raise_exception(dc, EXCP_HW_EXCP);
return; return;
@ -662,7 +663,7 @@ static void dec_mul(DisasContext *dc)
/* mulh, mulhsu and mulhu are not available if C_USE_HW_MUL is < 2. */ /* mulh, mulhsu and mulhu are not available if C_USE_HW_MUL is < 2. */
if (subcode >= 1 && subcode <= 3 if (subcode >= 1 && subcode <= 3
&& !((dc->env->pvr.regs[2] & PVR2_USE_MUL64_MASK))) { && !((dc->cpu->env.pvr.regs[2] & PVR2_USE_MUL64_MASK))) {
/* nop??? */ /* nop??? */
} }
@ -684,7 +685,7 @@ static void dec_mul(DisasContext *dc)
t_gen_mulu(d[0], cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]); t_gen_mulu(d[0], cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
break; break;
default: default:
cpu_abort(dc->env, "unknown MUL insn %x\n", subcode); cpu_abort(CPU(dc->cpu), "unknown MUL insn %x\n", subcode);
break; break;
} }
done: done:
@ -700,8 +701,8 @@ static void dec_div(DisasContext *dc)
u = dc->imm & 2; u = dc->imm & 2;
LOG_DIS("div\n"); LOG_DIS("div\n");
if ((dc->env->pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK) if ((dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
&& !((dc->env->pvr.regs[0] & PVR0_USE_DIV_MASK))) { && !((dc->cpu->env.pvr.regs[0] & PVR0_USE_DIV_MASK))) {
tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP); tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
t_gen_raise_exception(dc, EXCP_HW_EXCP); t_gen_raise_exception(dc, EXCP_HW_EXCP);
} }
@ -722,8 +723,8 @@ static void dec_barrel(DisasContext *dc)
unsigned int s, t; unsigned int s, t;
if ((dc->tb_flags & MSR_EE_FLAG) if ((dc->tb_flags & MSR_EE_FLAG)
&& (dc->env->pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK) && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
&& !(dc->env->pvr.regs[0] & PVR0_USE_BARREL_MASK)) { && !(dc->cpu->env.pvr.regs[0] & PVR0_USE_BARREL_MASK)) {
tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP); tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
t_gen_raise_exception(dc, EXCP_HW_EXCP); t_gen_raise_exception(dc, EXCP_HW_EXCP);
return; return;
@ -752,9 +753,10 @@ static void dec_barrel(DisasContext *dc)
static void dec_bit(DisasContext *dc) static void dec_bit(DisasContext *dc)
{ {
CPUState *cs = CPU(dc->cpu);
TCGv t0; TCGv t0;
unsigned int op; unsigned int op;
int mem_index = cpu_mmu_index(dc->env); int mem_index = cpu_mmu_index(&dc->cpu->env);
op = dc->ir & ((1 << 9) - 1); op = dc->ir & ((1 << 9) - 1);
switch (op) { switch (op) {
@ -819,12 +821,12 @@ static void dec_bit(DisasContext *dc)
break; break;
case 0xe0: case 0xe0:
if ((dc->tb_flags & MSR_EE_FLAG) if ((dc->tb_flags & MSR_EE_FLAG)
&& (dc->env->pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK) && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
&& !((dc->env->pvr.regs[2] & PVR2_USE_PCMP_INSTR))) { && !((dc->cpu->env.pvr.regs[2] & PVR2_USE_PCMP_INSTR))) {
tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP); tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
t_gen_raise_exception(dc, EXCP_HW_EXCP); t_gen_raise_exception(dc, EXCP_HW_EXCP);
} }
if (dc->env->pvr.regs[2] & PVR2_USE_PCMP_INSTR) { if (dc->cpu->env.pvr.regs[2] & PVR2_USE_PCMP_INSTR) {
gen_helper_clz(cpu_R[dc->rd], cpu_R[dc->ra]); gen_helper_clz(cpu_R[dc->rd], cpu_R[dc->ra]);
} }
break; break;
@ -839,8 +841,8 @@ static void dec_bit(DisasContext *dc)
tcg_gen_rotri_i32(cpu_R[dc->rd], cpu_R[dc->ra], 16); tcg_gen_rotri_i32(cpu_R[dc->rd], cpu_R[dc->ra], 16);
break; break;
default: default:
cpu_abort(dc->env, "unknown bit oc=%x op=%x rd=%d ra=%d rb=%d\n", cpu_abort(cs, "unknown bit oc=%x op=%x rd=%d ra=%d rb=%d\n",
dc->pc, op, dc->rd, dc->ra, dc->rb); dc->pc, op, dc->rd, dc->ra, dc->rb);
break; break;
} }
} }
@ -933,7 +935,7 @@ static void dec_load(DisasContext *dc)
} }
if (size > 4 && (dc->tb_flags & MSR_EE_FLAG) if (size > 4 && (dc->tb_flags & MSR_EE_FLAG)
&& (dc->env->pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)) { && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)) {
tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP); tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
t_gen_raise_exception(dc, EXCP_HW_EXCP); t_gen_raise_exception(dc, EXCP_HW_EXCP);
return; return;
@ -991,7 +993,7 @@ static void dec_load(DisasContext *dc)
} }
break; break;
default: default:
cpu_abort(dc->env, "Invalid reverse size\n"); cpu_abort(CPU(dc->cpu), "Invalid reverse size\n");
break; break;
} }
} }
@ -1018,9 +1020,9 @@ static void dec_load(DisasContext *dc)
* address and if that succeeds we write into the destination reg. * address and if that succeeds we write into the destination reg.
*/ */
v = tcg_temp_new(); v = tcg_temp_new();
tcg_gen_qemu_ld_tl(v, *addr, cpu_mmu_index(dc->env), mop); tcg_gen_qemu_ld_tl(v, *addr, cpu_mmu_index(&dc->cpu->env), mop);
if ((dc->env->pvr.regs[2] & PVR2_UNALIGNED_EXC_MASK) && size > 1) { if ((dc->cpu->env.pvr.regs[2] & PVR2_UNALIGNED_EXC_MASK) && size > 1) {
tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc); tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
gen_helper_memalign(cpu_env, *addr, tcg_const_tl(dc->rd), gen_helper_memalign(cpu_env, *addr, tcg_const_tl(dc->rd),
tcg_const_tl(0), tcg_const_tl(size - 1)); tcg_const_tl(0), tcg_const_tl(size - 1));
@ -1063,7 +1065,7 @@ static void dec_store(DisasContext *dc)
} }
if (size > 4 && (dc->tb_flags & MSR_EE_FLAG) if (size > 4 && (dc->tb_flags & MSR_EE_FLAG)
&& (dc->env->pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)) { && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)) {
tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP); tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
t_gen_raise_exception(dc, EXCP_HW_EXCP); t_gen_raise_exception(dc, EXCP_HW_EXCP);
return; return;
@ -1096,7 +1098,8 @@ static void dec_store(DisasContext *dc)
this compare and the following write to be atomic. For user this compare and the following write to be atomic. For user
emulation we need to add atomicity between threads. */ emulation we need to add atomicity between threads. */
tval = tcg_temp_new(); tval = tcg_temp_new();
tcg_gen_qemu_ld_tl(tval, swx_addr, cpu_mmu_index(dc->env), MO_TEUL); tcg_gen_qemu_ld_tl(tval, swx_addr, cpu_mmu_index(&dc->cpu->env),
MO_TEUL);
tcg_gen_brcond_tl(TCG_COND_NE, env_res_val, tval, swx_skip); tcg_gen_brcond_tl(TCG_COND_NE, env_res_val, tval, swx_skip);
write_carryi(dc, 0); write_carryi(dc, 0);
tcg_temp_free(tval); tcg_temp_free(tval);
@ -1142,14 +1145,14 @@ static void dec_store(DisasContext *dc)
} }
break; break;
default: default:
cpu_abort(dc->env, "Invalid reverse size\n"); cpu_abort(CPU(dc->cpu), "Invalid reverse size\n");
break; break;
} }
} }
tcg_gen_qemu_st_tl(cpu_R[dc->rd], *addr, cpu_mmu_index(dc->env), mop); tcg_gen_qemu_st_tl(cpu_R[dc->rd], *addr, cpu_mmu_index(&dc->cpu->env), mop);
/* Verify alignment if needed. */ /* Verify alignment if needed. */
if ((dc->env->pvr.regs[2] & PVR2_UNALIGNED_EXC_MASK) && size > 1) { if ((dc->cpu->env.pvr.regs[2] & PVR2_UNALIGNED_EXC_MASK) && size > 1) {
tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc); tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
/* FIXME: if the alignment is wrong, we should restore the value /* FIXME: if the alignment is wrong, we should restore the value
* in memory. One possible way to achieve this is to probe * in memory. One possible way to achieve this is to probe
@ -1193,7 +1196,7 @@ static inline void eval_cc(DisasContext *dc, unsigned int cc,
tcg_gen_setcond_tl(TCG_COND_GT, d, a, b); tcg_gen_setcond_tl(TCG_COND_GT, d, a, b);
break; break;
default: default:
cpu_abort(dc->env, "Unknown condition code %x.\n", cc); cpu_abort(CPU(dc->cpu), "Unknown condition code %x.\n", cc);
break; break;
} }
} }
@ -1244,7 +1247,7 @@ static void dec_bcc(DisasContext *dc)
static void dec_br(DisasContext *dc) static void dec_br(DisasContext *dc)
{ {
unsigned int dslot, link, abs, mbar; unsigned int dslot, link, abs, mbar;
int mem_index = cpu_mmu_index(dc->env); int mem_index = cpu_mmu_index(&dc->cpu->env);
dslot = dc->ir & (1 << 20); dslot = dc->ir & (1 << 20);
abs = dc->ir & (1 << 19); abs = dc->ir & (1 << 19);
@ -1376,7 +1379,7 @@ static inline void do_rte(DisasContext *dc)
static void dec_rts(DisasContext *dc) static void dec_rts(DisasContext *dc)
{ {
unsigned int b_bit, i_bit, e_bit; unsigned int b_bit, i_bit, e_bit;
int mem_index = cpu_mmu_index(dc->env); int mem_index = cpu_mmu_index(&dc->cpu->env);
i_bit = dc->ir & (1 << 21); i_bit = dc->ir & (1 << 21);
b_bit = dc->ir & (1 << 22); b_bit = dc->ir & (1 << 22);
@ -1423,7 +1426,7 @@ static int dec_check_fpuv2(DisasContext *dc)
{ {
int r; int r;
r = dc->env->pvr.regs[2] & PVR2_USE_FPU2_MASK; r = dc->cpu->env.pvr.regs[2] & PVR2_USE_FPU2_MASK;
if (!r && (dc->tb_flags & MSR_EE_FLAG)) { if (!r && (dc->tb_flags & MSR_EE_FLAG)) {
tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_FPU); tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_FPU);
@ -1437,8 +1440,8 @@ static void dec_fpu(DisasContext *dc)
unsigned int fpu_insn; unsigned int fpu_insn;
if ((dc->tb_flags & MSR_EE_FLAG) if ((dc->tb_flags & MSR_EE_FLAG)
&& (dc->env->pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK) && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
&& !((dc->env->pvr.regs[2] & PVR2_USE_FPU_MASK))) { && !((dc->cpu->env.pvr.regs[2] & PVR2_USE_FPU_MASK))) {
tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP); tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
t_gen_raise_exception(dc, EXCP_HW_EXCP); t_gen_raise_exception(dc, EXCP_HW_EXCP);
return; return;
@ -1540,7 +1543,7 @@ static void dec_fpu(DisasContext *dc)
static void dec_null(DisasContext *dc) static void dec_null(DisasContext *dc)
{ {
if ((dc->tb_flags & MSR_EE_FLAG) if ((dc->tb_flags & MSR_EE_FLAG)
&& (dc->env->pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)) { && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)) {
tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP); tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
t_gen_raise_exception(dc, EXCP_HW_EXCP); t_gen_raise_exception(dc, EXCP_HW_EXCP);
return; return;
@ -1552,7 +1555,7 @@ static void dec_null(DisasContext *dc)
/* Insns connected to FSL or AXI stream attached devices. */ /* Insns connected to FSL or AXI stream attached devices. */
static void dec_stream(DisasContext *dc) static void dec_stream(DisasContext *dc)
{ {
int mem_index = cpu_mmu_index(dc->env); int mem_index = cpu_mmu_index(&dc->cpu->env);
TCGv_i32 t_id, t_ctrl; TCGv_i32 t_id, t_ctrl;
int ctrl; int ctrl;
@ -1628,8 +1631,8 @@ static inline void decode(DisasContext *dc, uint32_t ir)
dc->nr_nops = 0; dc->nr_nops = 0;
else { else {
if ((dc->tb_flags & MSR_EE_FLAG) if ((dc->tb_flags & MSR_EE_FLAG)
&& (dc->env->pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK) && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
&& (dc->env->pvr.regs[2] & PVR2_OPCODE_0x0_ILL_MASK)) { && (dc->cpu->env.pvr.regs[2] & PVR2_OPCODE_0x0_ILL_MASK)) {
tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP); tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
t_gen_raise_exception(dc, EXCP_HW_EXCP); t_gen_raise_exception(dc, EXCP_HW_EXCP);
return; return;
@ -1637,8 +1640,9 @@ static inline void decode(DisasContext *dc, uint32_t ir)
LOG_DIS("nr_nops=%d\t", dc->nr_nops); LOG_DIS("nr_nops=%d\t", dc->nr_nops);
dc->nr_nops++; dc->nr_nops++;
if (dc->nr_nops > 4) if (dc->nr_nops > 4) {
cpu_abort(dc->env, "fetching nop sequence\n"); cpu_abort(CPU(dc->cpu), "fetching nop sequence\n");
}
} }
/* bit 2 seems to indicate insn type. */ /* bit 2 seems to indicate insn type. */
dc->type_b = ir & (1 << 29); dc->type_b = ir & (1 << 29);
@ -1660,10 +1664,11 @@ static inline void decode(DisasContext *dc, uint32_t ir)
static void check_breakpoint(CPUMBState *env, DisasContext *dc) static void check_breakpoint(CPUMBState *env, DisasContext *dc)
{ {
CPUState *cs = CPU(mb_env_get_cpu(env));
CPUBreakpoint *bp; CPUBreakpoint *bp;
if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) { if (unlikely(!QTAILQ_EMPTY(&cs->breakpoints))) {
QTAILQ_FOREACH(bp, &env->breakpoints, entry) { QTAILQ_FOREACH(bp, &cs->breakpoints, entry) {
if (bp->pc == dc->pc) { if (bp->pc == dc->pc) {
t_gen_raise_exception(dc, EXCP_DEBUG); t_gen_raise_exception(dc, EXCP_DEBUG);
dc->is_jmp = DISAS_UPDATE; dc->is_jmp = DISAS_UPDATE;
@ -1690,7 +1695,7 @@ gen_intermediate_code_internal(MicroBlazeCPU *cpu, TranslationBlock *tb,
int max_insns; int max_insns;
pc_start = tb->pc; pc_start = tb->pc;
dc->env = env; dc->cpu = cpu;
dc->tb = tb; dc->tb = tb;
org_flags = dc->synced_flags = dc->tb_flags = tb->flags; org_flags = dc->synced_flags = dc->tb_flags = tb->flags;
@ -1708,8 +1713,9 @@ gen_intermediate_code_internal(MicroBlazeCPU *cpu, TranslationBlock *tb,
dc->abort_at_next_insn = 0; dc->abort_at_next_insn = 0;
dc->nr_nops = 0; dc->nr_nops = 0;
if (pc_start & 3) if (pc_start & 3) {
cpu_abort(env, "Microblaze: unaligned PC=%x\n", pc_start); cpu_abort(cs, "Microblaze: unaligned PC=%x\n", pc_start);
}
if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) { if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
#if !SIM_COMPAT #if !SIM_COMPAT
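The DisasContext change above caches the MicroBlazeCPU instead of a bare CPUMBState pointer; a minimal sketch of how decode helpers read PVR bits and report fatal errors after the change (dec_example is an illustrative name, not a function in the tree):

static void dec_example(DisasContext *dc)
{
    /* env is now reached through the CPU object cached in DisasContext. */
    CPUMBState *env = &dc->cpu->env;

    switch (dc->opcode) {
    case 0:
        if (env->pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK) {
            /* Configured to trap: raise the illegal-opcode exception. */
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
        }
        break;
    default:
        /* Unknown encodings abort through the CPUState, not the env. */
        cpu_abort(CPU(dc->cpu), "unknown insn opcode=%x\n", dc->opcode);
        break;
    }
}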

@ -45,6 +45,35 @@ static void mips_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb)
env->hflags |= tb->flags & MIPS_HFLAG_BMASK; env->hflags |= tb->flags & MIPS_HFLAG_BMASK;
} }
static bool mips_cpu_has_work(CPUState *cs)
{
MIPSCPU *cpu = MIPS_CPU(cs);
CPUMIPSState *env = &cpu->env;
bool has_work = false;
/* It is implementation dependent if non-enabled interrupts
wake-up the CPU, however most of the implementations only
check for interrupts that can be taken. */
if ((cs->interrupt_request & CPU_INTERRUPT_HARD) &&
cpu_mips_hw_interrupts_pending(env)) {
has_work = true;
}
/* MIPS-MT has the ability to halt the CPU. */
if (env->CP0_Config3 & (1 << CP0C3_MT)) {
/* The QEMU model will issue an _WAKE request whenever the CPUs
should be woken up. */
if (cs->interrupt_request & CPU_INTERRUPT_WAKE) {
has_work = true;
}
if (!mips_vpe_active(env)) {
has_work = false;
}
}
return has_work;
}
/* CPUClass::reset() */ /* CPUClass::reset() */
static void mips_cpu_reset(CPUState *s) static void mips_cpu_reset(CPUState *s)
{ {
@ -54,8 +83,8 @@ static void mips_cpu_reset(CPUState *s)
mcc->parent_reset(s); mcc->parent_reset(s);
memset(env, 0, offsetof(CPUMIPSState, breakpoints)); memset(env, 0, offsetof(CPUMIPSState, mvp));
tlb_flush(env, 1); tlb_flush(s, 1);
cpu_state_reset(env); cpu_state_reset(env);
} }
@ -97,13 +126,16 @@ static void mips_cpu_class_init(ObjectClass *c, void *data)
mcc->parent_reset = cc->reset; mcc->parent_reset = cc->reset;
cc->reset = mips_cpu_reset; cc->reset = mips_cpu_reset;
cc->has_work = mips_cpu_has_work;
cc->do_interrupt = mips_cpu_do_interrupt; cc->do_interrupt = mips_cpu_do_interrupt;
cc->dump_state = mips_cpu_dump_state; cc->dump_state = mips_cpu_dump_state;
cc->set_pc = mips_cpu_set_pc; cc->set_pc = mips_cpu_set_pc;
cc->synchronize_from_tb = mips_cpu_synchronize_from_tb; cc->synchronize_from_tb = mips_cpu_synchronize_from_tb;
cc->gdb_read_register = mips_cpu_gdb_read_register; cc->gdb_read_register = mips_cpu_gdb_read_register;
cc->gdb_write_register = mips_cpu_gdb_write_register; cc->gdb_write_register = mips_cpu_gdb_write_register;
#ifndef CONFIG_USER_ONLY #ifdef CONFIG_USER_ONLY
cc->handle_mmu_fault = mips_cpu_handle_mmu_fault;
#else
cc->do_unassigned_access = mips_cpu_unassigned_access; cc->do_unassigned_access = mips_cpu_unassigned_access;
cc->get_phys_page_debug = mips_cpu_get_phys_page_debug; cc->get_phys_page_debug = mips_cpu_get_phys_page_debug;
#endif #endif

@@ -482,6 +482,7 @@ struct CPUMIPSState {
 
     CPU_COMMON
 
+    /* Fields from here on are preserved across CPU reset.  */
     CPUMIPSMVPContext *mvp;
 #if !defined(CONFIG_USER_ONLY)
     CPUMIPSTLBContext *tlb;
@@ -666,9 +667,8 @@ void cpu_mips_stop_count(CPUMIPSState *env);
 void cpu_mips_soft_irq(CPUMIPSState *env, int irq, int level);
 
 /* helper.c */
-int cpu_mips_handle_mmu_fault (CPUMIPSState *env, target_ulong address, int rw,
-                               int mmu_idx);
-#define cpu_handle_mmu_fault cpu_mips_handle_mmu_fault
+int mips_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int rw,
+                              int mmu_idx);
 #if !defined(CONFIG_USER_ONLY)
 void r4k_invalidate_tlb (CPUMIPSState *env, int idx, int use_extra);
 hwaddr cpu_mips_translate_address (CPUMIPSState *env, target_ulong address,
@@ -715,34 +715,6 @@ static inline int mips_vpe_active(CPUMIPSState *env)
     return active;
 }
 
-static inline bool cpu_has_work(CPUState *cpu)
-{
-    CPUMIPSState *env = &MIPS_CPU(cpu)->env;
-    bool has_work = false;
-
-    /* It is implementation dependent if non-enabled interrupts
-       wake-up the CPU, however most of the implementations only
-       check for interrupts that can be taken. */
-    if ((cpu->interrupt_request & CPU_INTERRUPT_HARD) &&
-        cpu_mips_hw_interrupts_pending(env)) {
-        has_work = true;
-    }
-
-    /* MIPS-MT has the ability to halt the CPU.  */
-    if (env->CP0_Config3 & (1 << CP0C3_MT)) {
-        /* The QEMU model will issue an _WAKE request whenever the CPUs
-           should be woken up.  */
-        if (cpu->interrupt_request & CPU_INTERRUPT_WAKE) {
-            has_work = true;
-        }
-
-        if (!mips_vpe_active(env)) {
-            has_work = false;
-        }
-    }
-    return has_work;
-}
-
 #include "exec/exec-all.h"
 
 static inline void compute_hflags(CPUMIPSState *env)
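
Outside the diff itself, these two MIPS hunks show the recurring pattern of this series: the per-target cpu_has_work() inline in cpu.h is deleted and the same logic is re-registered as a CPUClass::has_work hook from the target's class_init. A condensed sketch of the pattern for a hypothetical "foo" target, assuming QEMU's internal qom/cpu.h API of this period:

    #include "qom/cpu.h"    /* CPUState, CPUClass, CPU_CLASS(); internal QEMU header */

    static bool foo_cpu_has_work(CPUState *cs)
    {
        /* Same test the old cpu.h inline performed, now behind the hook. */
        return cs->interrupt_request & CPU_INTERRUPT_HARD;
    }

    static void foo_cpu_class_init(ObjectClass *oc, void *data)
    {
        CPUClass *cc = CPU_CLASS(oc);

        cc->has_work = foo_cpu_has_work;    /* replaces the cpu.h inline */
    }
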

View file

@@ -204,6 +204,7 @@ static int get_physical_address (CPUMIPSState *env, hwaddr *physical,
 static void raise_mmu_exception(CPUMIPSState *env, target_ulong address,
                                 int rw, int tlb_error)
 {
+    CPUState *cs = CPU(mips_env_get_cpu(env));
     int exception = 0, error_code = 0;
 
     switch (tlb_error) {
@@ -249,7 +250,7 @@ static void raise_mmu_exception(CPUMIPSState *env, target_ulong address,
         ((address & 0xC00000000000ULL) >> (55 - env->SEGBITS)) |
         ((address & ((1ULL << env->SEGBITS) - 1) & 0xFFFFFFFFFFFFE000ULL) >> 9);
 #endif
-    env->exception_index = exception;
+    cs->exception_index = exception;
     env->error_code = error_code;
 }
 
@@ -268,9 +269,11 @@ hwaddr mips_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
 }
 #endif
 
-int cpu_mips_handle_mmu_fault (CPUMIPSState *env, target_ulong address, int rw,
-                               int mmu_idx)
+int mips_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int rw,
+                              int mmu_idx)
 {
+    MIPSCPU *cpu = MIPS_CPU(cs);
+    CPUMIPSState *env = &cpu->env;
 #if !defined(CONFIG_USER_ONLY)
     hwaddr physical;
     int prot;
@@ -279,9 +282,9 @@ int cpu_mips_handle_mmu_fault (CPUMIPSState *env, target_ulong address, int rw,
     int ret = 0;
 
 #if 0
-    log_cpu_state(CPU(mips_env_get_cpu(env)), 0);
+    log_cpu_state(cs, 0);
 #endif
-    qemu_log("%s pc " TARGET_FMT_lx " ad " TARGET_FMT_lx " rw %d mmu_idx %d\n",
+    qemu_log("%s pc " TARGET_FMT_lx " ad %" VADDR_PRIx " rw %d mmu_idx %d\n",
              __func__, env->active_tc.PC, address, rw, mmu_idx);
 
     rw &= 1;
@@ -293,10 +296,11 @@ int cpu_mips_handle_mmu_fault (CPUMIPSState *env, target_ulong address, int rw,
     access_type = ACCESS_INT;
     ret = get_physical_address(env, &physical, &prot,
                                address, rw, access_type);
-    qemu_log("%s address=" TARGET_FMT_lx " ret %d physical " TARGET_FMT_plx " prot %d\n",
-             __func__, address, ret, physical, prot);
+    qemu_log("%s address=%" VADDR_PRIx " ret %d physical " TARGET_FMT_plx
+             " prot %d\n",
+             __func__, address, ret, physical, prot);
     if (ret == TLBRET_MATCH) {
-        tlb_set_page(env, address & TARGET_PAGE_MASK,
+        tlb_set_page(cs, address & TARGET_PAGE_MASK,
                      physical & TARGET_PAGE_MASK, prot | PAGE_EXEC,
                      mmu_idx, TARGET_PAGE_SIZE);
         ret = 0;
@@ -401,27 +405,29 @@ static void set_hflags_for_handler (CPUMIPSState *env)
 
 void mips_cpu_do_interrupt(CPUState *cs)
 {
+#if !defined(CONFIG_USER_ONLY)
     MIPSCPU *cpu = MIPS_CPU(cs);
     CPUMIPSState *env = &cpu->env;
-#if !defined(CONFIG_USER_ONLY)
     target_ulong offset;
     int cause = -1;
     const char *name;
 
-    if (qemu_log_enabled() && env->exception_index != EXCP_EXT_INTERRUPT) {
-        if (env->exception_index < 0 || env->exception_index > EXCP_LAST)
+    if (qemu_log_enabled() && cs->exception_index != EXCP_EXT_INTERRUPT) {
+        if (cs->exception_index < 0 || cs->exception_index > EXCP_LAST) {
             name = "unknown";
-        else
-            name = excp_names[env->exception_index];
+        } else {
+            name = excp_names[cs->exception_index];
+        }
 
         qemu_log("%s enter: PC " TARGET_FMT_lx " EPC " TARGET_FMT_lx " %s exception\n",
                  __func__, env->active_tc.PC, env->CP0_EPC, name);
     }
-    if (env->exception_index == EXCP_EXT_INTERRUPT &&
-        (env->hflags & MIPS_HFLAG_DM))
-        env->exception_index = EXCP_DINT;
+    if (cs->exception_index == EXCP_EXT_INTERRUPT &&
+        (env->hflags & MIPS_HFLAG_DM)) {
+        cs->exception_index = EXCP_DINT;
+    }
     offset = 0x180;
-    switch (env->exception_index) {
+    switch (cs->exception_index) {
     case EXCP_DSS:
         env->CP0_Debug |= 1 << CP0DB_DSS;
         /* Debug single step cannot be raised inside a delay slot and
@@ -629,11 +635,11 @@ void mips_cpu_do_interrupt(CPUState *cs)
         env->CP0_Cause = (env->CP0_Cause & ~(0x1f << CP0Ca_EC)) | (cause << CP0Ca_EC);
         break;
     default:
-        qemu_log("Invalid MIPS exception %d. Exiting\n", env->exception_index);
-        printf("Invalid MIPS exception %d. Exiting\n", env->exception_index);
+        qemu_log("Invalid MIPS exception %d. Exiting\n", cs->exception_index);
+        printf("Invalid MIPS exception %d. Exiting\n", cs->exception_index);
         exit(1);
     }
-    if (qemu_log_enabled() && env->exception_index != EXCP_EXT_INTERRUPT) {
+    if (qemu_log_enabled() && cs->exception_index != EXCP_EXT_INTERRUPT) {
         qemu_log("%s: PC " TARGET_FMT_lx " EPC " TARGET_FMT_lx " cause %d\n"
                  "    S %08x C %08x A " TARGET_FMT_lx " D " TARGET_FMT_lx "\n",
                  __func__, env->active_tc.PC, env->CP0_EPC, cause,
@@ -641,12 +647,14 @@ void mips_cpu_do_interrupt(CPUState *cs)
                 env->CP0_DEPC);
     }
 #endif
-    env->exception_index = EXCP_NONE;
+    cs->exception_index = EXCP_NONE;
 }
 
 #if !defined(CONFIG_USER_ONLY)
 void r4k_invalidate_tlb (CPUMIPSState *env, int idx, int use_extra)
 {
+    MIPSCPU *cpu = mips_env_get_cpu(env);
+    CPUState *cs;
     r4k_tlb_t *tlb;
     target_ulong addr;
     target_ulong end;
@@ -672,6 +680,7 @@ void r4k_invalidate_tlb (CPUMIPSState *env, int idx, int use_extra)
     /* 1k pages are not supported. */
     mask = tlb->PageMask | ~(TARGET_PAGE_MASK << 1);
     if (tlb->V0) {
+        cs = CPU(cpu);
         addr = tlb->VPN & ~mask;
 #if defined(TARGET_MIPS64)
         if (addr >= (0xFFFFFFFF80000000ULL & env->SEGMask)) {
@@ -680,11 +689,12 @@ void r4k_invalidate_tlb (CPUMIPSState *env, int idx, int use_extra)
 #endif
         end = addr | (mask >> 1);
         while (addr < end) {
-            tlb_flush_page (env, addr);
+            tlb_flush_page(cs, addr);
             addr += TARGET_PAGE_SIZE;
         }
     }
     if (tlb->V1) {
+        cs = CPU(cpu);
         addr = (tlb->VPN & ~mask) | ((mask >> 1) + 1);
 #if defined(TARGET_MIPS64)
         if (addr >= (0xFFFFFFFF80000000ULL & env->SEGMask)) {
@@ -693,7 +703,7 @@ void r4k_invalidate_tlb (CPUMIPSState *env, int idx, int use_extra)
 #endif
         end = addr | mask;
         while (addr - 1 < end) {
-            tlb_flush_page (env, addr);
+            tlb_flush_page(cs, addr);
            addr += TARGET_PAGE_SIZE;
         }
     }

View file

@@ -191,6 +191,7 @@ static void load_fpu(QEMUFile *f, CPUMIPSFPUContext *fpu)
 int cpu_load(QEMUFile *f, void *opaque, int version_id)
 {
     CPUMIPSState *env = opaque;
+    MIPSCPU *cpu = mips_env_get_cpu(env);
     int i;
 
     if (version_id != 3)
@@ -303,6 +304,6 @@ int cpu_load(QEMUFile *f, void *opaque, int version_id)
         load_fpu(f, &env->fpus[i]);
 
     /* XXX: ensure compatibility for halted bit ? */
-    tlb_flush(env, 1);
+    tlb_flush(CPU(cpu), 1);
     return 0;
 }

View file

@@ -38,18 +38,20 @@ static inline void QEMU_NORETURN do_raise_exception_err(CPUMIPSState *env,
                                                         int error_code,
                                                         uintptr_t pc)
 {
+    CPUState *cs = CPU(mips_env_get_cpu(env));
+
     if (exception < EXCP_SC) {
         qemu_log("%s: %d %d\n", __func__, exception, error_code);
     }
-    env->exception_index = exception;
+    cs->exception_index = exception;
     env->error_code = error_code;
 
     if (pc) {
         /* now we have a real cpu fault */
-        cpu_restore_state(env, pc);
+        cpu_restore_state(cs, pc);
     }
 
-    cpu_loop_exit(env);
+    cpu_loop_exit(cs);
 }
 
 static inline void QEMU_NORETURN do_raise_exception(CPUMIPSState *env,
@@ -278,7 +280,7 @@ static inline hwaddr do_translate_address(CPUMIPSState *env,
     lladdr = cpu_mips_translate_address(env, address, rw);
 
     if (lladdr == -1LL) {
-        cpu_loop_exit(env);
+        cpu_loop_exit(CPU(mips_env_get_cpu(env)));
     } else {
         return lladdr;
     }
@@ -1342,6 +1344,7 @@ void helper_mtc0_compare(CPUMIPSState *env, target_ulong arg1)
 
 void helper_mtc0_status(CPUMIPSState *env, target_ulong arg1)
 {
+    MIPSCPU *cpu = mips_env_get_cpu(env);
     uint32_t val, old;
     uint32_t mask = env->CP0_Status_rw_bitmask;
 
@@ -1363,7 +1366,9 @@ void helper_mtc0_status(CPUMIPSState *env, target_ulong arg1)
         case MIPS_HFLAG_UM: qemu_log(", UM\n"); break;
         case MIPS_HFLAG_SM: qemu_log(", SM\n"); break;
         case MIPS_HFLAG_KM: qemu_log("\n"); break;
-        default: cpu_abort(env, "Invalid MMU mode!\n"); break;
+        default:
+            cpu_abort(CPU(cpu), "Invalid MMU mode!\n");
+            break;
         }
     }
 }
@@ -1782,8 +1787,10 @@ target_ulong helper_yield(CPUMIPSState *env, target_ulong arg)
 
 /* TLB management */
 static void cpu_mips_tlb_flush (CPUMIPSState *env, int flush_global)
 {
+    MIPSCPU *cpu = mips_env_get_cpu(env);
+
     /* Flush qemu's TLB and discard all shadowed entries.  */
-    tlb_flush (env, flush_global);
+    tlb_flush(CPU(cpu), flush_global);
     env->tlb->tlb_in_use = env->tlb->nb_tlb;
 }
 
@@ -1983,6 +1990,8 @@ static void debug_pre_eret(CPUMIPSState *env)
 
 static void debug_post_eret(CPUMIPSState *env)
 {
+    MIPSCPU *cpu = mips_env_get_cpu(env);
+
     if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
         qemu_log(" => PC " TARGET_FMT_lx " EPC " TARGET_FMT_lx,
                  env->active_tc.PC, env->CP0_EPC);
@@ -1994,7 +2003,9 @@ static void debug_post_eret(CPUMIPSState *env)
         case MIPS_HFLAG_UM: qemu_log(", UM\n"); break;
         case MIPS_HFLAG_SM: qemu_log(", SM\n"); break;
         case MIPS_HFLAG_KM: qemu_log("\n"); break;
-        default: cpu_abort(env, "Invalid MMU mode!\n"); break;
+        default:
+            cpu_abort(CPU(cpu), "Invalid MMU mode!\n");
+            break;
         }
     }
 }
@@ -2143,14 +2154,17 @@ static void do_unaligned_access(CPUMIPSState *env, target_ulong addr,
     do_raise_exception(env, (is_write == 1) ? EXCP_AdES : EXCP_AdEL, retaddr);
 }
 
-void tlb_fill(CPUMIPSState *env, target_ulong addr, int is_write, int mmu_idx,
+void tlb_fill(CPUState *cs, target_ulong addr, int is_write, int mmu_idx,
               uintptr_t retaddr)
 {
     int ret;
 
-    ret = cpu_mips_handle_mmu_fault(env, addr, is_write, mmu_idx);
+    ret = mips_cpu_handle_mmu_fault(cs, addr, is_write, mmu_idx);
     if (ret) {
-        do_raise_exception_err(env, env->exception_index,
+        MIPSCPU *cpu = MIPS_CPU(cs);
+        CPUMIPSState *env = &cpu->env;
+
+        do_raise_exception_err(env, cs->exception_index,
                                env->error_code, retaddr);
     }
 }
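
Outside the diff itself: the tlb_fill() conversion just above is the shape that recurs in the Moxie and OpenRISC hunks further down — the softmmu entry point now receives a CPUState, forwards it to the target's *_cpu_handle_mmu_fault() (the new CPUClass::handle_mmu_fault hook), and on failure unwinds through the CPUState-based cpu_restore_state()/cpu_loop_exit(). A hedged sketch for a hypothetical "foo" target, reusing only the internal QEMU calls visible in this diff:

    /* Sketch only: foo_cpu_handle_mmu_fault() is a placeholder for the
     * hypothetical target's hook implementation. */
    void tlb_fill(CPUState *cs, target_ulong addr, int is_write, int mmu_idx,
                  uintptr_t retaddr)
    {
        int ret = foo_cpu_handle_mmu_fault(cs, addr, is_write, mmu_idx);

        if (ret) {
            if (retaddr) {
                /* Recover the guest PC of the faulting access. */
                cpu_restore_state(cs, retaddr);
            }
            /* Deliver the exception; never returns. */
            cpu_loop_exit(cs);
        }
    }
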

View file

@@ -15613,8 +15613,8 @@ gen_intermediate_code_internal(MIPSCPU *cpu, TranslationBlock *tb,
     LOG_DISAS("\ntb %p idx %d hflags %04x\n", tb, ctx.mem_idx, ctx.hflags);
     gen_tb_start();
     while (ctx.bstate == BS_NONE) {
-        if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
-            QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
+        if (unlikely(!QTAILQ_EMPTY(&cs->breakpoints))) {
+            QTAILQ_FOREACH(bp, &cs->breakpoints, entry) {
                 if (bp->pc == ctx.pc) {
                     save_cpu_state(&ctx, 1);
                     ctx.bstate = BS_BRANCH;
@@ -15929,10 +15929,8 @@ MIPSCPU *cpu_mips_init(const char *cpu_model)
 
 void cpu_state_reset(CPUMIPSState *env)
 {
-#ifndef CONFIG_USER_ONLY
     MIPSCPU *cpu = mips_env_get_cpu(env);
     CPUState *cs = CPU(cpu);
-#endif
 
     /* Reset registers to their default values */
     env->CP0_PRid = env->cpu_model->CP0_PRid;
@@ -16063,7 +16061,7 @@ void cpu_state_reset(CPUMIPSState *env)
     }
 #endif
     compute_hflags(env);
-    env->exception_index = EXCP_NONE;
+    cs->exception_index = EXCP_NONE;
 }
 
 void restore_state_to_opc(CPUMIPSState *env, TranslationBlock *tb, int pc_pos)

View file

@@ -629,6 +629,8 @@ static void r4k_mmu_init (CPUMIPSState *env, const mips_def_t *def)
 
 static void mmu_init (CPUMIPSState *env, const mips_def_t *def)
 {
+    MIPSCPU *cpu = mips_env_get_cpu(env);
+
     env->tlb = g_malloc0(sizeof(CPUMIPSTLBContext));
 
     switch (def->mmu_type) {
@@ -645,7 +647,7 @@ static void mmu_init (CPUMIPSState *env, const mips_def_t *def)
         case MMU_TYPE_R6000:
         case MMU_TYPE_R8000:
         default:
-            cpu_abort(env, "MMU type not supported\n");
+            cpu_abort(CPU(cpu), "MMU type not supported\n");
     }
 }
 #endif /* CONFIG_USER_ONLY */

View file

@@ -29,6 +29,11 @@ static void moxie_cpu_set_pc(CPUState *cs, vaddr value)
     cpu->env.pc = value;
 }
 
+static bool moxie_cpu_has_work(CPUState *cs)
+{
+    return cs->interrupt_request & CPU_INTERRUPT_HARD;
+}
+
 static void moxie_cpu_reset(CPUState *s)
 {
     MoxieCPU *cpu = MOXIE_CPU(s);
@@ -37,10 +42,10 @@ static void moxie_cpu_reset(CPUState *s)
 
     mcc->parent_reset(s);
 
-    memset(env, 0, offsetof(CPUMoxieState, breakpoints));
+    memset(env, 0, sizeof(CPUMoxieState));
     env->pc = 0x1000;
 
-    tlb_flush(env, 1);
+    tlb_flush(s, 1);
 }
 
 static void moxie_cpu_realizefn(DeviceState *dev, Error **errp)
@@ -99,10 +104,13 @@ static void moxie_cpu_class_init(ObjectClass *oc, void *data)
 
     cc->class_by_name = moxie_cpu_class_by_name;
 
+    cc->has_work = moxie_cpu_has_work;
     cc->do_interrupt = moxie_cpu_do_interrupt;
     cc->dump_state = moxie_cpu_dump_state;
     cc->set_pc = moxie_cpu_set_pc;
-#ifndef CONFIG_USER_ONLY
+#ifdef CONFIG_USER_ONLY
+    cc->handle_mmu_fault = moxie_cpu_handle_mmu_fault;
+#else
     cc->get_phys_page_debug = moxie_cpu_get_phys_page_debug;
     cc->vmsd = &vmstate_moxie_cpu;
 #endif
@@ -130,18 +138,7 @@ static const MoxieCPUInfo moxie_cpus[] = {
 
 MoxieCPU *cpu_moxie_init(const char *cpu_model)
 {
-    MoxieCPU *cpu;
-    ObjectClass *oc;
-
-    oc = moxie_cpu_class_by_name(cpu_model);
-    if (oc == NULL) {
-        return NULL;
-    }
-    cpu = MOXIE_CPU(object_new(object_class_get_name(oc)));
-
-    object_property_set_bool(OBJECT(cpu), true, "realized", NULL);
-
-    return cpu;
+    return MOXIE_CPU(cpu_generic_init(TYPE_MOXIE_CPU, cpu_model));
 }
 
 static void cpu_register(const MoxieCPUInfo *info)
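
Outside the diff itself: cpu_moxie_init() (and cpu_openrisc_init() below) collapse to a call to cpu_generic_init(), which bundles the class lookup, object creation and "realized" step that the deleted lines open-coded. Roughly, and glossing over error handling (a sketch, not the actual qom/cpu.c implementation):

    /* Approximate behaviour of cpu_generic_init(); names taken from the
     * removed open-coded versions in this diff. */
    CPUState *cpu_generic_init(const char *typename, const char *cpu_model)
    {
        /* Resolve the -cpu model name to a concrete CPU class... */
        ObjectClass *oc = cpu_class_by_name(typename, cpu_model);
        CPUState *cpu;

        if (oc == NULL) {
            return NULL;
        }
        /* ...then instantiate and realize it, as the removed code did by hand. */
        cpu = CPU(object_new(object_class_get_name(oc)));
        object_property_set_bool(OBJECT(cpu), true, "realized", NULL);
        return cpu;
    }
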

View file

@@ -152,12 +152,7 @@ static inline void cpu_get_tb_cpu_state(CPUMoxieState *env, target_ulong *pc,
     *flags = 0;
 }
 
-static inline int cpu_has_work(CPUState *cpu)
-{
-    return cpu->interrupt_request & CPU_INTERRUPT_HARD;
-}
-
-int cpu_moxie_handle_mmu_fault(CPUMoxieState *env, target_ulong address,
+int moxie_cpu_handle_mmu_fault(CPUState *cpu, vaddr address,
                                int rw, int mmu_idx);
 
 #endif /* _CPU_MOXIE_H */

View file

@@ -46,31 +46,33 @@
 /* Try to fill the TLB and return an exception if error. If retaddr is
    NULL, it means that the function was called in C code (i.e. not
    from generated code or from helper.c) */
-void tlb_fill(CPUMoxieState *env, target_ulong addr, int is_write, int mmu_idx,
+void tlb_fill(CPUState *cs, target_ulong addr, int is_write, int mmu_idx,
               uintptr_t retaddr)
 {
     int ret;
 
-    ret = cpu_moxie_handle_mmu_fault(env, addr, is_write, mmu_idx);
+    ret = moxie_cpu_handle_mmu_fault(cs, addr, is_write, mmu_idx);
     if (unlikely(ret)) {
         if (retaddr) {
-            cpu_restore_state(env, retaddr);
+            cpu_restore_state(cs, retaddr);
         }
     }
-    cpu_loop_exit(env);
+    cpu_loop_exit(cs);
 }
 
 void helper_raise_exception(CPUMoxieState *env, int ex)
 {
-    env->exception_index = ex;
+    CPUState *cs = CPU(moxie_env_get_cpu(env));
+
+    cs->exception_index = ex;
     /* Stash the exception type. */
     env->sregs[2] = ex;
     /* Stash the address where the exception occurred. */
-    cpu_restore_state(env, GETPC());
+    cpu_restore_state(cs, GETPC());
     env->sregs[5] = env->pc;
     /* Jump the the exception handline routine. */
     env->pc = env->sregs[1];
-    cpu_loop_exit(env);
+    cpu_loop_exit(cs);
 }
 
 uint32_t helper_div(CPUMoxieState *env, uint32_t a, uint32_t b)
@@ -97,33 +99,39 @@ uint32_t helper_udiv(CPUMoxieState *env, uint32_t a, uint32_t b)
 
 void helper_debug(CPUMoxieState *env)
 {
-    env->exception_index = EXCP_DEBUG;
-    cpu_loop_exit(env);
+    CPUState *cs = CPU(moxie_env_get_cpu(env));
+
+    cs->exception_index = EXCP_DEBUG;
+    cpu_loop_exit(cs);
 }
 
 #if defined(CONFIG_USER_ONLY)
 
-void moxie_cpu_do_interrupt(CPUState *env)
+void moxie_cpu_do_interrupt(CPUState *cs)
 {
-    env->exception_index = -1;
+    CPUState *cs = CPU(moxie_env_get_cpu(env));
+
+    cs->exception_index = -1;
 }
 
-int cpu_moxie_handle_mmu_fault(CPUMoxieState *env, target_ulong address,
+int moxie_cpu_handle_mmu_fault(CPUState *cs, vaddr address,
                                int rw, int mmu_idx)
 {
-    MoxieCPU *cpu = moxie_env_get_cpu(env);
+    MoxieCPU *cpu = MOXIE_CPU(cs);
 
-    env->exception_index = 0xaa;
-    env->debug1 = address;
-    cpu_dump_state(CPU(cpu), stderr, fprintf, 0);
+    cs->exception_index = 0xaa;
+    cpu->env.debug1 = address;
+    cpu_dump_state(cs, stderr, fprintf, 0);
     return 1;
 }
 
 #else /* !CONFIG_USER_ONLY */
 
-int cpu_moxie_handle_mmu_fault(CPUMoxieState *env, target_ulong address,
+int moxie_cpu_handle_mmu_fault(CPUState *cs, vaddr address,
                                int rw, int mmu_idx)
 {
+    MoxieCPU *cpu = MOXIE_CPU(cs);
+    CPUMoxieState *env = &cpu->env;
     MoxieMMUResult res;
     int prot, miss;
     target_ulong phy;
@@ -135,22 +143,19 @@ int cpu_moxie_handle_mmu_fault(CPUMoxieState *env, target_ulong address,
     if (miss) {
         /* handle the miss. */
         phy = 0;
-        env->exception_index = MOXIE_EX_MMU_MISS;
+        cs->exception_index = MOXIE_EX_MMU_MISS;
     } else {
         phy = res.phy;
         r = 0;
     }
-    tlb_set_page(env, address, phy, prot, mmu_idx, TARGET_PAGE_SIZE);
+    tlb_set_page(cs, address, phy, prot, mmu_idx, TARGET_PAGE_SIZE);
     return r;
 }
 
 void moxie_cpu_do_interrupt(CPUState *cs)
 {
-    MoxieCPU *cpu = MOXIE_CPU(cs);
-    CPUMoxieState *env = &cpu->env;
-
-    switch (env->exception_index) {
+    switch (cs->exception_index) {
     case MOXIE_EX_BREAK:
         break;
     default:

View file

@@ -845,8 +845,8 @@ gen_intermediate_code_internal(MoxieCPU *cpu, TranslationBlock *tb,
     gen_tb_start();
 
     do {
-        if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
-            QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
+        if (unlikely(!QTAILQ_EMPTY(&cs->breakpoints))) {
+            QTAILQ_FOREACH(bp, &cs->breakpoints, entry) {
                 if (ctx.pc == bp->pc) {
                     tcg_gen_movi_i32(cpu_pc, ctx.pc);
                     gen_helper_debug(cpu_env);

View file

@@ -27,6 +27,12 @@ static void openrisc_cpu_set_pc(CPUState *cs, vaddr value)
     cpu->env.pc = value;
 }
 
+static bool openrisc_cpu_has_work(CPUState *cs)
+{
+    return cs->interrupt_request & (CPU_INTERRUPT_HARD |
+                                    CPU_INTERRUPT_TIMER);
+}
+
 /* CPUClass::reset() */
 static void openrisc_cpu_reset(CPUState *s)
 {
@@ -35,14 +41,18 @@ static void openrisc_cpu_reset(CPUState *s)
 
     occ->parent_reset(s);
 
-    memset(&cpu->env, 0, offsetof(CPUOpenRISCState, breakpoints));
+#ifndef CONFIG_USER_ONLY
+    memset(&cpu->env, 0, offsetof(CPUOpenRISCState, tlb));
+#else
+    memset(&cpu->env, 0, offsetof(CPUOpenRISCState, irq));
+#endif
 
-    tlb_flush(&cpu->env, 1);
+    tlb_flush(s, 1);
     /*tb_flush(&cpu->env); FIXME: Do we need it? */
 
     cpu->env.pc = 0x100;
     cpu->env.sr = SR_FO | SR_SM;
-    cpu->env.exception_index = -1;
+    s->exception_index = -1;
 
     cpu->env.upr = UPR_UP | UPR_DMP | UPR_IMP | UPR_PICP | UPR_TTP;
     cpu->env.cpucfgr = CPUCFGR_OB32S | CPUCFGR_OF32S;
@@ -153,12 +163,15 @@ static void openrisc_cpu_class_init(ObjectClass *oc, void *data)
     cc->reset = openrisc_cpu_reset;
 
     cc->class_by_name = openrisc_cpu_class_by_name;
+    cc->has_work = openrisc_cpu_has_work;
     cc->do_interrupt = openrisc_cpu_do_interrupt;
     cc->dump_state = openrisc_cpu_dump_state;
     cc->set_pc = openrisc_cpu_set_pc;
     cc->gdb_read_register = openrisc_cpu_gdb_read_register;
     cc->gdb_write_register = openrisc_cpu_gdb_write_register;
-#ifndef CONFIG_USER_ONLY
+#ifdef CONFIG_USER_ONLY
+    cc->handle_mmu_fault = openrisc_cpu_handle_mmu_fault;
+#else
     cc->get_phys_page_debug = openrisc_cpu_get_phys_page_debug;
     dc->vmsd = &vmstate_openrisc_cpu;
 #endif
@@ -201,18 +214,7 @@ static void openrisc_cpu_register_types(void)
 
 OpenRISCCPU *cpu_openrisc_init(const char *cpu_model)
 {
-    OpenRISCCPU *cpu;
-    ObjectClass *oc;
-
-    oc = openrisc_cpu_class_by_name(cpu_model);
-    if (oc == NULL) {
-        return NULL;
-    }
-    cpu = OPENRISC_CPU(object_new(object_class_get_name(oc)));
-
-    object_property_set_bool(OBJECT(cpu), true, "realized", NULL);
-
-    return cpu;
+    return OPENRISC_CPU(cpu_generic_init(TYPE_OPENRISC_CPU, cpu_model));
 }
 
 /* Sort alphabetically by type name, except for "any". */

View file

@@ -304,6 +304,7 @@ typedef struct CPUOpenRISCState {
 
     CPU_COMMON
 
+    /* Fields from here on are preserved across CPU reset. */
 #ifndef CONFIG_USER_ONLY
     CPUOpenRISCTLBContext * tlb;
 
@@ -353,15 +354,13 @@ hwaddr openrisc_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr);
 int openrisc_cpu_gdb_read_register(CPUState *cpu, uint8_t *buf, int reg);
 int openrisc_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
 void openrisc_translate_init(void);
-int cpu_openrisc_handle_mmu_fault(CPUOpenRISCState *env,
-                                  target_ulong address,
-                                  int rw, int mmu_idx);
+int openrisc_cpu_handle_mmu_fault(CPUState *cpu, vaddr address,
+                                  int rw, int mmu_idx);
 int cpu_openrisc_signal_handler(int host_signum, void *pinfo, void *puc);
 
 #define cpu_list cpu_openrisc_list
 #define cpu_exec cpu_openrisc_exec
 #define cpu_gen_code cpu_openrisc_gen_code
-#define cpu_handle_mmu_fault cpu_openrisc_handle_mmu_fault
 #define cpu_signal_handler cpu_openrisc_signal_handler
 
 #ifndef CONFIG_USER_ONLY
@@ -419,11 +418,6 @@ static inline int cpu_mmu_index(CPUOpenRISCState *env)
 }
 
 #define CPU_INTERRUPT_TIMER   CPU_INTERRUPT_TGT_INT_0
 
-static inline bool cpu_has_work(CPUState *cpu)
-{
-    return cpu->interrupt_request & (CPU_INTERRUPT_HARD |
-                                     CPU_INTERRUPT_TIMER);
-}
-
 #include "exec/exec-all.h"

View file

@@ -22,6 +22,8 @@
 
 void QEMU_NORETURN raise_exception(OpenRISCCPU *cpu, uint32_t excp)
 {
-    cpu->env.exception_index = excp;
-    cpu_loop_exit(&cpu->env);
+    CPUState *cs = CPU(cpu);
+
+    cs->exception_index = excp;
+    cpu_loop_exit(cs);
 }

View file

@@ -27,9 +27,9 @@
 
 void openrisc_cpu_do_interrupt(CPUState *cs)
 {
+#ifndef CONFIG_USER_ONLY
     OpenRISCCPU *cpu = OPENRISC_CPU(cs);
     CPUOpenRISCState *env = &cpu->env;
-#ifndef CONFIG_USER_ONLY
 
     env->epcr = env->pc;
     if (env->flags & D_FLAG) {
@@ -37,13 +37,13 @@ void openrisc_cpu_do_interrupt(CPUState *cs)
         env->sr |= SR_DSX;
         env->epcr -= 4;
     }
-    if (env->exception_index == EXCP_SYSCALL) {
+    if (cs->exception_index == EXCP_SYSCALL) {
         env->epcr += 4;
     }
 
     /* For machine-state changed between user-mode and supervisor mode,
        we need flush TLB when we enter&exit EXCP. */
-    tlb_flush(env, 1);
+    tlb_flush(cs, 1);
 
     env->esr = env->sr;
     env->sr &= ~SR_DME;
@@ -54,12 +54,12 @@ void openrisc_cpu_do_interrupt(CPUState *cs)
     env->tlb->cpu_openrisc_map_address_data = &cpu_openrisc_get_phys_nommu;
     env->tlb->cpu_openrisc_map_address_code = &cpu_openrisc_get_phys_nommu;
 
-    if (env->exception_index > 0 && env->exception_index < EXCP_NR) {
-        env->pc = (env->exception_index << 8);
+    if (cs->exception_index > 0 && cs->exception_index < EXCP_NR) {
+        env->pc = (cs->exception_index << 8);
     } else {
-        cpu_abort(env, "Unhandled exception 0x%x\n", env->exception_index);
+        cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
     }
 #endif
 
-    env->exception_index = -1;
+    cs->exception_index = -1;
 }

View file

@@ -51,7 +51,7 @@ void HELPER(rfe)(CPUOpenRISCState *env)
     }
 
     if (need_flush_tlb) {
-        tlb_flush(&cpu->env, 1);
+        tlb_flush(cs, 1);
     }
 #endif
     cs->interrupt_request |= CPU_INTERRUPT_EXITTB;

View file

@@ -139,6 +139,7 @@ static void cpu_openrisc_raise_mmu_exception(OpenRISCCPU *cpu,
                                              target_ulong address,
                                              int rw, int tlb_error)
 {
+    CPUState *cs = CPU(cpu);
     int exception = 0;
 
     switch (tlb_error) {
@@ -169,24 +170,24 @@ static void cpu_openrisc_raise_mmu_exception(OpenRISCCPU *cpu,
 #endif
     }
 
-    cpu->env.exception_index = exception;
+    cs->exception_index = exception;
     cpu->env.eear = address;
 }
 
 #ifndef CONFIG_USER_ONLY
-int cpu_openrisc_handle_mmu_fault(CPUOpenRISCState *env,
-                                  target_ulong address, int rw, int mmu_idx)
+int openrisc_cpu_handle_mmu_fault(CPUState *cs,
+                                  vaddr address, int rw, int mmu_idx)
 {
+    OpenRISCCPU *cpu = OPENRISC_CPU(cs);
     int ret = 0;
     hwaddr physical = 0;
     int prot = 0;
-    OpenRISCCPU *cpu = openrisc_env_get_cpu(env);
 
     ret = cpu_openrisc_get_phys_addr(cpu, &physical, &prot,
                                      address, rw);
 
     if (ret == TLBRET_MATCH) {
-        tlb_set_page(env, address & TARGET_PAGE_MASK,
+        tlb_set_page(cs, address & TARGET_PAGE_MASK,
                      physical & TARGET_PAGE_MASK, prot,
                      mmu_idx, TARGET_PAGE_SIZE);
         ret = 0;
@@ -198,11 +199,11 @@ int cpu_openrisc_handle_mmu_fault(CPUOpenRISCState *env,
     return ret;
 }
 #else
-int cpu_openrisc_handle_mmu_fault(CPUOpenRISCState *env,
-                                  target_ulong address, int rw, int mmu_idx)
+int openrisc_cpu_handle_mmu_fault(CPUState *cs,
+                                  vaddr address, int rw, int mmu_idx)
 {
+    OpenRISCCPU *cpu = OPENRISC_CPU(cs);
     int ret = 0;
-    OpenRISCCPU *cpu = openrisc_env_get_cpu(env);
 
     cpu_openrisc_raise_mmu_exception(cpu, address, rw, ret);
     ret = 1;

View file

@@ -36,20 +36,20 @@
 #define SHIFT 3
 #include "exec/softmmu_template.h"
 
-void tlb_fill(CPUOpenRISCState *env, target_ulong addr, int is_write,
+void tlb_fill(CPUState *cs, target_ulong addr, int is_write,
               int mmu_idx, uintptr_t retaddr)
 {
     int ret;
 
-    ret = cpu_openrisc_handle_mmu_fault(env, addr, is_write, mmu_idx);
+    ret = openrisc_cpu_handle_mmu_fault(cs, addr, is_write, mmu_idx);
 
     if (ret) {
         if (retaddr) {
             /* now we have a real cpu fault. */
-            cpu_restore_state(env, retaddr);
+            cpu_restore_state(cs, retaddr);
         }
         /* Raise Exception. */
-        cpu_loop_exit(env);
+        cpu_loop_exit(cs);
     }
 }
 #endif

Some files were not shown because too many files have changed in this diff.