TCG patch queue

-----BEGIN PGP SIGNATURE-----
 
 iQEcBAABAgAGBQJZ8FmqAAoJEGTfOOivfiFf/78IALolAxDqnbfN5moh76OEy7++
 somg/CahMYl3rIR93bN8QMrNn72evPxdr9OVAjTXy/QTDbK8WDZ6xQ0yzhiNaD5+
 swYuhffcAq4djw6kVkuGB0fDpjF6tRvVP955JYsUp49u06uqKiWYTbwCSAlHKfvP
 yIIn/yOgDwaLFs10fTo+WrxEuSpRKxOGrrYIX3h+zX+cdlOifPAG8SxxKSJKL6OG
 wcKKQjLFpNmRbhqaoUMqD5Q5LebCvdl7Z0HSUakAgp8NVqART7Ix5BzweCP8GL5z
 9qO8Phrgeu9Uz0dTxC+7WTrYDrWvxWmxlbOIy79fVUIt2Z5kHNj7SEWj60cDM8Q=
 =PYec
 -----END PGP SIGNATURE-----

Merge remote-tracking branch 'remotes/rth/tags/pull-tcg-20171025' into staging

TCG patch queue

# gpg: Signature made Wed 25 Oct 2017 10:30:18 BST
# gpg:                using RSA key 0x64DF38E8AF7E215F
# gpg: Good signature from "Richard Henderson <richard.henderson@linaro.org>"
# Primary key fingerprint: 7A48 1E78 868B 4DB6 A85A  05C0 64DF 38E8 AF7E 215F

* remotes/rth/tags/pull-tcg-20171025: (51 commits)
  translate-all: exit from tb_phys_invalidate if qht_remove fails
  tcg: Initialize cpu_env generically
  tcg: enable multiple TCG contexts in softmmu
  tcg: introduce regions to split code_gen_buffer
  translate-all: use qemu_protect_rwx/none helpers
  osdep: introduce qemu_mprotect_rwx/none
  tcg: allocate optimizer temps with tcg_malloc
  tcg: distribute profiling counters across TCGContext's
  tcg: introduce **tcg_ctxs to keep track of all TCGContext's
  gen-icount: fold exitreq_label into TCGContext
  tcg: define tcg_init_ctx and make tcg_ctx a pointer
  tcg: take tb_ctx out of TCGContext
  translate-all: report correct avg host TB size
  exec-all: rename tb_free to tb_remove
  translate-all: use a binary search tree to track TBs in TBContext
  tcg: Remove CF_IGNORE_ICOUNT
  tcg: Add CF_LAST_IO + CF_USE_ICOUNT to CF_HASH_MASK
  cpu-exec: lookup/generate TB outside exclusive region during step_atomic
  tcg: check CF_PARALLEL instead of parallel_cpus
  target/sparc: check CF_PARALLEL instead of parallel_cpus
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
commit ae49fbbcd8
Committed by Peter Maydell, 2017-10-25 16:38:57 +01:00
80 changed files with 2292 additions and 1820 deletions
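The unifying idea of this queue: a TB's compile flags (cflags) now record the execution mode it was generated under -- CF_PARALLEL for parallel vCPU execution, CF_USE_ICOUNT for icount -- and those bits (CF_HASH_MASK) become part of the TB hash key, so a lookup can only ever return a TB compiled for the current mode. A toy model of that partitioning (illustration only, reusing names from the diffs below; not QEMU code):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define CF_PARALLEL 0x00080000u

    static bool parallel_cpus;

    static uint32_t curr_cflags(void)
    {
        return parallel_cpus ? CF_PARALLEL : 0;
    }

    /* One cache slot per execution mode for a single guest pc. */
    static const char *tb_for_mode[2];

    static const char **tb_slot(uint32_t cflags)
    {
        return &tb_for_mode[(cflags & CF_PARALLEL) ? 1 : 0];
    }

    int main(void)
    {
        *tb_slot(curr_cflags()) = "TB compiled for serial execution";

        parallel_cpus = true;    /* a second vCPU starts running */
        const char *tb = *tb_slot(curr_cflags());
        /* Miss: the serial TB is invisible in parallel mode and would be
         * retranslated with CF_PARALLEL set, not patched or flushed. */
        printf("%s\n", tb ? tb : "lookup miss -> translate with CF_PARALLEL");
        return 0;
    }

This is why cpu_exec_step_atomic() in the first diff below can cache its single-instruction TBs (cflags == 1: count one insn, CF_PARALLEL clear) instead of regenerating them with CF_NOCACHE on every exclusive step.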


@@ -198,16 +198,19 @@ static void cpu_exec_nocache(CPUState *cpu, int max_cycles,
                              TranslationBlock *orig_tb, bool ignore_icount)
 {
     TranslationBlock *tb;
+    uint32_t cflags = curr_cflags() | CF_NOCACHE;
+
+    if (ignore_icount) {
+        cflags &= ~CF_USE_ICOUNT;
+    }
 
     /* Should never happen.
        We only end up here when an existing TB is too long.  */
-    if (max_cycles > CF_COUNT_MASK)
-        max_cycles = CF_COUNT_MASK;
+    cflags |= MIN(max_cycles, CF_COUNT_MASK);
 
     tb_lock();
-    tb = tb_gen_code(cpu, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
-                     max_cycles | CF_NOCACHE
-                         | (ignore_icount ? CF_IGNORE_ICOUNT : 0));
+    tb = tb_gen_code(cpu, orig_tb->pc, orig_tb->cs_base,
+                     orig_tb->flags, cflags);
     tb->orig_tb = orig_tb;
     tb_unlock();
@@ -217,39 +220,45 @@ static void cpu_exec_nocache(CPUState *cpu, int max_cycles,
     tb_lock();
     tb_phys_invalidate(tb, -1);
-    tb_free(tb);
+    tb_remove(tb);
     tb_unlock();
 }
 #endif
 
-static void cpu_exec_step(CPUState *cpu)
+void cpu_exec_step_atomic(CPUState *cpu)
 {
     CPUClass *cc = CPU_GET_CLASS(cpu);
-    CPUArchState *env = (CPUArchState *)cpu->env_ptr;
     TranslationBlock *tb;
     target_ulong cs_base, pc;
     uint32_t flags;
+    uint32_t cflags = 1;
+    uint32_t cf_mask = cflags & CF_HASH_MASK;
 
-    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
     if (sigsetjmp(cpu->jmp_env, 0) == 0) {
-        mmap_lock();
-        tb_lock();
-        tb = tb_gen_code(cpu, pc, cs_base, flags,
-                         1 | CF_NOCACHE | CF_IGNORE_ICOUNT);
-        tb->orig_tb = NULL;
-        tb_unlock();
-        mmap_unlock();
+        tb = tb_lookup__cpu_state(cpu, &pc, &cs_base, &flags, cf_mask);
+        if (tb == NULL) {
+            mmap_lock();
+            tb_lock();
+            tb = tb_htable_lookup(cpu, pc, cs_base, flags, cf_mask);
+            if (likely(tb == NULL)) {
+                tb = tb_gen_code(cpu, pc, cs_base, flags, cflags);
+            }
+            tb_unlock();
+            mmap_unlock();
+        }
 
+        start_exclusive();
+
+        /* Since we got here, we know that parallel_cpus must be true.  */
+        parallel_cpus = false;
         cc->cpu_exec_enter(cpu);
         /* execute the generated code */
-        trace_exec_tb_nocache(tb, pc);
+        trace_exec_tb(tb, pc);
         cpu_tb_exec(cpu, tb);
         cc->cpu_exec_exit(cpu);
+        parallel_cpus = true;
 
-        tb_lock();
-        tb_phys_invalidate(tb, -1);
-        tb_free(tb);
-        tb_unlock();
+        end_exclusive();
     } else {
         /* We may have exited due to another problem here, so we need
          * to reset any tb_locks we may have taken but didn't release.
@@ -263,24 +272,13 @@ static void cpu_exec_step(CPUState *cpu)
     }
 }
 
-void cpu_exec_step_atomic(CPUState *cpu)
-{
-    start_exclusive();
-
-    /* Since we got here, we know that parallel_cpus must be true.  */
-    parallel_cpus = false;
-    cpu_exec_step(cpu);
-    parallel_cpus = true;
-
-    end_exclusive();
-}
-
 struct tb_desc {
     target_ulong pc;
     target_ulong cs_base;
     CPUArchState *env;
     tb_page_addr_t phys_page1;
     uint32_t flags;
+    uint32_t cf_mask;
     uint32_t trace_vcpu_dstate;
 };
 
@@ -294,7 +292,7 @@ static bool tb_cmp(const void *p, const void *d)
         tb->cs_base == desc->cs_base &&
         tb->flags == desc->flags &&
         tb->trace_vcpu_dstate == desc->trace_vcpu_dstate &&
-        !(atomic_read(&tb->cflags) & CF_INVALID)) {
+        (tb_cflags(tb) & (CF_HASH_MASK | CF_INVALID)) == desc->cf_mask) {
         /* check next page if needed */
         if (tb->page_addr[1] == -1) {
             return true;
@@ -313,7 +311,8 @@ static bool tb_cmp(const void *p, const void *d)
 }
 
 TranslationBlock *tb_htable_lookup(CPUState *cpu, target_ulong pc,
-                                   target_ulong cs_base, uint32_t flags)
+                                   target_ulong cs_base, uint32_t flags,
+                                   uint32_t cf_mask)
 {
     tb_page_addr_t phys_pc;
     struct tb_desc desc;
@@ -322,12 +321,13 @@ TranslationBlock *tb_htable_lookup(CPUState *cpu, target_ulong pc,
     desc.env = (CPUArchState *)cpu->env_ptr;
     desc.cs_base = cs_base;
     desc.flags = flags;
+    desc.cf_mask = cf_mask;
     desc.trace_vcpu_dstate = *cpu->trace_dstate;
     desc.pc = pc;
     phys_pc = get_page_addr_code(desc.env, pc);
     desc.phys_page1 = phys_pc & TARGET_PAGE_MASK;
-    h = tb_hash_func(phys_pc, pc, flags, *cpu->trace_dstate);
-    return qht_lookup(&tcg_ctx.tb_ctx.htable, tb_cmp, &desc, h);
+    h = tb_hash_func(phys_pc, pc, flags, cf_mask, *cpu->trace_dstate);
+    return qht_lookup(&tb_ctx.htable, tb_cmp, &desc, h);
 }
 
 void tb_set_jmp_target(TranslationBlock *tb, int n, uintptr_t addr)
@@ -367,14 +367,14 @@ static inline void tb_add_jump(TranslationBlock *tb, int n,
 static inline TranslationBlock *tb_find(CPUState *cpu,
                                         TranslationBlock *last_tb,
-                                        int tb_exit)
+                                        int tb_exit, uint32_t cf_mask)
 {
     TranslationBlock *tb;
     target_ulong cs_base, pc;
     uint32_t flags;
     bool acquired_tb_lock = false;
 
-    tb = tb_lookup__cpu_state(cpu, &pc, &cs_base, &flags);
+    tb = tb_lookup__cpu_state(cpu, &pc, &cs_base, &flags, cf_mask);
     if (tb == NULL) {
         /* mmap_lock is needed by tb_gen_code, and mmap_lock must be
          * taken outside tb_lock. As system emulation is currently
@@ -387,10 +387,10 @@ static inline TranslationBlock *tb_find(CPUState *cpu,
         /* There's a chance that our desired tb has been translated while
          * taking the locks so we check again inside the lock.
         */
-        tb = tb_htable_lookup(cpu, pc, cs_base, flags);
+        tb = tb_htable_lookup(cpu, pc, cs_base, flags, cf_mask);
         if (likely(tb == NULL)) {
             /* if no translated code available, then translate it now */
-            tb = tb_gen_code(cpu, pc, cs_base, flags, 0);
+            tb = tb_gen_code(cpu, pc, cs_base, flags, cf_mask);
         }
 
         mmap_unlock();
@@ -500,7 +500,7 @@ static inline bool cpu_handle_exception(CPUState *cpu, int *ret)
         } else if (replay_has_exception()
                    && cpu->icount_decr.u16.low + cpu->icount_extra == 0) {
             /* try to cause an exception pending in the log */
-            cpu_exec_nocache(cpu, 1, tb_find(cpu, NULL, 0), true);
+            cpu_exec_nocache(cpu, 1, tb_find(cpu, NULL, 0, curr_cflags()), true);
             *ret = -1;
             return true;
 #endif
@@ -696,7 +696,21 @@ int cpu_exec(CPUState *cpu)
         int tb_exit = 0;
 
         while (!cpu_handle_interrupt(cpu, &last_tb)) {
-            TranslationBlock *tb = tb_find(cpu, last_tb, tb_exit);
+            uint32_t cflags = cpu->cflags_next_tb;
+            TranslationBlock *tb;
+
+            /* When requested, use an exact setting for cflags for the next
+               execution.  This is used for icount, precise smc, and stop-
+               after-access watchpoints.  Since this request should never
+               have CF_INVALID set, -1 is a convenient invalid value that
+               does not require tcg headers for cpu_common_reset.  */
+            if (cflags == -1) {
+                cflags = curr_cflags();
+            } else {
+                cpu->cflags_next_tb = -1;
+            }
+
+            tb = tb_find(cpu, last_tb, tb_exit, cflags);
             cpu_loop_exec_tb(cpu, tb, &last_tb, &tb_exit);
             /* Try to align the host and virtual clocks
                if the guest is in advance */
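The cpu->cflags_next_tb field used above replaces the old pattern of calling tb_gen_code() directly from watchpoint and self-modifying-code handlers. A minimal standalone sketch of the handshake (toy_cpu and the stand-in curr_cflags() are hypothetical; the real setter and consumer are split across exec.c and this file):

    #include <stdint.h>
    #include <stdio.h>

    struct toy_cpu {
        uint32_t cflags_next_tb;          /* (uint32_t)-1 when no request */
    };

    static uint32_t curr_cflags(void)     /* stand-in for the real helper */
    {
        return 0;
    }

    /* Requester (cf. the exec.c watchpoint hunk further below): ask for a
     * single-instruction TB on the next execution. */
    static void request_single_insn(struct toy_cpu *cpu)
    {
        cpu->cflags_next_tb = 1 | curr_cflags();
    }

    /* Consumer (the cpu_exec() hunk above): honour the request once. */
    static uint32_t next_tb_cflags(struct toy_cpu *cpu)
    {
        uint32_t cflags = cpu->cflags_next_tb;

        if (cflags == (uint32_t)-1) {
            return curr_cflags();         /* no request: normal cflags */
        }
        cpu->cflags_next_tb = -1;         /* consume the one-shot request */
        return cflags;
    }

    int main(void)
    {
        struct toy_cpu cpu = { .cflags_next_tb = (uint32_t)-1 };

        request_single_insn(&cpu);
        printf("0x%x\n", next_tb_cflags(&cpu));   /* 0x1: count one insn */
        printf("0x%x\n", next_tb_cflags(&cpu));   /* 0x0: back to normal */
        return 0;
    }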


@@ -151,9 +151,9 @@ void *HELPER(lookup_tb_ptr)(CPUArchState *env)
     target_ulong cs_base, pc;
     uint32_t flags;
 
-    tb = tb_lookup__cpu_state(cpu, &pc, &cs_base, &flags);
+    tb = tb_lookup__cpu_state(cpu, &pc, &cs_base, &flags, curr_cflags());
     if (tb == NULL) {
-        return tcg_ctx.code_gen_epilogue;
+        return tcg_ctx->code_gen_epilogue;
     }
     qemu_log_mask_and_addr(CPU_LOG_EXEC, pc,
                            "Chain %p [%d: " TARGET_FMT_lx "] %s\n",


@@ -153,7 +153,9 @@ static int v_l2_levels;
 static void *l1_map[V_L1_MAX_SIZE];
 
 /* code generation context */
-TCGContext tcg_ctx;
+TCGContext tcg_init_ctx;
+__thread TCGContext *tcg_ctx;
+TBContext tb_ctx;
 bool parallel_cpus;
 
 /* translation block context */
@@ -185,7 +187,7 @@ static void page_table_config_init(void)
 void tb_lock(void)
 {
     assert_tb_unlocked();
-    qemu_mutex_lock(&tcg_ctx.tb_ctx.tb_lock);
+    qemu_mutex_lock(&tb_ctx.tb_lock);
     have_tb_lock++;
 }
@@ -193,13 +195,13 @@ void tb_unlock(void)
 {
     assert_tb_locked();
     have_tb_lock--;
-    qemu_mutex_unlock(&tcg_ctx.tb_ctx.tb_lock);
+    qemu_mutex_unlock(&tb_ctx.tb_lock);
 }
 
 void tb_lock_reset(void)
 {
     if (have_tb_lock) {
-        qemu_mutex_unlock(&tcg_ctx.tb_ctx.tb_lock);
+        qemu_mutex_unlock(&tb_ctx.tb_lock);
         have_tb_lock = 0;
     }
 }
@@ -208,7 +210,7 @@ static TranslationBlock *tb_find_pc(uintptr_t tc_ptr);
 
 void cpu_gen_init(void)
 {
-    tcg_context_init(&tcg_ctx);
+    tcg_context_init(&tcg_init_ctx);
 }
 
 /* Encode VAL as a signed leb128 sequence at P.
@@ -266,12 +268,10 @@ static target_long decode_sleb128(uint8_t **pp)
 
 static int encode_search(TranslationBlock *tb, uint8_t *block)
 {
-    uint8_t *highwater = tcg_ctx.code_gen_highwater;
+    uint8_t *highwater = tcg_ctx->code_gen_highwater;
     uint8_t *p = block;
     int i, j, n;
 
-    tb->tc.search = block;
-
     for (i = 0, n = tb->icount; i < n; ++i) {
         target_ulong prev;
@@ -279,12 +279,12 @@ static int encode_search(TranslationBlock *tb, uint8_t *block)
             if (i == 0) {
                 prev = (j == 0 ? tb->pc : 0);
             } else {
-                prev = tcg_ctx.gen_insn_data[i - 1][j];
+                prev = tcg_ctx->gen_insn_data[i - 1][j];
             }
-            p = encode_sleb128(p, tcg_ctx.gen_insn_data[i][j] - prev);
+            p = encode_sleb128(p, tcg_ctx->gen_insn_data[i][j] - prev);
         }
-        prev = (i == 0 ? 0 : tcg_ctx.gen_insn_end_off[i - 1]);
-        p = encode_sleb128(p, tcg_ctx.gen_insn_end_off[i] - prev);
+        prev = (i == 0 ? 0 : tcg_ctx->gen_insn_end_off[i - 1]);
+        p = encode_sleb128(p, tcg_ctx->gen_insn_end_off[i] - prev);
 
         /* Test for (pending) buffer overflow.  The assumption is that any
            one row beginning below the high water mark cannot overrun
@@ -307,9 +307,10 @@ static int cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
     target_ulong data[TARGET_INSN_START_WORDS] = { tb->pc };
     uintptr_t host_pc = (uintptr_t)tb->tc.ptr;
     CPUArchState *env = cpu->env_ptr;
-    uint8_t *p = tb->tc.search;
+    uint8_t *p = tb->tc.ptr + tb->tc.size;
     int i, j, num_insns = tb->icount;
 #ifdef CONFIG_PROFILER
+    TCGProfile *prof = &tcg_ctx->prof;
     int64_t ti = profile_getclock();
 #endif
@@ -344,8 +345,9 @@ static int cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
     restore_state_to_opc(env, tb, data);
 
 #ifdef CONFIG_PROFILER
-    tcg_ctx.restore_time += profile_getclock() - ti;
-    tcg_ctx.restore_count++;
+    atomic_set(&prof->restore_time,
+               prof->restore_time + profile_getclock() - ti);
+    atomic_set(&prof->restore_count, prof->restore_count + 1);
 #endif
     return 0;
 }
@@ -375,7 +377,7 @@ bool cpu_restore_state(CPUState *cpu, uintptr_t retaddr)
         if (tb->cflags & CF_NOCACHE) {
             /* one-shot translation, invalidate it immediately */
             tb_phys_invalidate(tb, -1);
-            tb_free(tb);
+            tb_remove(tb);
         }
         r = true;
     }
@@ -591,7 +593,7 @@ static inline void *split_cross_256mb(void *buf1, size_t size1)
         buf1 = buf2;
     }
 
-    tcg_ctx.code_gen_buffer_size = size1;
+    tcg_ctx->code_gen_buffer_size = size1;
     return buf1;
 }
 #endif
@@ -600,75 +602,35 @@ static inline void *split_cross_256mb(void *buf1, size_t size1)
 static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
     __attribute__((aligned(CODE_GEN_ALIGN)));
 
-# ifdef _WIN32
-static inline void do_protect(void *addr, long size, int prot)
-{
-    DWORD old_protect;
-    VirtualProtect(addr, size, prot, &old_protect);
-}
-
-static inline void map_exec(void *addr, long size)
-{
-    do_protect(addr, size, PAGE_EXECUTE_READWRITE);
-}
-
-static inline void map_none(void *addr, long size)
-{
-    do_protect(addr, size, PAGE_NOACCESS);
-}
-# else
-static inline void do_protect(void *addr, long size, int prot)
-{
-    uintptr_t start, end;
-
-    start = (uintptr_t)addr;
-    start &= qemu_real_host_page_mask;
-
-    end = (uintptr_t)addr + size;
-    end = ROUND_UP(end, qemu_real_host_page_size);
-
-    mprotect((void *)start, end - start, prot);
-}
-
-static inline void map_exec(void *addr, long size)
-{
-    do_protect(addr, size, PROT_READ | PROT_WRITE | PROT_EXEC);
-}
-
-static inline void map_none(void *addr, long size)
-{
-    do_protect(addr, size, PROT_NONE);
-}
-# endif /* WIN32 */
-
 static inline void *alloc_code_gen_buffer(void)
 {
     void *buf = static_code_gen_buffer;
-    size_t full_size, size;
+    void *end = static_code_gen_buffer + sizeof(static_code_gen_buffer);
+    size_t size;
 
-    /* The size of the buffer, rounded down to end on a page boundary.  */
-    full_size = (((uintptr_t)buf + sizeof(static_code_gen_buffer))
-                 & qemu_real_host_page_mask) - (uintptr_t)buf;
-
-    /* Reserve a guard page.  */
-    size = full_size - qemu_real_host_page_size;
+    /* page-align the beginning and end of the buffer */
+    buf = QEMU_ALIGN_PTR_UP(buf, qemu_real_host_page_size);
+    end = QEMU_ALIGN_PTR_DOWN(end, qemu_real_host_page_size);
+
+    size = end - buf;
 
     /* Honor a command-line option limiting the size of the buffer.  */
-    if (size > tcg_ctx.code_gen_buffer_size) {
-        size = (((uintptr_t)buf + tcg_ctx.code_gen_buffer_size)
-                & qemu_real_host_page_mask) - (uintptr_t)buf;
+    if (size > tcg_ctx->code_gen_buffer_size) {
+        size = QEMU_ALIGN_DOWN(tcg_ctx->code_gen_buffer_size,
+                               qemu_real_host_page_size);
     }
-    tcg_ctx.code_gen_buffer_size = size;
+    tcg_ctx->code_gen_buffer_size = size;
 
 #ifdef __mips__
     if (cross_256mb(buf, size)) {
         buf = split_cross_256mb(buf, size);
-        size = tcg_ctx.code_gen_buffer_size;
+        size = tcg_ctx->code_gen_buffer_size;
     }
 #endif
 
-    map_exec(buf, size);
-    map_none(buf + size, qemu_real_host_page_size);
+    if (qemu_mprotect_rwx(buf, size)) {
+        abort();
+    }
     qemu_madvise(buf, size, QEMU_MADV_HUGEPAGE);
 
     return buf;
@@ -676,26 +638,20 @@ static inline void *alloc_code_gen_buffer(void)
 #elif defined(_WIN32)
 static inline void *alloc_code_gen_buffer(void)
 {
-    size_t size = tcg_ctx.code_gen_buffer_size;
-    void *buf1, *buf2;
+    size_t size = tcg_ctx->code_gen_buffer_size;
+    void *buf;
 
-    /* Perform the allocation in two steps, so that the guard page
-       is reserved but uncommitted.  */
-    buf1 = VirtualAlloc(NULL, size + qemu_real_host_page_size,
-                        MEM_RESERVE, PAGE_NOACCESS);
-    if (buf1 != NULL) {
-        buf2 = VirtualAlloc(buf1, size, MEM_COMMIT, PAGE_EXECUTE_READWRITE);
-        assert(buf1 == buf2);
-    }
-
-    return buf1;
+    buf = VirtualAlloc(NULL, size, MEM_RESERVE | MEM_COMMIT,
+                       PAGE_EXECUTE_READWRITE);
+    return buf;
 }
 #else
 static inline void *alloc_code_gen_buffer(void)
 {
+    int prot = PROT_WRITE | PROT_READ | PROT_EXEC;
     int flags = MAP_PRIVATE | MAP_ANONYMOUS;
     uintptr_t start = 0;
-    size_t size = tcg_ctx.code_gen_buffer_size;
+    size_t size = tcg_ctx->code_gen_buffer_size;
     void *buf;
 
     /* Constrain the position of the buffer based on the host cpu.
@@ -712,7 +668,7 @@ static inline void *alloc_code_gen_buffer(void)
         flags |= MAP_32BIT;
         /* Cannot expect to map more than 800MB in low memory.  */
         if (size > 800u * 1024 * 1024) {
-            tcg_ctx.code_gen_buffer_size = size = 800u * 1024 * 1024;
+            tcg_ctx->code_gen_buffer_size = size = 800u * 1024 * 1024;
         }
 # elif defined(__sparc__)
         start = 0x40000000ul;
@@ -726,8 +682,7 @@ static inline void *alloc_code_gen_buffer(void)
 # endif
 # endif
 
-    buf = mmap((void *)start, size + qemu_real_host_page_size,
-               PROT_NONE, flags, -1, 0);
+    buf = mmap((void *)start, size, prot, flags, -1, 0);
     if (buf == MAP_FAILED) {
         return NULL;
     }
@@ -737,24 +692,23 @@ static inline void *alloc_code_gen_buffer(void)
         /* Try again, with the original still mapped, to avoid re-acquiring
           that 256mb crossing.  This time don't specify an address.  */
         size_t size2;
-        void *buf2 = mmap(NULL, size + qemu_real_host_page_size,
-                          PROT_NONE, flags, -1, 0);
+        void *buf2 = mmap(NULL, size, prot, flags, -1, 0);
         switch ((int)(buf2 != MAP_FAILED)) {
         case 1:
             if (!cross_256mb(buf2, size)) {
                 /* Success!  Use the new buffer.  */
-                munmap(buf, size + qemu_real_host_page_size);
+                munmap(buf, size);
                 break;
             }
             /* Failure.  Work with what we had.  */
-            munmap(buf2, size + qemu_real_host_page_size);
+            munmap(buf2, size);
             /* fallthru */
         default:
             /* Split the original buffer.  Free the smaller half.  */
             buf2 = split_cross_256mb(buf, size);
-            size2 = tcg_ctx.code_gen_buffer_size;
+            size2 = tcg_ctx->code_gen_buffer_size;
             if (buf == buf2) {
-                munmap(buf + size2 + qemu_real_host_page_size, size - size2);
+                munmap(buf + size2, size - size2);
             } else {
                 munmap(buf, size - size2);
             }
@@ -765,10 +719,6 @@ static inline void *alloc_code_gen_buffer(void)
     }
 #endif
 
-    /* Make the final buffer accessible.  The guard page at the end
-       will remain inaccessible with PROT_NONE.  */
-    mprotect(buf, size, PROT_WRITE | PROT_READ | PROT_EXEC);
-
     /* Request large pages for the buffer.  */
     qemu_madvise(buf, size, QEMU_MADV_HUGEPAGE);
@@ -776,31 +726,65 @@ static inline void *alloc_code_gen_buffer(void)
 }
 #endif /* USE_STATIC_CODE_GEN_BUFFER, WIN32, POSIX */
 
+/* compare a pointer @ptr and a tb_tc @s */
+static int ptr_cmp_tb_tc(const void *ptr, const struct tb_tc *s)
+{
+    if (ptr >= s->ptr + s->size) {
+        return 1;
+    } else if (ptr < s->ptr) {
+        return -1;
+    }
+    return 0;
+}
+
+static gint tb_tc_cmp(gconstpointer ap, gconstpointer bp)
+{
+    const struct tb_tc *a = ap;
+    const struct tb_tc *b = bp;
+
+    /*
+     * When both sizes are set, we know this isn't a lookup.
+     * This is the most likely case: every TB must be inserted; lookups
+     * are a lot less frequent.
+     */
+    if (likely(a->size && b->size)) {
+        if (a->ptr > b->ptr) {
+            return 1;
+        } else if (a->ptr < b->ptr) {
+            return -1;
+        }
+        /* a->ptr == b->ptr should happen only on deletions */
+        g_assert(a->size == b->size);
+        return 0;
+    }
+    /*
+     * All lookups have either .size field set to 0.
+     * From the glib sources we see that @ap is always the lookup key. However
+     * the docs provide no guarantee, so we just mark this case as likely.
+     */
+    if (likely(a->size == 0)) {
+        return ptr_cmp_tb_tc(a->ptr, b);
+    }
+    return ptr_cmp_tb_tc(b->ptr, a);
+}
+
 static inline void code_gen_alloc(size_t tb_size)
 {
-    tcg_ctx.code_gen_buffer_size = size_code_gen_buffer(tb_size);
-    tcg_ctx.code_gen_buffer = alloc_code_gen_buffer();
-    if (tcg_ctx.code_gen_buffer == NULL) {
+    tcg_ctx->code_gen_buffer_size = size_code_gen_buffer(tb_size);
+    tcg_ctx->code_gen_buffer = alloc_code_gen_buffer();
+    if (tcg_ctx->code_gen_buffer == NULL) {
         fprintf(stderr, "Could not allocate dynamic translator buffer\n");
         exit(1);
     }
-
-    /* size this conservatively -- realloc later if needed */
-    tcg_ctx.tb_ctx.tbs_size =
-        tcg_ctx.code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE / 8;
-    if (unlikely(!tcg_ctx.tb_ctx.tbs_size)) {
-        tcg_ctx.tb_ctx.tbs_size = 64 * 1024;
-    }
-    tcg_ctx.tb_ctx.tbs = g_new(TranslationBlock *, tcg_ctx.tb_ctx.tbs_size);
-
-    qemu_mutex_init(&tcg_ctx.tb_ctx.tb_lock);
+    tb_ctx.tb_tree = g_tree_new(tb_tc_cmp);
+    qemu_mutex_init(&tb_ctx.tb_lock);
 }
 
 static void tb_htable_init(void)
 {
     unsigned int mode = QHT_MODE_AUTO_RESIZE;
 
-    qht_init(&tcg_ctx.tb_ctx.htable, CODE_GEN_HTABLE_SIZE, mode);
+    qht_init(&tb_ctx.htable, CODE_GEN_HTABLE_SIZE, mode);
 }
 
 /* Must be called before using the QEMU cpus. 'tb_size' is the size
@@ -816,7 +800,7 @@ void tcg_exec_init(unsigned long tb_size)
 #if defined(CONFIG_SOFTMMU)
     /* There's no guest base to take into account, so go ahead and
        initialize the prologue now.  */
-    tcg_prologue_init(&tcg_ctx);
+    tcg_prologue_init(tcg_ctx);
 #endif
 }
@@ -829,38 +813,22 @@ void tcg_exec_init(unsigned long tb_size)
 static TranslationBlock *tb_alloc(target_ulong pc)
 {
     TranslationBlock *tb;
-    TBContext *ctx;
 
     assert_tb_locked();
 
-    tb = tcg_tb_alloc(&tcg_ctx);
+    tb = tcg_tb_alloc(tcg_ctx);
     if (unlikely(tb == NULL)) {
         return NULL;
     }
-    ctx = &tcg_ctx.tb_ctx;
-    if (unlikely(ctx->nb_tbs == ctx->tbs_size)) {
-        ctx->tbs_size *= 2;
-        ctx->tbs = g_renew(TranslationBlock *, ctx->tbs, ctx->tbs_size);
-    }
-    ctx->tbs[ctx->nb_tbs++] = tb;
     return tb;
 }
 
 /* Called with tb_lock held.  */
-void tb_free(TranslationBlock *tb)
+void tb_remove(TranslationBlock *tb)
 {
     assert_tb_locked();
 
-    /* In practice this is mostly used for single use temporary TB
-       Ignore the hard cases and just back up if this TB happens to
-       be the last one generated.  */
-    if (tcg_ctx.tb_ctx.nb_tbs > 0 &&
-        tb == tcg_ctx.tb_ctx.tbs[tcg_ctx.tb_ctx.nb_tbs - 1]) {
-        size_t struct_size = ROUND_UP(sizeof(*tb), qemu_icache_linesize);
-
-        tcg_ctx.code_gen_ptr = tb->tc.ptr - struct_size;
-        tcg_ctx.tb_ctx.nb_tbs--;
-    }
+    g_tree_remove(tb_ctx.tb_tree, &tb->tc);
 }
 
 static inline void invalidate_page_bitmap(PageDesc *p)
@@ -905,6 +873,15 @@ static void page_flush_tb(void)
     }
 }
 
+static gboolean tb_host_size_iter(gpointer key, gpointer value, gpointer data)
+{
+    const TranslationBlock *tb = value;
+    size_t *size = data;
+
+    *size += tb->tc.size;
+    return false;
+}
+
 /* flush all the translation blocks */
 static void do_tb_flush(CPUState *cpu, run_on_cpu_data tb_flush_count)
 {
@@ -913,35 +890,34 @@ static void do_tb_flush(CPUState *cpu, run_on_cpu_data tb_flush_count)
     /* If it is already been done on request of another CPU,
      * just retry.
      */
-    if (tcg_ctx.tb_ctx.tb_flush_count != tb_flush_count.host_int) {
+    if (tb_ctx.tb_flush_count != tb_flush_count.host_int) {
         goto done;
     }
 
     if (DEBUG_TB_FLUSH_GATE) {
-        printf("qemu: flush code_size=%td nb_tbs=%d avg_tb_size=%td\n",
-               tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer,
-               tcg_ctx.tb_ctx.nb_tbs, tcg_ctx.tb_ctx.nb_tbs > 0 ?
-               (tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer) /
-               tcg_ctx.tb_ctx.nb_tbs : 0);
-    }
-    if ((unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer)
-        > tcg_ctx.code_gen_buffer_size) {
-        cpu_abort(cpu, "Internal error: code buffer overflow\n");
+        size_t nb_tbs = g_tree_nnodes(tb_ctx.tb_tree);
+        size_t host_size = 0;
+
+        g_tree_foreach(tb_ctx.tb_tree, tb_host_size_iter, &host_size);
+        printf("qemu: flush code_size=%zu nb_tbs=%zu avg_tb_size=%zu\n",
               tcg_code_size(), nb_tbs, nb_tbs > 0 ? host_size / nb_tbs : 0);
     }
 
     CPU_FOREACH(cpu) {
         cpu_tb_jmp_cache_clear(cpu);
     }
 
-    tcg_ctx.tb_ctx.nb_tbs = 0;
-    qht_reset_size(&tcg_ctx.tb_ctx.htable, CODE_GEN_HTABLE_SIZE);
+    /* Increment the refcount first so that destroy acts as a reset */
+    g_tree_ref(tb_ctx.tb_tree);
+    g_tree_destroy(tb_ctx.tb_tree);
+
+    qht_reset_size(&tb_ctx.htable, CODE_GEN_HTABLE_SIZE);
     page_flush_tb();
 
-    tcg_ctx.code_gen_ptr = tcg_ctx.code_gen_buffer;
+    tcg_region_reset_all();
     /* XXX: flush processor icache at this point if cache flush is
       expensive */
-    atomic_mb_set(&tcg_ctx.tb_ctx.tb_flush_count,
-                  tcg_ctx.tb_ctx.tb_flush_count + 1);
+    atomic_mb_set(&tb_ctx.tb_flush_count, tb_ctx.tb_flush_count + 1);
 
 done:
     tb_unlock();
@@ -950,7 +926,7 @@ done:
 void tb_flush(CPUState *cpu)
 {
     if (tcg_enabled()) {
-        unsigned tb_flush_count = atomic_mb_read(&tcg_ctx.tb_ctx.tb_flush_count);
+        unsigned tb_flush_count = atomic_mb_read(&tb_ctx.tb_flush_count);
         async_safe_run_on_cpu(cpu, do_tb_flush,
                               RUN_ON_CPU_HOST_INT(tb_flush_count));
     }
@@ -983,7 +959,7 @@ do_tb_invalidate_check(struct qht *ht, void *p, uint32_t hash, void *userp)
 static void tb_invalidate_check(target_ulong address)
 {
     address &= TARGET_PAGE_MASK;
-    qht_iter(&tcg_ctx.tb_ctx.htable, do_tb_invalidate_check, &address);
+    qht_iter(&tb_ctx.htable, do_tb_invalidate_check, &address);
 }
 
 static void
@@ -1003,7 +979,7 @@ do_tb_page_check(struct qht *ht, void *p, uint32_t hash, void *userp)
 /* verify that all the pages have correct rights for code */
 static void tb_page_check(void)
 {
-    qht_iter(&tcg_ctx.tb_ctx.htable, do_tb_page_check, NULL);
+    qht_iter(&tb_ctx.htable, do_tb_page_check, NULL);
 }
 
 #endif /* CONFIG_USER_ONLY */
@@ -1101,8 +1077,11 @@ void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
     /* remove the TB from the hash list */
     phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
-    h = tb_hash_func(phys_pc, tb->pc, tb->flags, tb->trace_vcpu_dstate);
-    qht_remove(&tcg_ctx.tb_ctx.htable, tb, h);
+    h = tb_hash_func(phys_pc, tb->pc, tb->flags, tb->cflags & CF_HASH_MASK,
+                     tb->trace_vcpu_dstate);
+    if (!qht_remove(&tb_ctx.htable, tb, h)) {
+        return;
+    }
 
     /* remove the TB from the page list */
     if (tb->page_addr[0] != page_addr) {
@@ -1131,7 +1110,7 @@ void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
     /* suppress any remaining jumps to this TB */
     tb_jmp_unlink(tb);
 
-    tcg_ctx.tb_ctx.tb_phys_invalidate_count++;
+    tb_ctx.tb_phys_invalidate_count++;
 }
 
 #ifdef CONFIG_SOFTMMU
@@ -1245,8 +1224,9 @@ static void tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
     }
 
     /* add in the hash table */
-    h = tb_hash_func(phys_pc, tb->pc, tb->flags, tb->trace_vcpu_dstate);
-    qht_insert(&tcg_ctx.tb_ctx.htable, tb, h);
+    h = tb_hash_func(phys_pc, tb->pc, tb->flags, tb->cflags & CF_HASH_MASK,
+                     tb->trace_vcpu_dstate);
+    qht_insert(&tb_ctx.htable, tb, h);
 
 #ifdef CONFIG_USER_ONLY
     if (DEBUG_TB_CHECK_GATE) {
@@ -1267,18 +1247,16 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
     tcg_insn_unit *gen_code_buf;
     int gen_code_size, search_size;
 #ifdef CONFIG_PROFILER
+    TCGProfile *prof = &tcg_ctx->prof;
     int64_t ti;
 #endif
     assert_memory_lock();
 
     phys_pc = get_page_addr_code(env, pc);
-    if (use_icount && !(cflags & CF_IGNORE_ICOUNT)) {
-        cflags |= CF_USE_ICOUNT;
-    }
 
- buffer_overflow:
     tb = tb_alloc(pc);
     if (unlikely(!tb)) {
+ buffer_overflow:
         /* flush must be done */
         tb_flush(cpu);
         mmap_unlock();
@@ -1287,43 +1265,44 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
         cpu_loop_exit(cpu);
     }
 
-    gen_code_buf = tcg_ctx.code_gen_ptr;
+    gen_code_buf = tcg_ctx->code_gen_ptr;
     tb->tc.ptr = gen_code_buf;
     tb->pc = pc;
     tb->cs_base = cs_base;
     tb->flags = flags;
     tb->cflags = cflags;
     tb->trace_vcpu_dstate = *cpu->trace_dstate;
+    tcg_ctx->tb_cflags = cflags;
 
 #ifdef CONFIG_PROFILER
-    tcg_ctx.tb_count1++; /* includes aborted translations because of
-                       exceptions */
+    /* includes aborted translations because of exceptions */
+    atomic_set(&prof->tb_count1, prof->tb_count1 + 1);
     ti = profile_getclock();
 #endif
 
-    tcg_func_start(&tcg_ctx);
+    tcg_func_start(tcg_ctx);
 
-    tcg_ctx.cpu = ENV_GET_CPU(env);
+    tcg_ctx->cpu = ENV_GET_CPU(env);
     gen_intermediate_code(cpu, tb);
-    tcg_ctx.cpu = NULL;
+    tcg_ctx->cpu = NULL;
 
     trace_translate_block(tb, tb->pc, tb->tc.ptr);
 
     /* generate machine code */
     tb->jmp_reset_offset[0] = TB_JMP_RESET_OFFSET_INVALID;
     tb->jmp_reset_offset[1] = TB_JMP_RESET_OFFSET_INVALID;
-    tcg_ctx.tb_jmp_reset_offset = tb->jmp_reset_offset;
+    tcg_ctx->tb_jmp_reset_offset = tb->jmp_reset_offset;
     if (TCG_TARGET_HAS_direct_jump) {
-        tcg_ctx.tb_jmp_insn_offset = tb->jmp_target_arg;
-        tcg_ctx.tb_jmp_target_addr = NULL;
+        tcg_ctx->tb_jmp_insn_offset = tb->jmp_target_arg;
+        tcg_ctx->tb_jmp_target_addr = NULL;
     } else {
-        tcg_ctx.tb_jmp_insn_offset = NULL;
-        tcg_ctx.tb_jmp_target_addr = tb->jmp_target_arg;
+        tcg_ctx->tb_jmp_insn_offset = NULL;
+        tcg_ctx->tb_jmp_target_addr = tb->jmp_target_arg;
     }
 
 #ifdef CONFIG_PROFILER
-    tcg_ctx.tb_count++;
-    tcg_ctx.interm_time += profile_getclock() - ti;
+    atomic_set(&prof->tb_count, prof->tb_count + 1);
+    atomic_set(&prof->interm_time, prof->interm_time + profile_getclock() - ti);
     ti = profile_getclock();
 #endif
@@ -1332,7 +1311,7 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
        the tcg optimization currently hidden inside tcg_gen_code.  All
        that should be required is to flush the TBs, allocate a new TB,
       re-initialize it per above, and re-do the actual code generation.  */
-    gen_code_size = tcg_gen_code(&tcg_ctx, tb);
+    gen_code_size = tcg_gen_code(tcg_ctx, tb);
     if (unlikely(gen_code_size < 0)) {
         goto buffer_overflow;
     }
@@ -1340,12 +1319,13 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
     if (unlikely(search_size < 0)) {
         goto buffer_overflow;
     }
+    tb->tc.size = gen_code_size;
 
 #ifdef CONFIG_PROFILER
-    tcg_ctx.code_time += profile_getclock() - ti;
-    tcg_ctx.code_in_len += tb->size;
-    tcg_ctx.code_out_len += gen_code_size;
-    tcg_ctx.search_out_len += search_size;
+    atomic_set(&prof->code_time, prof->code_time + profile_getclock() - ti);
+    atomic_set(&prof->code_in_len, prof->code_in_len + tb->size);
+    atomic_set(&prof->code_out_len, prof->code_out_len + gen_code_size);
+    atomic_set(&prof->search_out_len, prof->search_out_len + search_size);
 #endif
 
 #ifdef DEBUG_DISAS
@@ -1353,8 +1333,8 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
         qemu_log_in_addr_range(tb->pc)) {
         qemu_log_lock();
         qemu_log("OUT: [size=%d]\n", gen_code_size);
-        if (tcg_ctx.data_gen_ptr) {
-            size_t code_size = tcg_ctx.data_gen_ptr - tb->tc.ptr;
+        if (tcg_ctx->data_gen_ptr) {
+            size_t code_size = tcg_ctx->data_gen_ptr - tb->tc.ptr;
             size_t data_size = gen_code_size - code_size;
             size_t i;
@@ -1363,12 +1343,12 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
             for (i = 0; i < data_size; i += sizeof(tcg_target_ulong)) {
                 if (sizeof(tcg_target_ulong) == 8) {
                     qemu_log("0x%08" PRIxPTR ":  .quad  0x%016" PRIx64 "\n",
-                             (uintptr_t)tcg_ctx.data_gen_ptr + i,
-                             *(uint64_t *)(tcg_ctx.data_gen_ptr + i));
+                             (uintptr_t)tcg_ctx->data_gen_ptr + i,
+                             *(uint64_t *)(tcg_ctx->data_gen_ptr + i));
                 } else {
                     qemu_log("0x%08" PRIxPTR ":  .long  0x%08x\n",
-                             (uintptr_t)tcg_ctx.data_gen_ptr + i,
-                             *(uint32_t *)(tcg_ctx.data_gen_ptr + i));
+                             (uintptr_t)tcg_ctx->data_gen_ptr + i,
+                             *(uint32_t *)(tcg_ctx->data_gen_ptr + i));
                 }
             }
         } else {
@@ -1380,9 +1360,9 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
     }
 #endif
 
-    tcg_ctx.code_gen_ptr = (void *)
+    atomic_set(&tcg_ctx->code_gen_ptr, (void *)
         ROUND_UP((uintptr_t)gen_code_buf + gen_code_size + search_size,
-                 CODE_GEN_ALIGN);
+                 CODE_GEN_ALIGN));
 
     /* init jump list */
     assert(((uintptr_t)tb & 3) == 0);
@@ -1410,6 +1390,7 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
      * through the physical hash table and physical page list.
      */
     tb_link_page(tb, phys_pc, phys_page2);
+    g_tree_insert(tb_ctx.tb_tree, &tb->tc, tb);
     return tb;
 }
@@ -1461,14 +1442,12 @@ void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
                                    int is_cpu_write_access)
 {
     TranslationBlock *tb, *tb_next;
-#if defined(TARGET_HAS_PRECISE_SMC)
-    CPUState *cpu = current_cpu;
-    CPUArchState *env = NULL;
-#endif
     tb_page_addr_t tb_start, tb_end;
     PageDesc *p;
     int n;
 #ifdef TARGET_HAS_PRECISE_SMC
+    CPUState *cpu = current_cpu;
+    CPUArchState *env = NULL;
     int current_tb_not_found = is_cpu_write_access;
     TranslationBlock *current_tb = NULL;
     int current_tb_modified = 0;
@@ -1545,10 +1524,8 @@ void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
 #endif
 #ifdef TARGET_HAS_PRECISE_SMC
     if (current_tb_modified) {
-        /* we generate a block containing just the instruction
-           modifying the memory. It will ensure that it cannot modify
-           itself */
-        tb_gen_code(cpu, current_pc, current_cs_base, current_flags, 1);
+        /* Force execution of one insn next time.  */
+        cpu->cflags_next_tb = 1 | curr_cflags();
         cpu_loop_exit_noexc(cpu);
     }
 #endif
@@ -1663,10 +1640,8 @@ static bool tb_invalidate_phys_page(tb_page_addr_t addr, uintptr_t pc)
     p->first_tb = NULL;
 #ifdef TARGET_HAS_PRECISE_SMC
     if (current_tb_modified) {
-        /* we generate a block containing just the instruction
-           modifying the memory. It will ensure that it cannot modify
-           itself */
-        tb_gen_code(cpu, current_pc, current_cs_base, current_flags, 1);
+        /* Force execution of one insn next time.  */
+        cpu->cflags_next_tb = 1 | curr_cflags();
         /* tb_lock will be reset after cpu_loop_exit_noexc longjmps
         * back into the cpu_exec loop. */
         return true;
@@ -1678,37 +1653,16 @@ static bool tb_invalidate_phys_page(tb_page_addr_t addr, uintptr_t pc)
 }
 #endif
 
-/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
-   tb[1].tc_ptr. Return NULL if not found */
+/*
+ * Find the TB 'tb' such that
+ * tb->tc.ptr <= tc_ptr < tb->tc.ptr + tb->tc.size
+ * Return NULL if not found.
+ */
 static TranslationBlock *tb_find_pc(uintptr_t tc_ptr)
 {
-    int m_min, m_max, m;
-    uintptr_t v;
-    TranslationBlock *tb;
-
-    if (tcg_ctx.tb_ctx.nb_tbs <= 0) {
-        return NULL;
-    }
-    if (tc_ptr < (uintptr_t)tcg_ctx.code_gen_buffer ||
-        tc_ptr >= (uintptr_t)tcg_ctx.code_gen_ptr) {
-        return NULL;
-    }
-    /* binary search (cf Knuth) */
-    m_min = 0;
-    m_max = tcg_ctx.tb_ctx.nb_tbs - 1;
-    while (m_min <= m_max) {
-        m = (m_min + m_max) >> 1;
-        tb = tcg_ctx.tb_ctx.tbs[m];
-        v = (uintptr_t)tb->tc.ptr;
-        if (v == tc_ptr) {
-            return tb;
-        } else if (tc_ptr < v) {
-            m_max = m - 1;
-        } else {
-            m_min = m + 1;
-        }
-    }
-    return tcg_ctx.tb_ctx.tbs[m_max];
+    struct tb_tc s = { .ptr = (void *)tc_ptr };
+
+    return g_tree_lookup(tb_ctx.tb_tree, &s);
 }
 
 #if !defined(CONFIG_USER_ONLY)
@@ -1769,9 +1723,7 @@ void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr)
     CPUArchState *env = cpu->env_ptr;
 #endif
     TranslationBlock *tb;
-    uint32_t n, cflags;
-    target_ulong pc, cs_base;
-    uint32_t flags;
+    uint32_t n;
 
     tb_lock();
     tb = tb_find_pc(retaddr);
@@ -1809,22 +1761,17 @@ void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr)
         cpu_abort(cpu, "TB too big during recompile");
     }
 
-    cflags = n | CF_LAST_IO;
-    pc = tb->pc;
-    cs_base = tb->cs_base;
-    flags = tb->flags;
-    tb_phys_invalidate(tb, -1);
+    /* Adjust the execution state of the next TB.  */
+    cpu->cflags_next_tb = curr_cflags() | CF_LAST_IO | n;
+
     if (tb->cflags & CF_NOCACHE) {
         if (tb->orig_tb) {
             /* Invalidate original TB if this TB was generated in
              * cpu_exec_nocache() */
             tb_phys_invalidate(tb->orig_tb, -1);
         }
-        tb_free(tb);
+        tb_remove(tb);
     }
-    /* FIXME: In theory this could raise an exception.  In practice
-       we have already translated the block once so it's probably ok.  */
-    tb_gen_code(cpu, pc, cs_base, flags, cflags);
 
     /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
      * the first in the TB) then we end up generating a whole new TB and
@@ -1893,73 +1840,79 @@ static void print_qht_statistics(FILE *f, fprintf_function cpu_fprintf,
     g_free(hgram);
 }
 
+struct tb_tree_stats {
+    size_t host_size;
+    size_t target_size;
+    size_t max_target_size;
+    size_t direct_jmp_count;
+    size_t direct_jmp2_count;
+    size_t cross_page;
+};
+
+static gboolean tb_tree_stats_iter(gpointer key, gpointer value, gpointer data)
+{
+    const TranslationBlock *tb = value;
+    struct tb_tree_stats *tst = data;
+
+    tst->host_size += tb->tc.size;
+    tst->target_size += tb->size;
+    if (tb->size > tst->max_target_size) {
+        tst->max_target_size = tb->size;
+    }
+    if (tb->page_addr[1] != -1) {
+        tst->cross_page++;
+    }
+    if (tb->jmp_reset_offset[0] != TB_JMP_RESET_OFFSET_INVALID) {
+        tst->direct_jmp_count++;
+        if (tb->jmp_reset_offset[1] != TB_JMP_RESET_OFFSET_INVALID) {
+            tst->direct_jmp2_count++;
+        }
+    }
+    return false;
+}
+
 void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
 {
-    int i, target_code_size, max_target_code_size;
-    int direct_jmp_count, direct_jmp2_count, cross_page;
-    TranslationBlock *tb;
+    struct tb_tree_stats tst = {};
     struct qht_stats hst;
+    size_t nb_tbs;
 
     tb_lock();
 
-    target_code_size = 0;
-    max_target_code_size = 0;
-    cross_page = 0;
-    direct_jmp_count = 0;
-    direct_jmp2_count = 0;
-    for (i = 0; i < tcg_ctx.tb_ctx.nb_tbs; i++) {
-        tb = tcg_ctx.tb_ctx.tbs[i];
-        target_code_size += tb->size;
-        if (tb->size > max_target_code_size) {
-            max_target_code_size = tb->size;
-        }
-        if (tb->page_addr[1] != -1) {
-            cross_page++;
-        }
-        if (tb->jmp_reset_offset[0] != TB_JMP_RESET_OFFSET_INVALID) {
-            direct_jmp_count++;
-            if (tb->jmp_reset_offset[1] != TB_JMP_RESET_OFFSET_INVALID) {
-                direct_jmp2_count++;
-            }
-        }
-    }
+    nb_tbs = g_tree_nnodes(tb_ctx.tb_tree);
+    g_tree_foreach(tb_ctx.tb_tree, tb_tree_stats_iter, &tst);
     /* XXX: avoid using doubles ? */
     cpu_fprintf(f, "Translation buffer state:\n");
-    cpu_fprintf(f, "gen code size       %td/%zd\n",
-                tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer,
-                tcg_ctx.code_gen_highwater - tcg_ctx.code_gen_buffer);
-    cpu_fprintf(f, "TB count            %d\n", tcg_ctx.tb_ctx.nb_tbs);
-    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
-                tcg_ctx.tb_ctx.nb_tbs ? target_code_size /
-                tcg_ctx.tb_ctx.nb_tbs : 0,
-                max_target_code_size);
-    cpu_fprintf(f, "TB avg host size    %td bytes (expansion ratio: %0.1f)\n",
-                tcg_ctx.tb_ctx.nb_tbs ? (tcg_ctx.code_gen_ptr -
-                                         tcg_ctx.code_gen_buffer) /
-                tcg_ctx.tb_ctx.nb_tbs : 0,
-                target_code_size ? (double) (tcg_ctx.code_gen_ptr -
-                                             tcg_ctx.code_gen_buffer) /
-                target_code_size : 0);
-    cpu_fprintf(f, "cross page TB count %d (%d%%)\n", cross_page,
-                tcg_ctx.tb_ctx.nb_tbs ? (cross_page * 100) /
-                tcg_ctx.tb_ctx.nb_tbs : 0);
-    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
-                direct_jmp_count,
-                tcg_ctx.tb_ctx.nb_tbs ? (direct_jmp_count * 100) /
-                        tcg_ctx.tb_ctx.nb_tbs : 0,
-                direct_jmp2_count,
-                tcg_ctx.tb_ctx.nb_tbs ? (direct_jmp2_count * 100) /
-                        tcg_ctx.tb_ctx.nb_tbs : 0);
+    /*
+     * Report total code size including the padding and TB structs;
+     * otherwise users might think "-tb-size" is not honoured.
+     * For avg host size we use the precise numbers from tb_tree_stats though.
+     */
+    cpu_fprintf(f, "gen code size       %zu/%zu\n",
+                tcg_code_size(), tcg_code_capacity());
+    cpu_fprintf(f, "TB count            %zu\n", nb_tbs);
+    cpu_fprintf(f, "TB avg target size  %zu max=%zu bytes\n",
+                nb_tbs ? tst.target_size / nb_tbs : 0,
+                tst.max_target_size);
+    cpu_fprintf(f, "TB avg host size    %zu bytes (expansion ratio: %0.1f)\n",
+                nb_tbs ? tst.host_size / nb_tbs : 0,
+                tst.target_size ? (double)tst.host_size / tst.target_size : 0);
+    cpu_fprintf(f, "cross page TB count %zu (%zu%%)\n", tst.cross_page,
+                nb_tbs ? (tst.cross_page * 100) / nb_tbs : 0);
+    cpu_fprintf(f, "direct jump count   %zu (%zu%%) (2 jumps=%zu %zu%%)\n",
+                tst.direct_jmp_count,
+                nb_tbs ? (tst.direct_jmp_count * 100) / nb_tbs : 0,
+                tst.direct_jmp2_count,
+                nb_tbs ? (tst.direct_jmp2_count * 100) / nb_tbs : 0);
 
-    qht_statistics_init(&tcg_ctx.tb_ctx.htable, &hst);
+    qht_statistics_init(&tb_ctx.htable, &hst);
     print_qht_statistics(f, cpu_fprintf, hst);
     qht_statistics_destroy(&hst);
 
     cpu_fprintf(f, "\nStatistics:\n");
     cpu_fprintf(f, "TB flush count      %u\n",
-                atomic_read(&tcg_ctx.tb_ctx.tb_flush_count));
-    cpu_fprintf(f, "TB invalidate count %d\n",
-                tcg_ctx.tb_ctx.tb_phys_invalidate_count);
+                atomic_read(&tb_ctx.tb_flush_count));
+    cpu_fprintf(f, "TB invalidate count %d\n", tb_ctx.tb_phys_invalidate_count);
     cpu_fprintf(f, "TLB flush count     %zu\n", tlb_flush_count());
     tcg_dump_info(f, cpu_fprintf);
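The binary search tree introduced above keys each TB by the half-open interval [tc.ptr, tc.ptr + tc.size) and marks lookup keys with size == 0; the comparator relies on glib passing the lookup key as its first argument (an observation from the glib sources, as the in-tree comment notes). A self-contained sketch of that interval lookup, simplified from tb_tc_cmp and assuming only glib:

    /* build: gcc sketch.c $(pkg-config --cflags --libs glib-2.0) */
    #include <glib.h>

    struct tb_tc {
        void *ptr;      /* start of translated code */
        size_t size;    /* 0 marks a lookup key */
    };

    static int ptr_cmp_tb_tc(const void *ptr, const struct tb_tc *s)
    {
        if ((const char *)ptr >= (const char *)s->ptr + s->size) {
            return 1;
        } else if (ptr < s->ptr) {
            return -1;
        }
        return 0;                 /* ptr falls inside [s->ptr, s->ptr+size) */
    }

    static gint tb_tc_cmp(gconstpointer ap, gconstpointer bp)
    {
        const struct tb_tc *a = ap;
        const struct tb_tc *b = bp;

        if (a->size && b->size) {         /* insert/remove: order by start */
            return (a->ptr > b->ptr) - (a->ptr < b->ptr);
        }
        if (a->size == 0) {               /* a is the lookup key */
            return ptr_cmp_tb_tc(a->ptr, b);
        }
        return -ptr_cmp_tb_tc(b->ptr, a); /* defensive: reversed order */
    }

    int main(void)
    {
        GTree *tree = g_tree_new(tb_tc_cmp);
        static char code[64];                     /* stand-in code buffer */
        struct tb_tc tb = { .ptr = code + 16, .size = 32 };
        struct tb_tc key = { .ptr = code + 40 };  /* size == 0: a lookup */

        g_tree_insert(tree, &tb, &tb);
        g_print("%s\n", g_tree_lookup(tree, &key) == (gpointer)&tb
                ? "hit: code+40 is inside [code+16, code+48)" : "miss");
        g_tree_destroy(tree);
        return 0;
    }

The same tree replaces the old tbs[] array for bookkeeping: tb_find_pc() becomes a g_tree_lookup() with a zero-sized key, and the flush and statistics paths walk the tree with g_tree_nnodes()/g_tree_foreach().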


@@ -45,7 +45,7 @@ void translator_loop(const TranslatorOps *ops, DisasContextBase *db,
     db->singlestep_enabled = cpu->singlestep_enabled;
 
     /* Instruction counting */
-    max_insns = db->tb->cflags & CF_COUNT_MASK;
+    max_insns = tb_cflags(db->tb) & CF_COUNT_MASK;
     if (max_insns == 0) {
         max_insns = CF_COUNT_MASK;
     }
@@ -95,7 +95,7 @@ void translator_loop(const TranslatorOps *ops, DisasContextBase *db,
            update db->pc_next and db->is_jmp to indicate what should be
           done next -- either exiting this loop or locate the start of
           the next instruction. */
-        if (db->num_insns == max_insns && (db->tb->cflags & CF_LAST_IO)) {
+        if (db->num_insns == max_insns && (tb_cflags(db->tb) & CF_LAST_IO)) {
            /* Accept I/O on the last instruction.  */
            gen_io_start();
            ops->translate_insn(db, cpu);


@@ -977,7 +977,8 @@ int main(int argc, char **argv)
     /* Now that we've loaded the binary, GUEST_BASE is fixed.  Delay
        generating the prologue until now so that the prologue can take
        the real value of GUEST_BASE into account.  */
-    tcg_prologue_init(&tcg_ctx);
+    tcg_prologue_init(tcg_ctx);
+    tcg_region_init();
 
     /* build Task State */
     memset(ts, 0, sizeof(TaskState));

cpus.c

@@ -1307,6 +1307,7 @@ static void *qemu_tcg_rr_cpu_thread_fn(void *arg)
     CPUState *cpu = arg;
 
     rcu_register_thread();
+    tcg_register_thread();
 
     qemu_mutex_lock_iothread();
     qemu_thread_get_self(cpu->thread);
@@ -1454,6 +1455,7 @@ static void *qemu_tcg_cpu_thread_fn(void *arg)
     g_assert(!use_icount);
 
     rcu_register_thread();
+    tcg_register_thread();
 
     qemu_mutex_lock_iothread();
     qemu_thread_get_self(cpu->thread);
@@ -1664,6 +1666,18 @@ static void qemu_tcg_init_vcpu(CPUState *cpu)
     char thread_name[VCPU_THREAD_NAME_SIZE];
     static QemuCond *single_tcg_halt_cond;
     static QemuThread *single_tcg_cpu_thread;
+    static int tcg_region_inited;
+
+    /*
+     * Initialize TCG regions--once. Now is a good time, because:
+     * (1) TCG's init context, prologue and target globals have been set up.
+     * (2) qemu_tcg_mttcg_enabled() works now (TCG init code runs before the
+     *     -accel flag is processed, so the check doesn't work then).
+     */
+    if (!tcg_region_inited) {
+        tcg_region_inited = 1;
+        tcg_region_init();
+    }
 
     if (qemu_tcg_mttcg_enabled() || !single_tcg_cpu_thread) {
         cpu->thread = g_malloc0(sizeof(QemuThread));

exec.c

@@ -791,10 +791,15 @@ void cpu_exec_initfn(CPUState *cpu)
 
 void cpu_exec_realizefn(CPUState *cpu, Error **errp)
 {
-    CPUClass *cc ATTRIBUTE_UNUSED = CPU_GET_CLASS(cpu);
+    CPUClass *cc = CPU_GET_CLASS(cpu);
 
     cpu_list_add(cpu);
 
+    if (tcg_enabled() && !cc->tcg_initialized) {
+        cc->tcg_initialized = true;
+        cc->tcg_initialize();
+    }
+
 #ifndef CONFIG_USER_ONLY
     if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
         vmstate_register(NULL, cpu->cpu_index, &vmstate_cpu_common, cpu);
@@ -2426,11 +2431,8 @@ static void check_watchpoint(int offset, int len, MemTxAttrs attrs, int flags)
 {
     CPUState *cpu = current_cpu;
     CPUClass *cc = CPU_GET_CLASS(cpu);
-    CPUArchState *env = cpu->env_ptr;
-    target_ulong pc, cs_base;
     target_ulong vaddr;
     CPUWatchpoint *wp;
-    uint32_t cpu_flags;
 
     assert(tcg_enabled());
     if (cpu->watchpoint_hit) {
@@ -2470,8 +2472,8 @@ static void check_watchpoint(int offset, int len, MemTxAttrs attrs, int flags)
             cpu->exception_index = EXCP_DEBUG;
             cpu_loop_exit(cpu);
         } else {
-            cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
-            tb_gen_code(cpu, pc, cs_base, cpu_flags, 1);
+            /* Force execution of one insn next time.  */
+            cpu->cflags_next_tb = 1 | curr_cflags();
             cpu_loop_exit_noexc(cpu);
         }
     }


@@ -22,6 +22,7 @@
 
 #include "qemu-common.h"
 #include "exec/tb-context.h"
+#include "sysemu/cpus.h"
 
 /* allow to see translation results - the slowdown should be negligible, so we leave it */
 #define DEBUG_DISAS
@@ -305,10 +306,14 @@ static inline void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr)
 
 /*
  * Translation Cache-related fields of a TB.
+ * This struct exists just for convenience; we keep track of TB's in a binary
+ * search tree, and the only fields needed to compare TB's in the tree are
+ * @ptr and @size.
+ * Note: the address of search data can be obtained by adding @size to @ptr.
 */
 struct tb_tc {
     void *ptr;    /* pointer to the translated code */
-    uint8_t *search;  /* pointer to search data */
+    size_t size;
 };
 
 struct TranslationBlock {
@@ -319,12 +324,15 @@ struct TranslationBlock {
                        size <= TARGET_PAGE_SIZE) */
     uint16_t icount;
     uint32_t cflags;    /* compile flags */
-#define CF_COUNT_MASK  0x7fff
-#define CF_LAST_IO     0x8000 /* Last insn may be an IO access.  */
-#define CF_NOCACHE     0x10000 /* To be freed after execution */
-#define CF_USE_ICOUNT  0x20000
-#define CF_IGNORE_ICOUNT 0x40000 /* Do not generate icount code */
-#define CF_INVALID     0x80000 /* TB is stale. Setters must acquire tb_lock */
+#define CF_COUNT_MASK  0x00007fff
+#define CF_LAST_IO     0x00008000 /* Last insn may be an IO access.  */
+#define CF_NOCACHE     0x00010000 /* To be freed after execution */
+#define CF_USE_ICOUNT  0x00020000
+#define CF_INVALID     0x00040000 /* TB is stale. Setters need tb_lock */
+#define CF_PARALLEL    0x00080000 /* Generate code for a parallel context */
+/* cflags' mask for hashing/comparison */
+#define CF_HASH_MASK   \
+    (CF_COUNT_MASK | CF_LAST_IO | CF_USE_ICOUNT | CF_PARALLEL)
 
     /* Per-vCPU dynamic tracing state used to generate this TB */
     uint32_t trace_vcpu_dstate;
@@ -365,11 +373,27 @@ struct TranslationBlock {
     uintptr_t jmp_list_first;
 };
 
-void tb_free(TranslationBlock *tb);
+extern bool parallel_cpus;
+
+/* Hide the atomic_read to make code a little easier on the eyes */
+static inline uint32_t tb_cflags(const TranslationBlock *tb)
+{
+    return atomic_read(&tb->cflags);
+}
+
+/* current cflags for hashing/comparison */
+static inline uint32_t curr_cflags(void)
+{
+    return (parallel_cpus ? CF_PARALLEL : 0)
+         | (use_icount ? CF_USE_ICOUNT : 0);
+}
+
+void tb_remove(TranslationBlock *tb);
 void tb_flush(CPUState *cpu);
 void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr);
 TranslationBlock *tb_htable_lookup(CPUState *cpu, target_ulong pc,
-                                   target_ulong cs_base, uint32_t flags);
+                                   target_ulong cs_base, uint32_t flags,
+                                   uint32_t cf_mask);
 void tb_set_jmp_target(TranslationBlock *tb, int n, uintptr_t addr);
 
 /* GETPC is the true target of the return instruction that we'll execute.  */


@@ -6,23 +6,22 @@
 /* Helpers for instruction counting code generation.  */
 
 static int icount_start_insn_idx;
-static TCGLabel *exitreq_label;
 
 static inline void gen_tb_start(TranslationBlock *tb)
 {
     TCGv_i32 count, imm;
 
-    exitreq_label = gen_new_label();
-    if (tb->cflags & CF_USE_ICOUNT) {
+    tcg_ctx->exitreq_label = gen_new_label();
+    if (tb_cflags(tb) & CF_USE_ICOUNT) {
         count = tcg_temp_local_new_i32();
     } else {
         count = tcg_temp_new_i32();
     }
 
-    tcg_gen_ld_i32(count, tcg_ctx.tcg_env,
+    tcg_gen_ld_i32(count, cpu_env,
                    -ENV_OFFSET + offsetof(CPUState, icount_decr.u32));
 
-    if (tb->cflags & CF_USE_ICOUNT) {
+    if (tb_cflags(tb) & CF_USE_ICOUNT) {
         imm = tcg_temp_new_i32();
         /* We emit a movi with a dummy immediate argument. Keep the insn index
          * of the movi so that we later (when we know the actual insn count)
@@ -34,10 +33,10 @@ static inline void gen_tb_start(TranslationBlock *tb)
         tcg_temp_free_i32(imm);
     }
 
-    tcg_gen_brcondi_i32(TCG_COND_LT, count, 0, exitreq_label);
+    tcg_gen_brcondi_i32(TCG_COND_LT, count, 0, tcg_ctx->exitreq_label);
 
-    if (tb->cflags & CF_USE_ICOUNT) {
-        tcg_gen_st16_i32(count, tcg_ctx.tcg_env,
+    if (tb_cflags(tb) & CF_USE_ICOUNT) {
+        tcg_gen_st16_i32(count, cpu_env,
                          -ENV_OFFSET + offsetof(CPUState, icount_decr.u16.low));
     }
 
@@ -46,32 +45,30 @@ static inline void gen_tb_start(TranslationBlock *tb)
 
 static inline void gen_tb_end(TranslationBlock *tb, int num_insns)
 {
-    if (tb->cflags & CF_USE_ICOUNT) {
+    if (tb_cflags(tb) & CF_USE_ICOUNT) {
         /* Update the num_insn immediate parameter now that we know
          * the actual insn count.  */
         tcg_set_insn_param(icount_start_insn_idx, 1, num_insns);
     }
 
-    gen_set_label(exitreq_label);
+    gen_set_label(tcg_ctx->exitreq_label);
     tcg_gen_exit_tb((uintptr_t)tb + TB_EXIT_REQUESTED);
 
     /* Terminate the linked list.  */
-    tcg_ctx.gen_op_buf[tcg_ctx.gen_op_buf[0].prev].next = 0;
+    tcg_ctx->gen_op_buf[tcg_ctx->gen_op_buf[0].prev].next = 0;
 }
 
 static inline void gen_io_start(void)
 {
     TCGv_i32 tmp = tcg_const_i32(1);
-    tcg_gen_st_i32(tmp, tcg_ctx.tcg_env,
-                   -ENV_OFFSET + offsetof(CPUState, can_do_io));
+    tcg_gen_st_i32(tmp, cpu_env, -ENV_OFFSET + offsetof(CPUState, can_do_io));
     tcg_temp_free_i32(tmp);
 }
 
 static inline void gen_io_end(void)
 {
     TCGv_i32 tmp = tcg_const_i32(0);
-    tcg_gen_st_i32(tmp, tcg_ctx.tcg_env,
-                   -ENV_OFFSET + offsetof(CPUState, can_do_io));
+    tcg_gen_st_i32(tmp, cpu_env, -ENV_OFFSET + offsetof(CPUState, can_do_io));
     tcg_temp_free_i32(tmp);
 }


@@ -9,31 +9,31 @@
 #define DEF_HELPER_FLAGS_0(name, flags, ret)                            \
 static inline void glue(gen_helper_, name)(dh_retvar_decl0(ret))        \
 {                                                                       \
-    tcg_gen_callN(&tcg_ctx, HELPER(name), dh_retvar(ret), 0, NULL);     \
+    tcg_gen_callN(HELPER(name), dh_retvar(ret), 0, NULL);               \
 }
 
 #define DEF_HELPER_FLAGS_1(name, flags, ret, t1)                        \
 static inline void glue(gen_helper_, name)(dh_retvar_decl(ret)          \
     dh_arg_decl(t1, 1))                                                 \
 {                                                                       \
-    TCGArg args[1] = { dh_arg(t1, 1) };                                 \
-    tcg_gen_callN(&tcg_ctx, HELPER(name), dh_retvar(ret), 1, args);     \
+    TCGTemp *args[1] = { dh_arg(t1, 1) };                               \
+    tcg_gen_callN(HELPER(name), dh_retvar(ret), 1, args);               \
 }
 
 #define DEF_HELPER_FLAGS_2(name, flags, ret, t1, t2)                    \
 static inline void glue(gen_helper_, name)(dh_retvar_decl(ret)          \
     dh_arg_decl(t1, 1), dh_arg_decl(t2, 2))                             \
 {                                                                       \
-    TCGArg args[2] = { dh_arg(t1, 1), dh_arg(t2, 2) };                  \
-    tcg_gen_callN(&tcg_ctx, HELPER(name), dh_retvar(ret), 2, args);     \
+    TCGTemp *args[2] = { dh_arg(t1, 1), dh_arg(t2, 2) };                \
+    tcg_gen_callN(HELPER(name), dh_retvar(ret), 2, args);               \
 }
 
 #define DEF_HELPER_FLAGS_3(name, flags, ret, t1, t2, t3)                \
 static inline void glue(gen_helper_, name)(dh_retvar_decl(ret)          \
     dh_arg_decl(t1, 1), dh_arg_decl(t2, 2), dh_arg_decl(t3, 3))         \
 {                                                                       \
-    TCGArg args[3] = { dh_arg(t1, 1), dh_arg(t2, 2), dh_arg(t3, 3) };   \
-    tcg_gen_callN(&tcg_ctx, HELPER(name), dh_retvar(ret), 3, args);     \
+    TCGTemp *args[3] = { dh_arg(t1, 1), dh_arg(t2, 2), dh_arg(t3, 3) }; \
+    tcg_gen_callN(HELPER(name), dh_retvar(ret), 3, args);               \
 }
 
 #define DEF_HELPER_FLAGS_4(name, flags, ret, t1, t2, t3, t4)            \
@@ -41,9 +41,9 @@ static inline void glue(gen_helper_, name)(dh_retvar_decl(ret)          \
     dh_arg_decl(t1, 1), dh_arg_decl(t2, 2),                             \
     dh_arg_decl(t3, 3), dh_arg_decl(t4, 4))                             \
 {                                                                       \
-    TCGArg args[4] = { dh_arg(t1, 1), dh_arg(t2, 2),                    \
-                       dh_arg(t3, 3), dh_arg(t4, 4) };                  \
-    tcg_gen_callN(&tcg_ctx, HELPER(name), dh_retvar(ret), 4, args);     \
+    TCGTemp *args[4] = { dh_arg(t1, 1), dh_arg(t2, 2),                  \
+                         dh_arg(t3, 3), dh_arg(t4, 4) };                \
+    tcg_gen_callN(HELPER(name), dh_retvar(ret), 4, args);               \
 }
 
 #define DEF_HELPER_FLAGS_5(name, flags, ret, t1, t2, t3, t4, t5)        \
@@ -51,9 +51,9 @@ static inline void glue(gen_helper_, name)(dh_retvar_decl(ret)          \
     dh_arg_decl(t1, 1), dh_arg_decl(t2, 2), dh_arg_decl(t3, 3),         \
     dh_arg_decl(t4, 4), dh_arg_decl(t5, 5))                             \
 {                                                                       \
-    TCGArg args[5] = { dh_arg(t1, 1), dh_arg(t2, 2), dh_arg(t3, 3),     \
-                       dh_arg(t4, 4), dh_arg(t5, 5) };                  \
-    tcg_gen_callN(&tcg_ctx, HELPER(name), dh_retvar(ret), 5, args);     \
+    TCGTemp *args[5] = { dh_arg(t1, 1), dh_arg(t2, 2), dh_arg(t3, 3),   \
+                         dh_arg(t4, 4), dh_arg(t5, 5) };                \
+    tcg_gen_callN(HELPER(name), dh_retvar(ret), 5, args);               \
 }
 
 #include "helper.h"


@@ -20,10 +20,6 @@
 #define HELPER(name) glue(helper_, name)
 
-#define GET_TCGV_i32 GET_TCGV_I32
-#define GET_TCGV_i64 GET_TCGV_I64
-#define GET_TCGV_ptr GET_TCGV_PTR
-
 /* Some types that make sense in C, but not for TCG.  */
 #define dh_alias_i32 i32
 #define dh_alias_s32 i32
@@ -78,11 +74,11 @@
 #define dh_retvar_decl_ptr TCGv_ptr retval,
 #define dh_retvar_decl(t) glue(dh_retvar_decl_, dh_alias(t))
 
-#define dh_retvar_void TCG_CALL_DUMMY_ARG
-#define dh_retvar_noreturn TCG_CALL_DUMMY_ARG
-#define dh_retvar_i32 GET_TCGV_i32(retval)
-#define dh_retvar_i64 GET_TCGV_i64(retval)
-#define dh_retvar_ptr GET_TCGV_ptr(retval)
+#define dh_retvar_void NULL
+#define dh_retvar_noreturn NULL
+#define dh_retvar_i32 tcgv_i32_temp(retval)
+#define dh_retvar_i64 tcgv_i64_temp(retval)
+#define dh_retvar_ptr tcgv_ptr_temp(retval)
 #define dh_retvar(t) glue(dh_retvar_, dh_alias(t))
 
 #define dh_is_64bit_void 0
@@ -113,7 +109,7 @@
     ((dh_is_64bit(t) << (n*2)) | (dh_is_signed(t) << (n*2+1)))
 
 #define dh_arg(t, n) \
-  glue(GET_TCGV_, dh_alias(t))(glue(arg, n))
+  glue(glue(tcgv_, dh_alias(t)), _temp)(glue(arg, n))
 
 #define dh_arg_decl(t, n) glue(TCGv_, dh_alias(t)) glue(arg, n)


@@ -31,10 +31,8 @@ typedef struct TBContext TBContext;
 
 struct TBContext {
 
-    TranslationBlock **tbs;
+    GTree *tb_tree;
     struct qht htable;
-    size_t tbs_size;
-    int nb_tbs;
 
     /* any access to the tbs or the page table must use this lock */
     QemuMutex tb_lock;
@@ -43,4 +41,6 @@ struct TBContext {
     int tb_phys_invalidate_count;
 };
 
+extern TBContext tb_ctx;
+
 #endif


@@ -48,8 +48,8 @@
  * xxhash32, customized for input variables that are not guaranteed to be
  * contiguous in memory.
  */
-static inline
-uint32_t tb_hash_func6(uint64_t a0, uint64_t b0, uint32_t e, uint32_t f)
+static inline uint32_t
+tb_hash_func7(uint64_t a0, uint64_t b0, uint32_t e, uint32_t f, uint32_t g)
 {
     uint32_t v1 = TB_HASH_XX_SEED + PRIME32_1 + PRIME32_2;
     uint32_t v2 = TB_HASH_XX_SEED + PRIME32_2;
@@ -78,7 +78,7 @@ uint32_t tb_hash_func6(uint64_t a0, uint64_t b0, uint32_t e, uint32_t f)
     v4 *= PRIME32_1;
 
     h32 = rol32(v1, 1) + rol32(v2, 7) + rol32(v3, 12) + rol32(v4, 18);
-    h32 += 24;
+    h32 += 28;
 
     h32 += e * PRIME32_3;
     h32 = rol32(h32, 17) * PRIME32_4;
@@ -86,6 +86,9 @@ uint32_t tb_hash_func6(uint64_t a0, uint64_t b0, uint32_t e, uint32_t f)
     h32 += f * PRIME32_3;
     h32 = rol32(h32, 17) * PRIME32_4;
 
+    h32 += g * PRIME32_3;
+    h32 = rol32(h32, 17) * PRIME32_4;
+
     h32 ^= h32 >> 15;
     h32 *= PRIME32_2;
     h32 ^= h32 >> 13;
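Note: the 24 to 28 bump is the xxhash total-length constant: the function now folds seven 32-bit words instead of six. A standalone check of that arithmetic (sketch only, not QEMU code):

/* Standalone arithmetic check: xxhash mixes the total input length into
 * the final avalanche, so adding the 32-bit cf_mask argument moves the
 * constant from 6*4 = 24 to 7*4 = 28 bytes. */
#include <assert.h>
#include <stdint.h>

int main(void)
{
    unsigned words_before = 2 * 2 + 2;   /* a0, b0 (64-bit each) + e, f */
    unsigned words_after  = 2 * 2 + 3;   /* a0, b0 + e, f, g */
    assert(words_before * sizeof(uint32_t) == 24);
    assert(words_after  * sizeof(uint32_t) == 28);
    return 0;
}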


@@ -59,9 +59,9 @@ static inline unsigned int tb_jmp_cache_hash_func(target_ulong pc)
 
 static inline
 uint32_t tb_hash_func(tb_page_addr_t phys_pc, target_ulong pc, uint32_t flags,
-                      uint32_t trace_vcpu_dstate)
+                      uint32_t cf_mask, uint32_t trace_vcpu_dstate)
 {
-    return tb_hash_func6(phys_pc, pc, flags, trace_vcpu_dstate);
+    return tb_hash_func7(phys_pc, pc, flags, cf_mask, trace_vcpu_dstate);
 }
 
 #endif


@@ -21,7 +21,7 @@
 /* Might cause an exception, so have a longjmp destination ready */
 static inline TranslationBlock *
 tb_lookup__cpu_state(CPUState *cpu, target_ulong *pc, target_ulong *cs_base,
-                     uint32_t *flags)
+                     uint32_t *flags, uint32_t cf_mask)
 {
     CPUArchState *env = (CPUArchState *)cpu->env_ptr;
     TranslationBlock *tb;
@@ -35,10 +35,10 @@ tb_lookup__cpu_state(CPUState *cpu, target_ulong *pc, target_ulong *cs_base,
                tb->cs_base == *cs_base &&
                tb->flags == *flags &&
                tb->trace_vcpu_dstate == *cpu->trace_dstate &&
-               !(atomic_read(&tb->cflags) & CF_INVALID))) {
+               (tb_cflags(tb) & (CF_HASH_MASK | CF_INVALID)) == cf_mask)) {
         return tb;
     }
-    tb = tb_htable_lookup(cpu, *pc, *cs_base, *flags);
+    tb = tb_htable_lookup(cpu, *pc, *cs_base, *flags, cf_mask);
     if (tb == NULL) {
         return NULL;
     }
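Note: an illustrative call pattern for the new parameter, assuming the common case where the caller wants a TB matching its current execution mode (paraphrased; the real call sites live in cpu-exec.c, which is not part of this excerpt):

/* Illustrative caller, not lifted from the tree. */
target_ulong pc, cs_base;
uint32_t flags;
TranslationBlock *tb;

tb = tb_lookup__cpu_state(cpu, &pc, &cs_base, &flags, curr_cflags());
if (tb == NULL) {
    /* Miss: take tb_lock and generate a TB carrying those same cflags. */
}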


@@ -371,6 +371,8 @@ void sigaction_invoke(struct sigaction *action,
 #endif
 
 int qemu_madvise(void *addr, size_t len, int advice);
+int qemu_mprotect_rwx(void *addr, size_t size);
+int qemu_mprotect_none(void *addr, size_t size);
 
 int qemu_open(const char *name, int flags, ...);
 int qemu_close(int fd);
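Note: a minimal POSIX sketch of what such wrappers typically reduce to, assuming page-aligned arguments. This is an assumption based on the declarations above, not the actual QEMU implementation (which also has to cover Windows):

#include <sys/mman.h>

/* Sketch only; names suffixed _sketch to avoid claiming the real bodies. */
static int qemu_mprotect_rwx_sketch(void *addr, size_t size)
{
    /* RWX: code_gen buffer regions that are still being written. */
    return mprotect(addr, size, PROT_READ | PROT_WRITE | PROT_EXEC);
}

static int qemu_mprotect_none_sketch(void *addr, size_t size)
{
    /* PROT_NONE: guard pages between TCG regions fault on any access. */
    return mprotect(addr, size, PROT_NONE);
}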


@@ -195,10 +195,8 @@ typedef struct CPUClass {
                             void *opaque);
 
     const struct VMStateDescription *vmsd;
-    int gdb_num_core_regs;
     const char *gdb_core_xml_file;
     gchar * (*gdb_arch_name)(CPUState *cpu);
-    bool gdb_stop_before_watchpoint;
 
     void (*cpu_exec_enter)(CPUState *cpu);
     void (*cpu_exec_exit)(CPUState *cpu);
@@ -206,6 +204,12 @@ typedef struct CPUClass {
     void (*disas_set_info)(CPUState *cpu, disassemble_info *info);
 
     vaddr (*adjust_watchpoint_address)(CPUState *cpu, vaddr addr, int len);
+    void (*tcg_initialize)(void);
+
+    /* Keep non-pointer data at the end to minimize holes.  */
+    int gdb_num_core_regs;
+    bool gdb_stop_before_watchpoint;
+    bool tcg_initialized;
 } CPUClass;
 
 #ifdef HOST_WORDS_BIGENDIAN
@@ -340,6 +344,7 @@ struct CPUState {
     bool unplug;
     bool crash_occurred;
    bool exit_request;
+    uint32_t cflags_next_tb;
     /* updates protected by BQL */
     uint32_t interrupt_request;
     int singlestep_enabled;
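Note: cflags_next_tb is a one-shot override for the cflags of the next translation; the reset hunk below initialises it to -1, the "no request" sentinel. A hedged sketch of the consumer side (paraphrased intent, not a copy of cpu-exec.c):

/* Sketch: how a -1 sentinel override is typically consumed. */
uint32_t cflags = cpu->cflags_next_tb;
if (cflags == -1) {
    cflags = curr_cflags();      /* no override requested */
} else {
    cpu->cflags_next_tb = -1;    /* consume the one-shot request */
}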


@@ -129,7 +129,7 @@ int cpu_get_pic_interrupt(CPUX86State *env)
 void fork_start(void)
 {
     cpu_list_lock();
-    qemu_mutex_lock(&tcg_ctx.tb_ctx.tb_lock);
+    qemu_mutex_lock(&tb_ctx.tb_lock);
     mmap_fork_start();
 }
 
@@ -145,11 +145,11 @@ void fork_end(int child)
                 QTAILQ_REMOVE(&cpus, cpu, node);
             }
         }
-        qemu_mutex_init(&tcg_ctx.tb_ctx.tb_lock);
+        qemu_mutex_init(&tb_ctx.tb_lock);
         qemu_init_cpu_list();
         gdbserver_fork(thread_cpu);
     } else {
-        qemu_mutex_unlock(&tcg_ctx.tb_ctx.tb_lock);
+        qemu_mutex_unlock(&tb_ctx.tb_lock);
         cpu_list_unlock();
     }
 }
@@ -4476,7 +4476,8 @@ int main(int argc, char **argv, char **envp)
     /* Now that we've loaded the binary, GUEST_BASE is fixed.  Delay
        generating the prologue until now so that the prologue can take
       the real value of GUEST_BASE into account.  */
-    tcg_prologue_init(&tcg_ctx);
+    tcg_prologue_init(tcg_ctx);
+    tcg_region_init();
 
 #if defined(TARGET_I386)
     env->cr[0] = CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK;


@@ -6218,6 +6218,7 @@ static void *clone_func(void *arg)
     TaskState *ts;
 
     rcu_register_thread();
+    tcg_register_thread();
     env = info->env;
     cpu = ENV_GET_CPU(env);
     thread_cpu = cpu;


@@ -301,6 +301,7 @@ static void cpu_common_reset(CPUState *cpu)
     cpu->can_do_io = 1;
     cpu->exception_index = -1;
     cpu->crash_occurred = false;
+    cpu->cflags_next_tb = -1;
 
     if (tcg_enabled()) {
         cpu_tb_jmp_cache_clear(cpu);


@@ -260,8 +260,6 @@ static void alpha_cpu_initfn(Object *obj)
     cs->env_ptr = env;
     tlb_flush(cs);
 
-    alpha_translate_init();
-
     env->lock_addr = -1;
 #if defined(CONFIG_USER_ONLY)
     env->flags = ENV_FLAG_PS_USER | ENV_FLAG_FEN;
@@ -299,6 +297,7 @@ static void alpha_cpu_class_init(ObjectClass *oc, void *data)
     dc->vmsd = &vmstate_alpha_cpu;
 #endif
     cc->disas_set_info = alpha_cpu_disas_set_info;
+    cc->tcg_initialize = alpha_translate_init;
 
     cc->gdb_num_core_regs = 67;
 }
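Note: every target in this series makes the same conversion as alpha here: drop the per-target init guard and register the translate-init function as a class hook. The generic consumer is not in this excerpt; a sketch of what it presumably looks like in the common realize path:

/* Sketch of the generic consumer (an assumption about the common exec
 * code; the exact placement is not shown in this diff). */
CPUClass *cc = CPU_GET_CLASS(cpu);
if (tcg_enabled() && !cc->tcg_initialized) {
    cc->tcg_initialized = true;
    cc->tcg_initialize();        /* e.g. alpha_translate_init */
}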


@@ -78,7 +78,6 @@ struct DisasContext {
 #define DISAS_PC_STALE        DISAS_TARGET_2
 
 /* global register indexes */
-static TCGv_env cpu_env;
 static TCGv cpu_std_ir[31];
 static TCGv cpu_fir[31];
 static TCGv cpu_pc;
@@ -124,17 +123,8 @@ void alpha_translate_init(void)
     };
 #endif
 
-    static bool done_init = 0;
     int i;
 
-    if (done_init) {
-        return;
-    }
-    done_init = 1;
-
-    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
-    tcg_ctx.tcg_env = cpu_env;
-
     for (i = 0; i < 31; i++) {
         cpu_std_ir[i] = tcg_global_mem_new_i64(cpu_env,
                                                offsetof(CPUAlphaState, ir[i]),
@@ -461,7 +451,7 @@ static bool in_superpage(DisasContext *ctx, int64_t addr)
 
 static bool use_exit_tb(DisasContext *ctx)
 {
-    return ((ctx->base.tb->cflags & CF_LAST_IO)
+    return ((tb_cflags(ctx->base.tb) & CF_LAST_IO)
             || ctx->base.singlestep_enabled
             || singlestep);
 }
@@ -2405,7 +2395,7 @@ static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn)
         case 0xC000:
             /* RPCC */
             va = dest_gpr(ctx, ra);
-            if (ctx->base.tb->cflags & CF_USE_ICOUNT) {
+            if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
                 gen_io_start();
                 gen_helper_load_pcc(va, cpu_env);
                 gen_io_end();


@@ -534,7 +534,6 @@ static void arm_cpu_initfn(Object *obj)
 {
     CPUState *cs = CPU(obj);
     ARMCPU *cpu = ARM_CPU(obj);
-    static bool inited;
 
     cs->env_ptr = &cpu->env;
     cpu->cp_regs = g_hash_table_new_full(g_int_hash, g_int_equal,
@@ -578,10 +577,6 @@ static void arm_cpu_initfn(Object *obj)
 
     if (tcg_enabled()) {
         cpu->psci_version = 2; /* TCG implements PSCI 0.2 */
-        if (!inited) {
-            inited = true;
-            arm_translate_init();
-        }
     }
 }
 
@@ -1765,6 +1760,7 @@ static void arm_cpu_class_init(ObjectClass *oc, void *data)
 #endif
 
     cc->disas_set_info = arm_disas_set_info;
+    cc->tcg_initialize = arm_translate_init;
 }
 
 static void cpu_register(const ARMCPUInfo *info)


@@ -430,8 +430,9 @@ uint64_t HELPER(crc32c_64)(uint64_t acc, uint64_t val, uint32_t bytes)
 }
 
 /* Returns 0 on success; 1 otherwise.  */
-uint64_t HELPER(paired_cmpxchg64_le)(CPUARMState *env, uint64_t addr,
-                                     uint64_t new_lo, uint64_t new_hi)
+static uint64_t do_paired_cmpxchg64_le(CPUARMState *env, uint64_t addr,
+                                       uint64_t new_lo, uint64_t new_hi,
+                                       bool parallel)
 {
     uintptr_t ra = GETPC();
     Int128 oldv, cmpv, newv;
@@ -440,7 +441,7 @@ uint64_t HELPER(paired_cmpxchg64_le)(CPUARMState *env, uint64_t addr,
     cmpv = int128_make128(env->exclusive_val, env->exclusive_high);
     newv = int128_make128(new_lo, new_hi);
 
-    if (parallel_cpus) {
+    if (parallel) {
 #ifndef CONFIG_ATOMIC128
         cpu_loop_exit_atomic(ENV_GET_CPU(env), ra);
 #else
@@ -484,8 +485,21 @@ uint64_t HELPER(paired_cmpxchg64_le)(CPUARMState *env, uint64_t addr,
     return !success;
 }
 
-uint64_t HELPER(paired_cmpxchg64_be)(CPUARMState *env, uint64_t addr,
-                                     uint64_t new_lo, uint64_t new_hi)
+uint64_t HELPER(paired_cmpxchg64_le)(CPUARMState *env, uint64_t addr,
                                      uint64_t new_lo, uint64_t new_hi)
+{
+    return do_paired_cmpxchg64_le(env, addr, new_lo, new_hi, false);
+}
+
+uint64_t HELPER(paired_cmpxchg64_le_parallel)(CPUARMState *env, uint64_t addr,
+                                              uint64_t new_lo, uint64_t new_hi)
+{
+    return do_paired_cmpxchg64_le(env, addr, new_lo, new_hi, true);
+}
+
+static uint64_t do_paired_cmpxchg64_be(CPUARMState *env, uint64_t addr,
+                                       uint64_t new_lo, uint64_t new_hi,
+                                       bool parallel)
 {
     uintptr_t ra = GETPC();
     Int128 oldv, cmpv, newv;
@@ -494,7 +508,7 @@ uint64_t HELPER(paired_cmpxchg64_be)(CPUARMState *env, uint64_t addr,
     cmpv = int128_make128(env->exclusive_val, env->exclusive_high);
     newv = int128_make128(new_lo, new_hi);
 
-    if (parallel_cpus) {
+    if (parallel) {
 #ifndef CONFIG_ATOMIC128
         cpu_loop_exit_atomic(ENV_GET_CPU(env), ra);
 #else
@@ -537,3 +551,15 @@ uint64_t HELPER(paired_cmpxchg64_be)(CPUARMState *env, uint64_t addr,
 
     return !success;
 }
+
+uint64_t HELPER(paired_cmpxchg64_be)(CPUARMState *env, uint64_t addr,
+                                     uint64_t new_lo, uint64_t new_hi)
+{
+    return do_paired_cmpxchg64_be(env, addr, new_lo, new_hi, false);
+}
+
+uint64_t HELPER(paired_cmpxchg64_be_parallel)(CPUARMState *env, uint64_t addr,
+                                              uint64_t new_lo, uint64_t new_hi)
+{
+    return do_paired_cmpxchg64_be(env, addr, new_lo, new_hi, true);
+}
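Note: the same conversion recurs for hppa's stby helpers further down. The general shape, with hypothetical names (schematic, not QEMU code): the translator picks the entry point from CF_PARALLEL at code-generation time, so the helper no longer reads the global parallel_cpus at run time.

static uint64_t do_thing(void *env, uint64_t addr, int parallel)
{
    if (parallel) {
        /* must be truly atomic, or punt via cpu_loop_exit_atomic() */
    } else {
        /* serial context: plain loads and stores are fine */
    }
    return 0;
}

uint64_t helper_thing(void *env, uint64_t addr)
{
    return do_thing(env, addr, 0);   /* serial TB (no CF_PARALLEL) */
}

uint64_t helper_thing_parallel(void *env, uint64_t addr)
{
    return do_thing(env, addr, 1);   /* TB compiled with CF_PARALLEL */
}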


@@ -43,4 +43,8 @@ DEF_HELPER_FLAGS_2(fcvtx_f64_to_f32, TCG_CALL_NO_RWG, f32, f64, env)
 DEF_HELPER_FLAGS_3(crc32_64, TCG_CALL_NO_RWG_SE, i64, i64, i64, i32)
 DEF_HELPER_FLAGS_3(crc32c_64, TCG_CALL_NO_RWG_SE, i64, i64, i64, i32)
 DEF_HELPER_FLAGS_4(paired_cmpxchg64_le, TCG_CALL_NO_WG, i64, env, i64, i64, i64)
+DEF_HELPER_FLAGS_4(paired_cmpxchg64_le_parallel, TCG_CALL_NO_WG,
+                   i64, env, i64, i64, i64)
 DEF_HELPER_FLAGS_4(paired_cmpxchg64_be, TCG_CALL_NO_WG, i64, env, i64, i64, i64)
+DEF_HELPER_FLAGS_4(paired_cmpxchg64_be_parallel, TCG_CALL_NO_WG,
+                   i64, env, i64, i64, i64)


@@ -502,13 +502,6 @@ void HELPER(yield)(CPUARMState *env)
     ARMCPU *cpu = arm_env_get_cpu(env);
     CPUState *cs = CPU(cpu);
 
-    /* When running in MTTCG we don't generate jumps to the yield and
-     * WFE helpers as it won't affect the scheduling of other vCPUs.
-     * If we wanted to more completely model WFE/SEV so we don't busy
-     * spin unnecessarily we would need to do something more involved.
-     */
-    g_assert(!parallel_cpus);
-
     /* This is a non-trappable hint instruction that generally indicates
      * that the guest is currently busy-looping. Yield control back to the
      * top level loop so that a more deserving VCPU has a chance to run.


@@ -348,7 +348,8 @@ static inline bool use_goto_tb(DisasContext *s, int n, uint64_t dest)
     /* No direct tb linking with singlestep (either QEMU's or the ARM
      * debug architecture kind) or deterministic io
      */
-    if (s->base.singlestep_enabled || s->ss_active || (s->base.tb->cflags & CF_LAST_IO)) {
+    if (s->base.singlestep_enabled || s->ss_active ||
+        (tb_cflags(s->base.tb) & CF_LAST_IO)) {
         return false;
     }
 
@@ -1335,13 +1336,18 @@ static void handle_hint(DisasContext *s, uint32_t insn,
     case 3: /* WFI */
         s->base.is_jmp = DISAS_WFI;
         return;
+        /* When running in MTTCG we don't generate jumps to the yield and
+         * WFE helpers as it won't affect the scheduling of other vCPUs.
+         * If we wanted to more completely model WFE/SEV so we don't busy
+         * spin unnecessarily we would need to do something more involved.
+         */
     case 1: /* YIELD */
-        if (!parallel_cpus) {
+        if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
             s->base.is_jmp = DISAS_YIELD;
         }
         return;
     case 2: /* WFE */
-        if (!parallel_cpus) {
+        if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
             s->base.is_jmp = DISAS_WFE;
         }
         return;
@@ -1561,7 +1567,7 @@ static void handle_sys(DisasContext *s, uint32_t insn, bool isread,
         break;
     }
 
-    if ((s->base.tb->cflags & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
+    if ((tb_cflags(s->base.tb) & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
         gen_io_start();
     }
 
@@ -1592,7 +1598,7 @@ static void handle_sys(DisasContext *s, uint32_t insn, bool isread,
         }
     }
 
-    if ((s->base.tb->cflags & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
+    if ((tb_cflags(s->base.tb) & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
         /* I/O operations must end the TB here (whether read or write) */
         gen_io_end();
         s->base.is_jmp = DISAS_UPDATE;
@@ -1930,11 +1936,25 @@ static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
                                        MO_64 | MO_ALIGN | s->be_data);
             tcg_gen_setcond_i64(TCG_COND_NE, tmp, tmp, cpu_exclusive_val);
         } else if (s->be_data == MO_LE) {
-            gen_helper_paired_cmpxchg64_le(tmp, cpu_env, cpu_exclusive_addr,
-                                           cpu_reg(s, rt), cpu_reg(s, rt2));
+            if (tb_cflags(s->base.tb) & CF_PARALLEL) {
+                gen_helper_paired_cmpxchg64_le_parallel(tmp, cpu_env,
+                                                        cpu_exclusive_addr,
+                                                        cpu_reg(s, rt),
+                                                        cpu_reg(s, rt2));
+            } else {
+                gen_helper_paired_cmpxchg64_le(tmp, cpu_env, cpu_exclusive_addr,
+                                               cpu_reg(s, rt), cpu_reg(s, rt2));
+            }
         } else {
-            gen_helper_paired_cmpxchg64_be(tmp, cpu_env, cpu_exclusive_addr,
-                                           cpu_reg(s, rt), cpu_reg(s, rt2));
+            if (tb_cflags(s->base.tb) & CF_PARALLEL) {
+                gen_helper_paired_cmpxchg64_be_parallel(tmp, cpu_env,
+                                                        cpu_exclusive_addr,
+                                                        cpu_reg(s, rt),
+                                                        cpu_reg(s, rt2));
+            } else {
+                gen_helper_paired_cmpxchg64_be(tmp, cpu_env, cpu_exclusive_addr,
+                                               cpu_reg(s, rt), cpu_reg(s, rt2));
+            }
         }
     } else {
         tcg_gen_atomic_cmpxchg_i64(tmp, cpu_exclusive_addr, cpu_exclusive_val,


@@ -58,7 +58,6 @@
 #define IS_USER(s) (s->user)
 #endif
 
-TCGv_env cpu_env;
 /* We reuse the same 64-bit temporaries for efficiency.  */
 static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
 static TCGv_i32 cpu_R[16];
@@ -81,9 +80,6 @@ void arm_translate_init(void)
 {
     int i;
 
-    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
-    tcg_ctx.tcg_env = cpu_env;
-
     for (i = 0; i < 16; i++) {
         cpu_R[i] = tcg_global_mem_new_i32(cpu_env,
                                           offsetof(CPUARMState, regs[i]),
@@ -4546,8 +4542,13 @@ static void gen_exception_return(DisasContext *s, TCGv_i32 pc)
 static void gen_nop_hint(DisasContext *s, int val)
 {
     switch (val) {
+        /* When running in MTTCG we don't generate jumps to the yield and
+         * WFE helpers as it won't affect the scheduling of other vCPUs.
+         * If we wanted to more completely model WFE/SEV so we don't busy
+         * spin unnecessarily we would need to do something more involved.
+         */
     case 1: /* yield */
-        if (!parallel_cpus) {
+        if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
             gen_set_pc_im(s, s->pc);
             s->base.is_jmp = DISAS_YIELD;
         }
@@ -4557,7 +4558,7 @@ static void gen_nop_hint(DisasContext *s, int val)
         s->base.is_jmp = DISAS_WFI;
         break;
     case 2: /* wfe */
-        if (!parallel_cpus) {
+        if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
             gen_set_pc_im(s, s->pc);
             s->base.is_jmp = DISAS_WFE;
         }
@@ -7704,7 +7705,7 @@ static int disas_coproc_insn(DisasContext *s, uint32_t insn)
                 break;
             }
 
-            if ((s->base.tb->cflags & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
+            if ((tb_cflags(s->base.tb) & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
                 gen_io_start();
             }
 
@@ -7795,7 +7796,7 @@ static int disas_coproc_insn(DisasContext *s, uint32_t insn)
                 }
             }
 
-            if ((s->base.tb->cflags & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
+            if ((tb_cflags(s->base.tb) & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
                 /* I/O operations must end the TB here (whether read or write) */
                 gen_io_end();
                 gen_lookup_tb(s);
@@ -12253,7 +12254,7 @@ static void arm_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
 {
     DisasContext *dc = container_of(dcbase, DisasContext, base);
 
-    if (dc->base.tb->cflags & CF_LAST_IO && dc->condjmp) {
+    if (tb_cflags(dc->base.tb) & CF_LAST_IO && dc->condjmp) {
         /* FIXME: This can theoretically happen with self-modifying code. */
         cpu_abort(cpu, "IO on conditional branch instruction");
     }


@@ -80,7 +80,6 @@ typedef struct DisasCompare {
 } DisasCompare;
 
 /* Share the TCG temporaries common between 32 and 64 bit modes.  */
-extern TCGv_env cpu_env;
 extern TCGv_i32 cpu_NF, cpu_ZF, cpu_CF, cpu_VF;
 extern TCGv_i64 cpu_exclusive_addr;
 extern TCGv_i64 cpu_exclusive_val;


@@ -181,7 +181,6 @@ static void cris_cpu_initfn(Object *obj)
     CRISCPU *cpu = CRIS_CPU(obj);
     CRISCPUClass *ccc = CRIS_CPU_GET_CLASS(obj);
     CPUCRISState *env = &cpu->env;
-    static bool tcg_initialized;
 
     cs->env_ptr = env;
 
@@ -191,15 +190,6 @@ static void cris_cpu_initfn(Object *obj)
     /* IRQ and NMI lines.  */
     qdev_init_gpio_in(DEVICE(cpu), cris_cpu_set_irq, 2);
 #endif
-
-    if (tcg_enabled() && !tcg_initialized) {
-        tcg_initialized = true;
-        if (env->pregs[PR_VR] < 32) {
-            cris_initialize_crisv10_tcg();
-        } else {
-            cris_initialize_tcg();
-        }
-    }
 }
 
 static void crisv8_cpu_class_init(ObjectClass *oc, void *data)
@@ -210,6 +200,7 @@ static void crisv8_cpu_class_init(ObjectClass *oc, void *data)
     ccc->vr = 8;
     cc->do_interrupt = crisv10_cpu_do_interrupt;
     cc->gdb_read_register = crisv10_cpu_gdb_read_register;
+    cc->tcg_initialize = cris_initialize_crisv10_tcg;
 }
 
 static void crisv9_cpu_class_init(ObjectClass *oc, void *data)
@@ -220,6 +211,7 @@ static void crisv9_cpu_class_init(ObjectClass *oc, void *data)
     ccc->vr = 9;
     cc->do_interrupt = crisv10_cpu_do_interrupt;
     cc->gdb_read_register = crisv10_cpu_gdb_read_register;
+    cc->tcg_initialize = cris_initialize_crisv10_tcg;
 }
 
 static void crisv10_cpu_class_init(ObjectClass *oc, void *data)
@@ -230,6 +222,7 @@ static void crisv10_cpu_class_init(ObjectClass *oc, void *data)
     ccc->vr = 10;
     cc->do_interrupt = crisv10_cpu_do_interrupt;
     cc->gdb_read_register = crisv10_cpu_gdb_read_register;
+    cc->tcg_initialize = cris_initialize_crisv10_tcg;
 }
 
 static void crisv11_cpu_class_init(ObjectClass *oc, void *data)
@@ -240,6 +233,7 @@ static void crisv11_cpu_class_init(ObjectClass *oc, void *data)
     ccc->vr = 11;
     cc->do_interrupt = crisv10_cpu_do_interrupt;
     cc->gdb_read_register = crisv10_cpu_gdb_read_register;
+    cc->tcg_initialize = cris_initialize_crisv10_tcg;
 }
 
 static void crisv17_cpu_class_init(ObjectClass *oc, void *data)
@@ -250,6 +244,7 @@ static void crisv17_cpu_class_init(ObjectClass *oc, void *data)
     ccc->vr = 17;
     cc->do_interrupt = crisv10_cpu_do_interrupt;
     cc->gdb_read_register = crisv10_cpu_gdb_read_register;
+    cc->tcg_initialize = cris_initialize_crisv10_tcg;
 }
 
 static void crisv32_cpu_class_init(ObjectClass *oc, void *data)
@@ -322,6 +317,7 @@ static void cris_cpu_class_init(ObjectClass *oc, void *data)
     cc->gdb_stop_before_watchpoint = true;
     cc->disas_set_info = cris_disas_set_info;
+    cc->tcg_initialize = cris_initialize_tcg;
 }
 
 static const TypeInfo cris_cpu_type_info = {


@@ -66,7 +66,6 @@
 #define CC_MASK_NZVC 0xf
 #define CC_MASK_RNZV 0x10e
 
-static TCGv_env cpu_env;
 static TCGv cpu_R[16];
 static TCGv cpu_PR[16];
 static TCGv cc_x;
@@ -839,7 +838,7 @@ static void cris_alu(DisasContext *dc, int op,
         }
         tcg_gen_or_tl(d, d, tmp);
     }
-    if (!TCGV_EQUAL(tmp, d)) {
+    if (tmp != d) {
         tcg_temp_free(tmp);
     }
 }
@@ -1162,7 +1161,7 @@ static inline void t_gen_sext(TCGv d, TCGv s, int size)
         tcg_gen_ext8s_i32(d, s);
     } else if (size == 2) {
         tcg_gen_ext16s_i32(d, s);
-    } else if (!TCGV_EQUAL(d, s)) {
+    } else {
         tcg_gen_mov_tl(d, s);
     }
 }
@@ -1173,7 +1172,7 @@ static inline void t_gen_zext(TCGv d, TCGv s, int size)
         tcg_gen_ext8u_i32(d, s);
     } else if (size == 2) {
         tcg_gen_ext16u_i32(d, s);
-    } else if (!TCGV_EQUAL(d, s)) {
+    } else {
         tcg_gen_mov_tl(d, s);
     }
 }
@@ -3141,7 +3140,7 @@ void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
     next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
     num_insns = 0;
-    max_insns = tb->cflags & CF_COUNT_MASK;
+    max_insns = tb_cflags(tb) & CF_COUNT_MASK;
     if (max_insns == 0) {
         max_insns = CF_COUNT_MASK;
     }
@@ -3171,7 +3170,7 @@ void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
         /* Pretty disas.  */
         LOG_DIS("%8.8x:\t", dc->pc);
 
-        if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
+        if (num_insns == max_insns && (tb_cflags(tb) & CF_LAST_IO)) {
             gen_io_start();
         }
         dc->clear_x = 1;
@@ -3244,7 +3243,7 @@ void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
     npc = dc->pc;
 
-    if (tb->cflags & CF_LAST_IO)
+    if (tb_cflags(tb) & CF_LAST_IO)
         gen_io_end();
     /* Force an update if the per-tb cpu state has changed.  */
     if (dc->is_jmp == DISAS_NEXT
@@ -3368,8 +3367,6 @@ void cris_initialize_tcg(void)
 {
     int i;
 
-    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
-    tcg_ctx.tcg_env = cpu_env;
     cc_x = tcg_global_mem_new(cpu_env,
                               offsetof(CPUCRISState, cc_x), "cc_x");
     cc_src = tcg_global_mem_new(cpu_env,
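Note: these TCGV_EQUAL removals (and the i386 ones below) are enabled by TCGv becoming a genuine pointer type earlier in the series, so plain pointer equality replaces the macro. Schematically (the old macro body shown is a paraphrase, not the exact definition):

/* Before (paraphrased): TCGv was an opaque index type.
 *   #define TCGV_EQUAL(a, b) (GET_TCGV_I32(a) == GET_TCGV_I32(b))
 * After: TCGv is a TCGTemp pointer, so == does the same job. */
if (tmp != d) {            /* was: if (!TCGV_EQUAL(tmp, d)) */
    tcg_temp_free(tmp);
}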


@@ -1272,8 +1272,6 @@ void cris_initialize_crisv10_tcg(void)
 {
     int i;
 
-    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
-    tcg_ctx.tcg_env = cpu_env;
     cc_x = tcg_global_mem_new(cpu_env,
                               offsetof(CPUCRISState, cc_x), "cc_x");
     cc_src = tcg_global_mem_new(cpu_env,


@@ -108,8 +108,6 @@ static void hppa_cpu_initfn(Object *obj)
     cs->env_ptr = env;
     cpu_hppa_loaded_fr0(env);
     set_snan_bit_is_one(true, &env->fp_status);
-
-    hppa_translate_init();
 }
 
 static ObjectClass *hppa_cpu_class_by_name(const char *cpu_model)
@@ -136,6 +134,7 @@ static void hppa_cpu_class_init(ObjectClass *oc, void *data)
     cc->gdb_write_register = hppa_cpu_gdb_write_register;
     cc->handle_mmu_fault = hppa_cpu_handle_mmu_fault;
     cc->disas_set_info = hppa_cpu_disas_set_info;
+    cc->tcg_initialize = hppa_translate_init;
     cc->gdb_num_core_regs = 128;
 }


@@ -3,7 +3,9 @@ DEF_HELPER_FLAGS_2(tsv, TCG_CALL_NO_WG, void, env, tl)
 DEF_HELPER_FLAGS_2(tcond, TCG_CALL_NO_WG, void, env, tl)
 
 DEF_HELPER_FLAGS_3(stby_b, TCG_CALL_NO_WG, void, env, tl, tl)
+DEF_HELPER_FLAGS_3(stby_b_parallel, TCG_CALL_NO_WG, void, env, tl, tl)
 DEF_HELPER_FLAGS_3(stby_e, TCG_CALL_NO_WG, void, env, tl, tl)
+DEF_HELPER_FLAGS_3(stby_e_parallel, TCG_CALL_NO_WG, void, env, tl, tl)
 
 DEF_HELPER_FLAGS_1(probe_r, TCG_CALL_NO_RWG_SE, tl, tl)
 DEF_HELPER_FLAGS_1(probe_w, TCG_CALL_NO_RWG_SE, tl, tl)


@@ -76,7 +76,8 @@ static void atomic_store_3(CPUHPPAState *env, target_ulong addr, uint32_t val,
 #endif
 }
 
-void HELPER(stby_b)(CPUHPPAState *env, target_ulong addr, target_ulong val)
+static void do_stby_b(CPUHPPAState *env, target_ulong addr, target_ulong val,
+                      bool parallel)
 {
     uintptr_t ra = GETPC();
 
@@ -89,7 +90,7 @@ void HELPER(stby_b)(CPUHPPAState *env, target_ulong addr, target_ulong val)
         break;
     case 1:
         /* The 3 byte store must appear atomic.  */
-        if (parallel_cpus) {
+        if (parallel) {
             atomic_store_3(env, addr, val, 0x00ffffffu, ra);
         } else {
             cpu_stb_data_ra(env, addr, val >> 16, ra);
@@ -102,14 +103,26 @@ void HELPER(stby_b)(CPUHPPAState *env, target_ulong addr, target_ulong val)
     }
 }
 
-void HELPER(stby_e)(CPUHPPAState *env, target_ulong addr, target_ulong val)
+void HELPER(stby_b)(CPUHPPAState *env, target_ulong addr, target_ulong val)
+{
+    do_stby_b(env, addr, val, false);
+}
+
+void HELPER(stby_b_parallel)(CPUHPPAState *env, target_ulong addr,
+                             target_ulong val)
+{
+    do_stby_b(env, addr, val, true);
+}
+
+static void do_stby_e(CPUHPPAState *env, target_ulong addr, target_ulong val,
+                      bool parallel)
 {
     uintptr_t ra = GETPC();
 
     switch (addr & 3) {
     case 3:
         /* The 3 byte store must appear atomic.  */
-        if (parallel_cpus) {
+        if (parallel) {
             atomic_store_3(env, addr - 3, val, 0xffffff00u, ra);
         } else {
             cpu_stw_data_ra(env, addr - 3, val >> 16, ra);
@@ -132,6 +145,17 @@ void HELPER(stby_e)(CPUHPPAState *env, target_ulong addr, target_ulong val)
     }
 }
 
+void HELPER(stby_e)(CPUHPPAState *env, target_ulong addr, target_ulong val)
+{
+    do_stby_e(env, addr, val, false);
+}
+
+void HELPER(stby_e_parallel)(CPUHPPAState *env, target_ulong addr,
+                             target_ulong val)
+{
+    do_stby_e(env, addr, val, true);
+}
+
 target_ulong HELPER(probe_r)(target_ulong addr)
 {
     return page_check_range(addr, 1, PAGE_READ);


@@ -83,7 +83,6 @@ typedef struct DisasInsn {
 } DisasInsn;
 
 /* global register indexes */
-static TCGv_env cpu_env;
 static TCGv cpu_gr[32];
 static TCGv cpu_iaoq_f;
 static TCGv cpu_iaoq_b;
@@ -124,17 +123,8 @@ void hppa_translate_init(void)
         "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31"
     };
 
-    static bool done_init = 0;
     int i;
 
-    if (done_init) {
-        return;
-    }
-    done_init = 1;
-
-    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
-    tcg_ctx.tcg_env = cpu_env;
-
     TCGV_UNUSED(cpu_gr[0]);
     for (i = 1; i < 32; i++) {
         cpu_gr[i] = tcg_global_mem_new(cpu_env,
@@ -475,7 +465,7 @@ static DisasJumpType gen_illegal(DisasContext *ctx)
 static bool use_goto_tb(DisasContext *ctx, target_ulong dest)
 {
     /* Suppress goto_tb in the case of single-steping and IO.  */
-    if ((ctx->base.tb->cflags & CF_LAST_IO) || ctx->base.singlestep_enabled) {
+    if ((tb_cflags(ctx->base.tb) & CF_LAST_IO) || ctx->base.singlestep_enabled) {
         return false;
     }
     return true;
@@ -2297,9 +2287,17 @@ static DisasJumpType trans_stby(DisasContext *ctx, uint32_t insn,
     val = load_gpr(ctx, rt);
 
     if (a) {
-        gen_helper_stby_e(cpu_env, addr, val);
+        if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
+            gen_helper_stby_e_parallel(cpu_env, addr, val);
+        } else {
+            gen_helper_stby_e(cpu_env, addr, val);
+        }
     } else {
-        gen_helper_stby_b(cpu_env, addr, val);
+        if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
+            gen_helper_stby_b_parallel(cpu_env, addr, val);
+        } else {
+            gen_helper_stby_b(cpu_env, addr, val);
+        }
     }
 
     if (m) {


@@ -3719,10 +3719,6 @@ static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
         return;
     }
 
-    if (tcg_enabled()) {
-        tcg_x86_init();
-    }
-
 #ifndef CONFIG_USER_ONLY
     qemu_register_reset(x86_cpu_machine_reset_cb, cpu);
 
@@ -4216,6 +4212,7 @@ static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
 #endif
     cc->cpu_exec_enter = x86_cpu_exec_enter;
     cc->cpu_exec_exit = x86_cpu_exec_exit;
+    cc->tcg_initialize = tcg_x86_init;
 
     dc->user_creatable = true;
 }


@@ -72,7 +72,6 @@
 //#define MACRO_TEST   1
 
 /* global register indexes */
-static TCGv_env cpu_env;
 static TCGv cpu_A0;
 static TCGv cpu_cc_dst, cpu_cc_src, cpu_cc_src2, cpu_cc_srcT;
 static TCGv_i32 cpu_cc_op;
@@ -742,7 +741,7 @@ static CCPrepare gen_prepare_eflags_c(DisasContext *s, TCGv reg)
         size = s->cc_op - CC_OP_SUBB;
         t1 = gen_ext_tl(cpu_tmp0, cpu_cc_src, size, false);
         /* If no temporary was used, be careful not to alias t1 and t0.  */
-        t0 = TCGV_EQUAL(t1, cpu_cc_src) ? cpu_tmp0 : reg;
+        t0 = t1 == cpu_cc_src ? cpu_tmp0 : reg;
         tcg_gen_mov_tl(t0, cpu_cc_srcT);
         gen_extu(size, t0);
         goto add_sub;
@@ -951,7 +950,7 @@ static CCPrepare gen_prepare_cc(DisasContext *s, int b, TCGv reg)
         break;
     case JCC_L:
         gen_compute_eflags(s);
-        if (TCGV_EQUAL(reg, cpu_cc_src)) {
+        if (reg == cpu_cc_src) {
             reg = cpu_tmp0;
         }
         tcg_gen_shri_tl(reg, cpu_cc_src, 4); /* CC_O -> CC_S */
@@ -962,7 +961,7 @@ static CCPrepare gen_prepare_cc(DisasContext *s, int b, TCGv reg)
     default:
     case JCC_LE:
         gen_compute_eflags(s);
-        if (TCGV_EQUAL(reg, cpu_cc_src)) {
+        if (reg == cpu_cc_src) {
             reg = cpu_tmp0;
         }
         tcg_gen_shri_tl(reg, cpu_cc_src, 4); /* CC_O -> CC_S */
@@ -1118,7 +1117,7 @@ static void gen_bpt_io(DisasContext *s, TCGv_i32 t_port, int ot)
 
 static inline void gen_ins(DisasContext *s, TCGMemOp ot)
 {
-    if (s->base.tb->cflags & CF_USE_ICOUNT) {
+    if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
         gen_io_start();
     }
     gen_string_movl_A0_EDI(s);
@@ -1133,14 +1132,14 @@ static inline void gen_ins(DisasContext *s, TCGMemOp ot)
     gen_op_movl_T0_Dshift(ot);
     gen_op_add_reg_T0(s->aflag, R_EDI);
     gen_bpt_io(s, cpu_tmp2_i32, ot);
-    if (s->base.tb->cflags & CF_USE_ICOUNT) {
+    if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
        gen_io_end();
     }
 }
 
 static inline void gen_outs(DisasContext *s, TCGMemOp ot)
 {
-    if (s->base.tb->cflags & CF_USE_ICOUNT) {
+    if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
         gen_io_start();
     }
     gen_string_movl_A0_ESI(s);
@@ -1153,7 +1152,7 @@ static inline void gen_outs(DisasContext *s, TCGMemOp ot)
     gen_op_movl_T0_Dshift(ot);
     gen_op_add_reg_T0(s->aflag, R_ESI);
     gen_bpt_io(s, cpu_tmp2_i32, ot);
-    if (s->base.tb->cflags & CF_USE_ICOUNT) {
+    if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
         gen_io_end();
     }
 }
@@ -5307,7 +5306,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
                 if (!(s->cpuid_ext_features & CPUID_EXT_CX16))
                     goto illegal_op;
                 gen_lea_modrm(env, s, modrm);
-                if ((s->prefix & PREFIX_LOCK) && parallel_cpus) {
+                if ((s->prefix & PREFIX_LOCK) && (tb_cflags(s->base.tb) & CF_PARALLEL)) {
                     gen_helper_cmpxchg16b(cpu_env, cpu_A0);
                 } else {
                     gen_helper_cmpxchg16b_unlocked(cpu_env, cpu_A0);
@@ -5318,7 +5317,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
                 if (!(s->cpuid_features & CPUID_CX8))
                     goto illegal_op;
                 gen_lea_modrm(env, s, modrm);
-                if ((s->prefix & PREFIX_LOCK) && parallel_cpus) {
+                if ((s->prefix & PREFIX_LOCK) && (tb_cflags(s->base.tb) & CF_PARALLEL)) {
                     gen_helper_cmpxchg8b(cpu_env, cpu_A0);
                 } else {
                     gen_helper_cmpxchg8b_unlocked(cpu_env, cpu_A0);
@@ -6340,7 +6339,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
             gen_repz_ins(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
         } else {
             gen_ins(s, ot);
-            if (s->base.tb->cflags & CF_USE_ICOUNT) {
+            if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
                 gen_jmp(s, s->pc - s->cs_base);
             }
         }
@@ -6355,7 +6354,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
             gen_repz_outs(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
         } else {
             gen_outs(s, ot);
-            if (s->base.tb->cflags & CF_USE_ICOUNT) {
+            if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
                 gen_jmp(s, s->pc - s->cs_base);
             }
         }
@@ -6371,14 +6370,14 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
         tcg_gen_movi_tl(cpu_T0, val);
         gen_check_io(s, ot, pc_start - s->cs_base,
                      SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes));
-        if (s->base.tb->cflags & CF_USE_ICOUNT) {
+        if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
            gen_io_start();
        }
         tcg_gen_movi_i32(cpu_tmp2_i32, val);
         gen_helper_in_func(ot, cpu_T1, cpu_tmp2_i32);
         gen_op_mov_reg_v(ot, R_EAX, cpu_T1);
         gen_bpt_io(s, cpu_tmp2_i32, ot);
-        if (s->base.tb->cflags & CF_USE_ICOUNT) {
+        if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
             gen_io_end();
             gen_jmp(s, s->pc - s->cs_base);
         }
@@ -6392,14 +6391,14 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
                      svm_is_rep(prefixes));
         gen_op_mov_v_reg(ot, cpu_T1, R_EAX);
 
-        if (s->base.tb->cflags & CF_USE_ICOUNT) {
+        if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
             gen_io_start();
         }
         tcg_gen_movi_i32(cpu_tmp2_i32, val);
         tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T1);
         gen_helper_out_func(ot, cpu_tmp2_i32, cpu_tmp3_i32);
         gen_bpt_io(s, cpu_tmp2_i32, ot);
-        if (s->base.tb->cflags & CF_USE_ICOUNT) {
+        if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
             gen_io_end();
             gen_jmp(s, s->pc - s->cs_base);
         }
@@ -6410,14 +6409,14 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
         tcg_gen_ext16u_tl(cpu_T0, cpu_regs[R_EDX]);
         gen_check_io(s, ot, pc_start - s->cs_base,
                      SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes));
-        if (s->base.tb->cflags & CF_USE_ICOUNT) {
+        if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
             gen_io_start();
         }
         tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
         gen_helper_in_func(ot, cpu_T1, cpu_tmp2_i32);
         gen_op_mov_reg_v(ot, R_EAX, cpu_T1);
         gen_bpt_io(s, cpu_tmp2_i32, ot);
-        if (s->base.tb->cflags & CF_USE_ICOUNT) {
+        if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
             gen_io_end();
             gen_jmp(s, s->pc - s->cs_base);
         }
@@ -6430,14 +6429,14 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
                      svm_is_rep(prefixes));
         gen_op_mov_v_reg(ot, cpu_T1, R_EAX);
 
-        if (s->base.tb->cflags & CF_USE_ICOUNT) {
+        if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
             gen_io_start();
         }
         tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
         tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T1);
         gen_helper_out_func(ot, cpu_tmp2_i32, cpu_tmp3_i32);
         gen_bpt_io(s, cpu_tmp2_i32, ot);
-        if (s->base.tb->cflags & CF_USE_ICOUNT) {
+        if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
             gen_io_end();
             gen_jmp(s, s->pc - s->cs_base);
         }
@@ -7143,11 +7142,11 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
     case 0x131: /* rdtsc */
         gen_update_cc_op(s);
         gen_jmp_im(pc_start - s->cs_base);
-        if (s->base.tb->cflags & CF_USE_ICOUNT) {
+        if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
            gen_io_start();
        }
         gen_helper_rdtsc(cpu_env);
-        if (s->base.tb->cflags & CF_USE_ICOUNT) {
+        if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
             gen_io_end();
             gen_jmp(s, s->pc - s->cs_base);
         }
@@ -7602,11 +7601,11 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
         }
         gen_update_cc_op(s);
         gen_jmp_im(pc_start - s->cs_base);
-        if (s->base.tb->cflags & CF_USE_ICOUNT) {
+        if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
            gen_io_start();
        }
         gen_helper_rdtscp(cpu_env);
-        if (s->base.tb->cflags & CF_USE_ICOUNT) {
+        if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
             gen_io_end();
             gen_jmp(s, s->pc - s->cs_base);
         }
@@ -7971,24 +7970,24 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
             gen_update_cc_op(s);
             gen_jmp_im(pc_start - s->cs_base);
             if (b & 2) {
-                if (s->base.tb->cflags & CF_USE_ICOUNT) {
+                if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
                     gen_io_start();
                 }
                 gen_op_mov_v_reg(ot, cpu_T0, rm);
                 gen_helper_write_crN(cpu_env, tcg_const_i32(reg),
                                      cpu_T0);
-                if (s->base.tb->cflags & CF_USE_ICOUNT) {
+                if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
                     gen_io_end();
                 }
                 gen_jmp_im(s->pc - s->cs_base);
                 gen_eob(s);
             } else {
-                if (s->base.tb->cflags & CF_USE_ICOUNT) {
+                if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
                     gen_io_start();
                 }
                 gen_helper_read_crN(cpu_T0, cpu_env, tcg_const_i32(reg));
                 gen_op_mov_reg_v(ot, rm, cpu_T0);
-                if (s->base.tb->cflags & CF_USE_ICOUNT) {
+                if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
                     gen_io_end();
                 }
             }
@@ -8366,15 +8365,7 @@ void tcg_x86_init(void)
         "bnd0_ub", "bnd1_ub", "bnd2_ub", "bnd3_ub"
     };
     int i;
-    static bool initialized;
-
-    if (initialized) {
-        return;
-    }
-    initialized = true;
-
-    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
-    tcg_ctx.tcg_env = cpu_env;
 
     cpu_cc_op = tcg_global_mem_new_i32(cpu_env,
                                        offsetof(CPUX86State, cc_op), "cc_op");
     cpu_cc_dst = tcg_global_mem_new(cpu_env, offsetof(CPUX86State, cc_dst),
@@ -8458,7 +8449,7 @@ static int i386_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cpu,
        record/replay modes and there will always be an
        additional step for ecx=0 when icount is enabled.
      */
-    dc->repz_opt = !dc->jmp_opt && !(dc->base.tb->cflags & CF_USE_ICOUNT);
+    dc->repz_opt = !dc->jmp_opt && !(tb_cflags(dc->base.tb) & CF_USE_ICOUNT);
 #if 0
     /* check addseg logic */
     if (!dc->addseg && (dc->vm86 || !dc->pe || !dc->code32))
@@ -8524,7 +8515,7 @@ static void i386_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
            the flag and abort the translation to give the irqs a
            chance to happen */
         dc->base.is_jmp = DISAS_TOO_MANY;
-    } else if ((dc->base.tb->cflags & CF_USE_ICOUNT)
+    } else if ((tb_cflags(dc->base.tb) & CF_USE_ICOUNT)
               && ((dc->base.pc_next & TARGET_PAGE_MASK)
                   != ((dc->base.pc_next + TARGET_MAX_INSN_SIZE - 1)
                       & TARGET_PAGE_MASK)


@ -163,16 +163,10 @@ static void lm32_cpu_initfn(Object *obj)
CPUState *cs = CPU(obj); CPUState *cs = CPU(obj);
LM32CPU *cpu = LM32_CPU(obj); LM32CPU *cpu = LM32_CPU(obj);
CPULM32State *env = &cpu->env; CPULM32State *env = &cpu->env;
static bool tcg_initialized;
cs->env_ptr = env; cs->env_ptr = env;
env->flags = 0; env->flags = 0;
if (tcg_enabled() && !tcg_initialized) {
tcg_initialized = true;
lm32_translate_init();
}
} }
static void lm32_basic_cpu_initfn(Object *obj) static void lm32_basic_cpu_initfn(Object *obj)
@ -286,6 +280,7 @@ static void lm32_cpu_class_init(ObjectClass *oc, void *data)
cc->gdb_stop_before_watchpoint = true; cc->gdb_stop_before_watchpoint = true;
cc->debug_excp_handler = lm32_debug_excp_handler; cc->debug_excp_handler = lm32_debug_excp_handler;
cc->disas_set_info = lm32_cpu_disas_set_info; cc->disas_set_info = lm32_cpu_disas_set_info;
cc->tcg_initialize = lm32_translate_init;
} }
static void lm32_register_cpu_type(const LM32CPUInfo *info) static void lm32_register_cpu_type(const LM32CPUInfo *info)
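
The per-target "static bool tcg_initialized" latches deleted above all collapse into the new cc->tcg_initialize hook. A sketch of how common code presumably invokes it exactly once per process (the hook name comes from this pull; the surrounding realize function and flag name are assumptions):

    /* Sketch: one-shot TCG initialization driven from common CPU
     * realize code instead of each target's instance init. */
    void cpu_exec_realizefn(CPUState *cpu, Error **errp)
    {
        CPUClass *cc = CPU_GET_CLASS(cpu);
        static bool tcg_target_initialized;

        if (tcg_enabled() && !tcg_target_initialized) {
            tcg_target_initialized = true;
            cc->tcg_initialize();
        }
        /* ...existing realize work... */
    }
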


@ -53,7 +53,6 @@
#define DISAS_UPDATE DISAS_TARGET_1 /* cpu state was modified dynamically */ #define DISAS_UPDATE DISAS_TARGET_1 /* cpu state was modified dynamically */
#define DISAS_TB_JUMP DISAS_TARGET_2 /* only pc was modified statically */ #define DISAS_TB_JUMP DISAS_TARGET_2 /* only pc was modified statically */
static TCGv_env cpu_env;
static TCGv cpu_R[32]; static TCGv cpu_R[32];
static TCGv cpu_pc; static TCGv cpu_pc;
static TCGv cpu_ie; static TCGv cpu_ie;
@ -880,24 +879,24 @@ static void dec_wcsr(DisasContext *dc)
break; break;
case CSR_IM: case CSR_IM:
/* mark as an io operation because it could cause an interrupt */ /* mark as an io operation because it could cause an interrupt */
if (dc->tb->cflags & CF_USE_ICOUNT) { if (tb_cflags(dc->tb) & CF_USE_ICOUNT) {
gen_io_start(); gen_io_start();
} }
gen_helper_wcsr_im(cpu_env, cpu_R[dc->r1]); gen_helper_wcsr_im(cpu_env, cpu_R[dc->r1]);
tcg_gen_movi_tl(cpu_pc, dc->pc + 4); tcg_gen_movi_tl(cpu_pc, dc->pc + 4);
if (dc->tb->cflags & CF_USE_ICOUNT) { if (tb_cflags(dc->tb) & CF_USE_ICOUNT) {
gen_io_end(); gen_io_end();
} }
dc->is_jmp = DISAS_UPDATE; dc->is_jmp = DISAS_UPDATE;
break; break;
case CSR_IP: case CSR_IP:
/* mark as an io operation because it could cause an interrupt */ /* mark as an io operation because it could cause an interrupt */
if (dc->tb->cflags & CF_USE_ICOUNT) { if (tb_cflags(dc->tb) & CF_USE_ICOUNT) {
gen_io_start(); gen_io_start();
} }
gen_helper_wcsr_ip(cpu_env, cpu_R[dc->r1]); gen_helper_wcsr_ip(cpu_env, cpu_R[dc->r1]);
tcg_gen_movi_tl(cpu_pc, dc->pc + 4); tcg_gen_movi_tl(cpu_pc, dc->pc + 4);
if (dc->tb->cflags & CF_USE_ICOUNT) { if (tb_cflags(dc->tb) & CF_USE_ICOUNT) {
gen_io_end(); gen_io_end();
} }
dc->is_jmp = DISAS_UPDATE; dc->is_jmp = DISAS_UPDATE;
@ -1078,7 +1077,7 @@ void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE; next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
num_insns = 0; num_insns = 0;
max_insns = tb->cflags & CF_COUNT_MASK; max_insns = tb_cflags(tb) & CF_COUNT_MASK;
if (max_insns == 0) { if (max_insns == 0) {
max_insns = CF_COUNT_MASK; max_insns = CF_COUNT_MASK;
} }
@ -1106,7 +1105,7 @@ void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
/* Pretty disas. */ /* Pretty disas. */
LOG_DIS("%8.8x:\t", dc->pc); LOG_DIS("%8.8x:\t", dc->pc);
if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) { if (num_insns == max_insns && (tb_cflags(tb) & CF_LAST_IO)) {
gen_io_start(); gen_io_start();
} }
@ -1119,7 +1118,7 @@ void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
&& (dc->pc < next_page_start) && (dc->pc < next_page_start)
&& num_insns < max_insns); && num_insns < max_insns);
if (tb->cflags & CF_LAST_IO) { if (tb_cflags(tb) & CF_LAST_IO) {
gen_io_end(); gen_io_end();
} }
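
The icount bookkeeping in this loop is the same skeleton every target below repeats: max_insns comes from the TB's cflags, and a TB generated with CF_LAST_IO brackets its final instruction with gen_io_start()/gen_io_end(). A condensed sketch of that shared shape, with the target-specific decode elided:

    max_insns = tb_cflags(tb) & CF_COUNT_MASK;
    if (max_insns == 0) {
        max_insns = CF_COUNT_MASK;
    }
    do {
        if (num_insns == max_insns && (tb_cflags(tb) & CF_LAST_IO)) {
            gen_io_start();    /* only the last insn may perform I/O */
        }
        /* ...decode and translate one guest instruction... */
        num_insns++;
    } while (/* same guest page, op buffer not full, */ num_insns < max_insns);
    if (tb_cflags(tb) & CF_LAST_IO) {
        gen_io_end();
    }
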
@ -1208,9 +1207,6 @@ void lm32_translate_init(void)
{ {
int i; int i;
cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
tcg_ctx.tcg_env = cpu_env;
for (i = 0; i < ARRAY_SIZE(cpu_R); i++) { for (i = 0; i < ARRAY_SIZE(cpu_R); i++) {
cpu_R[i] = tcg_global_mem_new(cpu_env, cpu_R[i] = tcg_global_mem_new(cpu_env,
offsetof(CPULM32State, regs[i]), offsetof(CPULM32State, regs[i]),


@ -247,14 +247,8 @@ static void m68k_cpu_initfn(Object *obj)
CPUState *cs = CPU(obj); CPUState *cs = CPU(obj);
M68kCPU *cpu = M68K_CPU(obj); M68kCPU *cpu = M68K_CPU(obj);
CPUM68KState *env = &cpu->env; CPUM68KState *env = &cpu->env;
static bool inited;
cs->env_ptr = env; cs->env_ptr = env;
if (tcg_enabled() && !inited) {
inited = true;
m68k_tcg_init();
}
} }
static const VMStateDescription vmstate_m68k_cpu = { static const VMStateDescription vmstate_m68k_cpu = {
@ -288,6 +282,7 @@ static void m68k_cpu_class_init(ObjectClass *c, void *data)
cc->get_phys_page_debug = m68k_cpu_get_phys_page_debug; cc->get_phys_page_debug = m68k_cpu_get_phys_page_debug;
#endif #endif
cc->disas_set_info = m68k_cpu_disas_set_info; cc->disas_set_info = m68k_cpu_disas_set_info;
cc->tcg_initialize = m68k_tcg_init;
cc->gdb_num_core_regs = 18; cc->gdb_num_core_regs = 18;
cc->gdb_core_xml_file = "cf-core.xml"; cc->gdb_core_xml_file = "cf-core.xml";


@ -11,6 +11,7 @@ DEF_HELPER_2(set_sr, void, env, i32)
DEF_HELPER_3(movec, void, env, i32, i32) DEF_HELPER_3(movec, void, env, i32, i32)
DEF_HELPER_4(cas2w, void, env, i32, i32, i32) DEF_HELPER_4(cas2w, void, env, i32, i32, i32)
DEF_HELPER_4(cas2l, void, env, i32, i32, i32) DEF_HELPER_4(cas2l, void, env, i32, i32, i32)
DEF_HELPER_4(cas2l_parallel, void, env, i32, i32, i32)
#define dh_alias_fp ptr #define dh_alias_fp ptr
#define dh_ctype_fp FPReg * #define dh_ctype_fp FPReg *


@ -361,6 +361,7 @@ void HELPER(divsll)(CPUM68KState *env, int numr, int regr, int32_t den)
env->dregs[numr] = quot; env->dregs[numr] = quot;
} }
/* We're executing in a serial context -- no need to be atomic. */
void HELPER(cas2w)(CPUM68KState *env, uint32_t regs, uint32_t a1, uint32_t a2) void HELPER(cas2w)(CPUM68KState *env, uint32_t regs, uint32_t a1, uint32_t a2)
{ {
uint32_t Dc1 = extract32(regs, 9, 3); uint32_t Dc1 = extract32(regs, 9, 3);
@ -374,17 +375,11 @@ void HELPER(cas2w)(CPUM68KState *env, uint32_t regs, uint32_t a1, uint32_t a2)
int16_t l1, l2; int16_t l1, l2;
uintptr_t ra = GETPC(); uintptr_t ra = GETPC();
if (parallel_cpus) {
/* Tell the main loop we need to serialize this insn. */
cpu_loop_exit_atomic(ENV_GET_CPU(env), ra);
} else {
/* We're executing in a serial context -- no need to be atomic. */
l1 = cpu_lduw_data_ra(env, a1, ra);
l2 = cpu_lduw_data_ra(env, a2, ra);
if (l1 == c1 && l2 == c2) {
cpu_stw_data_ra(env, a1, u1, ra);
cpu_stw_data_ra(env, a2, u2, ra);
}
}
l1 = cpu_lduw_data_ra(env, a1, ra);
l2 = cpu_lduw_data_ra(env, a2, ra);
if (l1 == c1 && l2 == c2) {
cpu_stw_data_ra(env, a1, u1, ra);
cpu_stw_data_ra(env, a2, u2, ra);
}
if (c1 != l1) { if (c1 != l1) {
@ -399,7 +394,8 @@ void HELPER(cas2w)(CPUM68KState *env, uint32_t regs, uint32_t a1, uint32_t a2)
env->dregs[Dc2] = deposit32(env->dregs[Dc2], 0, 16, l2); env->dregs[Dc2] = deposit32(env->dregs[Dc2], 0, 16, l2);
} }
void HELPER(cas2l)(CPUM68KState *env, uint32_t regs, uint32_t a1, uint32_t a2) static void do_cas2l(CPUM68KState *env, uint32_t regs, uint32_t a1, uint32_t a2,
bool parallel)
{ {
uint32_t Dc1 = extract32(regs, 9, 3); uint32_t Dc1 = extract32(regs, 9, 3);
uint32_t Dc2 = extract32(regs, 6, 3); uint32_t Dc2 = extract32(regs, 6, 3);
@ -416,7 +412,7 @@ void HELPER(cas2l)(CPUM68KState *env, uint32_t regs, uint32_t a1, uint32_t a2)
TCGMemOpIdx oi; TCGMemOpIdx oi;
#endif #endif
if (parallel_cpus) { if (parallel) {
/* We're executing in a parallel context -- must be atomic. */ /* We're executing in a parallel context -- must be atomic. */
#ifdef CONFIG_ATOMIC64 #ifdef CONFIG_ATOMIC64
uint64_t c, u, l; uint64_t c, u, l;
@ -470,6 +466,17 @@ void HELPER(cas2l)(CPUM68KState *env, uint32_t regs, uint32_t a1, uint32_t a2)
env->dregs[Dc2] = l2; env->dregs[Dc2] = l2;
} }
void HELPER(cas2l)(CPUM68KState *env, uint32_t regs, uint32_t a1, uint32_t a2)
{
do_cas2l(env, regs, a1, a2, false);
}
void HELPER(cas2l_parallel)(CPUM68KState *env, uint32_t regs, uint32_t a1,
uint32_t a2)
{
do_cas2l(env, regs, a1, a2, true);
}
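
The cas2l/cas2l_parallel split works because CF_PARALLEL is fixed per TB: the translator (see the cas2l hunk below) selects the helper at code-generation time instead of the helper testing parallel_cpus at run time. The flag presumably enters through the cflags used when the TB is generated, roughly:

    /* Sketch: cflags for newly generated TBs.  CF_PARALLEL replaces
     * run-time parallel_cpus checks in helpers and translators. */
    uint32_t curr_cflags(void)
    {
        return (parallel_cpus ? CF_PARALLEL : 0)
             | (use_icount ? CF_USE_ICOUNT : 0);
    }
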
struct bf_data { struct bf_data {
uint32_t addr; uint32_t addr;
uint32_t bofs; uint32_t bofs;


@ -44,8 +44,6 @@
static TCGv_i32 cpu_halted; static TCGv_i32 cpu_halted;
static TCGv_i32 cpu_exception_index; static TCGv_i32 cpu_exception_index;
static TCGv_env cpu_env;
static char cpu_reg_names[2 * 8 * 3 + 5 * 4]; static char cpu_reg_names[2 * 8 * 3 + 5 * 4];
static TCGv cpu_dregs[8]; static TCGv cpu_dregs[8];
static TCGv cpu_aregs[8]; static TCGv cpu_aregs[8];
@ -58,7 +56,7 @@ static TCGv_i64 cpu_macc[4];
#define QREG_SP get_areg(s, 7) #define QREG_SP get_areg(s, 7)
static TCGv NULL_QREG; static TCGv NULL_QREG;
#define IS_NULL_QREG(t) (TCGV_EQUAL(t, NULL_QREG)) #define IS_NULL_QREG(t) (t == NULL_QREG)
/* Used to distinguish stores from bad addressing modes. */ /* Used to distinguish stores from bad addressing modes. */
static TCGv store_dummy; static TCGv store_dummy;
@ -69,9 +67,6 @@ void m68k_tcg_init(void)
char *p; char *p;
int i; int i;
cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
tcg_ctx.tcg_env = cpu_env;
#define DEFO32(name, offset) \ #define DEFO32(name, offset) \
QREG_##name = tcg_global_mem_new_i32(cpu_env, \ QREG_##name = tcg_global_mem_new_i32(cpu_env, \
offsetof(CPUM68KState, offset), #name); offsetof(CPUM68KState, offset), #name);
@ -2312,7 +2307,11 @@ DISAS_INSN(cas2w)
(REG(ext1, 6) << 3) | (REG(ext1, 6) << 3) |
(REG(ext2, 0) << 6) | (REG(ext2, 0) << 6) |
(REG(ext1, 0) << 9)); (REG(ext1, 0) << 9));
gen_helper_cas2w(cpu_env, regs, addr1, addr2); if (tb_cflags(s->tb) & CF_PARALLEL) {
gen_helper_exit_atomic(cpu_env);
} else {
gen_helper_cas2w(cpu_env, regs, addr1, addr2);
}
tcg_temp_free(regs); tcg_temp_free(regs);
/* Note that cas2w also assigned to env->cc_op. */ /* Note that cas2w also assigned to env->cc_op. */
@ -2358,7 +2357,11 @@ DISAS_INSN(cas2l)
(REG(ext1, 6) << 3) | (REG(ext1, 6) << 3) |
(REG(ext2, 0) << 6) | (REG(ext2, 0) << 6) |
(REG(ext1, 0) << 9)); (REG(ext1, 0) << 9));
gen_helper_cas2l(cpu_env, regs, addr1, addr2); if (tb_cflags(s->tb) & CF_PARALLEL) {
gen_helper_cas2l_parallel(cpu_env, regs, addr1, addr2);
} else {
gen_helper_cas2l(cpu_env, regs, addr1, addr2);
}
tcg_temp_free(regs); tcg_temp_free(regs);
/* Note that cas2l also assigned to env->cc_op. */ /* Note that cas2l also assigned to env->cc_op. */
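
cas2w keeps no parallel variant: under CF_PARALLEL the translator emits gen_helper_exit_atomic(), which raises EXCP_ATOMIC so the main loop re-executes the instruction exclusively. A rough sketch of that slow path, in the spirit of cpu_exec_step_atomic (abbreviated; per the commit list, the TB is generated outside the exclusive region):

    /* Sketch: translate without CF_PARALLEL, then run one instruction
     * while all other vCPUs are stopped, so serial helpers are safe. */
    tb = tb_gen_code(cpu, pc, cs_base, flags,
                     curr_cflags() & ~CF_PARALLEL);
    start_exclusive();
    cpu_tb_exec(cpu, tb);
    end_exclusive();
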
@ -5547,7 +5550,7 @@ void gen_intermediate_code(CPUState *cs, TranslationBlock *tb)
dc->done_mac = 0; dc->done_mac = 0;
dc->writeback_mask = 0; dc->writeback_mask = 0;
num_insns = 0; num_insns = 0;
max_insns = tb->cflags & CF_COUNT_MASK; max_insns = tb_cflags(tb) & CF_COUNT_MASK;
if (max_insns == 0) { if (max_insns == 0) {
max_insns = CF_COUNT_MASK; max_insns = CF_COUNT_MASK;
} }
@ -5573,7 +5576,7 @@ void gen_intermediate_code(CPUState *cs, TranslationBlock *tb)
break; break;
} }
if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) { if (num_insns == max_insns && (tb_cflags(tb) & CF_LAST_IO)) {
gen_io_start(); gen_io_start();
} }
@ -5585,7 +5588,7 @@ void gen_intermediate_code(CPUState *cs, TranslationBlock *tb)
(pc_offset) < (TARGET_PAGE_SIZE - 32) && (pc_offset) < (TARGET_PAGE_SIZE - 32) &&
num_insns < max_insns); num_insns < max_insns);
if (tb->cflags & CF_LAST_IO) if (tb_cflags(tb) & CF_LAST_IO)
gen_io_end(); gen_io_end();
if (unlikely(cs->singlestep_enabled)) { if (unlikely(cs->singlestep_enabled)) {
/* Make sure the pc is updated, and raise a debug exception. */ /* Make sure the pc is updated, and raise a debug exception. */


@ -205,7 +205,6 @@ static void mb_cpu_initfn(Object *obj)
CPUState *cs = CPU(obj); CPUState *cs = CPU(obj);
MicroBlazeCPU *cpu = MICROBLAZE_CPU(obj); MicroBlazeCPU *cpu = MICROBLAZE_CPU(obj);
CPUMBState *env = &cpu->env; CPUMBState *env = &cpu->env;
static bool tcg_initialized;
cs->env_ptr = env; cs->env_ptr = env;
@ -215,11 +214,6 @@ static void mb_cpu_initfn(Object *obj)
/* Inbound IRQ and FIR lines */ /* Inbound IRQ and FIR lines */
qdev_init_gpio_in(DEVICE(cpu), microblaze_cpu_set_irq, 2); qdev_init_gpio_in(DEVICE(cpu), microblaze_cpu_set_irq, 2);
#endif #endif
if (tcg_enabled() && !tcg_initialized) {
tcg_initialized = true;
mb_tcg_init();
}
} }
static const VMStateDescription vmstate_mb_cpu = { static const VMStateDescription vmstate_mb_cpu = {
@ -289,6 +283,7 @@ static void mb_cpu_class_init(ObjectClass *oc, void *data)
cc->gdb_num_core_regs = 32 + 5; cc->gdb_num_core_regs = 32 + 5;
cc->disas_set_info = mb_disas_set_info; cc->disas_set_info = mb_disas_set_info;
cc->tcg_initialize = mb_tcg_init;
} }
static const TypeInfo mb_cpu_type_info = { static const TypeInfo mb_cpu_type_info = {


@ -53,7 +53,6 @@
#define DISAS_TB_JUMP DISAS_TARGET_2 /* only pc was modified statically */ #define DISAS_TB_JUMP DISAS_TARGET_2 /* only pc was modified statically */
static TCGv env_debug; static TCGv env_debug;
static TCGv_env cpu_env;
static TCGv cpu_R[32]; static TCGv cpu_R[32];
static TCGv cpu_SR[18]; static TCGv cpu_SR[18];
static TCGv env_imm; static TCGv env_imm;
@ -1666,7 +1665,7 @@ void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE; next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
num_insns = 0; num_insns = 0;
max_insns = tb->cflags & CF_COUNT_MASK; max_insns = tb_cflags(tb) & CF_COUNT_MASK;
if (max_insns == 0) { if (max_insns == 0) {
max_insns = CF_COUNT_MASK; max_insns = CF_COUNT_MASK;
} }
@ -1701,7 +1700,7 @@ void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
/* Pretty disas. */ /* Pretty disas. */
LOG_DIS("%8.8x:\t", dc->pc); LOG_DIS("%8.8x:\t", dc->pc);
if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) { if (num_insns == max_insns && (tb_cflags(tb) & CF_LAST_IO)) {
gen_io_start(); gen_io_start();
} }
@ -1763,7 +1762,7 @@ void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
npc = dc->jmp_pc; npc = dc->jmp_pc;
} }
if (tb->cflags & CF_LAST_IO) if (tb_cflags(tb) & CF_LAST_IO)
gen_io_end(); gen_io_end();
/* Force an update if the per-tb cpu state has changed. */ /* Force an update if the per-tb cpu state has changed. */
if (dc->is_jmp == DISAS_NEXT if (dc->is_jmp == DISAS_NEXT
@ -1855,9 +1854,6 @@ void mb_tcg_init(void)
{ {
int i; int i;
cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
tcg_ctx.tcg_env = cpu_env;
env_debug = tcg_global_mem_new(cpu_env, env_debug = tcg_global_mem_new(cpu_env,
offsetof(CPUMBState, debug), offsetof(CPUMBState, debug),
"debug0"); "debug0");


@ -150,10 +150,6 @@ static void mips_cpu_initfn(Object *obj)
cs->env_ptr = env; cs->env_ptr = env;
env->cpu_model = mcc->cpu_def; env->cpu_model = mcc->cpu_def;
if (tcg_enabled()) {
mips_tcg_init();
}
} }
static char *mips_cpu_type_name(const char *cpu_model) static char *mips_cpu_type_name(const char *cpu_model)
@ -202,6 +198,7 @@ static void mips_cpu_class_init(ObjectClass *c, void *data)
cc->vmsd = &vmstate_mips_cpu; cc->vmsd = &vmstate_mips_cpu;
#endif #endif
cc->disas_set_info = mips_cpu_disas_set_info; cc->disas_set_info = mips_cpu_disas_set_info;
cc->tcg_initialize = mips_tcg_init;
cc->gdb_num_core_regs = 73; cc->gdb_num_core_regs = 73;
cc->gdb_stop_before_watchpoint = true; cc->gdb_stop_before_watchpoint = true;


@ -1376,7 +1376,6 @@ enum {
}; };
/* global register indices */ /* global register indices */
static TCGv_env cpu_env;
static TCGv cpu_gpr[32], cpu_PC; static TCGv cpu_gpr[32], cpu_PC;
static TCGv cpu_HI[MIPS_DSP_ACC], cpu_LO[MIPS_DSP_ACC]; static TCGv cpu_HI[MIPS_DSP_ACC], cpu_LO[MIPS_DSP_ACC];
static TCGv cpu_dspctrl, btarget, bcond; static TCGv cpu_dspctrl, btarget, bcond;
@ -5327,11 +5326,11 @@ static void gen_mfc0(DisasContext *ctx, TCGv arg, int reg, int sel)
switch (sel) { switch (sel) {
case 0: case 0:
/* Mark as an IO operation because we read the time. */ /* Mark as an IO operation because we read the time. */
if (ctx->tb->cflags & CF_USE_ICOUNT) { if (tb_cflags(ctx->tb) & CF_USE_ICOUNT) {
gen_io_start(); gen_io_start();
} }
gen_helper_mfc0_count(arg, cpu_env); gen_helper_mfc0_count(arg, cpu_env);
if (ctx->tb->cflags & CF_USE_ICOUNT) { if (tb_cflags(ctx->tb) & CF_USE_ICOUNT) {
gen_io_end(); gen_io_end();
} }
/* Break the TB to be able to take timer interrupts immediately /* Break the TB to be able to take timer interrupts immediately
@ -5734,7 +5733,7 @@ static void gen_mtc0(DisasContext *ctx, TCGv arg, int reg, int sel)
if (sel != 0) if (sel != 0)
check_insn(ctx, ISA_MIPS32); check_insn(ctx, ISA_MIPS32);
if (ctx->tb->cflags & CF_USE_ICOUNT) { if (tb_cflags(ctx->tb) & CF_USE_ICOUNT) {
gen_io_start(); gen_io_start();
} }
@ -6401,7 +6400,7 @@ static void gen_mtc0(DisasContext *ctx, TCGv arg, int reg, int sel)
trace_mips_translate_c0("mtc0", rn, reg, sel); trace_mips_translate_c0("mtc0", rn, reg, sel);
/* For simplicity assume that all writes can cause interrupts. */ /* For simplicity assume that all writes can cause interrupts. */
if (ctx->tb->cflags & CF_USE_ICOUNT) { if (tb_cflags(ctx->tb) & CF_USE_ICOUNT) {
gen_io_end(); gen_io_end();
/* BS_STOP isn't sufficient, we need to ensure we break out of /* BS_STOP isn't sufficient, we need to ensure we break out of
* translated code to check for pending interrupts. */ * translated code to check for pending interrupts. */
@ -6679,11 +6678,11 @@ static void gen_dmfc0(DisasContext *ctx, TCGv arg, int reg, int sel)
switch (sel) { switch (sel) {
case 0: case 0:
/* Mark as an IO operation because we read the time. */ /* Mark as an IO operation because we read the time. */
if (ctx->tb->cflags & CF_USE_ICOUNT) { if (tb_cflags(ctx->tb) & CF_USE_ICOUNT) {
gen_io_start(); gen_io_start();
} }
gen_helper_mfc0_count(arg, cpu_env); gen_helper_mfc0_count(arg, cpu_env);
if (ctx->tb->cflags & CF_USE_ICOUNT) { if (tb_cflags(ctx->tb) & CF_USE_ICOUNT) {
gen_io_end(); gen_io_end();
} }
/* Break the TB to be able to take timer interrupts immediately /* Break the TB to be able to take timer interrupts immediately
@ -7072,7 +7071,7 @@ static void gen_dmtc0(DisasContext *ctx, TCGv arg, int reg, int sel)
if (sel != 0) if (sel != 0)
check_insn(ctx, ISA_MIPS64); check_insn(ctx, ISA_MIPS64);
if (ctx->tb->cflags & CF_USE_ICOUNT) { if (tb_cflags(ctx->tb) & CF_USE_ICOUNT) {
gen_io_start(); gen_io_start();
} }
@ -7727,7 +7726,7 @@ static void gen_dmtc0(DisasContext *ctx, TCGv arg, int reg, int sel)
trace_mips_translate_c0("dmtc0", rn, reg, sel); trace_mips_translate_c0("dmtc0", rn, reg, sel);
/* For simplicity assume that all writes can cause interrupts. */ /* For simplicity assume that all writes can cause interrupts. */
if (ctx->tb->cflags & CF_USE_ICOUNT) { if (tb_cflags(ctx->tb) & CF_USE_ICOUNT) {
gen_io_end(); gen_io_end();
/* BS_STOP isn't sufficient, we need to ensure we break out of /* BS_STOP isn't sufficient, we need to ensure we break out of
* translated code to check for pending interrupts. */ * translated code to check for pending interrupts. */
@ -10756,11 +10755,11 @@ static void gen_rdhwr(DisasContext *ctx, int rt, int rd, int sel)
gen_store_gpr(t0, rt); gen_store_gpr(t0, rt);
break; break;
case 2: case 2:
if (ctx->tb->cflags & CF_USE_ICOUNT) { if (tb_cflags(ctx->tb) & CF_USE_ICOUNT) {
gen_io_start(); gen_io_start();
} }
gen_helper_rdhwr_cc(t0, cpu_env); gen_helper_rdhwr_cc(t0, cpu_env);
if (ctx->tb->cflags & CF_USE_ICOUNT) { if (tb_cflags(ctx->tb) & CF_USE_ICOUNT) {
gen_io_end(); gen_io_end();
} }
gen_store_gpr(t0, rt); gen_store_gpr(t0, rt);
@ -20248,7 +20247,7 @@ void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
ctx.default_tcg_memop_mask = (ctx.insn_flags & ISA_MIPS32R6) ? ctx.default_tcg_memop_mask = (ctx.insn_flags & ISA_MIPS32R6) ?
MO_UNALN : MO_ALIGN; MO_UNALN : MO_ALIGN;
num_insns = 0; num_insns = 0;
max_insns = tb->cflags & CF_COUNT_MASK; max_insns = tb_cflags(tb) & CF_COUNT_MASK;
if (max_insns == 0) { if (max_insns == 0) {
max_insns = CF_COUNT_MASK; max_insns = CF_COUNT_MASK;
} }
@ -20274,7 +20273,7 @@ void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
goto done_generating; goto done_generating;
} }
if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) { if (num_insns == max_insns && (tb_cflags(tb) & CF_LAST_IO)) {
gen_io_start(); gen_io_start();
} }
@ -20335,7 +20334,7 @@ void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
if (singlestep) if (singlestep)
break; break;
} }
if (tb->cflags & CF_LAST_IO) { if (tb_cflags(tb) & CF_LAST_IO) {
gen_io_end(); gen_io_end();
} }
if (cs->singlestep_enabled && ctx.bstate != BS_BRANCH) { if (cs->singlestep_enabled && ctx.bstate != BS_BRANCH) {
@ -20453,14 +20452,6 @@ void mips_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
void mips_tcg_init(void) void mips_tcg_init(void)
{ {
int i; int i;
static int inited;
/* Initialize various static tables. */
if (inited)
return;
cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
tcg_ctx.tcg_env = cpu_env;
TCGV_UNUSED(cpu_gpr[0]); TCGV_UNUSED(cpu_gpr[0]);
for (i = 1; i < 32; i++) for (i = 1; i < 32; i++)
@ -20506,8 +20497,6 @@ void mips_tcg_init(void)
fpu_fcr31 = tcg_global_mem_new_i32(cpu_env, fpu_fcr31 = tcg_global_mem_new_i32(cpu_env,
offsetof(CPUMIPSState, active_fpu.fcr31), offsetof(CPUMIPSState, active_fpu.fcr31),
"fcr31"); "fcr31");
inited = 1;
} }
#include "translate_init.c" #include "translate_init.c"


@ -77,14 +77,8 @@ static void moxie_cpu_initfn(Object *obj)
{ {
CPUState *cs = CPU(obj); CPUState *cs = CPU(obj);
MoxieCPU *cpu = MOXIE_CPU(obj); MoxieCPU *cpu = MOXIE_CPU(obj);
static int inited;
cs->env_ptr = &cpu->env; cs->env_ptr = &cpu->env;
if (tcg_enabled() && !inited) {
inited = 1;
moxie_translate_init();
}
} }
static ObjectClass *moxie_cpu_class_by_name(const char *cpu_model) static ObjectClass *moxie_cpu_class_by_name(const char *cpu_model)
@ -122,6 +116,7 @@ static void moxie_cpu_class_init(ObjectClass *oc, void *data)
cc->vmsd = &vmstate_moxie_cpu; cc->vmsd = &vmstate_moxie_cpu;
#endif #endif
cc->disas_set_info = moxie_cpu_disas_set_info; cc->disas_set_info = moxie_cpu_disas_set_info;
cc->tcg_initialize = moxie_translate_init;
} }
static void moxielite_initfn(Object *obj) static void moxielite_initfn(Object *obj)


@ -56,7 +56,6 @@ enum {
static TCGv cpu_pc; static TCGv cpu_pc;
static TCGv cpu_gregs[16]; static TCGv cpu_gregs[16];
static TCGv_env cpu_env;
static TCGv cc_a, cc_b; static TCGv cc_a, cc_b;
#include "exec/gen-icount.h" #include "exec/gen-icount.h"
@ -94,7 +93,6 @@ void moxie_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
void moxie_translate_init(void) void moxie_translate_init(void)
{ {
int i; int i;
static int done_init;
static const char * const gregnames[16] = { static const char * const gregnames[16] = {
"$fp", "$sp", "$r0", "$r1", "$fp", "$sp", "$r0", "$r1",
"$r2", "$r3", "$r4", "$r5", "$r2", "$r3", "$r4", "$r5",
@ -102,11 +100,6 @@ void moxie_translate_init(void)
"$r10", "$r11", "$r12", "$r13" "$r10", "$r11", "$r12", "$r13"
}; };
if (done_init) {
return;
}
cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
tcg_ctx.tcg_env = cpu_env;
cpu_pc = tcg_global_mem_new_i32(cpu_env, cpu_pc = tcg_global_mem_new_i32(cpu_env,
offsetof(CPUMoxieState, pc), "$pc"); offsetof(CPUMoxieState, pc), "$pc");
for (i = 0; i < 16; i++) for (i = 0; i < 16; i++)
@ -118,8 +111,6 @@ void moxie_translate_init(void)
offsetof(CPUMoxieState, cc_a), "cc_a"); offsetof(CPUMoxieState, cc_a), "cc_a");
cc_b = tcg_global_mem_new_i32(cpu_env, cc_b = tcg_global_mem_new_i32(cpu_env,
offsetof(CPUMoxieState, cc_b), "cc_b"); offsetof(CPUMoxieState, cc_b), "cc_b");
done_init = 1;
} }
static inline bool use_goto_tb(DisasContext *ctx, target_ulong dest) static inline bool use_goto_tb(DisasContext *ctx, target_ulong dest)
@ -838,7 +829,7 @@ void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
ctx.singlestep_enabled = 0; ctx.singlestep_enabled = 0;
ctx.bstate = BS_NONE; ctx.bstate = BS_NONE;
num_insns = 0; num_insns = 0;
max_insns = tb->cflags & CF_COUNT_MASK; max_insns = tb_cflags(tb) & CF_COUNT_MASK;
if (max_insns == 0) { if (max_insns == 0) {
max_insns = CF_COUNT_MASK; max_insns = CF_COUNT_MASK;
} }


@ -69,18 +69,12 @@ static void nios2_cpu_initfn(Object *obj)
CPUState *cs = CPU(obj); CPUState *cs = CPU(obj);
Nios2CPU *cpu = NIOS2_CPU(obj); Nios2CPU *cpu = NIOS2_CPU(obj);
CPUNios2State *env = &cpu->env; CPUNios2State *env = &cpu->env;
static bool tcg_initialized;
cs->env_ptr = env; cs->env_ptr = env;
#if !defined(CONFIG_USER_ONLY) #if !defined(CONFIG_USER_ONLY)
mmu_init(env); mmu_init(env);
#endif #endif
if (tcg_enabled() && !tcg_initialized) {
tcg_initialized = true;
nios2_tcg_init();
}
} }
static ObjectClass *nios2_cpu_class_by_name(const char *cpu_model) static ObjectClass *nios2_cpu_class_by_name(const char *cpu_model)
@ -215,6 +209,7 @@ static void nios2_cpu_class_init(ObjectClass *oc, void *data)
cc->gdb_read_register = nios2_cpu_gdb_read_register; cc->gdb_read_register = nios2_cpu_gdb_read_register;
cc->gdb_write_register = nios2_cpu_gdb_write_register; cc->gdb_write_register = nios2_cpu_gdb_write_register;
cc->gdb_num_core_regs = 49; cc->gdb_num_core_regs = 49;
cc->tcg_initialize = nios2_tcg_init;
} }
static const TypeInfo nios2_cpu_type_info = { static const TypeInfo nios2_cpu_type_info = {

View file

@ -789,7 +789,6 @@ static const char * const regnames[] = {
"rpc" "rpc"
}; };
static TCGv_ptr cpu_env;
static TCGv cpu_R[NUM_CORE_REGS]; static TCGv cpu_R[NUM_CORE_REGS];
#include "exec/gen-icount.h" #include "exec/gen-icount.h"
@ -827,7 +826,7 @@ void gen_intermediate_code(CPUState *cs, TranslationBlock *tb)
max_insns = 1; max_insns = 1;
} else { } else {
int page_insns = (TARGET_PAGE_SIZE - (tb->pc & TARGET_PAGE_MASK)) / 4; int page_insns = (TARGET_PAGE_SIZE - (tb->pc & TARGET_PAGE_MASK)) / 4;
max_insns = tb->cflags & CF_COUNT_MASK; max_insns = tb_cflags(tb) & CF_COUNT_MASK;
if (max_insns == 0) { if (max_insns == 0) {
max_insns = CF_COUNT_MASK; max_insns = CF_COUNT_MASK;
} }
@ -854,7 +853,7 @@ void gen_intermediate_code(CPUState *cs, TranslationBlock *tb)
break; break;
} }
if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) { if (num_insns == max_insns && (tb_cflags(tb) & CF_LAST_IO)) {
gen_io_start(); gen_io_start();
} }
@ -871,7 +870,7 @@ void gen_intermediate_code(CPUState *cs, TranslationBlock *tb)
!tcg_op_buf_full() && !tcg_op_buf_full() &&
num_insns < max_insns); num_insns < max_insns);
if (tb->cflags & CF_LAST_IO) { if (tb_cflags(tb) & CF_LAST_IO) {
gen_io_end(); gen_io_end();
} }
@ -947,9 +946,6 @@ void nios2_tcg_init(void)
{ {
int i; int i;
cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
tcg_ctx.tcg_env = cpu_env;
for (i = 0; i < NUM_CORE_REGS; i++) { for (i = 0; i < NUM_CORE_REGS; i++) {
cpu_R[i] = tcg_global_mem_new(cpu_env, cpu_R[i] = tcg_global_mem_new(cpu_env,
offsetof(CPUNios2State, regs[i]), offsetof(CPUNios2State, regs[i]),


@ -86,18 +86,12 @@ static void openrisc_cpu_initfn(Object *obj)
{ {
CPUState *cs = CPU(obj); CPUState *cs = CPU(obj);
OpenRISCCPU *cpu = OPENRISC_CPU(obj); OpenRISCCPU *cpu = OPENRISC_CPU(obj);
static int inited;
cs->env_ptr = &cpu->env; cs->env_ptr = &cpu->env;
#ifndef CONFIG_USER_ONLY #ifndef CONFIG_USER_ONLY
cpu_openrisc_mmu_init(cpu); cpu_openrisc_mmu_init(cpu);
#endif #endif
if (tcg_enabled() && !inited) {
inited = 1;
openrisc_translate_init();
}
} }
/* CPU models */ /* CPU models */
@ -169,6 +163,7 @@ static void openrisc_cpu_class_init(ObjectClass *oc, void *data)
dc->vmsd = &vmstate_openrisc_cpu; dc->vmsd = &vmstate_openrisc_cpu;
#endif #endif
cc->gdb_num_core_regs = 32 + 3; cc->gdb_num_core_regs = 32 + 3;
cc->tcg_initialize = openrisc_translate_init;
} }
static void cpu_register(const OpenRISCCPUInfo *info) static void cpu_register(const OpenRISCCPUInfo *info)

View file

@ -53,7 +53,6 @@ typedef struct DisasContext {
bool singlestep_enabled; bool singlestep_enabled;
} DisasContext; } DisasContext;
static TCGv_env cpu_env;
static TCGv cpu_sr; static TCGv cpu_sr;
static TCGv cpu_R[32]; static TCGv cpu_R[32];
static TCGv cpu_R0; static TCGv cpu_R0;
@ -80,8 +79,6 @@ void openrisc_translate_init(void)
}; };
int i; int i;
cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
tcg_ctx.tcg_env = cpu_env;
cpu_sr = tcg_global_mem_new(cpu_env, cpu_sr = tcg_global_mem_new(cpu_env,
offsetof(CPUOpenRISCState, sr), "sr"); offsetof(CPUOpenRISCState, sr), "sr");
cpu_dflag = tcg_global_mem_new_i32(cpu_env, cpu_dflag = tcg_global_mem_new_i32(cpu_env,
@ -1546,7 +1543,7 @@ void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE; next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
num_insns = 0; num_insns = 0;
max_insns = tb->cflags & CF_COUNT_MASK; max_insns = tb_cflags(tb) & CF_COUNT_MASK;
if (max_insns == 0) { if (max_insns == 0) {
max_insns = CF_COUNT_MASK; max_insns = CF_COUNT_MASK;
@ -1589,7 +1586,7 @@ void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
break; break;
} }
if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) { if (num_insns == max_insns && (tb_cflags(tb) & CF_LAST_IO)) {
gen_io_start(); gen_io_start();
} }
disas_openrisc_insn(dc, cpu); disas_openrisc_insn(dc, cpu);
@ -1612,7 +1609,7 @@ void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
&& (dc->pc < next_page_start) && (dc->pc < next_page_start)
&& num_insns < max_insns); && num_insns < max_insns);
if (tb->cflags & CF_LAST_IO) { if (tb_cflags(tb) & CF_LAST_IO) {
gen_io_end(); gen_io_end();
} }


@ -51,7 +51,6 @@
/* Code translation helpers */ /* Code translation helpers */
/* global register indexes */ /* global register indexes */
static TCGv_env cpu_env;
static char cpu_reg_names[10*3 + 22*4 /* GPR */ static char cpu_reg_names[10*3 + 22*4 /* GPR */
+ 10*4 + 22*5 /* SPE GPRh */ + 10*4 + 22*5 /* SPE GPRh */
+ 10*4 + 22*5 /* FPR */ + 10*4 + 22*5 /* FPR */
@ -84,13 +83,6 @@ void ppc_translate_init(void)
int i; int i;
char* p; char* p;
size_t cpu_reg_names_size; size_t cpu_reg_names_size;
static int done_init = 0;
if (done_init)
return;
cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
tcg_ctx.tcg_env = cpu_env;
p = cpu_reg_names; p = cpu_reg_names;
cpu_reg_names_size = sizeof(cpu_reg_names); cpu_reg_names_size = sizeof(cpu_reg_names);
@ -191,8 +183,6 @@ void ppc_translate_init(void)
cpu_access_type = tcg_global_mem_new_i32(cpu_env, cpu_access_type = tcg_global_mem_new_i32(cpu_env,
offsetof(CPUPPCState, access_type), "access_type"); offsetof(CPUPPCState, access_type), "access_type");
done_init = 1;
} }
/* internal defines */ /* internal defines */
@ -902,7 +892,7 @@ static inline void gen_op_arith_add(DisasContext *ctx, TCGv ret, TCGv arg1,
gen_set_Rc0(ctx, t0); gen_set_Rc0(ctx, t0);
} }
if (!TCGV_EQUAL(t0, ret)) { if (t0 != ret) {
tcg_gen_mov_tl(ret, t0); tcg_gen_mov_tl(ret, t0);
tcg_temp_free(t0); tcg_temp_free(t0);
} }
@ -1438,7 +1428,7 @@ static inline void gen_op_arith_subf(DisasContext *ctx, TCGv ret, TCGv arg1,
gen_set_Rc0(ctx, t0); gen_set_Rc0(ctx, t0);
} }
if (!TCGV_EQUAL(t0, ret)) { if (t0 != ret) {
tcg_gen_mov_tl(ret, t0); tcg_gen_mov_tl(ret, t0);
tcg_temp_free(t0); tcg_temp_free(t0);
} }
@ -7279,7 +7269,7 @@ void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
msr_se = 1; msr_se = 1;
#endif #endif
num_insns = 0; num_insns = 0;
max_insns = tb->cflags & CF_COUNT_MASK; max_insns = tb_cflags(tb) & CF_COUNT_MASK;
if (max_insns == 0) { if (max_insns == 0) {
max_insns = CF_COUNT_MASK; max_insns = CF_COUNT_MASK;
} }
@ -7307,7 +7297,7 @@ void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
LOG_DISAS("----------------\n"); LOG_DISAS("----------------\n");
LOG_DISAS("nip=" TARGET_FMT_lx " super=%d ir=%d\n", LOG_DISAS("nip=" TARGET_FMT_lx " super=%d ir=%d\n",
ctx.nip, ctx.mem_idx, (int)msr_ir); ctx.nip, ctx.mem_idx, (int)msr_ir);
if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) if (num_insns == max_insns && (tb_cflags(tb) & CF_LAST_IO))
gen_io_start(); gen_io_start();
if (unlikely(need_byteswap(&ctx))) { if (unlikely(need_byteswap(&ctx))) {
ctx.opcode = bswap32(cpu_ldl_code(env, ctx.nip)); ctx.opcode = bswap32(cpu_ldl_code(env, ctx.nip));
@ -7388,7 +7378,7 @@ void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
exit(1); exit(1);
} }
} }
if (tb->cflags & CF_LAST_IO) if (tb_cflags(tb) & CF_LAST_IO)
gen_io_end(); gen_io_end();
if (ctx.exception == POWERPC_EXCP_NONE) { if (ctx.exception == POWERPC_EXCP_NONE) {
gen_goto_tb(&ctx, 0, ctx.nip); gen_goto_tb(&ctx, 0, ctx.nip);


@ -176,11 +176,11 @@ static void spr_write_ureg(DisasContext *ctx, int sprn, int gprn)
#if !defined(CONFIG_USER_ONLY) #if !defined(CONFIG_USER_ONLY)
static void spr_read_decr(DisasContext *ctx, int gprn, int sprn) static void spr_read_decr(DisasContext *ctx, int gprn, int sprn)
{ {
if (ctx->tb->cflags & CF_USE_ICOUNT) { if (tb_cflags(ctx->tb) & CF_USE_ICOUNT) {
gen_io_start(); gen_io_start();
} }
gen_helper_load_decr(cpu_gpr[gprn], cpu_env); gen_helper_load_decr(cpu_gpr[gprn], cpu_env);
if (ctx->tb->cflags & CF_USE_ICOUNT) { if (tb_cflags(ctx->tb) & CF_USE_ICOUNT) {
gen_io_end(); gen_io_end();
gen_stop_exception(ctx); gen_stop_exception(ctx);
} }
@ -188,11 +188,11 @@ static void spr_read_decr(DisasContext *ctx, int gprn, int sprn)
static void spr_write_decr(DisasContext *ctx, int sprn, int gprn) static void spr_write_decr(DisasContext *ctx, int sprn, int gprn)
{ {
if (ctx->tb->cflags & CF_USE_ICOUNT) { if (tb_cflags(ctx->tb) & CF_USE_ICOUNT) {
gen_io_start(); gen_io_start();
} }
gen_helper_store_decr(cpu_env, cpu_gpr[gprn]); gen_helper_store_decr(cpu_env, cpu_gpr[gprn]);
if (ctx->tb->cflags & CF_USE_ICOUNT) { if (tb_cflags(ctx->tb) & CF_USE_ICOUNT) {
gen_io_end(); gen_io_end();
gen_stop_exception(ctx); gen_stop_exception(ctx);
} }
@ -203,11 +203,11 @@ static void spr_write_decr(DisasContext *ctx, int sprn, int gprn)
/* Time base */ /* Time base */
static void spr_read_tbl(DisasContext *ctx, int gprn, int sprn) static void spr_read_tbl(DisasContext *ctx, int gprn, int sprn)
{ {
if (ctx->tb->cflags & CF_USE_ICOUNT) { if (tb_cflags(ctx->tb) & CF_USE_ICOUNT) {
gen_io_start(); gen_io_start();
} }
gen_helper_load_tbl(cpu_gpr[gprn], cpu_env); gen_helper_load_tbl(cpu_gpr[gprn], cpu_env);
if (ctx->tb->cflags & CF_USE_ICOUNT) { if (tb_cflags(ctx->tb) & CF_USE_ICOUNT) {
gen_io_end(); gen_io_end();
gen_stop_exception(ctx); gen_stop_exception(ctx);
} }
@ -215,11 +215,11 @@ static void spr_read_tbl(DisasContext *ctx, int gprn, int sprn)
static void spr_read_tbu(DisasContext *ctx, int gprn, int sprn) static void spr_read_tbu(DisasContext *ctx, int gprn, int sprn)
{ {
if (ctx->tb->cflags & CF_USE_ICOUNT) { if (tb_cflags(ctx->tb) & CF_USE_ICOUNT) {
gen_io_start(); gen_io_start();
} }
gen_helper_load_tbu(cpu_gpr[gprn], cpu_env); gen_helper_load_tbu(cpu_gpr[gprn], cpu_env);
if (ctx->tb->cflags & CF_USE_ICOUNT) { if (tb_cflags(ctx->tb) & CF_USE_ICOUNT) {
gen_io_end(); gen_io_end();
gen_stop_exception(ctx); gen_stop_exception(ctx);
} }
@ -240,11 +240,11 @@ static void spr_read_atbu(DisasContext *ctx, int gprn, int sprn)
#if !defined(CONFIG_USER_ONLY) #if !defined(CONFIG_USER_ONLY)
static void spr_write_tbl(DisasContext *ctx, int sprn, int gprn) static void spr_write_tbl(DisasContext *ctx, int sprn, int gprn)
{ {
if (ctx->tb->cflags & CF_USE_ICOUNT) { if (tb_cflags(ctx->tb) & CF_USE_ICOUNT) {
gen_io_start(); gen_io_start();
} }
gen_helper_store_tbl(cpu_env, cpu_gpr[gprn]); gen_helper_store_tbl(cpu_env, cpu_gpr[gprn]);
if (ctx->tb->cflags & CF_USE_ICOUNT) { if (tb_cflags(ctx->tb) & CF_USE_ICOUNT) {
gen_io_end(); gen_io_end();
gen_stop_exception(ctx); gen_stop_exception(ctx);
} }
@ -252,11 +252,11 @@ static void spr_write_tbl(DisasContext *ctx, int sprn, int gprn)
static void spr_write_tbu(DisasContext *ctx, int sprn, int gprn) static void spr_write_tbu(DisasContext *ctx, int sprn, int gprn)
{ {
if (ctx->tb->cflags & CF_USE_ICOUNT) { if (tb_cflags(ctx->tb) & CF_USE_ICOUNT) {
gen_io_start(); gen_io_start();
} }
gen_helper_store_tbu(cpu_env, cpu_gpr[gprn]); gen_helper_store_tbu(cpu_env, cpu_gpr[gprn]);
if (ctx->tb->cflags & CF_USE_ICOUNT) { if (tb_cflags(ctx->tb) & CF_USE_ICOUNT) {
gen_io_end(); gen_io_end();
gen_stop_exception(ctx); gen_stop_exception(ctx);
} }
@ -284,11 +284,11 @@ static void spr_read_purr(DisasContext *ctx, int gprn, int sprn)
/* HDECR */ /* HDECR */
static void spr_read_hdecr(DisasContext *ctx, int gprn, int sprn) static void spr_read_hdecr(DisasContext *ctx, int gprn, int sprn)
{ {
if (ctx->tb->cflags & CF_USE_ICOUNT) { if (tb_cflags(ctx->tb) & CF_USE_ICOUNT) {
gen_io_start(); gen_io_start();
} }
gen_helper_load_hdecr(cpu_gpr[gprn], cpu_env); gen_helper_load_hdecr(cpu_gpr[gprn], cpu_env);
if (ctx->tb->cflags & CF_USE_ICOUNT) { if (tb_cflags(ctx->tb) & CF_USE_ICOUNT) {
gen_io_end(); gen_io_end();
gen_stop_exception(ctx); gen_stop_exception(ctx);
} }
@ -296,11 +296,11 @@ static void spr_read_hdecr(DisasContext *ctx, int gprn, int sprn)
static void spr_write_hdecr(DisasContext *ctx, int sprn, int gprn) static void spr_write_hdecr(DisasContext *ctx, int sprn, int gprn)
{ {
if (ctx->tb->cflags & CF_USE_ICOUNT) { if (tb_cflags(ctx->tb) & CF_USE_ICOUNT) {
gen_io_start(); gen_io_start();
} }
gen_helper_store_hdecr(cpu_env, cpu_gpr[gprn]); gen_helper_store_hdecr(cpu_env, cpu_gpr[gprn]);
if (ctx->tb->cflags & CF_USE_ICOUNT) { if (tb_cflags(ctx->tb) & CF_USE_ICOUNT) {
gen_io_end(); gen_io_end();
gen_stop_exception(ctx); gen_stop_exception(ctx);
} }
@ -10499,10 +10499,6 @@ static void ppc_cpu_initfn(Object *obj)
env->sps = (env->mmu_model & POWERPC_MMU_64K) ? defsps_64k : defsps_4k; env->sps = (env->mmu_model & POWERPC_MMU_64K) ? defsps_64k : defsps_4k;
} }
#endif /* defined(TARGET_PPC64) */ #endif /* defined(TARGET_PPC64) */
if (tcg_enabled()) {
ppc_translate_init();
}
} }
static bool ppc_pvr_match_default(PowerPCCPUClass *pcc, uint32_t pvr) static bool ppc_pvr_match_default(PowerPCCPUClass *pcc, uint32_t pvr)
@ -10582,6 +10578,7 @@ static void ppc_cpu_class_init(ObjectClass *oc, void *data)
#ifndef CONFIG_USER_ONLY #ifndef CONFIG_USER_ONLY
cc->virtio_is_big_endian = ppc_cpu_is_big_endian; cc->virtio_is_big_endian = ppc_cpu_is_big_endian;
#endif #endif
cc->tcg_initialize = ppc_translate_init;
dc->fw_name = "PowerPC,UNKNOWN"; dc->fw_name = "PowerPC,UNKNOWN";
} }


@ -241,7 +241,6 @@ static void s390_cpu_initfn(Object *obj)
CPUState *cs = CPU(obj); CPUState *cs = CPU(obj);
S390CPU *cpu = S390_CPU(obj); S390CPU *cpu = S390_CPU(obj);
CPUS390XState *env = &cpu->env; CPUS390XState *env = &cpu->env;
static bool inited;
#if !defined(CONFIG_USER_ONLY) #if !defined(CONFIG_USER_ONLY)
struct tm tm; struct tm tm;
#endif #endif
@ -259,11 +258,6 @@ static void s390_cpu_initfn(Object *obj)
env->cpu_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, s390x_cpu_timer, cpu); env->cpu_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, s390x_cpu_timer, cpu);
s390_cpu_set_state(CPU_STATE_STOPPED, cpu); s390_cpu_set_state(CPU_STATE_STOPPED, cpu);
#endif #endif
if (tcg_enabled() && !inited) {
inited = true;
s390x_translate_init();
}
} }
static void s390_cpu_finalize(Object *obj) static void s390_cpu_finalize(Object *obj)
@ -503,6 +497,7 @@ static void s390_cpu_class_init(ObjectClass *oc, void *data)
#endif #endif
#endif #endif
cc->disas_set_info = s390_cpu_disas_set_info; cc->disas_set_info = s390_cpu_disas_set_info;
cc->tcg_initialize = s390x_translate_init;
cc->gdb_num_core_regs = S390_NUM_CORE_REGS; cc->gdb_num_core_regs = S390_NUM_CORE_REGS;
cc->gdb_core_xml_file = "s390x-core64.xml"; cc->gdb_core_xml_file = "s390x-core64.xml";


@ -34,7 +34,9 @@ DEF_HELPER_3(celgb, i64, env, i64, i32)
DEF_HELPER_3(cdlgb, i64, env, i64, i32) DEF_HELPER_3(cdlgb, i64, env, i64, i32)
DEF_HELPER_3(cxlgb, i64, env, i64, i32) DEF_HELPER_3(cxlgb, i64, env, i64, i32)
DEF_HELPER_4(cdsg, void, env, i64, i32, i32) DEF_HELPER_4(cdsg, void, env, i64, i32, i32)
DEF_HELPER_4(cdsg_parallel, void, env, i64, i32, i32)
DEF_HELPER_4(csst, i32, env, i32, i64, i64) DEF_HELPER_4(csst, i32, env, i32, i64, i64)
DEF_HELPER_4(csst_parallel, i32, env, i32, i64, i64)
DEF_HELPER_FLAGS_3(aeb, TCG_CALL_NO_WG, i64, env, i64, i64) DEF_HELPER_FLAGS_3(aeb, TCG_CALL_NO_WG, i64, env, i64, i64)
DEF_HELPER_FLAGS_3(adb, TCG_CALL_NO_WG, i64, env, i64, i64) DEF_HELPER_FLAGS_3(adb, TCG_CALL_NO_WG, i64, env, i64, i64)
DEF_HELPER_FLAGS_5(axb, TCG_CALL_NO_WG, i64, env, i64, i64, i64, i64) DEF_HELPER_FLAGS_5(axb, TCG_CALL_NO_WG, i64, env, i64, i64, i64, i64)
@ -106,7 +108,9 @@ DEF_HELPER_FLAGS_2(sfas, TCG_CALL_NO_WG, void, env, i64)
DEF_HELPER_FLAGS_1(popcnt, TCG_CALL_NO_RWG_SE, i64, i64) DEF_HELPER_FLAGS_1(popcnt, TCG_CALL_NO_RWG_SE, i64, i64)
DEF_HELPER_2(stfle, i32, env, i64) DEF_HELPER_2(stfle, i32, env, i64)
DEF_HELPER_FLAGS_2(lpq, TCG_CALL_NO_WG, i64, env, i64) DEF_HELPER_FLAGS_2(lpq, TCG_CALL_NO_WG, i64, env, i64)
DEF_HELPER_FLAGS_2(lpq_parallel, TCG_CALL_NO_WG, i64, env, i64)
DEF_HELPER_FLAGS_4(stpq, TCG_CALL_NO_WG, void, env, i64, i64, i64) DEF_HELPER_FLAGS_4(stpq, TCG_CALL_NO_WG, void, env, i64, i64, i64)
DEF_HELPER_FLAGS_4(stpq_parallel, TCG_CALL_NO_WG, void, env, i64, i64, i64)
DEF_HELPER_4(mvcos, i32, env, i64, i64, i64) DEF_HELPER_4(mvcos, i32, env, i64, i64, i64)
DEF_HELPER_4(cu12, i32, env, i32, i32, i32) DEF_HELPER_4(cu12, i32, env, i32, i32, i32)
DEF_HELPER_4(cu14, i32, env, i32, i32, i32) DEF_HELPER_4(cu14, i32, env, i32, i32, i32)


@ -1361,8 +1361,8 @@ uint32_t HELPER(trXX)(CPUS390XState *env, uint32_t r1, uint32_t r2,
return cc; return cc;
} }
void HELPER(cdsg)(CPUS390XState *env, uint64_t addr, static void do_cdsg(CPUS390XState *env, uint64_t addr,
uint32_t r1, uint32_t r3) uint32_t r1, uint32_t r3, bool parallel)
{ {
uintptr_t ra = GETPC(); uintptr_t ra = GETPC();
Int128 cmpv = int128_make128(env->regs[r1 + 1], env->regs[r1]); Int128 cmpv = int128_make128(env->regs[r1 + 1], env->regs[r1]);
@ -1370,7 +1370,7 @@ void HELPER(cdsg)(CPUS390XState *env, uint64_t addr,
Int128 oldv; Int128 oldv;
bool fail; bool fail;
if (parallel_cpus) { if (parallel) {
#ifndef CONFIG_ATOMIC128 #ifndef CONFIG_ATOMIC128
cpu_loop_exit_atomic(ENV_GET_CPU(env), ra); cpu_loop_exit_atomic(ENV_GET_CPU(env), ra);
#else #else
@ -1402,7 +1402,20 @@ void HELPER(cdsg)(CPUS390XState *env, uint64_t addr,
env->regs[r1 + 1] = int128_getlo(oldv); env->regs[r1 + 1] = int128_getlo(oldv);
} }
uint32_t HELPER(csst)(CPUS390XState *env, uint32_t r3, uint64_t a1, uint64_t a2)
void HELPER(cdsg)(CPUS390XState *env, uint64_t addr,
uint32_t r1, uint32_t r3)
{
do_cdsg(env, addr, r1, r3, false);
}
void HELPER(cdsg_parallel)(CPUS390XState *env, uint64_t addr,
uint32_t r1, uint32_t r3)
{
do_cdsg(env, addr, r1, r3, true);
}
static uint32_t do_csst(CPUS390XState *env, uint32_t r3, uint64_t a1,
uint64_t a2, bool parallel)
{ {
#if !defined(CONFIG_USER_ONLY) || defined(CONFIG_ATOMIC128) #if !defined(CONFIG_USER_ONLY) || defined(CONFIG_ATOMIC128)
uint32_t mem_idx = cpu_mmu_index(env, false); uint32_t mem_idx = cpu_mmu_index(env, false);
@ -1438,7 +1451,7 @@ uint32_t HELPER(csst)(CPUS390XState *env, uint32_t r3, uint64_t a1, uint64_t a2)
the complete operation is not. Therefore we do not need to assert serial the complete operation is not. Therefore we do not need to assert serial
context in order to implement this. That said, restart early if we can't context in order to implement this. That said, restart early if we can't
support either operation that is supposed to be atomic. */ support either operation that is supposed to be atomic. */
if (parallel_cpus) { if (parallel) {
int mask = 0; int mask = 0;
#if !defined(CONFIG_ATOMIC64) #if !defined(CONFIG_ATOMIC64)
mask = -8; mask = -8;
@ -1462,7 +1475,7 @@ uint32_t HELPER(csst)(CPUS390XState *env, uint32_t r3, uint64_t a1, uint64_t a2)
uint32_t cv = env->regs[r3]; uint32_t cv = env->regs[r3];
uint32_t ov; uint32_t ov;
if (parallel_cpus) { if (parallel) {
#ifdef CONFIG_USER_ONLY #ifdef CONFIG_USER_ONLY
uint32_t *haddr = g2h(a1); uint32_t *haddr = g2h(a1);
ov = atomic_cmpxchg__nocheck(haddr, cv, nv); ov = atomic_cmpxchg__nocheck(haddr, cv, nv);
@ -1485,7 +1498,7 @@ uint32_t HELPER(csst)(CPUS390XState *env, uint32_t r3, uint64_t a1, uint64_t a2)
uint64_t cv = env->regs[r3]; uint64_t cv = env->regs[r3];
uint64_t ov; uint64_t ov;
if (parallel_cpus) { if (parallel) {
#ifdef CONFIG_ATOMIC64 #ifdef CONFIG_ATOMIC64
# ifdef CONFIG_USER_ONLY # ifdef CONFIG_USER_ONLY
uint64_t *haddr = g2h(a1); uint64_t *haddr = g2h(a1);
@ -1495,7 +1508,7 @@ uint32_t HELPER(csst)(CPUS390XState *env, uint32_t r3, uint64_t a1, uint64_t a2)
ov = helper_atomic_cmpxchgq_be_mmu(env, a1, cv, nv, oi, ra); ov = helper_atomic_cmpxchgq_be_mmu(env, a1, cv, nv, oi, ra);
# endif # endif
#else #else
/* Note that we asserted !parallel_cpus above. */ /* Note that we asserted !parallel above. */
g_assert_not_reached(); g_assert_not_reached();
#endif #endif
} else { } else {
@ -1515,13 +1528,13 @@ uint32_t HELPER(csst)(CPUS390XState *env, uint32_t r3, uint64_t a1, uint64_t a2)
Int128 cv = int128_make128(env->regs[r3 + 1], env->regs[r3]); Int128 cv = int128_make128(env->regs[r3 + 1], env->regs[r3]);
Int128 ov; Int128 ov;
if (parallel_cpus) { if (parallel) {
#ifdef CONFIG_ATOMIC128 #ifdef CONFIG_ATOMIC128
TCGMemOpIdx oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx); TCGMemOpIdx oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx);
ov = helper_atomic_cmpxchgo_be_mmu(env, a1, cv, nv, oi, ra); ov = helper_atomic_cmpxchgo_be_mmu(env, a1, cv, nv, oi, ra);
cc = !int128_eq(ov, cv); cc = !int128_eq(ov, cv);
#else #else
/* Note that we asserted !parallel_cpus above. */ /* Note that we asserted !parallel above. */
g_assert_not_reached(); g_assert_not_reached();
#endif #endif
} else { } else {
@ -1565,13 +1578,13 @@ uint32_t HELPER(csst)(CPUS390XState *env, uint32_t r3, uint64_t a1, uint64_t a2)
cpu_stq_data_ra(env, a2, svh, ra); cpu_stq_data_ra(env, a2, svh, ra);
break; break;
case 4: case 4:
if (parallel_cpus) { if (parallel) {
#ifdef CONFIG_ATOMIC128 #ifdef CONFIG_ATOMIC128
TCGMemOpIdx oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx); TCGMemOpIdx oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx);
Int128 sv = int128_make128(svl, svh); Int128 sv = int128_make128(svl, svh);
helper_atomic_sto_be_mmu(env, a2, sv, oi, ra); helper_atomic_sto_be_mmu(env, a2, sv, oi, ra);
#else #else
/* Note that we asserted !parallel_cpus above. */ /* Note that we asserted !parallel above. */
g_assert_not_reached(); g_assert_not_reached();
#endif #endif
} else { } else {
@ -1592,6 +1605,17 @@ uint32_t HELPER(csst)(CPUS390XState *env, uint32_t r3, uint64_t a1, uint64_t a2)
g_assert_not_reached(); g_assert_not_reached();
} }
uint32_t HELPER(csst)(CPUS390XState *env, uint32_t r3, uint64_t a1, uint64_t a2)
{
return do_csst(env, r3, a1, a2, false);
}
uint32_t HELPER(csst_parallel)(CPUS390XState *env, uint32_t r3, uint64_t a1,
uint64_t a2)
{
return do_csst(env, r3, a1, a2, true);
}
#if !defined(CONFIG_USER_ONLY) #if !defined(CONFIG_USER_ONLY)
void HELPER(lctlg)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3) void HELPER(lctlg)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
{ {
@ -2011,12 +2035,12 @@ uint64_t HELPER(lra)(CPUS390XState *env, uint64_t addr)
#endif #endif
/* load pair from quadword */ /* load pair from quadword */
uint64_t HELPER(lpq)(CPUS390XState *env, uint64_t addr) static uint64_t do_lpq(CPUS390XState *env, uint64_t addr, bool parallel)
{ {
uintptr_t ra = GETPC(); uintptr_t ra = GETPC();
uint64_t hi, lo; uint64_t hi, lo;
if (parallel_cpus) { if (parallel) {
#ifndef CONFIG_ATOMIC128 #ifndef CONFIG_ATOMIC128
cpu_loop_exit_atomic(ENV_GET_CPU(env), ra); cpu_loop_exit_atomic(ENV_GET_CPU(env), ra);
#else #else
@ -2037,13 +2061,23 @@ uint64_t HELPER(lpq)(CPUS390XState *env, uint64_t addr)
return hi; return hi;
} }
uint64_t HELPER(lpq)(CPUS390XState *env, uint64_t addr)
{
return do_lpq(env, addr, false);
}
uint64_t HELPER(lpq_parallel)(CPUS390XState *env, uint64_t addr)
{
return do_lpq(env, addr, true);
}
/* store pair to quadword */ /* store pair to quadword */
void HELPER(stpq)(CPUS390XState *env, uint64_t addr, static void do_stpq(CPUS390XState *env, uint64_t addr,
uint64_t low, uint64_t high) uint64_t low, uint64_t high, bool parallel)
{ {
uintptr_t ra = GETPC(); uintptr_t ra = GETPC();
if (parallel_cpus) { if (parallel) {
#ifndef CONFIG_ATOMIC128 #ifndef CONFIG_ATOMIC128
cpu_loop_exit_atomic(ENV_GET_CPU(env), ra); cpu_loop_exit_atomic(ENV_GET_CPU(env), ra);
#else #else
@ -2061,6 +2095,18 @@ void HELPER(stpq)(CPUS390XState *env, uint64_t addr,
} }
} }
void HELPER(stpq)(CPUS390XState *env, uint64_t addr,
uint64_t low, uint64_t high)
{
do_stpq(env, addr, low, high, false);
}
void HELPER(stpq_parallel)(CPUS390XState *env, uint64_t addr,
uint64_t low, uint64_t high)
{
do_stpq(env, addr, low, high, true);
}
/* Execute instruction. This instruction executes an insn modified with /* Execute instruction. This instruction executes an insn modified with
the contents of r1. It does not change the executed instruction in memory; the contents of r1. It does not change the executed instruction in memory;
it does not change the program counter. it does not change the program counter.


@ -37,10 +37,6 @@
#include "qemu/log.h" #include "qemu/log.h"
#include "qemu/host-utils.h" #include "qemu/host-utils.h"
#include "exec/cpu_ldst.h" #include "exec/cpu_ldst.h"
/* global register indexes */
static TCGv_env cpu_env;
#include "exec/gen-icount.h" #include "exec/gen-icount.h"
#include "exec/helper-proto.h" #include "exec/helper-proto.h"
#include "exec/helper-gen.h" #include "exec/helper-gen.h"
@ -112,8 +108,6 @@ void s390x_translate_init(void)
{ {
int i; int i;
cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
tcg_ctx.tcg_env = cpu_env;
psw_addr = tcg_global_mem_new_i64(cpu_env, psw_addr = tcg_global_mem_new_i64(cpu_env,
offsetof(CPUS390XState, psw.addr), offsetof(CPUS390XState, psw.addr),
"psw_addr"); "psw_addr");
@ -554,7 +548,7 @@ static void gen_op_calc_cc(DisasContext *s)
static bool use_exit_tb(DisasContext *s) static bool use_exit_tb(DisasContext *s)
{ {
return (s->singlestep_enabled || return (s->singlestep_enabled ||
(s->tb->cflags & CF_LAST_IO) || (tb_cflags(s->tb) & CF_LAST_IO) ||
(s->tb->flags & FLAG_MASK_PER)); (s->tb->flags & FLAG_MASK_PER));
} }
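
use_exit_tb() suppresses TB chaining precisely when the main loop must regain control between TBs: single-stepping, the icount accounting that follows a CF_LAST_IO block, and PER tracing. A sketch of the branch dispatch such a predicate typically guards (simplified, not a verbatim quote of the s390x code):

    /* Sketch: branch to dest, avoiding goto_tb chaining whenever
     * control has to return to cpu_exec() between TBs. */
    if (use_exit_tb(s) || !use_goto_tb(s, dest)) {
        tcg_gen_movi_i64(psw_addr, dest);
        tcg_gen_exit_tb(0);                 /* unchained exit */
    } else {
        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(psw_addr, dest);
        tcg_gen_exit_tb((uintptr_t)s->tb);  /* chainable exit */
    }
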
@ -1966,7 +1960,11 @@ static ExitStatus op_cdsg(DisasContext *s, DisasOps *o)
addr = get_address(s, 0, b2, d2); addr = get_address(s, 0, b2, d2);
t_r1 = tcg_const_i32(r1); t_r1 = tcg_const_i32(r1);
t_r3 = tcg_const_i32(r3); t_r3 = tcg_const_i32(r3);
gen_helper_cdsg(cpu_env, addr, t_r1, t_r3); if (tb_cflags(s->tb) & CF_PARALLEL) {
gen_helper_cdsg_parallel(cpu_env, addr, t_r1, t_r3);
} else {
gen_helper_cdsg(cpu_env, addr, t_r1, t_r3);
}
tcg_temp_free_i64(addr); tcg_temp_free_i64(addr);
tcg_temp_free_i32(t_r1); tcg_temp_free_i32(t_r1);
tcg_temp_free_i32(t_r3); tcg_temp_free_i32(t_r3);
@ -1980,7 +1978,11 @@ static ExitStatus op_csst(DisasContext *s, DisasOps *o)
int r3 = get_field(s->fields, r3); int r3 = get_field(s->fields, r3);
TCGv_i32 t_r3 = tcg_const_i32(r3); TCGv_i32 t_r3 = tcg_const_i32(r3);
gen_helper_csst(cc_op, cpu_env, t_r3, o->in1, o->in2); if (tb_cflags(s->tb) & CF_PARALLEL) {
gen_helper_csst_parallel(cc_op, cpu_env, t_r3, o->in1, o->in2);
} else {
gen_helper_csst(cc_op, cpu_env, t_r3, o->in1, o->in2);
}
tcg_temp_free_i32(t_r3); tcg_temp_free_i32(t_r3);
set_cc_static(s); set_cc_static(s);
@ -2939,7 +2941,7 @@ static ExitStatus op_lpd(DisasContext *s, DisasOps *o)
TCGMemOp mop = s->insn->data; TCGMemOp mop = s->insn->data;
/* In a parallel context, stop the world and single step. */ /* In a parallel context, stop the world and single step. */
if (parallel_cpus) { if (tb_cflags(s->tb) & CF_PARALLEL) {
potential_page_fault(s); potential_page_fault(s);
gen_exception(EXCP_ATOMIC); gen_exception(EXCP_ATOMIC);
return EXIT_NORETURN; return EXIT_NORETURN;
@ -2960,7 +2962,11 @@ static ExitStatus op_lpd(DisasContext *s, DisasOps *o)
static ExitStatus op_lpq(DisasContext *s, DisasOps *o) static ExitStatus op_lpq(DisasContext *s, DisasOps *o)
{ {
gen_helper_lpq(o->out, cpu_env, o->in2); if (tb_cflags(s->tb) & CF_PARALLEL) {
gen_helper_lpq_parallel(o->out, cpu_env, o->in2);
} else {
gen_helper_lpq(o->out, cpu_env, o->in2);
}
return_low128(o->out2); return_low128(o->out2);
return NO_EXIT; return NO_EXIT;
} }
@ -4281,7 +4287,11 @@ static ExitStatus op_stmh(DisasContext *s, DisasOps *o)
static ExitStatus op_stpq(DisasContext *s, DisasOps *o) static ExitStatus op_stpq(DisasContext *s, DisasOps *o)
{ {
gen_helper_stpq(cpu_env, o->in2, o->out2, o->out); if (tb_cflags(s->tb) & CF_PARALLEL) {
gen_helper_stpq_parallel(cpu_env, o->in2, o->out2, o->out);
} else {
gen_helper_stpq(cpu_env, o->in2, o->out2, o->out);
}
return NO_EXIT; return NO_EXIT;
} }
@ -5883,7 +5893,7 @@ void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE; next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
num_insns = 0; num_insns = 0;
max_insns = tb->cflags & CF_COUNT_MASK; max_insns = tb_cflags(tb) & CF_COUNT_MASK;
if (max_insns == 0) { if (max_insns == 0) {
max_insns = CF_COUNT_MASK; max_insns = CF_COUNT_MASK;
} }
@ -5908,7 +5918,7 @@ void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
break; break;
} }
if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) { if (num_insns == max_insns && (tb_cflags(tb) & CF_LAST_IO)) {
gen_io_start(); gen_io_start();
} }
@ -5927,7 +5937,7 @@ void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
} }
} while (status == NO_EXIT); } while (status == NO_EXIT);
if (tb->cflags & CF_LAST_IO) { if (tb_cflags(tb) & CF_LAST_IO) {
gen_io_end(); gen_io_end();
} }


@ -258,10 +258,6 @@ static void superh_cpu_initfn(Object *obj)
cs->env_ptr = env; cs->env_ptr = env;
env->movcal_backup_tail = &(env->movcal_backup); env->movcal_backup_tail = &(env->movcal_backup);
if (tcg_enabled()) {
sh4_translate_init();
}
} }
static const VMStateDescription vmstate_sh_cpu = { static const VMStateDescription vmstate_sh_cpu = {
@ -297,6 +293,7 @@ static void superh_cpu_class_init(ObjectClass *oc, void *data)
cc->get_phys_page_debug = superh_cpu_get_phys_page_debug; cc->get_phys_page_debug = superh_cpu_get_phys_page_debug;
#endif #endif
cc->disas_set_info = superh_cpu_disas_set_info; cc->disas_set_info = superh_cpu_disas_set_info;
cc->tcg_initialize = sh4_translate_init;
cc->gdb_num_core_regs = 59; cc->gdb_num_core_regs = 59;


@ -65,7 +65,6 @@ enum {
}; };
/* global register indexes */ /* global register indexes */
static TCGv_env cpu_env;
static TCGv cpu_gregs[32]; static TCGv cpu_gregs[32];
static TCGv cpu_sr, cpu_sr_m, cpu_sr_q, cpu_sr_t; static TCGv cpu_sr, cpu_sr_m, cpu_sr_q, cpu_sr_t;
static TCGv cpu_pc, cpu_ssr, cpu_spc, cpu_gbr; static TCGv cpu_pc, cpu_ssr, cpu_spc, cpu_gbr;
@ -81,7 +80,6 @@ static TCGv cpu_flags, cpu_delayed_pc, cpu_delayed_cond;
void sh4_translate_init(void) void sh4_translate_init(void)
{ {
int i; int i;
static int done_init = 0;
static const char * const gregnames[24] = { static const char * const gregnames[24] = {
"R0_BANK0", "R1_BANK0", "R2_BANK0", "R3_BANK0", "R0_BANK0", "R1_BANK0", "R2_BANK0", "R3_BANK0",
"R4_BANK0", "R5_BANK0", "R6_BANK0", "R7_BANK0", "R4_BANK0", "R5_BANK0", "R6_BANK0", "R7_BANK0",
@ -100,13 +98,6 @@ void sh4_translate_init(void)
"FPR12_BANK1", "FPR13_BANK1", "FPR14_BANK1", "FPR15_BANK1", "FPR12_BANK1", "FPR13_BANK1", "FPR14_BANK1", "FPR15_BANK1",
}; };
if (done_init) {
return;
}
cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
tcg_ctx.tcg_env = cpu_env;
for (i = 0; i < 24; i++) { for (i = 0; i < 24; i++) {
cpu_gregs[i] = tcg_global_mem_new_i32(cpu_env, cpu_gregs[i] = tcg_global_mem_new_i32(cpu_env,
offsetof(CPUSH4State, gregs[i]), offsetof(CPUSH4State, gregs[i]),
@ -163,8 +154,6 @@ void sh4_translate_init(void)
cpu_fregs[i] = tcg_global_mem_new_i32(cpu_env, cpu_fregs[i] = tcg_global_mem_new_i32(cpu_env,
offsetof(CPUSH4State, fregs[i]), offsetof(CPUSH4State, fregs[i]),
fregnames[i]); fregnames[i]);
done_init = 1;
} }
void superh_cpu_dump_state(CPUState *cs, FILE *f, void superh_cpu_dump_state(CPUState *cs, FILE *f,
@ -528,7 +517,7 @@ static void _decode_opc(DisasContext * ctx)
/* Detect the start of a gUSA region. If so, update envflags /* Detect the start of a gUSA region. If so, update envflags
and end the TB. This will allow us to see the end of the and end the TB. This will allow us to see the end of the
region (stored in R0) in the next TB. */ region (stored in R0) in the next TB. */
if (B11_8 == 15 && B7_0s < 0 && parallel_cpus) { if (B11_8 == 15 && B7_0s < 0 && (tb_cflags(ctx->tb) & CF_PARALLEL)) {
ctx->envflags = deposit32(ctx->envflags, GUSA_SHIFT, 8, B7_0s); ctx->envflags = deposit32(ctx->envflags, GUSA_SHIFT, 8, B7_0s);
ctx->bstate = BS_STOP; ctx->bstate = BS_STOP;
} }
@ -2255,7 +2244,7 @@ void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
(ctx.tbflags & (1 << SR_RB))) * 0x10; (ctx.tbflags & (1 << SR_RB))) * 0x10;
ctx.fbank = ctx.tbflags & FPSCR_FR ? 0x10 : 0; ctx.fbank = ctx.tbflags & FPSCR_FR ? 0x10 : 0;
max_insns = tb->cflags & CF_COUNT_MASK; max_insns = tb_cflags(tb) & CF_COUNT_MASK;
if (max_insns == 0) { if (max_insns == 0) {
max_insns = CF_COUNT_MASK; max_insns = CF_COUNT_MASK;
} }
@ -2299,7 +2288,7 @@ void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
break; break;
} }
if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) { if (num_insns == max_insns && (tb_cflags(tb) & CF_LAST_IO)) {
gen_io_start(); gen_io_start();
} }
@ -2307,7 +2296,7 @@ void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
decode_opc(&ctx); decode_opc(&ctx);
ctx.pc += 2; ctx.pc += 2;
} }
if (tb->cflags & CF_LAST_IO) { if (tb_cflags(tb) & CF_LAST_IO) {
gen_io_end(); gen_io_end();
} }


@ -784,10 +784,6 @@ static void sparc_cpu_initfn(Object *obj)
cs->env_ptr = env; cs->env_ptr = env;
if (tcg_enabled()) {
gen_intermediate_code_init(env);
}
if (scc->cpu_def) { if (scc->cpu_def) {
env->def = *scc->cpu_def; env->def = *scc->cpu_def;
} }
@ -891,6 +887,7 @@ static void sparc_cpu_class_init(ObjectClass *oc, void *data)
cc->vmsd = &vmstate_sparc_cpu; cc->vmsd = &vmstate_sparc_cpu;
#endif #endif
cc->disas_set_info = cpu_sparc_disas_set_info; cc->disas_set_info = cpu_sparc_disas_set_info;
cc->tcg_initialize = sparc_tcg_init;
#if defined(TARGET_SPARC64) && !defined(TARGET_ABI32) #if defined(TARGET_SPARC64) && !defined(TARGET_ABI32)
cc->gdb_num_core_regs = 86; cc->gdb_num_core_regs = 86;


@ -594,7 +594,7 @@ int sparc_cpu_memory_rw_debug(CPUState *cpu, vaddr addr,
/* translate.c */ /* translate.c */
void gen_intermediate_code_init(CPUSPARCState *env); void sparc_tcg_init(void);
/* cpu-exec.c */ /* cpu-exec.c */


@ -41,7 +41,6 @@
according to jump_pc[T2] */ according to jump_pc[T2] */
/* global register indexes */ /* global register indexes */
static TCGv_env cpu_env;
static TCGv_ptr cpu_regwptr; static TCGv_ptr cpu_regwptr;
static TCGv cpu_cc_src, cpu_cc_src2, cpu_cc_dst; static TCGv cpu_cc_src, cpu_cc_src2, cpu_cc_dst;
static TCGv_i32 cpu_cc_op; static TCGv_i32 cpu_cc_op;
@ -171,18 +170,13 @@ static TCGv_i32 gen_load_fpr_F(DisasContext *dc, unsigned int src)
return TCGV_HIGH(cpu_fpr[src / 2]); return TCGV_HIGH(cpu_fpr[src / 2]);
} }
#else #else
TCGv_i32 ret = get_temp_i32(dc);
if (src & 1) { if (src & 1) {
return MAKE_TCGV_I32(GET_TCGV_I64(cpu_fpr[src / 2])); tcg_gen_extrl_i64_i32(ret, cpu_fpr[src / 2]);
} else { } else {
TCGv_i32 ret = get_temp_i32(dc); tcg_gen_extrh_i64_i32(ret, cpu_fpr[src / 2]);
TCGv_i64 t = tcg_temp_new_i64();
tcg_gen_shri_i64(t, cpu_fpr[src / 2], 32);
tcg_gen_extrl_i64_i32(ret, t);
tcg_temp_free_i64(t);
return ret;
} }
return ret;
#endif #endif
} }
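
The rewritten gen_load_fpr_F() leans on the extrl/extrh pair to pull either 32-bit half out of a 64-bit register pair. Their arithmetic in plain C (illustrative only):

    static inline uint32_t extrl_i64_i32(uint64_t x)
    {
        return (uint32_t)x;          /* bits [31:0]  - odd-numbered FPR  */
    }

    static inline uint32_t extrh_i64_i32(uint64_t x)
    {
        return (uint32_t)(x >> 32);  /* bits [63:32] - even-numbered FPR */
    }
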
@ -195,7 +189,7 @@ static void gen_store_fpr_F(DisasContext *dc, unsigned int dst, TCGv_i32 v)
tcg_gen_mov_i32(TCGV_HIGH(cpu_fpr[dst / 2]), v); tcg_gen_mov_i32(TCGV_HIGH(cpu_fpr[dst / 2]), v);
} }
#else #else
TCGv_i64 t = MAKE_TCGV_I64(GET_TCGV_I32(v)); TCGv_i64 t = (TCGv_i64)v;
tcg_gen_deposit_i64(cpu_fpr[dst / 2], cpu_fpr[dst / 2], t, tcg_gen_deposit_i64(cpu_fpr[dst / 2], cpu_fpr[dst / 2], t,
(dst & 1 ? 0 : 32), 32); (dst & 1 ? 0 : 32), 32);
#endif #endif
@ -2442,7 +2436,7 @@ static void gen_ldstub_asi(DisasContext *dc, TCGv dst, TCGv addr, int insn)
default: default:
/* ??? In theory, this should be raise DAE_invalid_asi. /* ??? In theory, this should be raise DAE_invalid_asi.
But the SS-20 roms do ldstuba [%l0] #ASI_M_CTL, %o1. */ But the SS-20 roms do ldstuba [%l0] #ASI_M_CTL, %o1. */
if (parallel_cpus) { if (tb_cflags(dc->tb) & CF_PARALLEL) {
gen_helper_exit_atomic(cpu_env); gen_helper_exit_atomic(cpu_env);
} else { } else {
TCGv_i32 r_asi = tcg_const_i32(da.asi); TCGv_i32 r_asi = tcg_const_i32(da.asi);
@ -5772,7 +5766,7 @@ void gen_intermediate_code(CPUState *cs, TranslationBlock * tb)
#endif #endif
num_insns = 0; num_insns = 0;
max_insns = tb->cflags & CF_COUNT_MASK; max_insns = tb_cflags(tb) & CF_COUNT_MASK;
if (max_insns == 0) { if (max_insns == 0) {
max_insns = CF_COUNT_MASK; max_insns = CF_COUNT_MASK;
} }
@ -5801,7 +5795,7 @@ void gen_intermediate_code(CPUState *cs, TranslationBlock * tb)
goto exit_gen_loop; goto exit_gen_loop;
} }
if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) { if (num_insns == max_insns && (tb_cflags(tb) & CF_LAST_IO)) {
gen_io_start(); gen_io_start();
} }
@ -5828,7 +5822,7 @@ void gen_intermediate_code(CPUState *cs, TranslationBlock * tb)
num_insns < max_insns); num_insns < max_insns);
exit_gen_loop: exit_gen_loop:
if (tb->cflags & CF_LAST_IO) { if (tb_cflags(tb) & CF_LAST_IO) {
gen_io_end(); gen_io_end();
} }
if (!dc->is_br) { if (!dc->is_br) {
@ -5862,9 +5856,8 @@ void gen_intermediate_code(CPUState *cs, TranslationBlock * tb)
#endif #endif
} }
void gen_intermediate_code_init(CPUSPARCState *env) void sparc_tcg_init(void)
{ {
static int inited;
static const char gregnames[32][4] = { static const char gregnames[32][4] = {
"g0", "g1", "g2", "g3", "g4", "g5", "g6", "g7", "g0", "g1", "g2", "g3", "g4", "g5", "g6", "g7",
"o0", "o1", "o2", "o3", "o4", "o5", "o6", "o7", "o0", "o1", "o2", "o3", "o4", "o5", "o6", "o7",
@ -5917,15 +5910,6 @@ void gen_intermediate_code_init(CPUSPARCState *env)
unsigned int i; unsigned int i;
/* init various static tables */
if (inited) {
return;
}
inited = 1;
cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
tcg_ctx.tcg_env = cpu_env;
cpu_regwptr = tcg_global_mem_new_ptr(cpu_env, cpu_regwptr = tcg_global_mem_new_ptr(cpu_env,
offsetof(CPUSPARCState, regwptr), offsetof(CPUSPARCState, regwptr),
"regwptr"); "regwptr");


@ -103,14 +103,8 @@ static void tilegx_cpu_initfn(Object *obj)
CPUState *cs = CPU(obj); CPUState *cs = CPU(obj);
TileGXCPU *cpu = TILEGX_CPU(obj); TileGXCPU *cpu = TILEGX_CPU(obj);
CPUTLGState *env = &cpu->env; CPUTLGState *env = &cpu->env;
static bool tcg_initialized;
cs->env_ptr = env; cs->env_ptr = env;
if (tcg_enabled() && !tcg_initialized) {
tcg_initialized = true;
tilegx_tcg_init();
}
} }
static void tilegx_cpu_do_interrupt(CPUState *cs) static void tilegx_cpu_do_interrupt(CPUState *cs)
@ -161,6 +155,7 @@ static void tilegx_cpu_class_init(ObjectClass *oc, void *data)
cc->set_pc = tilegx_cpu_set_pc; cc->set_pc = tilegx_cpu_set_pc;
cc->handle_mmu_fault = tilegx_cpu_handle_mmu_fault; cc->handle_mmu_fault = tilegx_cpu_handle_mmu_fault;
cc->gdb_num_core_regs = 0; cc->gdb_num_core_regs = 0;
cc->tcg_initialize = tilegx_tcg_init;
} }
static const TypeInfo tilegx_cpu_type_info = { static const TypeInfo tilegx_cpu_type_info = {


@ -33,7 +33,6 @@
#define FMT64X "%016" PRIx64 #define FMT64X "%016" PRIx64
static TCGv_env cpu_env;
static TCGv cpu_pc; static TCGv cpu_pc;
static TCGv cpu_regs[TILEGX_R_COUNT]; static TCGv cpu_regs[TILEGX_R_COUNT];
@ -2378,7 +2377,7 @@ void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
uint64_t pc_start = tb->pc; uint64_t pc_start = tb->pc;
uint64_t next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE; uint64_t next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
int num_insns = 0; int num_insns = 0;
int max_insns = tb->cflags & CF_COUNT_MASK; int max_insns = tb_cflags(tb) & CF_COUNT_MASK;
dc->pc = pc_start; dc->pc = pc_start;
dc->mmuidx = 0; dc->mmuidx = 0;
@ -2445,8 +2444,6 @@ void tilegx_tcg_init(void)
{ {
int i; int i;
cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
tcg_ctx.tcg_env = cpu_env;
cpu_pc = tcg_global_mem_new_i64(cpu_env, offsetof(CPUTLGState, pc), "pc"); cpu_pc = tcg_global_mem_new_i64(cpu_env, offsetof(CPUTLGState, pc), "pc");
for (i = 0; i < TILEGX_R_COUNT; i++) { for (i = 0; i < TILEGX_R_COUNT; i++) {
cpu_regs[i] = tcg_global_mem_new_i64(cpu_env, cpu_regs[i] = tcg_global_mem_new_i64(cpu_env,


@ -109,10 +109,6 @@ static void tricore_cpu_initfn(Object *obj)
CPUTriCoreState *env = &cpu->env; CPUTriCoreState *env = &cpu->env;
cs->env_ptr = env; cs->env_ptr = env;
if (tcg_enabled()) {
tricore_tcg_init();
}
} }
static ObjectClass *tricore_cpu_class_by_name(const char *cpu_model) static ObjectClass *tricore_cpu_class_by_name(const char *cpu_model)
@ -182,6 +178,7 @@ static void tricore_cpu_class_init(ObjectClass *c, void *data)
cc->set_pc = tricore_cpu_set_pc; cc->set_pc = tricore_cpu_set_pc;
cc->synchronize_from_tb = tricore_cpu_synchronize_from_tb; cc->synchronize_from_tb = tricore_cpu_synchronize_from_tb;
cc->get_phys_page_attrs_debug = tricore_cpu_get_phys_page_attrs_debug; cc->get_phys_page_attrs_debug = tricore_cpu_get_phys_page_attrs_debug;
cc->tcg_initialize = tricore_tcg_init;
} }
static void cpu_register(const TriCoreCPUInfo *info) static void cpu_register(const TriCoreCPUInfo *info)


@ -47,8 +47,6 @@ static TCGv cpu_PSW_V;
static TCGv cpu_PSW_SV; static TCGv cpu_PSW_SV;
static TCGv cpu_PSW_AV; static TCGv cpu_PSW_AV;
static TCGv cpu_PSW_SAV; static TCGv cpu_PSW_SAV;
/* CPU env */
static TCGv_env cpu_env;
#include "exec/gen-icount.h" #include "exec/gen-icount.h"
@ -8790,7 +8788,7 @@ void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
int num_insns, max_insns; int num_insns, max_insns;
num_insns = 0; num_insns = 0;
max_insns = tb->cflags & CF_COUNT_MASK; max_insns = tb_cflags(tb) & CF_COUNT_MASK;
if (max_insns == 0) { if (max_insns == 0) {
max_insns = CF_COUNT_MASK; max_insns = CF_COUNT_MASK;
} }
@ -8880,12 +8878,7 @@ static void tricore_tcg_init_csfr(void)
void tricore_tcg_init(void) void tricore_tcg_init(void)
{ {
int i; int i;
static int inited;
if (inited) {
return;
}
cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
tcg_ctx.tcg_env = cpu_env;
/* reg init */ /* reg init */
for (i = 0 ; i < 16 ; i++) { for (i = 0 ; i < 16 ; i++) {
cpu_gpr_a[i] = tcg_global_mem_new(cpu_env, cpu_gpr_a[i] = tcg_global_mem_new(cpu_env,


@ -117,7 +117,6 @@ static void uc32_cpu_initfn(Object *obj)
CPUState *cs = CPU(obj); CPUState *cs = CPU(obj);
UniCore32CPU *cpu = UNICORE32_CPU(obj); UniCore32CPU *cpu = UNICORE32_CPU(obj);
CPUUniCore32State *env = &cpu->env; CPUUniCore32State *env = &cpu->env;
static bool inited;
cs->env_ptr = env; cs->env_ptr = env;
@ -130,11 +129,6 @@ static void uc32_cpu_initfn(Object *obj)
#endif #endif
tlb_flush(cs); tlb_flush(cs);
if (tcg_enabled() && !inited) {
inited = true;
uc32_translate_init();
}
} }
static const VMStateDescription vmstate_uc32_cpu = { static const VMStateDescription vmstate_uc32_cpu = {
@ -162,6 +156,7 @@ static void uc32_cpu_class_init(ObjectClass *oc, void *data)
#else #else
cc->get_phys_page_debug = uc32_cpu_get_phys_page_debug; cc->get_phys_page_debug = uc32_cpu_get_phys_page_debug;
#endif #endif
cc->tcg_initialize = uc32_translate_init;
dc->vmsd = &vmstate_uc32_cpu; dc->vmsd = &vmstate_uc32_cpu;
} }


@ -54,7 +54,6 @@ typedef struct DisasContext {
conditional executions state has been updated. */ conditional executions state has been updated. */
#define DISAS_SYSCALL DISAS_TARGET_3 #define DISAS_SYSCALL DISAS_TARGET_3
static TCGv_env cpu_env;
static TCGv_i32 cpu_R[32]; static TCGv_i32 cpu_R[32];
/* FIXME: These should be removed. */ /* FIXME: These should be removed. */
@ -74,9 +73,6 @@ void uc32_translate_init(void)
{ {
int i; int i;
cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
tcg_ctx.tcg_env = cpu_env;
for (i = 0; i < 32; i++) { for (i = 0; i < 32; i++) {
cpu_R[i] = tcg_global_mem_new_i32(cpu_env, cpu_R[i] = tcg_global_mem_new_i32(cpu_env,
offsetof(CPUUniCore32State, regs[i]), regnames[i]); offsetof(CPUUniCore32State, regs[i]), regnames[i]);
@ -1900,7 +1896,7 @@ void gen_intermediate_code(CPUState *cs, TranslationBlock *tb)
cpu_F1d = tcg_temp_new_i64(); cpu_F1d = tcg_temp_new_i64();
next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE; next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
num_insns = 0; num_insns = 0;
max_insns = tb->cflags & CF_COUNT_MASK; max_insns = tb_cflags(tb) & CF_COUNT_MASK;
if (max_insns == 0) { if (max_insns == 0) {
max_insns = CF_COUNT_MASK; max_insns = CF_COUNT_MASK;
} }
@ -1933,7 +1929,7 @@ void gen_intermediate_code(CPUState *cs, TranslationBlock *tb)
goto done_generating; goto done_generating;
} }
if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) { if (num_insns == max_insns && (tb_cflags(tb) & CF_LAST_IO)) {
gen_io_start(); gen_io_start();
} }
@ -1958,7 +1954,7 @@ void gen_intermediate_code(CPUState *cs, TranslationBlock *tb)
dc->pc < next_page_start && dc->pc < next_page_start &&
num_insns < max_insns); num_insns < max_insns);
if (tb->cflags & CF_LAST_IO) { if (tb_cflags(tb) & CF_LAST_IO) {
if (dc->condjmp) { if (dc->condjmp) {
/* FIXME: This can theoretically happen with self-modifying /* FIXME: This can theoretically happen with self-modifying
code. */ code. */


@ -121,7 +121,6 @@ static void xtensa_cpu_initfn(Object *obj)
XtensaCPU *cpu = XTENSA_CPU(obj); XtensaCPU *cpu = XTENSA_CPU(obj);
XtensaCPUClass *xcc = XTENSA_CPU_GET_CLASS(obj); XtensaCPUClass *xcc = XTENSA_CPU_GET_CLASS(obj);
CPUXtensaState *env = &cpu->env; CPUXtensaState *env = &cpu->env;
static bool tcg_inited;
cs->env_ptr = env; cs->env_ptr = env;
env->config = xcc->config; env->config = xcc->config;
@ -131,11 +130,6 @@ static void xtensa_cpu_initfn(Object *obj)
memory_region_init_io(env->system_er, NULL, NULL, env, "er", memory_region_init_io(env->system_er, NULL, NULL, env, "er",
UINT64_C(0x100000000)); UINT64_C(0x100000000));
address_space_init(env->address_space_er, env->system_er, "ER"); address_space_init(env->address_space_er, env->system_er, "ER");
if (tcg_enabled() && !tcg_inited) {
tcg_inited = true;
xtensa_translate_init();
}
} }
static const VMStateDescription vmstate_xtensa_cpu = { static const VMStateDescription vmstate_xtensa_cpu = {
@ -170,6 +164,7 @@ static void xtensa_cpu_class_init(ObjectClass *oc, void *data)
cc->do_unassigned_access = xtensa_cpu_do_unassigned_access; cc->do_unassigned_access = xtensa_cpu_do_unassigned_access;
#endif #endif
cc->debug_excp_handler = xtensa_breakpoint_handler; cc->debug_excp_handler = xtensa_breakpoint_handler;
cc->tcg_initialize = xtensa_translate_init;
dc->vmsd = &vmstate_xtensa_cpu; dc->vmsd = &vmstate_xtensa_cpu;
} }


@ -77,7 +77,6 @@ typedef struct DisasContext {
unsigned cpenable; unsigned cpenable;
} DisasContext; } DisasContext;
static TCGv_env cpu_env;
static TCGv_i32 cpu_pc; static TCGv_i32 cpu_pc;
static TCGv_i32 cpu_R[16]; static TCGv_i32 cpu_R[16];
static TCGv_i32 cpu_FR[16]; static TCGv_i32 cpu_FR[16];
@ -221,8 +220,6 @@ void xtensa_translate_init(void)
}; };
int i; int i;
cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
tcg_ctx.tcg_env = cpu_env;
cpu_pc = tcg_global_mem_new_i32(cpu_env, cpu_pc = tcg_global_mem_new_i32(cpu_env,
offsetof(CPUXtensaState, pc), "pc"); offsetof(CPUXtensaState, pc), "pc");
@ -517,12 +514,12 @@ static bool gen_check_sr(DisasContext *dc, uint32_t sr, unsigned access)
static bool gen_rsr_ccount(DisasContext *dc, TCGv_i32 d, uint32_t sr) static bool gen_rsr_ccount(DisasContext *dc, TCGv_i32 d, uint32_t sr)
{ {
if (dc->tb->cflags & CF_USE_ICOUNT) { if (tb_cflags(dc->tb) & CF_USE_ICOUNT) {
gen_io_start(); gen_io_start();
} }
gen_helper_update_ccount(cpu_env); gen_helper_update_ccount(cpu_env);
tcg_gen_mov_i32(d, cpu_SR[sr]); tcg_gen_mov_i32(d, cpu_SR[sr]);
if (dc->tb->cflags & CF_USE_ICOUNT) { if (tb_cflags(dc->tb) & CF_USE_ICOUNT) {
gen_io_end(); gen_io_end();
return true; return true;
} }
@ -702,11 +699,11 @@ static bool gen_wsr_cpenable(DisasContext *dc, uint32_t sr, TCGv_i32 v)
static void gen_check_interrupts(DisasContext *dc) static void gen_check_interrupts(DisasContext *dc)
{ {
if (dc->tb->cflags & CF_USE_ICOUNT) { if (tb_cflags(dc->tb) & CF_USE_ICOUNT) {
gen_io_start(); gen_io_start();
} }
gen_helper_check_interrupts(cpu_env); gen_helper_check_interrupts(cpu_env);
if (dc->tb->cflags & CF_USE_ICOUNT) { if (tb_cflags(dc->tb) & CF_USE_ICOUNT) {
gen_io_end(); gen_io_end();
} }
} }
@ -760,11 +757,11 @@ static bool gen_wsr_ps(DisasContext *dc, uint32_t sr, TCGv_i32 v)
static bool gen_wsr_ccount(DisasContext *dc, uint32_t sr, TCGv_i32 v) static bool gen_wsr_ccount(DisasContext *dc, uint32_t sr, TCGv_i32 v)
{ {
if (dc->tb->cflags & CF_USE_ICOUNT) { if (tb_cflags(dc->tb) & CF_USE_ICOUNT) {
gen_io_start(); gen_io_start();
} }
gen_helper_wsr_ccount(cpu_env, v); gen_helper_wsr_ccount(cpu_env, v);
if (dc->tb->cflags & CF_USE_ICOUNT) { if (tb_cflags(dc->tb) & CF_USE_ICOUNT) {
gen_io_end(); gen_io_end();
gen_jumpi_check_loop_end(dc, 0); gen_jumpi_check_loop_end(dc, 0);
return true; return true;
@ -801,11 +798,11 @@ static bool gen_wsr_ccompare(DisasContext *dc, uint32_t sr, TCGv_i32 v)
tcg_gen_mov_i32(cpu_SR[sr], v); tcg_gen_mov_i32(cpu_SR[sr], v);
tcg_gen_andi_i32(cpu_SR[INTSET], cpu_SR[INTSET], ~int_bit); tcg_gen_andi_i32(cpu_SR[INTSET], cpu_SR[INTSET], ~int_bit);
if (dc->tb->cflags & CF_USE_ICOUNT) { if (tb_cflags(dc->tb) & CF_USE_ICOUNT) {
gen_io_start(); gen_io_start();
} }
gen_helper_update_ccompare(cpu_env, tmp); gen_helper_update_ccompare(cpu_env, tmp);
if (dc->tb->cflags & CF_USE_ICOUNT) { if (tb_cflags(dc->tb) & CF_USE_ICOUNT) {
gen_io_end(); gen_io_end();
gen_jumpi_check_loop_end(dc, 0); gen_jumpi_check_loop_end(dc, 0);
ret = true; ret = true;
@ -900,11 +897,11 @@ static void gen_waiti(DisasContext *dc, uint32_t imm4)
TCGv_i32 pc = tcg_const_i32(dc->next_pc); TCGv_i32 pc = tcg_const_i32(dc->next_pc);
TCGv_i32 intlevel = tcg_const_i32(imm4); TCGv_i32 intlevel = tcg_const_i32(imm4);
if (dc->tb->cflags & CF_USE_ICOUNT) { if (tb_cflags(dc->tb) & CF_USE_ICOUNT) {
gen_io_start(); gen_io_start();
} }
gen_helper_waiti(cpu_env, pc, intlevel); gen_helper_waiti(cpu_env, pc, intlevel);
if (dc->tb->cflags & CF_USE_ICOUNT) { if (tb_cflags(dc->tb) & CF_USE_ICOUNT) {
gen_io_end(); gen_io_end();
} }
tcg_temp_free(pc); tcg_temp_free(pc);
@ -3126,7 +3123,7 @@ void gen_intermediate_code(CPUState *cs, TranslationBlock *tb)
CPUXtensaState *env = cs->env_ptr; CPUXtensaState *env = cs->env_ptr;
DisasContext dc; DisasContext dc;
int insn_count = 0; int insn_count = 0;
int max_insns = tb->cflags & CF_COUNT_MASK; int max_insns = tb_cflags(tb) & CF_COUNT_MASK;
uint32_t pc_start = tb->pc; uint32_t pc_start = tb->pc;
uint32_t next_page_start = uint32_t next_page_start =
(pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE; (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
@ -3162,7 +3159,7 @@ void gen_intermediate_code(CPUState *cs, TranslationBlock *tb)
gen_tb_start(tb); gen_tb_start(tb);
if ((tb->cflags & CF_USE_ICOUNT) && if ((tb_cflags(tb) & CF_USE_ICOUNT) &&
(tb->flags & XTENSA_TBFLAG_YIELD)) { (tb->flags & XTENSA_TBFLAG_YIELD)) {
tcg_gen_insn_start(dc.pc); tcg_gen_insn_start(dc.pc);
++insn_count; ++insn_count;
@ -3194,7 +3191,7 @@ void gen_intermediate_code(CPUState *cs, TranslationBlock *tb)
break; break;
} }
if (insn_count == max_insns && (tb->cflags & CF_LAST_IO)) { if (insn_count == max_insns && (tb_cflags(tb) & CF_LAST_IO)) {
gen_io_start(); gen_io_start();
} }
@ -3235,7 +3232,7 @@ done:
tcg_temp_free(dc.next_icount); tcg_temp_free(dc.next_icount);
} }
if (tb->cflags & CF_LAST_IO) { if (tb_cflags(tb) & CF_LAST_IO) {
gen_io_end(); gen_io_end();
} }

(File diff suppressed because it is too large.)

@ -46,113 +46,83 @@ extern TCGv_i32 TCGV_HIGH_link_error(TCGv_i64);
Up to and including filling in the forward link immediately. We'll do Up to and including filling in the forward link immediately. We'll do
proper termination of the end of the list after we finish translation. */ proper termination of the end of the list after we finish translation. */
static void tcg_emit_op(TCGContext *ctx, TCGOpcode opc, int args) static inline TCGOp *tcg_emit_op(TCGOpcode opc)
{ {
TCGContext *ctx = tcg_ctx;
int oi = ctx->gen_next_op_idx; int oi = ctx->gen_next_op_idx;
int ni = oi + 1; int ni = oi + 1;
int pi = oi - 1; int pi = oi - 1;
TCGOp *op = &ctx->gen_op_buf[oi];
tcg_debug_assert(oi < OPC_BUF_SIZE); tcg_debug_assert(oi < OPC_BUF_SIZE);
ctx->gen_op_buf[0].prev = oi; ctx->gen_op_buf[0].prev = oi;
ctx->gen_next_op_idx = ni; ctx->gen_next_op_idx = ni;
ctx->gen_op_buf[oi] = (TCGOp){ memset(op, 0, offsetof(TCGOp, args));
.opc = opc, op->opc = opc;
.args = args, op->prev = pi;
.prev = pi, op->next = ni;
.next = ni
}; return op;
} }
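
With the argument array now embedded in TCGOp, emission is a single slot grab plus direct stores into op->args; the separate opparam buffer and its index bookkeeping disappear. A self-contained model of the new path (structure and sizes simplified, not QEMU's):

    #include <string.h>

    enum { NOPS = 640, NARGS = 6 };

    typedef struct OpModel {
        int opc;
        int prev, next;
        unsigned long args[NARGS];
    } OpModel;

    static OpModel op_buf[NOPS];
    static int next_op_idx = 1;     /* slot 0 is the list sentinel */

    static OpModel *emit_op(int opc)
    {
        int oi = next_op_idx++;
        OpModel *op = &op_buf[oi];

        memset(op->args, 0, sizeof(op->args));
        op->opc = opc;
        op->prev = oi - 1;          /* provisional doubly-linked list */
        op->next = oi + 1;
        op_buf[0].prev = oi;        /* sentinel tracks the tail */
        return op;
    }

    /* Each tcg_gen_opN() then reduces to emit_op(opc)->args[i] = ai. */
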
void tcg_gen_op1(TCGContext *ctx, TCGOpcode opc, TCGArg a1) void tcg_gen_op1(TCGOpcode opc, TCGArg a1)
{ {
int pi = ctx->gen_next_parm_idx; TCGOp *op = tcg_emit_op(opc);
op->args[0] = a1;
tcg_debug_assert(pi + 1 <= OPPARAM_BUF_SIZE);
ctx->gen_next_parm_idx = pi + 1;
ctx->gen_opparam_buf[pi] = a1;
tcg_emit_op(ctx, opc, pi);
} }
void tcg_gen_op2(TCGContext *ctx, TCGOpcode opc, TCGArg a1, TCGArg a2) void tcg_gen_op2(TCGOpcode opc, TCGArg a1, TCGArg a2)
{ {
int pi = ctx->gen_next_parm_idx; TCGOp *op = tcg_emit_op(opc);
op->args[0] = a1;
tcg_debug_assert(pi + 2 <= OPPARAM_BUF_SIZE); op->args[1] = a2;
ctx->gen_next_parm_idx = pi + 2;
ctx->gen_opparam_buf[pi + 0] = a1;
ctx->gen_opparam_buf[pi + 1] = a2;
tcg_emit_op(ctx, opc, pi);
} }
void tcg_gen_op3(TCGContext *ctx, TCGOpcode opc, TCGArg a1, void tcg_gen_op3(TCGOpcode opc, TCGArg a1, TCGArg a2, TCGArg a3)
TCGArg a2, TCGArg a3)
{ {
int pi = ctx->gen_next_parm_idx; TCGOp *op = tcg_emit_op(opc);
op->args[0] = a1;
tcg_debug_assert(pi + 3 <= OPPARAM_BUF_SIZE); op->args[1] = a2;
ctx->gen_next_parm_idx = pi + 3; op->args[2] = a3;
ctx->gen_opparam_buf[pi + 0] = a1;
ctx->gen_opparam_buf[pi + 1] = a2;
ctx->gen_opparam_buf[pi + 2] = a3;
tcg_emit_op(ctx, opc, pi);
} }
void tcg_gen_op4(TCGContext *ctx, TCGOpcode opc, TCGArg a1, void tcg_gen_op4(TCGOpcode opc, TCGArg a1, TCGArg a2, TCGArg a3, TCGArg a4)
TCGArg a2, TCGArg a3, TCGArg a4)
{ {
int pi = ctx->gen_next_parm_idx; TCGOp *op = tcg_emit_op(opc);
op->args[0] = a1;
tcg_debug_assert(pi + 4 <= OPPARAM_BUF_SIZE); op->args[1] = a2;
ctx->gen_next_parm_idx = pi + 4; op->args[2] = a3;
ctx->gen_opparam_buf[pi + 0] = a1; op->args[3] = a4;
ctx->gen_opparam_buf[pi + 1] = a2;
ctx->gen_opparam_buf[pi + 2] = a3;
ctx->gen_opparam_buf[pi + 3] = a4;
tcg_emit_op(ctx, opc, pi);
} }
void tcg_gen_op5(TCGContext *ctx, TCGOpcode opc, TCGArg a1, void tcg_gen_op5(TCGOpcode opc, TCGArg a1, TCGArg a2, TCGArg a3,
TCGArg a2, TCGArg a3, TCGArg a4, TCGArg a5) TCGArg a4, TCGArg a5)
{ {
int pi = ctx->gen_next_parm_idx; TCGOp *op = tcg_emit_op(opc);
op->args[0] = a1;
tcg_debug_assert(pi + 5 <= OPPARAM_BUF_SIZE); op->args[1] = a2;
ctx->gen_next_parm_idx = pi + 5; op->args[2] = a3;
ctx->gen_opparam_buf[pi + 0] = a1; op->args[3] = a4;
ctx->gen_opparam_buf[pi + 1] = a2; op->args[4] = a5;
ctx->gen_opparam_buf[pi + 2] = a3;
ctx->gen_opparam_buf[pi + 3] = a4;
ctx->gen_opparam_buf[pi + 4] = a5;
tcg_emit_op(ctx, opc, pi);
} }
void tcg_gen_op6(TCGContext *ctx, TCGOpcode opc, TCGArg a1, TCGArg a2, void tcg_gen_op6(TCGOpcode opc, TCGArg a1, TCGArg a2, TCGArg a3,
TCGArg a3, TCGArg a4, TCGArg a5, TCGArg a6) TCGArg a4, TCGArg a5, TCGArg a6)
{ {
int pi = ctx->gen_next_parm_idx; TCGOp *op = tcg_emit_op(opc);
op->args[0] = a1;
tcg_debug_assert(pi + 6 <= OPPARAM_BUF_SIZE); op->args[1] = a2;
ctx->gen_next_parm_idx = pi + 6; op->args[2] = a3;
ctx->gen_opparam_buf[pi + 0] = a1; op->args[3] = a4;
ctx->gen_opparam_buf[pi + 1] = a2; op->args[4] = a5;
ctx->gen_opparam_buf[pi + 2] = a3; op->args[5] = a6;
ctx->gen_opparam_buf[pi + 3] = a4;
ctx->gen_opparam_buf[pi + 4] = a5;
ctx->gen_opparam_buf[pi + 5] = a6;
tcg_emit_op(ctx, opc, pi);
} }
void tcg_gen_mb(TCGBar mb_type) void tcg_gen_mb(TCGBar mb_type)
{ {
if (parallel_cpus) { if (tcg_ctx->tb_cflags & CF_PARALLEL) {
tcg_gen_op1(&tcg_ctx, INDEX_op_mb, mb_type); tcg_gen_op1(INDEX_op_mb, mb_type);
} }
} }
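
tcg_gen_mb() now keys off the TB's own CF_PARALLEL bit rather than the global parallel_cpus flag, so a TB translated for single-threaded round-robin execution emits no fence at all. The reasoning in host terms (illustrative):

    #include <stdatomic.h>
    #include <stdbool.h>

    static inline void guest_barrier(bool tb_parallel)
    {
        if (tb_parallel) {
            /* Other vCPU threads may run concurrently, so a real
               fence is needed to order this vCPU's accesses. */
            atomic_thread_fence(memory_order_seq_cst);
        } /* else: one vCPU at a time; program order already holds. */
    }
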
@ -2487,10 +2457,10 @@ void tcg_gen_extrl_i64_i32(TCGv_i32 ret, TCGv_i64 arg)
if (TCG_TARGET_REG_BITS == 32) { if (TCG_TARGET_REG_BITS == 32) {
tcg_gen_mov_i32(ret, TCGV_LOW(arg)); tcg_gen_mov_i32(ret, TCGV_LOW(arg));
} else if (TCG_TARGET_HAS_extrl_i64_i32) { } else if (TCG_TARGET_HAS_extrl_i64_i32) {
tcg_gen_op2(&tcg_ctx, INDEX_op_extrl_i64_i32, tcg_gen_op2(INDEX_op_extrl_i64_i32,
GET_TCGV_I32(ret), GET_TCGV_I64(arg)); tcgv_i32_arg(ret), tcgv_i64_arg(arg));
} else { } else {
tcg_gen_mov_i32(ret, MAKE_TCGV_I32(GET_TCGV_I64(arg))); tcg_gen_mov_i32(ret, (TCGv_i32)arg);
} }
} }
@ -2499,12 +2469,12 @@ void tcg_gen_extrh_i64_i32(TCGv_i32 ret, TCGv_i64 arg)
if (TCG_TARGET_REG_BITS == 32) { if (TCG_TARGET_REG_BITS == 32) {
tcg_gen_mov_i32(ret, TCGV_HIGH(arg)); tcg_gen_mov_i32(ret, TCGV_HIGH(arg));
} else if (TCG_TARGET_HAS_extrh_i64_i32) { } else if (TCG_TARGET_HAS_extrh_i64_i32) {
tcg_gen_op2(&tcg_ctx, INDEX_op_extrh_i64_i32, tcg_gen_op2(INDEX_op_extrh_i64_i32,
GET_TCGV_I32(ret), GET_TCGV_I64(arg)); tcgv_i32_arg(ret), tcgv_i64_arg(arg));
} else { } else {
TCGv_i64 t = tcg_temp_new_i64(); TCGv_i64 t = tcg_temp_new_i64();
tcg_gen_shri_i64(t, arg, 32); tcg_gen_shri_i64(t, arg, 32);
tcg_gen_mov_i32(ret, MAKE_TCGV_I32(GET_TCGV_I64(t))); tcg_gen_mov_i32(ret, (TCGv_i32)t);
tcg_temp_free_i64(t); tcg_temp_free_i64(t);
} }
} }
@ -2515,8 +2485,8 @@ void tcg_gen_extu_i32_i64(TCGv_i64 ret, TCGv_i32 arg)
tcg_gen_mov_i32(TCGV_LOW(ret), arg); tcg_gen_mov_i32(TCGV_LOW(ret), arg);
tcg_gen_movi_i32(TCGV_HIGH(ret), 0); tcg_gen_movi_i32(TCGV_HIGH(ret), 0);
} else { } else {
tcg_gen_op2(&tcg_ctx, INDEX_op_extu_i32_i64, tcg_gen_op2(INDEX_op_extu_i32_i64,
GET_TCGV_I64(ret), GET_TCGV_I32(arg)); tcgv_i64_arg(ret), tcgv_i32_arg(arg));
} }
} }
@ -2526,8 +2496,8 @@ void tcg_gen_ext_i32_i64(TCGv_i64 ret, TCGv_i32 arg)
tcg_gen_mov_i32(TCGV_LOW(ret), arg); tcg_gen_mov_i32(TCGV_LOW(ret), arg);
tcg_gen_sari_i32(TCGV_HIGH(ret), TCGV_LOW(ret), 31); tcg_gen_sari_i32(TCGV_HIGH(ret), TCGV_LOW(ret), 31);
} else { } else {
tcg_gen_op2(&tcg_ctx, INDEX_op_ext_i32_i64, tcg_gen_op2(INDEX_op_ext_i32_i64,
GET_TCGV_I64(ret), GET_TCGV_I32(arg)); tcgv_i64_arg(ret), tcgv_i32_arg(arg));
} }
} }
@ -2582,8 +2552,8 @@ void tcg_gen_goto_tb(unsigned idx)
tcg_debug_assert(idx <= 1); tcg_debug_assert(idx <= 1);
#ifdef CONFIG_DEBUG_TCG #ifdef CONFIG_DEBUG_TCG
/* Verify that we haven't seen this numbered exit before. */ /* Verify that we haven't seen this numbered exit before. */
tcg_debug_assert((tcg_ctx.goto_tb_issue_mask & (1 << idx)) == 0); tcg_debug_assert((tcg_ctx->goto_tb_issue_mask & (1 << idx)) == 0);
tcg_ctx.goto_tb_issue_mask |= 1 << idx; tcg_ctx->goto_tb_issue_mask |= 1 << idx;
#endif #endif
tcg_gen_op1i(INDEX_op_goto_tb, idx); tcg_gen_op1i(INDEX_op_goto_tb, idx);
} }
@ -2592,8 +2562,8 @@ void tcg_gen_lookup_and_goto_ptr(void)
{ {
if (TCG_TARGET_HAS_goto_ptr && !qemu_loglevel_mask(CPU_LOG_TB_NOCHAIN)) { if (TCG_TARGET_HAS_goto_ptr && !qemu_loglevel_mask(CPU_LOG_TB_NOCHAIN)) {
TCGv_ptr ptr = tcg_temp_new_ptr(); TCGv_ptr ptr = tcg_temp_new_ptr();
gen_helper_lookup_tb_ptr(ptr, tcg_ctx.tcg_env); gen_helper_lookup_tb_ptr(ptr, cpu_env);
tcg_gen_op1i(INDEX_op_goto_ptr, GET_TCGV_PTR(ptr)); tcg_gen_op1i(INDEX_op_goto_ptr, tcgv_ptr_arg(ptr));
tcg_temp_free_ptr(ptr); tcg_temp_free_ptr(ptr);
} else { } else {
tcg_gen_exit_tb(0); tcg_gen_exit_tb(0);
@ -2638,7 +2608,7 @@ static void gen_ldst_i32(TCGOpcode opc, TCGv_i32 val, TCGv addr,
if (TCG_TARGET_REG_BITS == 32) { if (TCG_TARGET_REG_BITS == 32) {
tcg_gen_op4i_i32(opc, val, TCGV_LOW(addr), TCGV_HIGH(addr), oi); tcg_gen_op4i_i32(opc, val, TCGV_LOW(addr), TCGV_HIGH(addr), oi);
} else { } else {
tcg_gen_op3(&tcg_ctx, opc, GET_TCGV_I32(val), GET_TCGV_I64(addr), oi); tcg_gen_op3(opc, tcgv_i32_arg(val), tcgv_i64_arg(addr), oi);
} }
#endif #endif
} }
@ -2651,7 +2621,7 @@ static void gen_ldst_i64(TCGOpcode opc, TCGv_i64 val, TCGv addr,
if (TCG_TARGET_REG_BITS == 32) { if (TCG_TARGET_REG_BITS == 32) {
tcg_gen_op4i_i32(opc, TCGV_LOW(val), TCGV_HIGH(val), addr, oi); tcg_gen_op4i_i32(opc, TCGV_LOW(val), TCGV_HIGH(val), addr, oi);
} else { } else {
tcg_gen_op3(&tcg_ctx, opc, GET_TCGV_I64(val), GET_TCGV_I32(addr), oi); tcg_gen_op3(opc, tcgv_i64_arg(val), tcgv_i32_arg(addr), oi);
} }
#else #else
if (TCG_TARGET_REG_BITS == 32) { if (TCG_TARGET_REG_BITS == 32) {
@ -2678,7 +2648,7 @@ void tcg_gen_qemu_ld_i32(TCGv_i32 val, TCGv addr, TCGArg idx, TCGMemOp memop)
{ {
tcg_gen_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD); tcg_gen_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
memop = tcg_canonicalize_memop(memop, 0, 0); memop = tcg_canonicalize_memop(memop, 0, 0);
trace_guest_mem_before_tcg(tcg_ctx.cpu, tcg_ctx.tcg_env, trace_guest_mem_before_tcg(tcg_ctx->cpu, cpu_env,
addr, trace_mem_get_info(memop, 0)); addr, trace_mem_get_info(memop, 0));
gen_ldst_i32(INDEX_op_qemu_ld_i32, val, addr, memop, idx); gen_ldst_i32(INDEX_op_qemu_ld_i32, val, addr, memop, idx);
} }
@ -2687,7 +2657,7 @@ void tcg_gen_qemu_st_i32(TCGv_i32 val, TCGv addr, TCGArg idx, TCGMemOp memop)
{ {
tcg_gen_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST); tcg_gen_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
memop = tcg_canonicalize_memop(memop, 0, 1); memop = tcg_canonicalize_memop(memop, 0, 1);
trace_guest_mem_before_tcg(tcg_ctx.cpu, tcg_ctx.tcg_env, trace_guest_mem_before_tcg(tcg_ctx->cpu, cpu_env,
addr, trace_mem_get_info(memop, 1)); addr, trace_mem_get_info(memop, 1));
gen_ldst_i32(INDEX_op_qemu_st_i32, val, addr, memop, idx); gen_ldst_i32(INDEX_op_qemu_st_i32, val, addr, memop, idx);
} }
@ -2706,7 +2676,7 @@ void tcg_gen_qemu_ld_i64(TCGv_i64 val, TCGv addr, TCGArg idx, TCGMemOp memop)
} }
memop = tcg_canonicalize_memop(memop, 1, 0); memop = tcg_canonicalize_memop(memop, 1, 0);
trace_guest_mem_before_tcg(tcg_ctx.cpu, tcg_ctx.tcg_env, trace_guest_mem_before_tcg(tcg_ctx->cpu, cpu_env,
addr, trace_mem_get_info(memop, 0)); addr, trace_mem_get_info(memop, 0));
gen_ldst_i64(INDEX_op_qemu_ld_i64, val, addr, memop, idx); gen_ldst_i64(INDEX_op_qemu_ld_i64, val, addr, memop, idx);
} }
@ -2720,7 +2690,7 @@ void tcg_gen_qemu_st_i64(TCGv_i64 val, TCGv addr, TCGArg idx, TCGMemOp memop)
} }
memop = tcg_canonicalize_memop(memop, 1, 1); memop = tcg_canonicalize_memop(memop, 1, 1);
trace_guest_mem_before_tcg(tcg_ctx.cpu, tcg_ctx.tcg_env, trace_guest_mem_before_tcg(tcg_ctx->cpu, cpu_env,
addr, trace_mem_get_info(memop, 1)); addr, trace_mem_get_info(memop, 1));
gen_ldst_i64(INDEX_op_qemu_st_i64, val, addr, memop, idx); gen_ldst_i64(INDEX_op_qemu_st_i64, val, addr, memop, idx);
} }
@ -2810,7 +2780,7 @@ void tcg_gen_atomic_cmpxchg_i32(TCGv_i32 retv, TCGv addr, TCGv_i32 cmpv,
{ {
memop = tcg_canonicalize_memop(memop, 0, 0); memop = tcg_canonicalize_memop(memop, 0, 0);
if (!parallel_cpus) { if (!(tcg_ctx->tb_cflags & CF_PARALLEL)) {
TCGv_i32 t1 = tcg_temp_new_i32(); TCGv_i32 t1 = tcg_temp_new_i32();
TCGv_i32 t2 = tcg_temp_new_i32(); TCGv_i32 t2 = tcg_temp_new_i32();
@ -2836,11 +2806,11 @@ void tcg_gen_atomic_cmpxchg_i32(TCGv_i32 retv, TCGv addr, TCGv_i32 cmpv,
#ifdef CONFIG_SOFTMMU #ifdef CONFIG_SOFTMMU
{ {
TCGv_i32 oi = tcg_const_i32(make_memop_idx(memop & ~MO_SIGN, idx)); TCGv_i32 oi = tcg_const_i32(make_memop_idx(memop & ~MO_SIGN, idx));
gen(retv, tcg_ctx.tcg_env, addr, cmpv, newv, oi); gen(retv, cpu_env, addr, cmpv, newv, oi);
tcg_temp_free_i32(oi); tcg_temp_free_i32(oi);
} }
#else #else
gen(retv, tcg_ctx.tcg_env, addr, cmpv, newv); gen(retv, cpu_env, addr, cmpv, newv);
#endif #endif
if (memop & MO_SIGN) { if (memop & MO_SIGN) {
@ -2854,7 +2824,7 @@ void tcg_gen_atomic_cmpxchg_i64(TCGv_i64 retv, TCGv addr, TCGv_i64 cmpv,
{ {
memop = tcg_canonicalize_memop(memop, 1, 0); memop = tcg_canonicalize_memop(memop, 1, 0);
if (!parallel_cpus) { if (!(tcg_ctx->tb_cflags & CF_PARALLEL)) {
TCGv_i64 t1 = tcg_temp_new_i64(); TCGv_i64 t1 = tcg_temp_new_i64();
TCGv_i64 t2 = tcg_temp_new_i64(); TCGv_i64 t2 = tcg_temp_new_i64();
@ -2881,14 +2851,14 @@ void tcg_gen_atomic_cmpxchg_i64(TCGv_i64 retv, TCGv addr, TCGv_i64 cmpv,
#ifdef CONFIG_SOFTMMU #ifdef CONFIG_SOFTMMU
{ {
TCGv_i32 oi = tcg_const_i32(make_memop_idx(memop, idx)); TCGv_i32 oi = tcg_const_i32(make_memop_idx(memop, idx));
gen(retv, tcg_ctx.tcg_env, addr, cmpv, newv, oi); gen(retv, cpu_env, addr, cmpv, newv, oi);
tcg_temp_free_i32(oi); tcg_temp_free_i32(oi);
} }
#else #else
gen(retv, tcg_ctx.tcg_env, addr, cmpv, newv); gen(retv, cpu_env, addr, cmpv, newv);
#endif #endif
#else #else
gen_helper_exit_atomic(tcg_ctx.tcg_env); gen_helper_exit_atomic(cpu_env);
/* Produce a result, so that we have a well-formed opcode stream /* Produce a result, so that we have a well-formed opcode stream
with respect to uses of the result in the (dead) code following. */ with respect to uses of the result in the (dead) code following. */
tcg_gen_movi_i64(retv, 0); tcg_gen_movi_i64(retv, 0);
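
The structure of the choice above, reduced to plain C: with CF_PARALLEL the compare-and-swap must be genuinely atomic, while without it an ordinary load/compare/store sequence is already race-free, since no other vCPU can interleave. An illustrative model using GCC/Clang builtins:

    #include <stdbool.h>
    #include <stdint.h>

    static uint32_t cmpxchg_model(uint32_t *p, uint32_t cmp, uint32_t newv,
                                  bool parallel)
    {
        if (parallel) {
            /* Concurrent vCPU threads may touch *p: use a host atomic. */
            __atomic_compare_exchange_n(p, &cmp, newv, false,
                                        __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
            return cmp;             /* holds the old value either way */
        }
        /* Serial mode: nothing can interrupt this sequence. */
        uint32_t old = *p;
        if (old == cmp) {
            *p = newv;
        }
        return old;
    }
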
@ -2944,11 +2914,11 @@ static void do_atomic_op_i32(TCGv_i32 ret, TCGv addr, TCGv_i32 val,
#ifdef CONFIG_SOFTMMU #ifdef CONFIG_SOFTMMU
{ {
TCGv_i32 oi = tcg_const_i32(make_memop_idx(memop & ~MO_SIGN, idx)); TCGv_i32 oi = tcg_const_i32(make_memop_idx(memop & ~MO_SIGN, idx));
gen(ret, tcg_ctx.tcg_env, addr, val, oi); gen(ret, cpu_env, addr, val, oi);
tcg_temp_free_i32(oi); tcg_temp_free_i32(oi);
} }
#else #else
gen(ret, tcg_ctx.tcg_env, addr, val); gen(ret, cpu_env, addr, val);
#endif #endif
if (memop & MO_SIGN) { if (memop & MO_SIGN) {
@ -2989,14 +2959,14 @@ static void do_atomic_op_i64(TCGv_i64 ret, TCGv addr, TCGv_i64 val,
#ifdef CONFIG_SOFTMMU #ifdef CONFIG_SOFTMMU
{ {
TCGv_i32 oi = tcg_const_i32(make_memop_idx(memop & ~MO_SIGN, idx)); TCGv_i32 oi = tcg_const_i32(make_memop_idx(memop & ~MO_SIGN, idx));
gen(ret, tcg_ctx.tcg_env, addr, val, oi); gen(ret, cpu_env, addr, val, oi);
tcg_temp_free_i32(oi); tcg_temp_free_i32(oi);
} }
#else #else
gen(ret, tcg_ctx.tcg_env, addr, val); gen(ret, cpu_env, addr, val);
#endif #endif
#else #else
gen_helper_exit_atomic(tcg_ctx.tcg_env); gen_helper_exit_atomic(cpu_env);
/* Produce a result, so that we have a well-formed opcode stream /* Produce a result, so that we have a well-formed opcode stream
with respect to uses of the result in the (dead) code following. */ with respect to uses of the result in the (dead) code following. */
tcg_gen_movi_i64(ret, 0); tcg_gen_movi_i64(ret, 0);
@ -3031,7 +3001,7 @@ static void * const table_##NAME[16] = { \
void tcg_gen_atomic_##NAME##_i32 \ void tcg_gen_atomic_##NAME##_i32 \
(TCGv_i32 ret, TCGv addr, TCGv_i32 val, TCGArg idx, TCGMemOp memop) \ (TCGv_i32 ret, TCGv addr, TCGv_i32 val, TCGArg idx, TCGMemOp memop) \
{ \ { \
if (parallel_cpus) { \ if (tcg_ctx->tb_cflags & CF_PARALLEL) { \
do_atomic_op_i32(ret, addr, val, idx, memop, table_##NAME); \ do_atomic_op_i32(ret, addr, val, idx, memop, table_##NAME); \
} else { \ } else { \
do_nonatomic_op_i32(ret, addr, val, idx, memop, NEW, \ do_nonatomic_op_i32(ret, addr, val, idx, memop, NEW, \
@ -3041,7 +3011,7 @@ void tcg_gen_atomic_##NAME##_i32 \
void tcg_gen_atomic_##NAME##_i64 \ void tcg_gen_atomic_##NAME##_i64 \
(TCGv_i64 ret, TCGv addr, TCGv_i64 val, TCGArg idx, TCGMemOp memop) \ (TCGv_i64 ret, TCGv addr, TCGv_i64 val, TCGArg idx, TCGMemOp memop) \
{ \ { \
if (parallel_cpus) { \ if (tcg_ctx->tb_cflags & CF_PARALLEL) { \
do_atomic_op_i64(ret, addr, val, idx, memop, table_##NAME); \ do_atomic_op_i64(ret, addr, val, idx, memop, table_##NAME); \
} else { \ } else { \
do_nonatomic_op_i64(ret, addr, val, idx, memop, NEW, \ do_nonatomic_op_i64(ret, addr, val, idx, memop, NEW, \


@ -28,224 +28,219 @@
/* Basic output routines. Not for general consumption. */ /* Basic output routines. Not for general consumption. */
void tcg_gen_op1(TCGContext *, TCGOpcode, TCGArg); void tcg_gen_op1(TCGOpcode, TCGArg);
void tcg_gen_op2(TCGContext *, TCGOpcode, TCGArg, TCGArg); void tcg_gen_op2(TCGOpcode, TCGArg, TCGArg);
void tcg_gen_op3(TCGContext *, TCGOpcode, TCGArg, TCGArg, TCGArg); void tcg_gen_op3(TCGOpcode, TCGArg, TCGArg, TCGArg);
void tcg_gen_op4(TCGContext *, TCGOpcode, TCGArg, TCGArg, TCGArg, TCGArg); void tcg_gen_op4(TCGOpcode, TCGArg, TCGArg, TCGArg, TCGArg);
void tcg_gen_op5(TCGContext *, TCGOpcode, TCGArg, TCGArg, TCGArg, void tcg_gen_op5(TCGOpcode, TCGArg, TCGArg, TCGArg, TCGArg, TCGArg);
TCGArg, TCGArg); void tcg_gen_op6(TCGOpcode, TCGArg, TCGArg, TCGArg, TCGArg, TCGArg, TCGArg);
void tcg_gen_op6(TCGContext *, TCGOpcode, TCGArg, TCGArg, TCGArg,
TCGArg, TCGArg, TCGArg);
static inline void tcg_gen_op1_i32(TCGOpcode opc, TCGv_i32 a1) static inline void tcg_gen_op1_i32(TCGOpcode opc, TCGv_i32 a1)
{ {
tcg_gen_op1(&tcg_ctx, opc, GET_TCGV_I32(a1)); tcg_gen_op1(opc, tcgv_i32_arg(a1));
} }
static inline void tcg_gen_op1_i64(TCGOpcode opc, TCGv_i64 a1) static inline void tcg_gen_op1_i64(TCGOpcode opc, TCGv_i64 a1)
{ {
tcg_gen_op1(&tcg_ctx, opc, GET_TCGV_I64(a1)); tcg_gen_op1(opc, tcgv_i64_arg(a1));
} }
static inline void tcg_gen_op1i(TCGOpcode opc, TCGArg a1) static inline void tcg_gen_op1i(TCGOpcode opc, TCGArg a1)
{ {
tcg_gen_op1(&tcg_ctx, opc, a1); tcg_gen_op1(opc, a1);
} }
static inline void tcg_gen_op2_i32(TCGOpcode opc, TCGv_i32 a1, TCGv_i32 a2) static inline void tcg_gen_op2_i32(TCGOpcode opc, TCGv_i32 a1, TCGv_i32 a2)
{ {
tcg_gen_op2(&tcg_ctx, opc, GET_TCGV_I32(a1), GET_TCGV_I32(a2)); tcg_gen_op2(opc, tcgv_i32_arg(a1), tcgv_i32_arg(a2));
} }
static inline void tcg_gen_op2_i64(TCGOpcode opc, TCGv_i64 a1, TCGv_i64 a2) static inline void tcg_gen_op2_i64(TCGOpcode opc, TCGv_i64 a1, TCGv_i64 a2)
{ {
tcg_gen_op2(&tcg_ctx, opc, GET_TCGV_I64(a1), GET_TCGV_I64(a2)); tcg_gen_op2(opc, tcgv_i64_arg(a1), tcgv_i64_arg(a2));
} }
static inline void tcg_gen_op2i_i32(TCGOpcode opc, TCGv_i32 a1, TCGArg a2) static inline void tcg_gen_op2i_i32(TCGOpcode opc, TCGv_i32 a1, TCGArg a2)
{ {
tcg_gen_op2(&tcg_ctx, opc, GET_TCGV_I32(a1), a2); tcg_gen_op2(opc, tcgv_i32_arg(a1), a2);
} }
static inline void tcg_gen_op2i_i64(TCGOpcode opc, TCGv_i64 a1, TCGArg a2) static inline void tcg_gen_op2i_i64(TCGOpcode opc, TCGv_i64 a1, TCGArg a2)
{ {
tcg_gen_op2(&tcg_ctx, opc, GET_TCGV_I64(a1), a2); tcg_gen_op2(opc, tcgv_i64_arg(a1), a2);
} }
static inline void tcg_gen_op2ii(TCGOpcode opc, TCGArg a1, TCGArg a2) static inline void tcg_gen_op2ii(TCGOpcode opc, TCGArg a1, TCGArg a2)
{ {
tcg_gen_op2(&tcg_ctx, opc, a1, a2); tcg_gen_op2(opc, a1, a2);
} }
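
Every wrapper in this header sheds its explicit TCGContext * parameter: with per-vCPU translation contexts, the active context is reached through a pointer rather than the old global struct. The declaration-side change, sketched (the __thread qualifier follows the multi-context softmmu commit; treat it as an assumption):

    /* Before: extern TCGContext tcg_ctx;       one global struct      */
    extern __thread TCGContext *tcg_ctx;     /* after: per-thread pointer */
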
static inline void tcg_gen_op3_i32(TCGOpcode opc, TCGv_i32 a1, static inline void tcg_gen_op3_i32(TCGOpcode opc, TCGv_i32 a1,
TCGv_i32 a2, TCGv_i32 a3) TCGv_i32 a2, TCGv_i32 a3)
{ {
tcg_gen_op3(&tcg_ctx, opc, GET_TCGV_I32(a1), tcg_gen_op3(opc, tcgv_i32_arg(a1), tcgv_i32_arg(a2), tcgv_i32_arg(a3));
GET_TCGV_I32(a2), GET_TCGV_I32(a3));
} }
static inline void tcg_gen_op3_i64(TCGOpcode opc, TCGv_i64 a1, static inline void tcg_gen_op3_i64(TCGOpcode opc, TCGv_i64 a1,
TCGv_i64 a2, TCGv_i64 a3) TCGv_i64 a2, TCGv_i64 a3)
{ {
tcg_gen_op3(&tcg_ctx, opc, GET_TCGV_I64(a1), tcg_gen_op3(opc, tcgv_i64_arg(a1), tcgv_i64_arg(a2), tcgv_i64_arg(a3));
GET_TCGV_I64(a2), GET_TCGV_I64(a3));
} }
static inline void tcg_gen_op3i_i32(TCGOpcode opc, TCGv_i32 a1, static inline void tcg_gen_op3i_i32(TCGOpcode opc, TCGv_i32 a1,
TCGv_i32 a2, TCGArg a3) TCGv_i32 a2, TCGArg a3)
{ {
tcg_gen_op3(&tcg_ctx, opc, GET_TCGV_I32(a1), GET_TCGV_I32(a2), a3); tcg_gen_op3(opc, tcgv_i32_arg(a1), tcgv_i32_arg(a2), a3);
} }
static inline void tcg_gen_op3i_i64(TCGOpcode opc, TCGv_i64 a1, static inline void tcg_gen_op3i_i64(TCGOpcode opc, TCGv_i64 a1,
TCGv_i64 a2, TCGArg a3) TCGv_i64 a2, TCGArg a3)
{ {
tcg_gen_op3(&tcg_ctx, opc, GET_TCGV_I64(a1), GET_TCGV_I64(a2), a3); tcg_gen_op3(opc, tcgv_i64_arg(a1), tcgv_i64_arg(a2), a3);
} }
static inline void tcg_gen_ldst_op_i32(TCGOpcode opc, TCGv_i32 val, static inline void tcg_gen_ldst_op_i32(TCGOpcode opc, TCGv_i32 val,
TCGv_ptr base, TCGArg offset) TCGv_ptr base, TCGArg offset)
{ {
tcg_gen_op3(&tcg_ctx, opc, GET_TCGV_I32(val), GET_TCGV_PTR(base), offset); tcg_gen_op3(opc, tcgv_i32_arg(val), tcgv_ptr_arg(base), offset);
} }
static inline void tcg_gen_ldst_op_i64(TCGOpcode opc, TCGv_i64 val, static inline void tcg_gen_ldst_op_i64(TCGOpcode opc, TCGv_i64 val,
TCGv_ptr base, TCGArg offset) TCGv_ptr base, TCGArg offset)
{ {
tcg_gen_op3(&tcg_ctx, opc, GET_TCGV_I64(val), GET_TCGV_PTR(base), offset); tcg_gen_op3(opc, tcgv_i64_arg(val), tcgv_ptr_arg(base), offset);
} }
static inline void tcg_gen_op4_i32(TCGOpcode opc, TCGv_i32 a1, TCGv_i32 a2, static inline void tcg_gen_op4_i32(TCGOpcode opc, TCGv_i32 a1, TCGv_i32 a2,
TCGv_i32 a3, TCGv_i32 a4) TCGv_i32 a3, TCGv_i32 a4)
{ {
tcg_gen_op4(&tcg_ctx, opc, GET_TCGV_I32(a1), GET_TCGV_I32(a2), tcg_gen_op4(opc, tcgv_i32_arg(a1), tcgv_i32_arg(a2),
GET_TCGV_I32(a3), GET_TCGV_I32(a4)); tcgv_i32_arg(a3), tcgv_i32_arg(a4));
} }
static inline void tcg_gen_op4_i64(TCGOpcode opc, TCGv_i64 a1, TCGv_i64 a2, static inline void tcg_gen_op4_i64(TCGOpcode opc, TCGv_i64 a1, TCGv_i64 a2,
TCGv_i64 a3, TCGv_i64 a4) TCGv_i64 a3, TCGv_i64 a4)
{ {
tcg_gen_op4(&tcg_ctx, opc, GET_TCGV_I64(a1), GET_TCGV_I64(a2), tcg_gen_op4(opc, tcgv_i64_arg(a1), tcgv_i64_arg(a2),
GET_TCGV_I64(a3), GET_TCGV_I64(a4)); tcgv_i64_arg(a3), tcgv_i64_arg(a4));
} }
static inline void tcg_gen_op4i_i32(TCGOpcode opc, TCGv_i32 a1, TCGv_i32 a2, static inline void tcg_gen_op4i_i32(TCGOpcode opc, TCGv_i32 a1, TCGv_i32 a2,
TCGv_i32 a3, TCGArg a4) TCGv_i32 a3, TCGArg a4)
{ {
tcg_gen_op4(&tcg_ctx, opc, GET_TCGV_I32(a1), GET_TCGV_I32(a2), tcg_gen_op4(opc, tcgv_i32_arg(a1), tcgv_i32_arg(a2),
GET_TCGV_I32(a3), a4); tcgv_i32_arg(a3), a4);
} }
static inline void tcg_gen_op4i_i64(TCGOpcode opc, TCGv_i64 a1, TCGv_i64 a2, static inline void tcg_gen_op4i_i64(TCGOpcode opc, TCGv_i64 a1, TCGv_i64 a2,
TCGv_i64 a3, TCGArg a4) TCGv_i64 a3, TCGArg a4)
{ {
tcg_gen_op4(&tcg_ctx, opc, GET_TCGV_I64(a1), GET_TCGV_I64(a2), tcg_gen_op4(opc, tcgv_i64_arg(a1), tcgv_i64_arg(a2),
GET_TCGV_I64(a3), a4); tcgv_i64_arg(a3), a4);
} }
static inline void tcg_gen_op4ii_i32(TCGOpcode opc, TCGv_i32 a1, TCGv_i32 a2, static inline void tcg_gen_op4ii_i32(TCGOpcode opc, TCGv_i32 a1, TCGv_i32 a2,
TCGArg a3, TCGArg a4) TCGArg a3, TCGArg a4)
{ {
tcg_gen_op4(&tcg_ctx, opc, GET_TCGV_I32(a1), GET_TCGV_I32(a2), a3, a4); tcg_gen_op4(opc, tcgv_i32_arg(a1), tcgv_i32_arg(a2), a3, a4);
} }
static inline void tcg_gen_op4ii_i64(TCGOpcode opc, TCGv_i64 a1, TCGv_i64 a2, static inline void tcg_gen_op4ii_i64(TCGOpcode opc, TCGv_i64 a1, TCGv_i64 a2,
TCGArg a3, TCGArg a4) TCGArg a3, TCGArg a4)
{ {
tcg_gen_op4(&tcg_ctx, opc, GET_TCGV_I64(a1), GET_TCGV_I64(a2), a3, a4); tcg_gen_op4(opc, tcgv_i64_arg(a1), tcgv_i64_arg(a2), a3, a4);
} }
static inline void tcg_gen_op5_i32(TCGOpcode opc, TCGv_i32 a1, TCGv_i32 a2, static inline void tcg_gen_op5_i32(TCGOpcode opc, TCGv_i32 a1, TCGv_i32 a2,
TCGv_i32 a3, TCGv_i32 a4, TCGv_i32 a5) TCGv_i32 a3, TCGv_i32 a4, TCGv_i32 a5)
{ {
tcg_gen_op5(&tcg_ctx, opc, GET_TCGV_I32(a1), GET_TCGV_I32(a2), tcg_gen_op5(opc, tcgv_i32_arg(a1), tcgv_i32_arg(a2),
GET_TCGV_I32(a3), GET_TCGV_I32(a4), GET_TCGV_I32(a5)); tcgv_i32_arg(a3), tcgv_i32_arg(a4), tcgv_i32_arg(a5));
} }
static inline void tcg_gen_op5_i64(TCGOpcode opc, TCGv_i64 a1, TCGv_i64 a2, static inline void tcg_gen_op5_i64(TCGOpcode opc, TCGv_i64 a1, TCGv_i64 a2,
TCGv_i64 a3, TCGv_i64 a4, TCGv_i64 a5) TCGv_i64 a3, TCGv_i64 a4, TCGv_i64 a5)
{ {
tcg_gen_op5(&tcg_ctx, opc, GET_TCGV_I64(a1), GET_TCGV_I64(a2), tcg_gen_op5(opc, tcgv_i64_arg(a1), tcgv_i64_arg(a2),
GET_TCGV_I64(a3), GET_TCGV_I64(a4), GET_TCGV_I64(a5)); tcgv_i64_arg(a3), tcgv_i64_arg(a4), tcgv_i64_arg(a5));
} }
static inline void tcg_gen_op5i_i32(TCGOpcode opc, TCGv_i32 a1, TCGv_i32 a2, static inline void tcg_gen_op5i_i32(TCGOpcode opc, TCGv_i32 a1, TCGv_i32 a2,
TCGv_i32 a3, TCGv_i32 a4, TCGArg a5) TCGv_i32 a3, TCGv_i32 a4, TCGArg a5)
{ {
tcg_gen_op5(&tcg_ctx, opc, GET_TCGV_I32(a1), GET_TCGV_I32(a2), tcg_gen_op5(opc, tcgv_i32_arg(a1), tcgv_i32_arg(a2),
GET_TCGV_I32(a3), GET_TCGV_I32(a4), a5); tcgv_i32_arg(a3), tcgv_i32_arg(a4), a5);
} }
static inline void tcg_gen_op5i_i64(TCGOpcode opc, TCGv_i64 a1, TCGv_i64 a2, static inline void tcg_gen_op5i_i64(TCGOpcode opc, TCGv_i64 a1, TCGv_i64 a2,
TCGv_i64 a3, TCGv_i64 a4, TCGArg a5) TCGv_i64 a3, TCGv_i64 a4, TCGArg a5)
{ {
tcg_gen_op5(&tcg_ctx, opc, GET_TCGV_I64(a1), GET_TCGV_I64(a2), tcg_gen_op5(opc, tcgv_i64_arg(a1), tcgv_i64_arg(a2),
GET_TCGV_I64(a3), GET_TCGV_I64(a4), a5); tcgv_i64_arg(a3), tcgv_i64_arg(a4), a5);
} }
static inline void tcg_gen_op5ii_i32(TCGOpcode opc, TCGv_i32 a1, TCGv_i32 a2, static inline void tcg_gen_op5ii_i32(TCGOpcode opc, TCGv_i32 a1, TCGv_i32 a2,
TCGv_i32 a3, TCGArg a4, TCGArg a5) TCGv_i32 a3, TCGArg a4, TCGArg a5)
{ {
tcg_gen_op5(&tcg_ctx, opc, GET_TCGV_I32(a1), GET_TCGV_I32(a2), tcg_gen_op5(opc, tcgv_i32_arg(a1), tcgv_i32_arg(a2),
GET_TCGV_I32(a3), a4, a5); tcgv_i32_arg(a3), a4, a5);
} }
static inline void tcg_gen_op5ii_i64(TCGOpcode opc, TCGv_i64 a1, TCGv_i64 a2, static inline void tcg_gen_op5ii_i64(TCGOpcode opc, TCGv_i64 a1, TCGv_i64 a2,
TCGv_i64 a3, TCGArg a4, TCGArg a5) TCGv_i64 a3, TCGArg a4, TCGArg a5)
{ {
tcg_gen_op5(&tcg_ctx, opc, GET_TCGV_I64(a1), GET_TCGV_I64(a2), tcg_gen_op5(opc, tcgv_i64_arg(a1), tcgv_i64_arg(a2),
GET_TCGV_I64(a3), a4, a5); tcgv_i64_arg(a3), a4, a5);
} }
static inline void tcg_gen_op6_i32(TCGOpcode opc, TCGv_i32 a1, TCGv_i32 a2, static inline void tcg_gen_op6_i32(TCGOpcode opc, TCGv_i32 a1, TCGv_i32 a2,
TCGv_i32 a3, TCGv_i32 a4, TCGv_i32 a3, TCGv_i32 a4,
TCGv_i32 a5, TCGv_i32 a6) TCGv_i32 a5, TCGv_i32 a6)
{ {
tcg_gen_op6(&tcg_ctx, opc, GET_TCGV_I32(a1), GET_TCGV_I32(a2), tcg_gen_op6(opc, tcgv_i32_arg(a1), tcgv_i32_arg(a2),
GET_TCGV_I32(a3), GET_TCGV_I32(a4), GET_TCGV_I32(a5), tcgv_i32_arg(a3), tcgv_i32_arg(a4), tcgv_i32_arg(a5),
GET_TCGV_I32(a6)); tcgv_i32_arg(a6));
} }
static inline void tcg_gen_op6_i64(TCGOpcode opc, TCGv_i64 a1, TCGv_i64 a2, static inline void tcg_gen_op6_i64(TCGOpcode opc, TCGv_i64 a1, TCGv_i64 a2,
TCGv_i64 a3, TCGv_i64 a4, TCGv_i64 a3, TCGv_i64 a4,
TCGv_i64 a5, TCGv_i64 a6) TCGv_i64 a5, TCGv_i64 a6)
{ {
tcg_gen_op6(&tcg_ctx, opc, GET_TCGV_I64(a1), GET_TCGV_I64(a2), tcg_gen_op6(opc, tcgv_i64_arg(a1), tcgv_i64_arg(a2),
GET_TCGV_I64(a3), GET_TCGV_I64(a4), GET_TCGV_I64(a5), tcgv_i64_arg(a3), tcgv_i64_arg(a4), tcgv_i64_arg(a5),
GET_TCGV_I64(a6)); tcgv_i64_arg(a6));
} }
static inline void tcg_gen_op6i_i32(TCGOpcode opc, TCGv_i32 a1, TCGv_i32 a2, static inline void tcg_gen_op6i_i32(TCGOpcode opc, TCGv_i32 a1, TCGv_i32 a2,
TCGv_i32 a3, TCGv_i32 a4, TCGv_i32 a3, TCGv_i32 a4,
TCGv_i32 a5, TCGArg a6) TCGv_i32 a5, TCGArg a6)
{ {
tcg_gen_op6(&tcg_ctx, opc, GET_TCGV_I32(a1), GET_TCGV_I32(a2), tcg_gen_op6(opc, tcgv_i32_arg(a1), tcgv_i32_arg(a2),
GET_TCGV_I32(a3), GET_TCGV_I32(a4), GET_TCGV_I32(a5), a6); tcgv_i32_arg(a3), tcgv_i32_arg(a4), tcgv_i32_arg(a5), a6);
} }
static inline void tcg_gen_op6i_i64(TCGOpcode opc, TCGv_i64 a1, TCGv_i64 a2, static inline void tcg_gen_op6i_i64(TCGOpcode opc, TCGv_i64 a1, TCGv_i64 a2,
TCGv_i64 a3, TCGv_i64 a4, TCGv_i64 a3, TCGv_i64 a4,
TCGv_i64 a5, TCGArg a6) TCGv_i64 a5, TCGArg a6)
{ {
tcg_gen_op6(&tcg_ctx, opc, GET_TCGV_I64(a1), GET_TCGV_I64(a2), tcg_gen_op6(opc, tcgv_i64_arg(a1), tcgv_i64_arg(a2),
GET_TCGV_I64(a3), GET_TCGV_I64(a4), GET_TCGV_I64(a5), a6); tcgv_i64_arg(a3), tcgv_i64_arg(a4), tcgv_i64_arg(a5), a6);
} }
static inline void tcg_gen_op6ii_i32(TCGOpcode opc, TCGv_i32 a1, TCGv_i32 a2, static inline void tcg_gen_op6ii_i32(TCGOpcode opc, TCGv_i32 a1, TCGv_i32 a2,
TCGv_i32 a3, TCGv_i32 a4, TCGv_i32 a3, TCGv_i32 a4,
TCGArg a5, TCGArg a6) TCGArg a5, TCGArg a6)
{ {
tcg_gen_op6(&tcg_ctx, opc, GET_TCGV_I32(a1), GET_TCGV_I32(a2), tcg_gen_op6(opc, tcgv_i32_arg(a1), tcgv_i32_arg(a2),
GET_TCGV_I32(a3), GET_TCGV_I32(a4), a5, a6); tcgv_i32_arg(a3), tcgv_i32_arg(a4), a5, a6);
} }
static inline void tcg_gen_op6ii_i64(TCGOpcode opc, TCGv_i64 a1, TCGv_i64 a2, static inline void tcg_gen_op6ii_i64(TCGOpcode opc, TCGv_i64 a1, TCGv_i64 a2,
TCGv_i64 a3, TCGv_i64 a4, TCGv_i64 a3, TCGv_i64 a4,
TCGArg a5, TCGArg a6) TCGArg a5, TCGArg a6)
{ {
tcg_gen_op6(&tcg_ctx, opc, GET_TCGV_I64(a1), GET_TCGV_I64(a2), tcg_gen_op6(opc, tcgv_i64_arg(a1), tcgv_i64_arg(a2),
GET_TCGV_I64(a3), GET_TCGV_I64(a4), a5, a6); tcgv_i64_arg(a3), tcgv_i64_arg(a4), a5, a6);
} }
@ -253,12 +248,12 @@ static inline void tcg_gen_op6ii_i64(TCGOpcode opc, TCGv_i64 a1, TCGv_i64 a2,
static inline void gen_set_label(TCGLabel *l) static inline void gen_set_label(TCGLabel *l)
{ {
tcg_gen_op1(&tcg_ctx, INDEX_op_set_label, label_arg(l)); tcg_gen_op1(INDEX_op_set_label, label_arg(l));
} }
static inline void tcg_gen_br(TCGLabel *l) static inline void tcg_gen_br(TCGLabel *l)
{ {
tcg_gen_op1(&tcg_ctx, INDEX_op_br, label_arg(l)); tcg_gen_op1(INDEX_op_br, label_arg(l));
} }
void tcg_gen_mb(TCGBar); void tcg_gen_mb(TCGBar);
@ -333,7 +328,7 @@ static inline void tcg_gen_discard_i32(TCGv_i32 arg)
static inline void tcg_gen_mov_i32(TCGv_i32 ret, TCGv_i32 arg) static inline void tcg_gen_mov_i32(TCGv_i32 ret, TCGv_i32 arg)
{ {
if (!TCGV_EQUAL_I32(ret, arg)) { if (ret != arg) {
tcg_gen_op2_i32(INDEX_op_mov_i32, ret, arg); tcg_gen_op2_i32(INDEX_op_mov_i32, ret, arg);
} }
} }
@ -527,7 +522,7 @@ static inline void tcg_gen_discard_i64(TCGv_i64 arg)
static inline void tcg_gen_mov_i64(TCGv_i64 ret, TCGv_i64 arg) static inline void tcg_gen_mov_i64(TCGv_i64 ret, TCGv_i64 arg)
{ {
if (!TCGV_EQUAL_I64(ret, arg)) { if (ret != arg) {
tcg_gen_op2_i64(INDEX_op_mov_i64, ret, arg); tcg_gen_op2_i64(INDEX_op_mov_i64, ret, arg);
} }
} }
@ -732,25 +727,24 @@ static inline void tcg_gen_concat32_i64(TCGv_i64 ret, TCGv_i64 lo, TCGv_i64 hi)
# if TARGET_LONG_BITS <= TCG_TARGET_REG_BITS # if TARGET_LONG_BITS <= TCG_TARGET_REG_BITS
static inline void tcg_gen_insn_start(target_ulong pc) static inline void tcg_gen_insn_start(target_ulong pc)
{ {
tcg_gen_op1(&tcg_ctx, INDEX_op_insn_start, pc); tcg_gen_op1(INDEX_op_insn_start, pc);
} }
# else # else
static inline void tcg_gen_insn_start(target_ulong pc) static inline void tcg_gen_insn_start(target_ulong pc)
{ {
tcg_gen_op2(&tcg_ctx, INDEX_op_insn_start, tcg_gen_op2(INDEX_op_insn_start, (uint32_t)pc, (uint32_t)(pc >> 32));
(uint32_t)pc, (uint32_t)(pc >> 32));
} }
# endif # endif
#elif TARGET_INSN_START_WORDS == 2 #elif TARGET_INSN_START_WORDS == 2
# if TARGET_LONG_BITS <= TCG_TARGET_REG_BITS # if TARGET_LONG_BITS <= TCG_TARGET_REG_BITS
static inline void tcg_gen_insn_start(target_ulong pc, target_ulong a1) static inline void tcg_gen_insn_start(target_ulong pc, target_ulong a1)
{ {
tcg_gen_op2(&tcg_ctx, INDEX_op_insn_start, pc, a1); tcg_gen_op2(INDEX_op_insn_start, pc, a1);
} }
# else # else
static inline void tcg_gen_insn_start(target_ulong pc, target_ulong a1) static inline void tcg_gen_insn_start(target_ulong pc, target_ulong a1)
{ {
tcg_gen_op4(&tcg_ctx, INDEX_op_insn_start, tcg_gen_op4(INDEX_op_insn_start,
(uint32_t)pc, (uint32_t)(pc >> 32), (uint32_t)pc, (uint32_t)(pc >> 32),
(uint32_t)a1, (uint32_t)(a1 >> 32)); (uint32_t)a1, (uint32_t)(a1 >> 32));
} }
@ -760,13 +754,13 @@ static inline void tcg_gen_insn_start(target_ulong pc, target_ulong a1)
static inline void tcg_gen_insn_start(target_ulong pc, target_ulong a1, static inline void tcg_gen_insn_start(target_ulong pc, target_ulong a1,
target_ulong a2) target_ulong a2)
{ {
tcg_gen_op3(&tcg_ctx, INDEX_op_insn_start, pc, a1, a2); tcg_gen_op3(INDEX_op_insn_start, pc, a1, a2);
} }
# else # else
static inline void tcg_gen_insn_start(target_ulong pc, target_ulong a1, static inline void tcg_gen_insn_start(target_ulong pc, target_ulong a1,
target_ulong a2) target_ulong a2)
{ {
tcg_gen_op6(&tcg_ctx, INDEX_op_insn_start, tcg_gen_op6(INDEX_op_insn_start,
(uint32_t)pc, (uint32_t)(pc >> 32), (uint32_t)pc, (uint32_t)(pc >> 32),
(uint32_t)a1, (uint32_t)(a1 >> 32), (uint32_t)a1, (uint32_t)(a1 >> 32),
(uint32_t)a2, (uint32_t)(a2 >> 32)); (uint32_t)a2, (uint32_t)(a2 >> 32));
@ -815,7 +809,6 @@ void tcg_gen_lookup_and_goto_ptr(void);
#define tcg_temp_free tcg_temp_free_i32 #define tcg_temp_free tcg_temp_free_i32
#define TCGV_UNUSED(x) TCGV_UNUSED_I32(x) #define TCGV_UNUSED(x) TCGV_UNUSED_I32(x)
#define TCGV_IS_UNUSED(x) TCGV_IS_UNUSED_I32(x) #define TCGV_IS_UNUSED(x) TCGV_IS_UNUSED_I32(x)
#define TCGV_EQUAL(a, b) TCGV_EQUAL_I32(a, b)
#define tcg_gen_qemu_ld_tl tcg_gen_qemu_ld_i32 #define tcg_gen_qemu_ld_tl tcg_gen_qemu_ld_i32
#define tcg_gen_qemu_st_tl tcg_gen_qemu_st_i32 #define tcg_gen_qemu_st_tl tcg_gen_qemu_st_i32
#else #else
@ -826,7 +819,6 @@ void tcg_gen_lookup_and_goto_ptr(void);
#define tcg_temp_free tcg_temp_free_i64 #define tcg_temp_free tcg_temp_free_i64
#define TCGV_UNUSED(x) TCGV_UNUSED_I64(x) #define TCGV_UNUSED(x) TCGV_UNUSED_I64(x)
#define TCGV_IS_UNUSED(x) TCGV_IS_UNUSED_I64(x) #define TCGV_IS_UNUSED(x) TCGV_IS_UNUSED_I64(x)
#define TCGV_EQUAL(a, b) TCGV_EQUAL_I64(a, b)
#define tcg_gen_qemu_ld_tl tcg_gen_qemu_ld_i64 #define tcg_gen_qemu_ld_tl tcg_gen_qemu_ld_i64
#define tcg_gen_qemu_st_tl tcg_gen_qemu_st_i64 #define tcg_gen_qemu_st_tl tcg_gen_qemu_st_i64
#endif #endif

tcg/tcg.c (1116 changed lines): diff suppressed because it is too large.

tcg/tcg.h (310 changed lines):
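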

@ -51,8 +51,6 @@
#define OPC_BUF_SIZE 640 #define OPC_BUF_SIZE 640
#define OPC_MAX_SIZE (OPC_BUF_SIZE - MAX_OP_PER_INSTR) #define OPC_MAX_SIZE (OPC_BUF_SIZE - MAX_OP_PER_INSTR)
#define OPPARAM_BUF_SIZE (OPC_BUF_SIZE * MAX_OPC_PARAM)
#define CPU_TEMP_BUF_NLONGS 128 #define CPU_TEMP_BUF_NLONGS 128
/* Default target word size to pointer size. */ /* Default target word size to pointer size. */
@ -416,10 +414,7 @@ typedef tcg_target_ulong TCGArg;
integers, but keeping them in pointer types like this means that the integers, but keeping them in pointer types like this means that the
compiler will complain if you accidentally pass a TCGv_i32 to a compiler will complain if you accidentally pass a TCGv_i32 to a
function which takes a TCGv_i64, and so on. Only the internals of function which takes a TCGv_i64, and so on. Only the internals of
TCG need to care about the actual contents of the types, and they always TCG need to care about the actual contents of the types. */
box and unbox via the MAKE_TCGV_* and GET_TCGV_* functions.
Converting to and from intptr_t rather than int reduces the number
of sign-extension instructions that get implied on 64-bit hosts. */
typedef struct TCGv_i32_d *TCGv_i32; typedef struct TCGv_i32_d *TCGv_i32;
typedef struct TCGv_i64_d *TCGv_i64; typedef struct TCGv_i64_d *TCGv_i64;
@ -433,53 +428,14 @@ typedef TCGv_ptr TCGv_env;
#error Unhandled TARGET_LONG_BITS value #error Unhandled TARGET_LONG_BITS value
#endif #endif
static inline TCGv_i32 QEMU_ARTIFICIAL MAKE_TCGV_I32(intptr_t i) /* See the comment before tcgv_i32_temp. */
{ #define TCGV_UNUSED_I32(x) (x = (TCGv_i32)NULL)
return (TCGv_i32)i; #define TCGV_UNUSED_I64(x) (x = (TCGv_i64)NULL)
} #define TCGV_UNUSED_PTR(x) (x = (TCGv_ptr)NULL)
static inline TCGv_i64 QEMU_ARTIFICIAL MAKE_TCGV_I64(intptr_t i) #define TCGV_IS_UNUSED_I32(x) ((x) == (TCGv_i32)NULL)
{ #define TCGV_IS_UNUSED_I64(x) ((x) == (TCGv_i64)NULL)
return (TCGv_i64)i; #define TCGV_IS_UNUSED_PTR(x) ((x) == (TCGv_ptr)NULL)
}
static inline TCGv_ptr QEMU_ARTIFICIAL MAKE_TCGV_PTR(intptr_t i)
{
return (TCGv_ptr)i;
}
static inline intptr_t QEMU_ARTIFICIAL GET_TCGV_I32(TCGv_i32 t)
{
return (intptr_t)t;
}
static inline intptr_t QEMU_ARTIFICIAL GET_TCGV_I64(TCGv_i64 t)
{
return (intptr_t)t;
}
static inline intptr_t QEMU_ARTIFICIAL GET_TCGV_PTR(TCGv_ptr t)
{
return (intptr_t)t;
}
#if TCG_TARGET_REG_BITS == 32
#define TCGV_LOW(t) MAKE_TCGV_I32(GET_TCGV_I64(t))
#define TCGV_HIGH(t) MAKE_TCGV_I32(GET_TCGV_I64(t) + 1)
#endif
#define TCGV_EQUAL_I32(a, b) (GET_TCGV_I32(a) == GET_TCGV_I32(b))
#define TCGV_EQUAL_I64(a, b) (GET_TCGV_I64(a) == GET_TCGV_I64(b))
#define TCGV_EQUAL_PTR(a, b) (GET_TCGV_PTR(a) == GET_TCGV_PTR(b))
/* Dummy definition to avoid compiler warnings. */
#define TCGV_UNUSED_I32(x) x = MAKE_TCGV_I32(-1)
#define TCGV_UNUSED_I64(x) x = MAKE_TCGV_I64(-1)
#define TCGV_UNUSED_PTR(x) x = MAKE_TCGV_PTR(-1)
#define TCGV_IS_UNUSED_I32(x) (GET_TCGV_I32(x) == -1)
#define TCGV_IS_UNUSED_I64(x) (GET_TCGV_I64(x) == -1)
#define TCGV_IS_UNUSED_PTR(x) (GET_TCGV_PTR(x) == -1)
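
With the integer box/unbox helpers gone, a value handle is converted to an opcode argument by recovering its backing temporary instead of by casting. The shape of the replacement, using accessor names this series introduces (bodies are an assumption; see the tcg.h comment before tcgv_i32_temp that the new macros reference):

    static inline TCGArg tcgv_i32_arg(TCGv_i32 v)
    {
        /* Find the TCGTemp behind the handle, then use its index in
           the context's temp array as the opcode argument. */
        return temp_arg(tcgv_i32_temp(v));
    }
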
/* call flags */ /* call flags */
/* Helper does not read globals (either directly or through an exception). It /* Helper does not read globals (either directly or through an exception). It
@ -497,9 +453,8 @@ static inline intptr_t QEMU_ARTIFICIAL GET_TCGV_PTR(TCGv_ptr t)
#define TCG_CALL_NO_RWG_SE (TCG_CALL_NO_RWG | TCG_CALL_NO_SE) #define TCG_CALL_NO_RWG_SE (TCG_CALL_NO_RWG | TCG_CALL_NO_SE)
#define TCG_CALL_NO_WG_SE (TCG_CALL_NO_WG | TCG_CALL_NO_SE) #define TCG_CALL_NO_WG_SE (TCG_CALL_NO_WG | TCG_CALL_NO_SE)
/* used to align parameters */ /* Used to align parameters. See the comment before tcgv_i32_temp. */
#define TCG_CALL_DUMMY_TCGV MAKE_TCGV_I32(-1) #define TCG_CALL_DUMMY_ARG ((TCGArg)0)
#define TCG_CALL_DUMMY_ARG ((TCGArg)(-1))
 
 /* Conditions.  Note that these are laid out for easy manipulation by
    the functions below:
@@ -581,15 +536,25 @@ typedef struct TCGTemp {
     unsigned int indirect_base:1;
     unsigned int mem_coherent:1;
     unsigned int mem_allocated:1;
-    unsigned int temp_local:1; /* If true, the temp is saved across
-                                  basic blocks. Otherwise, it is not
-                                  preserved across basic blocks. */
-    unsigned int temp_allocated:1; /* never used for code gen */
+    /* If true, the temp is saved across both basic blocks and
+       translation blocks.  */
+    unsigned int temp_global:1;
+    /* If true, the temp is saved across basic blocks but dead
+       at the end of translation blocks.  If false, the temp is
+       dead at the end of basic blocks.  */
+    unsigned int temp_local:1;
+    unsigned int temp_allocated:1;
 
     tcg_target_long val;
     struct TCGTemp *mem_base;
     intptr_t mem_offset;
     const char *name;
+
+    /* Pass-specific information that can be stored for a temporary.
+       One word worth of integer data, and one pointer to data
+       allocated separately.  */
+    uintptr_t state;
+    void *state_ptr;
 } TCGTemp;
 
 typedef struct TCGContext TCGContext;
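
The two fields appended to TCGTemp, state and state_ptr, give each compiler pass one word of integer scratch data plus one pointer to separately allocated data, so a pass no longer needs a side table indexed by temp number. A standalone sketch of that pattern follows; MockTemp, TempUseCount and pass_init are hypothetical illustrations, not QEMU API:

    /* Standalone sketch of per-temp pass-private data. */
    #include <stdint.h>
    #include <stdlib.h>

    typedef struct MockTemp {
        uintptr_t state;    /* one word of pass-private integer data */
        void *state_ptr;    /* pass-private data allocated separately */
    } MockTemp;

    typedef struct TempUseCount {
        unsigned reads, writes;
    } TempUseCount;

    /* A pass resets the scratch fields before it runs... */
    static void pass_init(MockTemp *temps, size_t n)
    {
        for (size_t i = 0; i < n; i++) {
            temps[i].state = 0;  /* e.g. "no liveness info yet" */
            temps[i].state_ptr = calloc(1, sizeof(TempUseCount));
        }
    }

    /* ...and can then attach whatever it needs to each temp. */
    static void pass_note_read(MockTemp *t)
    {
        ((TempUseCount *)t->state_ptr)->reads++;
    }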
@@ -606,58 +571,35 @@ typedef struct TCGTempSet {
 #define SYNC_ARG  1
 typedef uint16_t TCGLifeData;
 
-/* The layout here is designed to avoid crossing of a 32-bit boundary.
-   If we do so, gcc adds padding, expanding the size to 12.  */
+/* The layout here is designed to avoid a bitfield crossing of
+   a 32-bit boundary, which would cause GCC to add extra padding.  */
 typedef struct TCGOp {
     TCGOpcode opc   : 8;        /*  8 */
 
-    /* Index of the prev/next op, or 0 for the end of the list.  */
-    unsigned prev   : 10;       /* 18 */
-    unsigned next   : 10;       /* 28 */
-
     /* The number of out and in parameter for a call.  */
-    unsigned calli  : 4;        /* 32 */
-    unsigned callo  : 2;        /* 34 */
+    unsigned calli  : 4;        /* 12 */
+    unsigned callo  : 2;        /* 14 */
+    unsigned        : 2;        /* 16 */
+
+    /* Index of the prev/next op, or 0 for the end of the list.  */
+    unsigned prev   : 16;       /* 32 */
+    unsigned next   : 16;       /* 48 */
 
-    /* Index of the arguments for this op, or 0 for zero-operand ops.  */
-    unsigned args   : 14;       /* 48 */
-
     /* Lifetime data of the operands.  */
     unsigned life   : 16;       /* 64 */
+
+    /* Arguments for the opcode.  */
+    TCGArg args[MAX_OPC_PARAM];
 } TCGOp;
 
+/* Make sure that we don't expand the structure without noticing.  */
+QEMU_BUILD_BUG_ON(sizeof(TCGOp) != 8 + sizeof(TCGArg) * MAX_OPC_PARAM);
+
 /* Make sure operands fit in the bitfields above.  */
 QEMU_BUILD_BUG_ON(NB_OPS > (1 << 8));
-QEMU_BUILD_BUG_ON(OPC_BUF_SIZE > (1 << 10));
-QEMU_BUILD_BUG_ON(OPPARAM_BUF_SIZE > (1 << 14));
-
-/* Make sure that we don't overflow 64 bits without noticing.  */
-QEMU_BUILD_BUG_ON(sizeof(TCGOp) > 8);
+QEMU_BUILD_BUG_ON(OPC_BUF_SIZE > (1 << 16));
-
-struct TCGContext {
-    uint8_t *pool_cur, *pool_end;
-    TCGPool *pool_first, *pool_current, *pool_first_large;
-    int nb_labels;
-    int nb_globals;
-    int nb_temps;
-    int nb_indirects;
-
-    /* goto_tb support */
-    tcg_insn_unit *code_buf;
-    uint16_t *tb_jmp_reset_offset; /* tb->jmp_reset_offset */
-    uintptr_t *tb_jmp_insn_offset; /* tb->jmp_target_arg if direct_jump */
-    uintptr_t *tb_jmp_target_addr; /* tb->jmp_target_arg if !direct_jump */
-
-    TCGRegSet reserved_regs;
-    intptr_t current_frame_offset;
-    intptr_t frame_start;
-    intptr_t frame_end;
-    TCGTemp *frame_temp;
-
-    tcg_insn_unit *code_ptr;
-
-#ifdef CONFIG_PROFILER
-    /* profiling info */
+
+typedef struct TCGProfile {
     int64_t tb_count1;
     int64_t tb_count;
     int64_t op_count; /* total insn count */
@@ -674,6 +616,34 @@ struct TCGContext {
     int64_t opt_time;
    int64_t restore_count;
     int64_t restore_time;
+    int64_t table_op_count[NB_OPS];
+} TCGProfile;
+
+struct TCGContext {
+    uint8_t *pool_cur, *pool_end;
+    TCGPool *pool_first, *pool_current, *pool_first_large;
+    int nb_labels;
+    int nb_globals;
+    int nb_temps;
+    int nb_indirects;
+
+    /* goto_tb support */
+    tcg_insn_unit *code_buf;
+    uint16_t *tb_jmp_reset_offset; /* tb->jmp_reset_offset */
+    uintptr_t *tb_jmp_insn_offset; /* tb->jmp_target_arg if direct_jump */
+    uintptr_t *tb_jmp_target_addr; /* tb->jmp_target_arg if !direct_jump */
+
+    TCGRegSet reserved_regs;
+    uint32_t tb_cflags; /* cflags of the current TB */
+    intptr_t current_frame_offset;
+    intptr_t frame_start;
+    intptr_t frame_end;
+    TCGTemp *frame_temp;
+
+    tcg_insn_unit *code_ptr;
+
+#ifdef CONFIG_PROFILER
+    TCGProfile prof;
 #endif
 
 #ifdef CONFIG_DEBUG_TCG
@@ -682,7 +652,6 @@ struct TCGContext {
 #endif
 
     int gen_next_op_idx;
-    int gen_next_parm_idx;
 
     /* Code generation.  Note that we specifically do not use tcg_insn_unit
        here, because there's too much arithmetic throughout that relies
@@ -698,11 +667,8 @@ struct TCGContext {
     /* Threshold to flush the translated code buffer.  */
     void *code_gen_highwater;
 
-    TBContext tb_ctx;
-
     /* Track which vCPU triggers events */
     CPUState *cpu; /* *_trans */
-    TCGv_env tcg_env; /* *_exec */
 
     /* These structures are private to tcg-target.inc.c.  */
 #ifdef TCG_TARGET_NEED_LDST_LABELS
@@ -712,6 +678,8 @@ struct TCGContext {
     struct TCGLabelPoolData *pool_labels;
 #endif
 
+    TCGLabel *exitreq_label;
+
     TCGTempSet free_temps[TCG_TYPE_COUNT * 2];
     TCGTemp temps[TCG_MAX_TEMPS]; /* globals first, temps after */
@@ -720,25 +688,105 @@ struct TCGContext {
     TCGTemp *reg_to_temp[TCG_TARGET_NB_REGS];
 
     TCGOp gen_op_buf[OPC_BUF_SIZE];
-    TCGArg gen_opparam_buf[OPPARAM_BUF_SIZE];
 
     uint16_t gen_insn_end_off[TCG_MAX_INSNS];
     target_ulong gen_insn_data[TCG_MAX_INSNS][TARGET_INSN_START_WORDS];
 };
 
-extern TCGContext tcg_ctx;
-extern bool parallel_cpus;
+extern TCGContext tcg_init_ctx;
+extern __thread TCGContext *tcg_ctx;
+extern TCGv_env cpu_env;
+
+static inline size_t temp_idx(TCGTemp *ts)
+{
+    ptrdiff_t n = ts - tcg_ctx->temps;
+    tcg_debug_assert(n >= 0 && n < tcg_ctx->nb_temps);
+    return n;
+}
+
+static inline TCGArg temp_arg(TCGTemp *ts)
+{
+    return (uintptr_t)ts;
+}
+
+static inline TCGTemp *arg_temp(TCGArg a)
+{
+    return (TCGTemp *)(uintptr_t)a;
+}
+
+/* Using the offset of a temporary, relative to TCGContext, rather than
+   its index means that we don't use 0.  That leaves offset 0 free for
+   a NULL representation without having to leave index 0 unused.  */
+static inline TCGTemp *tcgv_i32_temp(TCGv_i32 v)
+{
+    uintptr_t o = (uintptr_t)v;
+    TCGTemp *t = (void *)tcg_ctx + o;
+    tcg_debug_assert(offsetof(TCGContext, temps[temp_idx(t)]) == o);
+    return t;
+}
+
+static inline TCGTemp *tcgv_i64_temp(TCGv_i64 v)
+{
+    return tcgv_i32_temp((TCGv_i32)v);
+}
+
+static inline TCGTemp *tcgv_ptr_temp(TCGv_ptr v)
+{
+    return tcgv_i32_temp((TCGv_i32)v);
+}
+
+static inline TCGArg tcgv_i32_arg(TCGv_i32 v)
+{
+    return temp_arg(tcgv_i32_temp(v));
+}
+
+static inline TCGArg tcgv_i64_arg(TCGv_i64 v)
+{
+    return temp_arg(tcgv_i64_temp(v));
+}
+
+static inline TCGArg tcgv_ptr_arg(TCGv_ptr v)
+{
+    return temp_arg(tcgv_ptr_temp(v));
+}
+
+static inline TCGv_i32 temp_tcgv_i32(TCGTemp *t)
+{
+    (void)temp_idx(t); /* trigger embedded assert */
+    return (TCGv_i32)((void *)t - (void *)tcg_ctx);
+}
+
+static inline TCGv_i64 temp_tcgv_i64(TCGTemp *t)
+{
+    return (TCGv_i64)temp_tcgv_i32(t);
+}
+
+static inline TCGv_ptr temp_tcgv_ptr(TCGTemp *t)
+{
+    return (TCGv_ptr)temp_tcgv_i32(t);
+}
+
+#if TCG_TARGET_REG_BITS == 32
+static inline TCGv_i32 TCGV_LOW(TCGv_i64 t)
+{
+    return temp_tcgv_i32(tcgv_i64_temp(t));
+}
+
+static inline TCGv_i32 TCGV_HIGH(TCGv_i64 t)
+{
+    return temp_tcgv_i32(tcgv_i64_temp(t) + 1);
+}
+#endif
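
The conversions above hinge on one trick, spelled out in the comment before tcgv_i32_temp: a TCGv handle stores the byte offset of its TCGTemp within TCGContext rather than an index, so the value is never 0 and offset 0 is free to mean NULL. A self-contained demonstration of the round trip (Ctx, Temp and Handle are mock types, not QEMU API):

    /* Standalone demo of the offset encoding used by tcgv_i32_temp. */
    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>

    typedef struct Temp { int dummy; } Temp;
    typedef struct Ctx { long header; Temp temps[8]; } Ctx;
    typedef struct Handle_s *Handle;   /* opaque, like TCGv_i32 */

    static Ctx ctx;

    static Handle temp_to_handle(Temp *t)
    {
        /* Encode the byte offset from the context; never 0, since
           temps[] does not start at offset 0. */
        return (Handle)(uintptr_t)((char *)t - (char *)&ctx);
    }

    static Temp *handle_to_temp(Handle h)
    {
        return (Temp *)((char *)&ctx + (uintptr_t)h);
    }

    int main(void)
    {
        Handle h = temp_to_handle(&ctx.temps[3]);
        assert(h != NULL);                  /* offset 0 stays free for NULL */
        assert(handle_to_temp(h) == &ctx.temps[3]);
        return 0;
    }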
 
 static inline void tcg_set_insn_param(int op_idx, int arg, TCGArg v)
 {
-    int op_argi = tcg_ctx.gen_op_buf[op_idx].args;
-    tcg_ctx.gen_opparam_buf[op_argi + arg] = v;
+    tcg_ctx->gen_op_buf[op_idx].args[arg] = v;
 }
 
 /* The number of opcodes emitted so far.  */
 static inline int tcg_op_buf_count(void)
 {
-    return tcg_ctx.gen_next_op_idx;
+    return tcg_ctx->gen_next_op_idx;
 }
 
 /* Test for whether to terminate the TB for using too many opcodes.  */
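
Above, tcg_set_insn_param now writes the inline args array directly, which is the payoff of the TCGOp repacking earlier in this header: the bitfield header occupies exactly one 64-bit word (8 + 4 + 2 + 2 + 16 + 16 + 16 = 64 bits), prev/next grow to 16 bits because the separate 14-bit args index is gone, and the arguments follow inline. A standalone sketch of the packing check, with field widths copied from the patch, a placeholder MAX_OPC_PARAM and hypothetical type names; the assertion assumes a typical ABI that packs these bitfields into 8 bytes:

    /* Standalone sketch of the TCGOp packing argument. */
    #include <stdint.h>

    #define MAX_OPC_PARAM 10          /* placeholder value for the sketch */
    typedef uintptr_t TCGArg;
    typedef enum { OPC_DUMMY } TCGOpcode;

    typedef struct SketchOp {
        TCGOpcode opc  : 8;           /*  8 bits */
        unsigned calli : 4;           /* 12 */
        unsigned callo : 2;           /* 14 */
        unsigned       : 2;           /* 16: pad so prev doesn't cross 32 */
        unsigned prev  : 16;          /* 32 */
        unsigned next  : 16;          /* 48 */
        unsigned life  : 16;          /* 64: exactly one 64-bit word */
        TCGArg args[MAX_OPC_PARAM];   /* arguments now stored inline */
    } SketchOp;

    /* Same shape as the QEMU_BUILD_BUG_ON check in the patch. */
    _Static_assert(sizeof(SketchOp) == 8 + sizeof(TCGArg) * MAX_OPC_PARAM,
                   "bitfield header must pack into 8 bytes");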
@@ -749,15 +797,21 @@ static inline bool tcg_op_buf_full(void)
 
 /* pool based memory allocation */
 
-/* tb_lock must be held for tcg_malloc_internal. */
+/* user-mode: tb_lock must be held for tcg_malloc_internal. */
 void *tcg_malloc_internal(TCGContext *s, int size);
 void tcg_pool_reset(TCGContext *s);
 TranslationBlock *tcg_tb_alloc(TCGContext *s);
 
-/* Called with tb_lock held.  */
+void tcg_region_init(void);
+void tcg_region_reset_all(void);
+
+size_t tcg_code_size(void);
+size_t tcg_code_capacity(void);
+
+/* user-mode: Called with tb_lock held.  */
 static inline void *tcg_malloc(int size)
 {
-    TCGContext *s = &tcg_ctx;
+    TCGContext *s = tcg_ctx;
     uint8_t *ptr, *ptr_end;
 
     /* ??? This is a weak placeholder for minimum malloc alignment.  */
@@ -766,7 +820,7 @@ static inline void *tcg_malloc(int size)
     ptr = s->pool_cur;
     ptr_end = ptr + size;
     if (unlikely(ptr_end > s->pool_end)) {
-        return tcg_malloc_internal(&tcg_ctx, size);
+        return tcg_malloc_internal(tcg_ctx, size);
     } else {
         s->pool_cur = ptr_end;
         return ptr;
@@ -774,6 +828,7 @@ static inline void *tcg_malloc(int size)
 }
 
 void tcg_context_init(TCGContext *s);
+void tcg_register_thread(void);
 void tcg_prologue_init(TCGContext *s);
 void tcg_func_start(TCGContext *s);
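
tcg_malloc is a bump-pointer pool allocator: the common case just advances pool_cur, and only exhaustion falls through to tcg_malloc_internal, which links in a fresh pool block. A self-contained sketch of the same fast/slow split (Pool and pool_slow_alloc are illustrative stand-ins, with plain malloc on the slow path):

    /* Standalone sketch of a bump-pointer pool with a slow-path fallback. */
    #include <stdint.h>
    #include <stdlib.h>

    typedef struct Pool {
        uint8_t *cur, *end;
    } Pool;

    static void *pool_slow_alloc(Pool *p, size_t size)
    {
        /* Real code links a new pool block and retries; a plain malloc
           keeps the sketch self-contained. */
        (void)p;
        return malloc(size);
    }

    static void *pool_alloc(Pool *p, size_t size)
    {
        size = (size + 7) & ~(size_t)7;        /* crude minimum alignment */
        uint8_t *ptr = p->cur;
        if (ptr + size > p->end) {
            return pool_slow_alloc(p, size);   /* slow path */
        }
        p->cur = ptr + size;                   /* fast path: bump pointer */
        return ptr;
    }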
@@ -781,10 +836,8 @@ int tcg_gen_code(TCGContext *s, TranslationBlock *tb);
 
 void tcg_set_frame(TCGContext *s, TCGReg reg, intptr_t start, intptr_t size);
 
-int tcg_global_mem_new_internal(TCGType, TCGv_ptr, intptr_t, const char *);
-
-TCGv_i32 tcg_global_reg_new_i32(TCGReg reg, const char *name);
-TCGv_i64 tcg_global_reg_new_i64(TCGReg reg, const char *name);
+TCGTemp *tcg_global_mem_new_internal(TCGType, TCGv_ptr,
+                                     intptr_t, const char *);
 
 TCGv_i32 tcg_temp_new_internal_i32(int temp_local);
 TCGv_i64 tcg_temp_new_internal_i64(int temp_local);
@@ -795,8 +848,8 @@ void tcg_temp_free_i64(TCGv_i64 arg);
 static inline TCGv_i32 tcg_global_mem_new_i32(TCGv_ptr reg, intptr_t offset,
                                               const char *name)
 {
-    int idx = tcg_global_mem_new_internal(TCG_TYPE_I32, reg, offset, name);
-    return MAKE_TCGV_I32(idx);
+    TCGTemp *t = tcg_global_mem_new_internal(TCG_TYPE_I32, reg, offset, name);
+    return temp_tcgv_i32(t);
 }
 
 static inline TCGv_i32 tcg_temp_new_i32(void)
@@ -812,8 +865,8 @@ static inline TCGv_i32 tcg_temp_local_new_i32(void)
 static inline TCGv_i64 tcg_global_mem_new_i64(TCGv_ptr reg, intptr_t offset,
                                               const char *name)
 {
-    int idx = tcg_global_mem_new_internal(TCG_TYPE_I64, reg, offset, name);
-    return MAKE_TCGV_I64(idx);
+    TCGTemp *t = tcg_global_mem_new_internal(TCG_TYPE_I64, reg, offset, name);
+    return temp_tcgv_i64(t);
 }
 
 static inline TCGv_i64 tcg_temp_new_i64(void)
@@ -900,23 +953,19 @@ do {\
 } while (0)
 
 #if UINTPTR_MAX == UINT32_MAX
-#define TCGV_NAT_TO_PTR(n) MAKE_TCGV_PTR(GET_TCGV_I32(n))
-#define TCGV_PTR_TO_NAT(n) MAKE_TCGV_I32(GET_TCGV_PTR(n))
+static inline TCGv_ptr TCGV_NAT_TO_PTR(TCGv_i32 n) { return (TCGv_ptr)n; }
+static inline TCGv_i32 TCGV_PTR_TO_NAT(TCGv_ptr n) { return (TCGv_i32)n; }
 
 #define tcg_const_ptr(V) TCGV_NAT_TO_PTR(tcg_const_i32((intptr_t)(V)))
-#define tcg_global_reg_new_ptr(R, N) \
-    TCGV_NAT_TO_PTR(tcg_global_reg_new_i32((R), (N)))
 #define tcg_global_mem_new_ptr(R, O, N) \
     TCGV_NAT_TO_PTR(tcg_global_mem_new_i32((R), (O), (N)))
 #define tcg_temp_new_ptr() TCGV_NAT_TO_PTR(tcg_temp_new_i32())
 #define tcg_temp_free_ptr(T) tcg_temp_free_i32(TCGV_PTR_TO_NAT(T))
 #else
-#define TCGV_NAT_TO_PTR(n) MAKE_TCGV_PTR(GET_TCGV_I64(n))
-#define TCGV_PTR_TO_NAT(n) MAKE_TCGV_I64(GET_TCGV_PTR(n))
+static inline TCGv_ptr TCGV_NAT_TO_PTR(TCGv_i64 n) { return (TCGv_ptr)n; }
+static inline TCGv_i64 TCGV_PTR_TO_NAT(TCGv_ptr n) { return (TCGv_i64)n; }
 
 #define tcg_const_ptr(V) TCGV_NAT_TO_PTR(tcg_const_i64((intptr_t)(V)))
-#define tcg_global_reg_new_ptr(R, N) \
-    TCGV_NAT_TO_PTR(tcg_global_reg_new_i64((R), (N)))
 #define tcg_global_mem_new_ptr(R, O, N) \
     TCGV_NAT_TO_PTR(tcg_global_mem_new_i64((R), (O), (N)))
 #define tcg_temp_new_ptr() TCGV_NAT_TO_PTR(tcg_temp_new_i64())
@@ -925,8 +974,7 @@ do {\
 
 bool tcg_op_supported(TCGOpcode op);
 
-void tcg_gen_callN(TCGContext *s, void *func,
-                   TCGArg ret, int nargs, TCGArg *args);
+void tcg_gen_callN(void *func, TCGTemp *ret, int nargs, TCGTemp **args);
 
 void tcg_op_remove(TCGContext *s, TCGOp *op);
 TCGOp *tcg_op_insert_before(TCGContext *s, TCGOp *op, TCGOpcode opc, int narg);
@@ -1106,7 +1154,7 @@ static inline unsigned get_mmuidx(TCGMemOpIdx oi)
 uintptr_t tcg_qemu_tb_exec(CPUArchState *env, uint8_t *tb_ptr);
 #else
 # define tcg_qemu_tb_exec(env, tb_ptr) \
-    ((uintptr_t (*)(void *, void *))tcg_ctx.code_gen_prologue)(env, tb_ptr)
+    ((uintptr_t (*)(void *, void *))tcg_ctx->code_gen_prologue)(env, tb_ptr)
 #endif
 
 void tcg_register_jit(void *buf, size_t buf_size);

View file

@@ -103,7 +103,7 @@ static bool is_equal(const void *obj, const void *userp)
 static inline uint32_t h(unsigned long v)
 {
-    return tb_hash_func6(v, 0, 0, 0);
+    return tb_hash_func7(v, 0, 0, 0, 0);
 }
 
 /*

View file

@@ -73,6 +73,47 @@ int qemu_madvise(void *addr, size_t len, int advice)
 #endif
 }
 
+static int qemu_mprotect__osdep(void *addr, size_t size, int prot)
+{
+    g_assert(!((uintptr_t)addr & ~qemu_real_host_page_mask));
+    g_assert(!(size & ~qemu_real_host_page_mask));
+
+#ifdef _WIN32
+    DWORD old_protect;
+
+    if (!VirtualProtect(addr, size, prot, &old_protect)) {
+        error_report("%s: VirtualProtect failed with error code %ld",
+                     __func__, GetLastError());
+        return -1;
+    }
+    return 0;
+#else
+    if (mprotect(addr, size, prot)) {
+        error_report("%s: mprotect failed: %s", __func__, strerror(errno));
+        return -1;
+    }
+    return 0;
+#endif
+}
+
+int qemu_mprotect_rwx(void *addr, size_t size)
+{
+#ifdef _WIN32
+    return qemu_mprotect__osdep(addr, size, PAGE_EXECUTE_READWRITE);
+#else
+    return qemu_mprotect__osdep(addr, size, PROT_READ | PROT_WRITE | PROT_EXEC);
+#endif
+}
+
+int qemu_mprotect_none(void *addr, size_t size)
+{
+#ifdef _WIN32
+    return qemu_mprotect__osdep(addr, size, PAGE_NOACCESS);
+#else
+    return qemu_mprotect__osdep(addr, size, PROT_NONE);
+#endif
+}
+
 #ifndef _WIN32
 static int fcntl_op_setlk = -1;
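
These helpers wrap the usual executable-memory dance for page-aligned regions such as the code_gen_buffer and its guard page. A POSIX-only usage sketch mirroring the !_WIN32 branch with raw mmap/mprotect (the qemu_* helpers link only inside QEMU, and hardened hosts may refuse PROT_EXEC):

    /* Standalone sketch: RWX region plus an inaccessible guard page. */
    #include <stdio.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
        size_t page = (size_t)sysconf(_SC_PAGESIZE);
        void *buf = mmap(NULL, 2 * page, PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (buf == MAP_FAILED) {
            perror("mmap");
            return 1;
        }
        /* First page: read/write/exec, as qemu_mprotect_rwx would do. */
        if (mprotect(buf, page, PROT_READ | PROT_WRITE | PROT_EXEC)) {
            perror("mprotect rwx");
        }
        /* Second page: inaccessible guard, as qemu_mprotect_none would do. */
        if (mprotect((char *)buf + page, page, PROT_NONE)) {
            perror("mprotect none");
        }
        munmap(buf, 2 * page);
        return 0;
    }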