tcg/ppc: Make direct jump patching thread-safe

Ensure direct jump patching on PPC is atomic by:
 * limiting the translation buffer size in 32-bit mode so that it is
   addressable by a Branch I-form instruction (the reach arithmetic is
   sketched below);
 * using atomic_read()/atomic_set() for code patching.
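
Why 32 MiB in 32-bit mode: the B I-form instruction encodes a 24-bit signed
word displacement (LI), giving a reach of +/-32 MiB from the patch site, so a
buffer capped at 32 MiB keeps every intra-buffer jump reachable. The backend
checks this with its in_range_b() assertion (see the diff below). Here is a
minimal standalone sketch of that arithmetic, assuming only the Power ISA
encoding facts; the helper name branch_in_range() is illustrative, not QEMU's:

    /* Sketch: B I-form reach on PPC.  LI is a 24-bit signed count of
       4-byte words, i.e. byte displacements in [-0x2000000, 0x1fffffc]. */
    #include <stdint.h>
    #include <stdio.h>
    #include <stdbool.h>

    static bool branch_in_range(int64_t diff)
    {
        return diff >= -0x2000000 && diff <= 0x1fffffc && (diff & 3) == 0;
    }

    int main(void)
    {
        int64_t buf = 32 * 1024 * 1024;   /* the new 32-bit buffer cap */
        /* Worst case: a branch between the first and last insn slots. */
        printf("fwd in range: %d, bwd in range: %d\n",
               branch_in_range(buf - 4), branch_in_range(-(buf - 4)));
        return 0;
    }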

Signed-off-by: Sergey Fedorov <serge.fdrv@gmail.com>
Signed-off-by: Sergey Fedorov <sergey.fedorov@linaro.org>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Message-Id: <1461341333-19646-5-git-send-email-sergey.fedorov@linaro.org>
Signed-off-by: Richard Henderson <rth@twiddle.net>
Author:    Sergey Fedorov, 2016-04-22 19:08:46 +03:00
Committer: Richard Henderson
Parent:    76442a939e
Commit:    399f164857

2 changed files with 20 additions and 4 deletions

diff --git a/tcg/ppc/tcg-target.inc.c b/tcg/ppc/tcg-target.inc.c
--- a/tcg/ppc/tcg-target.inc.c
+++ b/tcg/ppc/tcg-target.inc.c

@@ -1237,6 +1237,7 @@ static void tcg_out_brcond2 (TCGContext *s, const TCGArg *args,
     tcg_out_bc(s, BC | BI(7, CR_EQ) | BO_COND_TRUE, arg_label(args[5]));
 }
 
+#ifdef __powerpc64__
 void ppc_tb_set_jmp_target(uintptr_t jmp_addr, uintptr_t addr)
 {
     tcg_insn_unit i1, i2;
@@ -1265,11 +1266,18 @@ void ppc_tb_set_jmp_target(uintptr_t jmp_addr, uintptr_t addr)
     pair = (uint64_t)i2 << 32 | i1;
 #endif
 
-    /* ??? __atomic_store_8, presuming there's some way to do that
-       for 32-bit, otherwise this is good enough for 64-bit.  */
-    *(uint64_t *)jmp_addr = pair;
+    atomic_set((uint64_t *)jmp_addr, pair);
     flush_icache_range(jmp_addr, jmp_addr + 8);
 }
+#else
+void ppc_tb_set_jmp_target(uintptr_t jmp_addr, uintptr_t addr)
+{
+    intptr_t diff = addr - jmp_addr;
+    tcg_debug_assert(in_range_b(diff));
+    atomic_set((uint32_t *)jmp_addr, B | (diff & 0x3fffffc));
+    flush_icache_range(jmp_addr, jmp_addr + 4);
+}
+#endif
 
 static void tcg_out_call(TCGContext *s, tcg_insn_unit *target)
 {
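The 32-bit path above is safe because the patched site is a single naturally
aligned 4-byte instruction: atomic_set() guarantees a concurrently executing
thread sees either the old or the new branch, never a torn word. A minimal
sketch of the encoding used by that path, assuming the I-form major opcode
18 from the ISA (the names B_INSN and encode_b() are illustrative, not QEMU's):

    #include <stdint.h>
    #include <stdio.h>

    #define B_INSN (18u << 26)           /* I-form branch, AA=0, LK=0 */

    /* Mirrors "B | (diff & 0x3fffffc)" from the 32-bit patching path. */
    static uint32_t encode_b(uintptr_t jmp_addr, uintptr_t addr)
    {
        intptr_t diff = (intptr_t)(addr - jmp_addr);
        return B_INSN | ((uint32_t)diff & 0x3fffffc);
    }

    int main(void)
    {
        /* Hypothetical addresses: patch at 0x10000000, target 0x10004000. */
        printf("0x%08x\n", encode_b(0x10000000u, 0x10004000u)); /* 0x48004000 */
        return 0;
    }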
@@ -1895,7 +1903,9 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
         break;
     case INDEX_op_goto_tb:
         tcg_debug_assert(s->tb_jmp_offset);
-        /* Direct jump.  Ensure the next insns are 8-byte aligned. */
+        /* Direct jump. */
+#ifdef __powerpc64__
+        /* Ensure the next insns are 8-byte aligned. */
         if ((uintptr_t)s->code_ptr & 7) {
             tcg_out32(s, NOP);
         }
@@ -1904,6 +1914,10 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
         s->code_ptr += 2;
         tcg_out32(s, MTSPR | RS(TCG_REG_TMP1) | CTR);
         tcg_out32(s, BCCTR | BO_ALWAYS);
+#else
+        /* To be replaced by a branch.  */
+        s->code_ptr++;
+#endif
         s->tb_next_offset[args[0]] = tcg_current_code_size(s);
         break;
     case INDEX_op_br:

diff --git a/translate-all.c b/translate-all.c
--- a/translate-all.c
+++ b/translate-all.c

@@ -464,6 +464,8 @@ static inline PageDesc *page_find(tb_page_addr_t index)
 # define MAX_CODE_GEN_BUFFER_SIZE  (2ul * 1024 * 1024 * 1024)
 #elif defined(__powerpc64__)
 # define MAX_CODE_GEN_BUFFER_SIZE  (2ul * 1024 * 1024 * 1024)
+#elif defined(__powerpc__)
+# define MAX_CODE_GEN_BUFFER_SIZE  (32u * 1024 * 1024)
 #elif defined(__aarch64__)
 # define MAX_CODE_GEN_BUFFER_SIZE  (128ul * 1024 * 1024)
 #elif defined(__arm__)
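
As a quick consistency check (a standalone sketch, not QEMU code), the new
32-bit cap keeps every intra-buffer displacement within the B I-form reach
derived earlier:

    #include <assert.h>

    #define MAX_CODE_GEN_BUFFER_SIZE (32u * 1024 * 1024)

    int main(void)
    {
        /* Max forward reach of a B I-form branch is 0x1fffffc bytes; the
           largest intra-buffer displacement is the buffer size minus one
           4-byte insn slot. */
        assert(MAX_CODE_GEN_BUFFER_SIZE - 4 <= 0x1fffffcu);
        return 0;
    }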