tcg: Introduce zero and sign-extended versions of load helpers

Reviewed-by: Aurelien Jarno <aurelien@aurel32.net>
Signed-off-by: Richard Henderson <rth@twiddle.net>
Author: Richard Henderson <rth@twiddle.net>
Date:   2013-08-27 14:09:14 -07:00
Parent: e58eb53413
Commit: c8f94df593
3 changed files with 64 additions and 21 deletions
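
Before diving into the template changes, it may help to see the pattern this
commit instantiates written out as plain C. This is a minimal sketch with
hypothetical names (example_lduw/example_ldsw), not QEMU code; it assumes a
little-endian host and uses a typedef to stand in for tcg_target_ulong:

    #include <stdint.h>

    /* Stand-in for the host register width (tcg_target_ulong in QEMU). */
    typedef uintptr_t host_ulong;

    /* Zero-extended load: returning the full register width means TCG
       generated code never has to care about ABI-specific return type
       promotion. */
    static host_ulong example_lduw(const uint8_t *haddr)
    {
        uint16_t val = (uint16_t)(haddr[0] | (haddr[1] << 8));
        return val;                     /* implicit zero extension */
    }

    /* Sign-extended variant: load unsigned, then convert through the
       signed type -- the same trick the template's SSUFFIX helper uses. */
    static host_ulong example_ldsw(const uint8_t *haddr)
    {
        return (int16_t)example_lduw(haddr);
    }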

diff --git a/include/exec/softmmu_template.h b/include/exec/softmmu_template.h
--- a/include/exec/softmmu_template.h
+++ b/include/exec/softmmu_template.h
@@ -29,23 +29,39 @@
 #if DATA_SIZE == 8
 #define SUFFIX q
 #define LSUFFIX q
-#define DATA_TYPE uint64_t
+#define SDATA_TYPE int64_t
 #elif DATA_SIZE == 4
 #define SUFFIX l
 #define LSUFFIX l
-#define DATA_TYPE uint32_t
+#define SDATA_TYPE int32_t
 #elif DATA_SIZE == 2
 #define SUFFIX w
 #define LSUFFIX uw
-#define DATA_TYPE uint16_t
+#define SDATA_TYPE int16_t
 #elif DATA_SIZE == 1
 #define SUFFIX b
 #define LSUFFIX ub
-#define DATA_TYPE uint8_t
+#define SDATA_TYPE int8_t
 #else
 #error unsupported data size
 #endif
 
+#define DATA_TYPE glue(u, SDATA_TYPE)
+
+/* For the benefit of TCG generated code, we want to avoid the complication
+   of ABI-specific return type promotion and always return a value extended
+   to the register size of the host.  This is tcg_target_long, except in the
+   case of a 32-bit host and 64-bit data, and for that we always have
+   uint64_t.  Don't bother with this widened value for SOFTMMU_CODE_ACCESS.  */
+#if defined(SOFTMMU_CODE_ACCESS) || DATA_SIZE == 8
+# define WORD_TYPE  DATA_TYPE
+# define USUFFIX    SUFFIX
+#else
+# define WORD_TYPE  tcg_target_ulong
+# define USUFFIX    glue(u, SUFFIX)
+# define SSUFFIX    glue(s, SUFFIX)
+#endif
+
 #ifdef SOFTMMU_CODE_ACCESS
 #define READ_ACCESS_TYPE 2
 #define ADDR_READ addr_code
@@ -77,10 +93,10 @@ static inline DATA_TYPE glue(io_read, SUFFIX)(CPUArchState *env,
 #ifdef SOFTMMU_CODE_ACCESS
 static
 #endif
-DATA_TYPE
-glue(glue(helper_ret_ld, SUFFIX), MMUSUFFIX)(CPUArchState *env,
-                                             target_ulong addr, int mmu_idx,
-                                             uintptr_t retaddr)
+WORD_TYPE
+glue(glue(helper_ret_ld, USUFFIX), MMUSUFFIX)(CPUArchState *env,
+                                              target_ulong addr, int mmu_idx,
+                                              uintptr_t retaddr)
 {
     int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
     target_ulong tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
@@ -126,9 +142,9 @@ glue(glue(helper_ret_ld, SUFFIX), MMUSUFFIX)(CPUArchState *env,
         addr2 = addr1 + DATA_SIZE;
         /* Note the adjustment at the beginning of the function.
            Undo that for the recursion.  */
-        res1 = glue(glue(helper_ret_ld, SUFFIX), MMUSUFFIX)
+        res1 = glue(glue(helper_ret_ld, USUFFIX), MMUSUFFIX)
                     (env, addr1, mmu_idx, retaddr + GETPC_ADJ);
-        res2 = glue(glue(helper_ret_ld, SUFFIX), MMUSUFFIX)
+        res2 = glue(glue(helper_ret_ld, USUFFIX), MMUSUFFIX)
                     (env, addr2, mmu_idx, retaddr + GETPC_ADJ);
         shift = (addr & (DATA_SIZE - 1)) * 8;
 #ifdef TARGET_WORDS_BIGENDIAN
@@ -147,19 +163,33 @@ glue(glue(helper_ret_ld, SUFFIX), MMUSUFFIX)(CPUArchState *env,
 #endif
 
     haddr = addr + env->tlb_table[mmu_idx][index].addend;
-    return glue(glue(ld, LSUFFIX), _raw)((uint8_t *)haddr);
+    /* Note that ldl_raw is defined with type "int".  */
+    return (DATA_TYPE) glue(glue(ld, LSUFFIX), _raw)((uint8_t *)haddr);
 }
 
 DATA_TYPE
 glue(glue(helper_ld, SUFFIX), MMUSUFFIX)(CPUArchState *env, target_ulong addr,
                                          int mmu_idx)
 {
-    return glue(glue(helper_ret_ld, SUFFIX), MMUSUFFIX)(env, addr, mmu_idx,
-                                                        GETRA_EXT());
+    return glue(glue(helper_ret_ld, USUFFIX), MMUSUFFIX)(env, addr, mmu_idx,
+                                                         GETRA_EXT());
 }
 
 #ifndef SOFTMMU_CODE_ACCESS
 
+/* Provide signed versions of the load routines as well.  We can of course
+   avoid this for 64-bit data, or for 32-bit data on a 32-bit host.  */
+#if DATA_SIZE * 8 < TCG_TARGET_REG_BITS
+WORD_TYPE
+glue(glue(helper_ret_ld, SSUFFIX), MMUSUFFIX)(CPUArchState *env,
+                                              target_ulong addr, int mmu_idx,
+                                              uintptr_t retaddr)
+{
+    return (SDATA_TYPE) glue(glue(helper_ret_ld, USUFFIX), MMUSUFFIX)
+        (env, addr, mmu_idx, retaddr);
+}
+#endif
+
 static inline void glue(io_write, SUFFIX)(CPUArchState *env,
                                           hwaddr physaddr,
                                           DATA_TYPE val,
@@ -267,3 +297,7 @@ glue(glue(helper_st, SUFFIX), MMUSUFFIX)(CPUArchState *env, target_ulong addr,
 #undef LSUFFIX
 #undef DATA_SIZE
 #undef ADDR_READ
+#undef WORD_TYPE
+#undef SDATA_TYPE
+#undef USUFFIX
+#undef SSUFFIX
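
To make the macro plumbing concrete: for DATA_SIZE == 2 on a 64-bit host
(and MMUSUFFIX set to _mmu, matching the tcg.h declarations below), SDATA_TYPE
is int16_t, DATA_TYPE is uint16_t, WORD_TYPE is tcg_target_ulong, USUFFIX is
uw and SSUFFIX is sw, so the new signed helper expands to roughly the
following (a sketch of the expansion, not actual preprocessor output):

    tcg_target_ulong helper_ret_ldsw_mmu(CPUArchState *env, target_ulong addr,
                                         int mmu_idx, uintptr_t retaddr)
    {
        /* Truncate to int16_t, then sign-extend back to register width. */
        return (int16_t) helper_ret_lduw_mmu(env, addr, mmu_idx, retaddr);
    }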

diff --git a/tcg/i386/tcg-target.c b/tcg/i386/tcg-target.c
--- a/tcg/i386/tcg-target.c
+++ b/tcg/i386/tcg-target.c
@@ -1025,9 +1025,9 @@ static void tcg_out_jmp(TCGContext *s, uintptr_t dest)
 /* helper signature: helper_ret_ld_mmu(CPUState *env, target_ulong addr,
  *                                     int mmu_idx, uintptr_t ra)
  */
 static const void * const qemu_ld_helpers[4] = {
-    helper_ret_ldb_mmu,
-    helper_ret_ldw_mmu,
-    helper_ret_ldl_mmu,
+    helper_ret_ldub_mmu,
+    helper_ret_lduw_mmu,
+    helper_ret_ldul_mmu,
     helper_ret_ldq_mmu,
 };
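
With this table, every slow-path load in the i386 backend now returns a
value zero-extended to the host register, so after the call the upper bits
of the return register are known to be clear; only signed loads still need
an explicit sign extension in the backend. A hedged sketch of the lookup at
the call site (s_bits, meaning log2 of the access size in bytes, and the
tcg_out_calli emitter are assumptions about the surrounding backend code,
not part of this hunk):

    /* s_bits = 0..3 selects the byte/word/long/quad helper. */
    tcg_out_calli(s, (uintptr_t)qemu_ld_helpers[s_bits]);
    /* The result arrives zero-extended; a signed opcode can sign-extend
       the return register in place afterwards. */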

diff --git a/tcg/tcg.h b/tcg/tcg.h
--- a/tcg/tcg.h
+++ b/tcg/tcg.h
@@ -754,15 +754,24 @@ void tcg_out_tb_finalize(TCGContext *s);
  * Memory helpers that will be used by TCG generated code.
  */
 #ifdef CONFIG_SOFTMMU
-uint8_t helper_ret_ldb_mmu(CPUArchState *env, target_ulong addr,
-                           int mmu_idx, uintptr_t retaddr);
-uint16_t helper_ret_ldw_mmu(CPUArchState *env, target_ulong addr,
-                            int mmu_idx, uintptr_t retaddr);
-uint32_t helper_ret_ldl_mmu(CPUArchState *env, target_ulong addr,
-                            int mmu_idx, uintptr_t retaddr);
+/* Value zero-extended to tcg register size.  */
+tcg_target_ulong helper_ret_ldub_mmu(CPUArchState *env, target_ulong addr,
+                                     int mmu_idx, uintptr_t retaddr);
+tcg_target_ulong helper_ret_lduw_mmu(CPUArchState *env, target_ulong addr,
+                                     int mmu_idx, uintptr_t retaddr);
+tcg_target_ulong helper_ret_ldul_mmu(CPUArchState *env, target_ulong addr,
+                                     int mmu_idx, uintptr_t retaddr);
 uint64_t helper_ret_ldq_mmu(CPUArchState *env, target_ulong addr,
                             int mmu_idx, uintptr_t retaddr);
 
+/* Value sign-extended to tcg register size.  */
+tcg_target_ulong helper_ret_ldsb_mmu(CPUArchState *env, target_ulong addr,
+                                     int mmu_idx, uintptr_t retaddr);
+tcg_target_ulong helper_ret_ldsw_mmu(CPUArchState *env, target_ulong addr,
+                                     int mmu_idx, uintptr_t retaddr);
+tcg_target_ulong helper_ret_ldsl_mmu(CPUArchState *env, target_ulong addr,
+                                     int mmu_idx, uintptr_t retaddr);
+
 void helper_ret_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val,
                         int mmu_idx, uintptr_t retaddr);
 void helper_ret_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
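
The practical difference between the two helper families is easy to check in
isolation; this standalone demo (pure C, no QEMU dependencies, 64-bit host
assumed) mirrors what lduw- and ldsw-style helpers return for a 16-bit value
with the sign bit set:

    #include <inttypes.h>
    #include <stdio.h>

    int main(void)
    {
        uint16_t mem = 0x8000;                  /* 16-bit value, MSB set */
        uint64_t zext = (uint64_t)mem;          /* lduw-style: zero-extend */
        uint64_t sext = (uint64_t)(int16_t)mem; /* ldsw-style: sign-extend */
        printf("zero-extended: %#018" PRIx64 "\n", zext); /* 0x0000000000008000 */
        printf("sign-extended: %#018" PRIx64 "\n", sext); /* 0xffffffffffff8000 */
        return 0;
    }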