Split TLB addend and target_phys_addr_t

Historically the qemu tlb "addend" field was used for both RAM and IO accesses,
so needed to be able to hold both host addresses (unsigned long) and guest
physical addresses (target_phys_addr_t).  However since the introduction of
the iotlb field it has only been used for RAM accesses.

This means we can change the type of addend to unsigned long, and remove
associated hacks in the big-endian TCG backends.

We can also remove the host dependence from target_phys_addr_t.

Signed-off-by: Paul Brook <paul@codesourcery.com>
This commit is contained in:
Paul Brook 2010-04-05 00:28:53 +01:00
parent 5bd2c0d7a6
commit 355b194369
9 changed files with 32 additions and 59 deletions

3
configure vendored
View file

@@ -2538,9 +2538,6 @@ if [ "$TARGET_ABI_DIR" = "" ]; then
TARGET_ABI_DIR=$TARGET_ARCH TARGET_ABI_DIR=$TARGET_ARCH
fi fi
echo "TARGET_ABI_DIR=$TARGET_ABI_DIR" >> $config_target_mak echo "TARGET_ABI_DIR=$TARGET_ABI_DIR" >> $config_target_mak
if [ $target_phys_bits -lt $hostlongbits ] ; then
target_phys_bits=$hostlongbits
fi
case "$target_arch2" in case "$target_arch2" in
i386|x86_64) i386|x86_64)
if test "$xen" = "yes" -a "$target_softmmu" = "yes" ; then if test "$xen" = "yes" -a "$target_softmmu" = "yes" ; then

View file

@@ -76,7 +76,7 @@ typedef uint64_t target_ulong;
#define CPU_TLB_BITS 8 #define CPU_TLB_BITS 8
#define CPU_TLB_SIZE (1 << CPU_TLB_BITS) #define CPU_TLB_SIZE (1 << CPU_TLB_BITS)
#if TARGET_PHYS_ADDR_BITS == 32 && TARGET_LONG_BITS == 32 #if HOST_LONG_BITS == 32 && TARGET_LONG_BITS == 32
#define CPU_TLB_ENTRY_BITS 4 #define CPU_TLB_ENTRY_BITS 4
#else #else
#define CPU_TLB_ENTRY_BITS 5 #define CPU_TLB_ENTRY_BITS 5
@@ -92,21 +92,18 @@ typedef struct CPUTLBEntry {
target_ulong addr_read; target_ulong addr_read;
target_ulong addr_write; target_ulong addr_write;
target_ulong addr_code; target_ulong addr_code;
/* Addend to virtual address to get physical address. IO accesses /* Addend to virtual address to get host address. IO accesses
use the corresponding iotlb value. */ use the corresponding iotlb value. */
#if TARGET_PHYS_ADDR_BITS == 64 unsigned long addend;
/* on i386 Linux make sure it is aligned */
target_phys_addr_t addend __attribute__((aligned(8)));
#else
target_phys_addr_t addend;
#endif
/* padding to get a power of two size */ /* padding to get a power of two size */
uint8_t dummy[(1 << CPU_TLB_ENTRY_BITS) - uint8_t dummy[(1 << CPU_TLB_ENTRY_BITS) -
(sizeof(target_ulong) * 3 + (sizeof(target_ulong) * 3 +
((-sizeof(target_ulong) * 3) & (sizeof(target_phys_addr_t) - 1)) + ((-sizeof(target_ulong) * 3) & (sizeof(unsigned long) - 1)) +
sizeof(target_phys_addr_t))]; sizeof(unsigned long))];
} CPUTLBEntry; } CPUTLBEntry;
extern int CPUTLBEntry_wrong_size[sizeof(CPUTLBEntry) == (1 << CPU_TLB_ENTRY_BITS) ? 1 : -1];
#define CPU_COMMON_TLB \ #define CPU_COMMON_TLB \
/* The meaning of the MMU modes is defined in the target code. */ \ /* The meaning of the MMU modes is defined in the target code. */ \
CPUTLBEntry tlb_table[NB_MMU_MODES][CPU_TLB_SIZE]; \ CPUTLBEntry tlb_table[NB_MMU_MODES][CPU_TLB_SIZE]; \

2
exec.c
View file

@@ -2188,7 +2188,7 @@ void tlb_set_page(CPUState *env, target_ulong vaddr,
unsigned int index; unsigned int index;
target_ulong address; target_ulong address;
target_ulong code_address; target_ulong code_address;
target_phys_addr_t addend; unsigned long addend;
CPUTLBEntry *te; CPUTLBEntry *te;
CPUWatchpoint *wp; CPUWatchpoint *wp;
target_phys_addr_t iotlb; target_phys_addr_t iotlb;

View file

@@ -87,7 +87,8 @@ DATA_TYPE REGPARM glue(glue(__ld, SUFFIX), MMUSUFFIX)(target_ulong addr,
DATA_TYPE res; DATA_TYPE res;
int index; int index;
target_ulong tlb_addr; target_ulong tlb_addr;
target_phys_addr_t addend; target_phys_addr_t ioaddr;
unsigned long addend;
void *retaddr; void *retaddr;
/* test if there is match for unaligned or IO access */ /* test if there is match for unaligned or IO access */
@@ -101,8 +102,8 @@ DATA_TYPE REGPARM glue(glue(__ld, SUFFIX), MMUSUFFIX)(target_ulong addr,
if ((addr & (DATA_SIZE - 1)) != 0) if ((addr & (DATA_SIZE - 1)) != 0)
goto do_unaligned_access; goto do_unaligned_access;
retaddr = GETPC(); retaddr = GETPC();
addend = env->iotlb[mmu_idx][index]; ioaddr = env->iotlb[mmu_idx][index];
res = glue(io_read, SUFFIX)(addend, addr, retaddr); res = glue(io_read, SUFFIX)(ioaddr, addr, retaddr);
} else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) { } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
/* slow unaligned access (it spans two pages or IO) */ /* slow unaligned access (it spans two pages or IO) */
do_unaligned_access: do_unaligned_access:
@@ -143,7 +144,8 @@ static DATA_TYPE glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(target_ulong addr,
{ {
DATA_TYPE res, res1, res2; DATA_TYPE res, res1, res2;
int index, shift; int index, shift;
target_phys_addr_t addend; target_phys_addr_t ioaddr;
unsigned long addend;
target_ulong tlb_addr, addr1, addr2; target_ulong tlb_addr, addr1, addr2;
index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
@@ -154,8 +156,8 @@ static DATA_TYPE glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(target_ulong addr,
/* IO access */ /* IO access */
if ((addr & (DATA_SIZE - 1)) != 0) if ((addr & (DATA_SIZE - 1)) != 0)
goto do_unaligned_access; goto do_unaligned_access;
addend = env->iotlb[mmu_idx][index]; ioaddr = env->iotlb[mmu_idx][index];
res = glue(io_read, SUFFIX)(addend, addr, retaddr); res = glue(io_read, SUFFIX)(ioaddr, addr, retaddr);
} else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) { } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
do_unaligned_access: do_unaligned_access:
/* slow unaligned access (it spans two pages) */ /* slow unaligned access (it spans two pages) */
@@ -224,7 +226,8 @@ void REGPARM glue(glue(__st, SUFFIX), MMUSUFFIX)(target_ulong addr,
DATA_TYPE val, DATA_TYPE val,
int mmu_idx) int mmu_idx)
{ {
target_phys_addr_t addend; target_phys_addr_t ioaddr;
unsigned long addend;
target_ulong tlb_addr; target_ulong tlb_addr;
void *retaddr; void *retaddr;
int index; int index;
@@ -238,8 +241,8 @@ void REGPARM glue(glue(__st, SUFFIX), MMUSUFFIX)(target_ulong addr,
if ((addr & (DATA_SIZE - 1)) != 0) if ((addr & (DATA_SIZE - 1)) != 0)
goto do_unaligned_access; goto do_unaligned_access;
retaddr = GETPC(); retaddr = GETPC();
addend = env->iotlb[mmu_idx][index]; ioaddr = env->iotlb[mmu_idx][index];
glue(io_write, SUFFIX)(addend, val, addr, retaddr); glue(io_write, SUFFIX)(ioaddr, val, addr, retaddr);
} else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) { } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
do_unaligned_access: do_unaligned_access:
retaddr = GETPC(); retaddr = GETPC();
@@ -277,7 +280,8 @@ static void glue(glue(slow_st, SUFFIX), MMUSUFFIX)(target_ulong addr,
int mmu_idx, int mmu_idx,
void *retaddr) void *retaddr)
{ {
target_phys_addr_t addend; target_phys_addr_t ioaddr;
unsigned long addend;
target_ulong tlb_addr; target_ulong tlb_addr;
int index, i; int index, i;
@@ -289,8 +293,8 @@ static void glue(glue(slow_st, SUFFIX), MMUSUFFIX)(target_ulong addr,
/* IO access */ /* IO access */
if ((addr & (DATA_SIZE - 1)) != 0) if ((addr & (DATA_SIZE - 1)) != 0)
goto do_unaligned_access; goto do_unaligned_access;
addend = env->iotlb[mmu_idx][index]; ioaddr = env->iotlb[mmu_idx][index];
glue(io_write, SUFFIX)(addend, val, addr, retaddr); glue(io_write, SUFFIX)(ioaddr, val, addr, retaddr);
} else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) { } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
do_unaligned_access: do_unaligned_access:
/* XXX: not efficient, but simple */ /* XXX: not efficient, but simple */

View file

@@ -5,10 +5,7 @@
#ifdef TARGET_PHYS_ADDR_BITS #ifdef TARGET_PHYS_ADDR_BITS
/* target_phys_addr_t is the type of a physical address (its size can /* target_phys_addr_t is the type of a physical address (its size can
be different from 'target_ulong'). We have sizeof(target_phys_addr) be different from 'target_ulong'). */
= max(sizeof(unsigned long),
sizeof(size_of_target_physical_address)) because we must pass a
host pointer to memory operations in some cases */
#if TARGET_PHYS_ADDR_BITS == 32 #if TARGET_PHYS_ADDR_BITS == 32
typedef uint32_t target_phys_addr_t; typedef uint32_t target_phys_addr_t;

View file

@@ -867,7 +867,7 @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args,
reloc_pc16(label1_ptr, (tcg_target_long) s->code_ptr); reloc_pc16(label1_ptr, (tcg_target_long) s->code_ptr);
tcg_out_opc_imm(s, OPC_LW, TCG_REG_A0, TCG_REG_A0, tcg_out_opc_imm(s, OPC_LW, TCG_REG_A0, TCG_REG_A0,
offsetof(CPUState, tlb_table[mem_index][0].addend) + addr_meml); offsetof(CPUState, tlb_table[mem_index][0].addend));
tcg_out_opc_reg(s, OPC_ADDU, TCG_REG_V0, TCG_REG_A0, addr_regl); tcg_out_opc_reg(s, OPC_ADDU, TCG_REG_V0, TCG_REG_A0, addr_regl);
#else #else
if (GUEST_BASE == (int16_t)GUEST_BASE) { if (GUEST_BASE == (int16_t)GUEST_BASE) {
@@ -1054,7 +1054,7 @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args,
reloc_pc16(label1_ptr, (tcg_target_long) s->code_ptr); reloc_pc16(label1_ptr, (tcg_target_long) s->code_ptr);
tcg_out_opc_imm(s, OPC_LW, TCG_REG_A0, TCG_REG_A0, tcg_out_opc_imm(s, OPC_LW, TCG_REG_A0, TCG_REG_A0,
offsetof(CPUState, tlb_table[mem_index][0].addend) + addr_meml); offsetof(CPUState, tlb_table[mem_index][0].addend));
tcg_out_opc_reg(s, OPC_ADDU, TCG_REG_A0, TCG_REG_A0, addr_regl); tcg_out_opc_reg(s, OPC_ADDU, TCG_REG_A0, TCG_REG_A0, addr_regl);
#else #else
if (GUEST_BASE == (int16_t)GUEST_BASE) { if (GUEST_BASE == (int16_t)GUEST_BASE) {

View file

@@ -37,14 +37,6 @@ static uint8_t *tb_ret_addr;
#define FAST_PATH #define FAST_PATH
#ifdef CONFIG_SOFTMMU
#if TARGET_PHYS_ADDR_BITS <= 32
#define ADDEND_OFFSET 0
#else
#define ADDEND_OFFSET 4
#endif
#endif
#ifndef GUEST_BASE #ifndef GUEST_BASE
#define GUEST_BASE 0 #define GUEST_BASE 0
#endif #endif
@@ -648,7 +640,7 @@ static void tcg_out_qemu_ld (TCGContext *s, const TCGArg *args, int opc)
tcg_out32 (s, (LWZ tcg_out32 (s, (LWZ
| RT (r0) | RT (r0)
| RA (r0) | RA (r0)
| (ADDEND_OFFSET + offsetof (CPUTLBEntry, addend) | (offsetof (CPUTLBEntry, addend)
- offsetof (CPUTLBEntry, addr_read)) - offsetof (CPUTLBEntry, addr_read))
)); ));
/* r0 = env->tlb_table[mem_index][index].addend */ /* r0 = env->tlb_table[mem_index][index].addend */
@@ -847,7 +839,7 @@ static void tcg_out_qemu_st (TCGContext *s, const TCGArg *args, int opc)
tcg_out32 (s, (LWZ tcg_out32 (s, (LWZ
| RT (r0) | RT (r0)
| RA (r0) | RA (r0)
| (ADDEND_OFFSET + offsetof (CPUTLBEntry, addend) | (offsetof (CPUTLBEntry, addend)
- offsetof (CPUTLBEntry, addr_write)) - offsetof (CPUTLBEntry, addr_write))
)); ));
/* r0 = env->tlb_table[mem_index][index].addend */ /* r0 = env->tlb_table[mem_index][index].addend */

View file

@@ -28,14 +28,6 @@ static uint8_t *tb_ret_addr;
#define FAST_PATH #define FAST_PATH
#ifdef CONFIG_SOFTMMU
#if TARGET_PHYS_ADDR_BITS == 32
#define LD_ADDEND LWZ
#else
#define LD_ADDEND LD
#endif
#endif
#if TARGET_LONG_BITS == 32 #if TARGET_LONG_BITS == 32
#define LD_ADDR LWZU #define LD_ADDR LWZU
#define CMP_L 0 #define CMP_L 0
@@ -684,7 +676,7 @@ static void tcg_out_qemu_ld (TCGContext *s, const TCGArg *args, int opc)
#endif #endif
/* r0 now contains &env->tlb_table[mem_index][index].addr_read */ /* r0 now contains &env->tlb_table[mem_index][index].addr_read */
tcg_out32 (s, (LD_ADDEND tcg_out32 (s, (LD
| RT (r0) | RT (r0)
| RA (r0) | RA (r0)
| (offsetof (CPUTLBEntry, addend) | (offsetof (CPUTLBEntry, addend)
@@ -812,7 +804,7 @@ static void tcg_out_qemu_st (TCGContext *s, const TCGArg *args, int opc)
reloc_pc14 (label1_ptr, (tcg_target_long) s->code_ptr); reloc_pc14 (label1_ptr, (tcg_target_long) s->code_ptr);
#endif #endif
tcg_out32 (s, (LD_ADDEND tcg_out32 (s, (LD
| RT (r0) | RT (r0)
| RA (r0) | RA (r0)
| (offsetof (CPUTLBEntry, addend) | (offsetof (CPUTLBEntry, addend)

View file

@@ -726,13 +726,7 @@ static const void * const qemu_st_helpers[4] = {
#endif #endif
#if defined(CONFIG_SOFTMMU) #if defined(CONFIG_SOFTMMU)
#if TARGET_PHYS_ADDR_BITS == 32 #if HOST_LONG_BITS == 32
#define TARGET_ADDEND_LD_OP LDUW
#else
#define TARGET_ADDEND_LD_OP LDX
#endif
#else
#if TARGET_ABI_BITS == 32
#define TARGET_ADDEND_LD_OP LDUW #define TARGET_ADDEND_LD_OP LDUW
#else #else
#define TARGET_ADDEND_LD_OP LDX #define TARGET_ADDEND_LD_OP LDX