x86_64 fixes (initial patch by Filip Navara)

git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@1517 c046a42c-6fe2-441c-8c8c-71466251a162
bellard 2005-07-23 17:41:26 +00:00
parent 2efbe911d3
commit 8f091a5960
6 changed files with 170 additions and 38 deletions

View file

@@ -214,6 +214,12 @@
#define MSR_IA32_SYSENTER_ESP 0x175
#define MSR_IA32_SYSENTER_EIP 0x176
#define MSR_MCG_CAP 0x179
#define MSR_MCG_STATUS 0x17a
#define MSR_MCG_CTL 0x17b
#define MSR_PAT 0x277
#define MSR_EFER 0xc0000080
#define MSR_EFER_SCE (1 << 0)
@@ -246,6 +252,8 @@
#define CPUID_PGE (1 << 13)
#define CPUID_MCA (1 << 14)
#define CPUID_CMOV (1 << 15)
#define CPUID_PAT (1 << 16)
#define CPUID_CLFLUSH (1 << 19)
/* ... */
#define CPUID_MMX (1 << 23)
#define CPUID_FXSR (1 << 24)
@@ -474,6 +482,8 @@ typedef struct CPUX86State {
target_ulong kernelgsbase;
#endif
uint64_t pat;
/* temporary data for USE_CODE_COPY mode */
#ifdef USE_CODE_COPY
uint32_t tmp0;
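For context, the CPUID_* constants added above are bit positions in the EDX feature word returned by CPUID leaf 1. A minimal sketch of how software would test such a bit, assuming GCC/Clang's <cpuid.h> wrapper; the cpu_has_pat() helper below is illustrative, not part of QEMU:

#include <cpuid.h>   /* GCC/Clang builtin wrapper for the CPUID instruction */

#define CPUID_PAT     (1 << 16)
#define CPUID_CLFLUSH (1 << 19)

/* Return nonzero if CPUID leaf 1 advertises PAT support in EDX. */
static int cpu_has_pat(void)
{
    unsigned int eax, ebx, ecx, edx;
    if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
        return 0;
    return (edx & CPUID_PAT) != 0;
}

int main(void)
{
    return cpu_has_pat() ? 0 : 1;   /* exit status 0 if PAT is reported */
}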

View file

@@ -157,11 +157,11 @@ void helper_lldt_T0(void);
void helper_ltr_T0(void);
void helper_movl_crN_T0(int reg);
void helper_movl_drN_T0(int reg);
void helper_invlpg(unsigned int addr);
void helper_invlpg(target_ulong addr);
void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0);
void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3);
void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4);
void cpu_x86_flush_tlb(CPUX86State *env, uint32_t addr);
void cpu_x86_flush_tlb(CPUX86State *env, target_ulong addr);
int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
int is_write, int is_user, int is_softmmu);
void tlb_fill(target_ulong addr, int is_write, int is_user,
@@ -190,6 +190,7 @@ void helper_idivq_EAX_T0(void);
void helper_cmpxchg8b(void);
void helper_cpuid(void);
void helper_enter_level(int level, int data32);
void helper_enter64_level(int level, int data64);
void helper_sysenter(void);
void helper_sysexit(void);
void helper_syscall(int next_eip_addend);
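The prototype changes above (unsigned int becoming target_ulong) matter because a 64-bit linear address does not fit in unsigned int, so the old signatures silently truncated it before the TLB flush. A stand-alone illustration of that truncation, with target_ulong typedef'd locally for the example (this is not QEMU code):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t target_ulong;   /* what a TARGET_X86_64 build uses */

static void flush_old(unsigned int addr)  { printf("old: 0x%08x\n", addr); }
static void flush_new(target_ulong addr)  { printf("new: 0x%016" PRIx64 "\n", addr); }

int main(void)
{
    target_ulong linear = 0xffff800012345678ULL;  /* a kernel-space linear address */
    flush_old(linear);   /* prints 0x12345678: the wrong page would be flushed */
    flush_new(linear);   /* prints 0xffff800012345678 */
    return 0;
}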

View file

@@ -1334,6 +1334,20 @@ void helper_cpuid(void)
ECX = env->cpuid_model[(index - 0x80000002) * 4 + 2];
EDX = env->cpuid_model[(index - 0x80000002) * 4 + 3];
break;
case 0x80000005:
/* cache info (L1 cache) */
EAX = 0x01ff01ff;
EBX = 0x01ff01ff;
ECX = 0x40020140;
EDX = 0x40020140;
break;
case 0x80000006:
/* cache info (L2 cache) */
EAX = 0;
EBX = 0x42004200;
ECX = 0x02008140;
EDX = 0;
break;
case 0x80000008:
/* virtual & phys address size in low 2 bytes. */
EAX = 0x00003028;
@@ -1383,6 +1397,37 @@ void helper_enter_level(int level, int data32)
}
}
#ifdef TARGET_X86_64
void helper_enter64_level(int level, int data64)
{
target_ulong esp, ebp;
ebp = EBP;
esp = ESP;
if (data64) {
/* 64 bit */
esp -= 8;
while (--level) {
esp -= 8;
ebp -= 8;
stq(esp, ldq(ebp));
}
esp -= 8;
stq(esp, T1);
} else {
/* 16 bit */
esp -= 2;
while (--level) {
esp -= 2;
ebp -= 2;
stw(esp, lduw(ebp));
}
esp -= 2;
stw(esp, T1);
}
}
#endif
void helper_lldt_T0(void)
{
int selector;
@@ -1963,6 +2008,7 @@ static inline void helper_ret_protected(int shift, int is_iret, int addend)
#endif
sp_mask = get_sp_mask(env->segs[R_SS].flags);
sp = ESP;
/* XXX: ssp is zero in 64 bit ? */
ssp = env->segs[R_SS].base;
new_eflags = 0; /* avoid warning */
#ifdef TARGET_X86_64
@@ -2271,7 +2317,7 @@ void helper_movl_drN_T0(int reg)
env->dr[reg] = T0;
}
void helper_invlpg(unsigned int addr)
void helper_invlpg(target_ulong addr)
{
cpu_x86_flush_tlb(env, addr);
}
@@ -2332,6 +2378,9 @@ void helper_wrmsr(void)
case MSR_STAR:
env->star = val;
break;
case MSR_PAT:
env->pat = val;
break;
#ifdef TARGET_X86_64
case MSR_LSTAR:
env->lstar = val;
@@ -2380,6 +2429,9 @@ void helper_rdmsr(void)
case MSR_STAR:
val = env->star;
break;
case MSR_PAT:
val = env->pat;
break;
#ifdef TARGET_X86_64
case MSR_LSTAR:
val = env->lstar;
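helper_enter64_level() above implements only the frame-pointer copy loop of ENTER; the initial push of RBP and the final RSP adjustment are emitted by gen_enter() in the translator (see the gen_enter() hunks later in this commit). A compact C model of the whole 64-bit ENTER size, level sequence, with guest memory as a flat array and store64()/load64() as illustrative stand-ins for stq()/ldq(), not QEMU functions:

#include <stdint.h>
#include <string.h>

static uint8_t mem[1 << 16];            /* toy guest memory for the example */

static void store64(uint64_t addr, uint64_t v) { memcpy(&mem[addr], &v, 8); }
static uint64_t load64(uint64_t addr) { uint64_t v; memcpy(&v, &mem[addr], 8); return v; }

/* Model of ENTER size, level in 64-bit mode, as split between gen_enter()
   and helper_enter64_level() in this patch. */
static void enter64(uint64_t *rsp, uint64_t *rbp, unsigned size, unsigned level)
{
    uint64_t frame;

    level &= 0x1f;                       /* the level operand is taken mod 32 */
    *rsp -= 8;
    store64(*rsp, *rbp);                 /* push rbp (done by gen_enter) */
    frame = *rsp;                        /* new frame pointer, T1 in the patch */
    if (level) {                         /* the helper_enter64_level() part */
        unsigned n = level;
        while (--n) {
            *rbp -= 8;
            *rsp -= 8;
            store64(*rsp, load64(*rbp)); /* copy an outer frame pointer */
        }
        *rsp -= 8;
        store64(*rsp, frame);            /* push the new frame pointer */
    }
    *rbp = frame;                        /* rbp now points at the saved rbp */
    *rsp -= size;                        /* allocate the local storage area */
}

int main(void)
{
    uint64_t rsp = sizeof(mem), rbp = sizeof(mem);
    enter64(&rsp, &rbp, 32, 0);          /* like push rbp; mov rbp, rsp; sub rsp, 32 */
    return 0;
}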

View file

@@ -106,7 +106,9 @@ CPUX86State *cpu_x86_init(void)
env->cpuid_version = (family << 8) | (model << 4) | stepping;
env->cpuid_features = (CPUID_FP87 | CPUID_DE | CPUID_PSE |
CPUID_TSC | CPUID_MSR | CPUID_MCE |
CPUID_CX8 | CPUID_PGE | CPUID_CMOV);
CPUID_CX8 | CPUID_PGE | CPUID_CMOV |
CPUID_PAT);
env->pat = 0x0007040600070406ULL;
env->cpuid_ext_features = 0;
env->cpuid_features |= CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | CPUID_PAE | CPUID_SEP;
env->cpuid_xlevel = 0;
@@ -128,6 +130,9 @@ CPUX86State *cpu_x86_init(void)
env->cpuid_ext2_features = (env->cpuid_features & 0x0183F3FF);
env->cpuid_ext2_features |= CPUID_EXT2_LM | CPUID_EXT2_SYSCALL;
env->cpuid_xlevel = 0x80000008;
/* these features are needed for Win64 and aren't fully implemented */
env->cpuid_features |= CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA;
#endif
}
cpu_single_env = env;
@@ -546,7 +551,7 @@ void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4)
}
/* XXX: also flush 4MB pages */
void cpu_x86_flush_tlb(CPUX86State *env, uint32_t addr)
void cpu_x86_flush_tlb(CPUX86State *env, target_ulong addr)
{
tlb_flush_page(env, addr);
}
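The reset value written to env->pat in cpu_x86_init() packs eight one-byte PAT entries, low byte first, using the usual IA-32 memory-type encoding (0 = UC, 1 = WC, 4 = WT, 5 = WP, 6 = WB, 7 = UC-). A small decoder, for illustration only and not part of the patch:

#include <stdint.h>
#include <stdio.h>

static const char *pat_type(uint8_t v)
{
    switch (v) {
    case 0: return "UC";
    case 1: return "WC";
    case 4: return "WT";
    case 5: return "WP";
    case 6: return "WB";
    case 7: return "UC-";
    default: return "reserved";
    }
}

int main(void)
{
    uint64_t pat = 0x0007040600070406ULL;   /* the reset value set in cpu_x86_init() */
    for (int i = 0; i < 8; i++)
        printf("PAT%d = %s\n", i, pat_type((pat >> (i * 8)) & 0xff));
    return 0;
}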

View file

@@ -898,6 +898,11 @@ void op_addw_ESP_im(void)
}
#ifdef TARGET_X86_64
void op_subq_A0_2(void)
{
A0 -= 2;
}
void op_subq_A0_8(void)
{
A0 -= 8;
@@ -929,6 +934,13 @@ void OPPROTO op_enter_level(void)
helper_enter_level(PARAM1, PARAM2);
}
#ifdef TARGET_X86_64
void OPPROTO op_enter64_level(void)
{
helper_enter64_level(PARAM1, PARAM2);
}
#endif
void OPPROTO op_sysenter(void)
{
helper_sysenter();

View file

@@ -1627,7 +1627,14 @@ static void gen_add_A0_ds_seg(DisasContext *s)
override = R_DS;
}
if (must_add_seg) {
gen_op_addl_A0_seg(offsetof(CPUX86State,segs[override].base));
#ifdef TARGET_X86_64
if (CODE64(s)) {
gen_op_addq_A0_seg(offsetof(CPUX86State,segs[override].base));
} else
#endif
{
gen_op_addl_A0_seg(offsetof(CPUX86State,segs[override].base));
}
}
}
@@ -1948,10 +1955,14 @@ static void gen_push_T0(DisasContext *s)
{
#ifdef TARGET_X86_64
if (CODE64(s)) {
/* XXX: check 16 bit behaviour */
gen_op_movq_A0_reg[R_ESP]();
gen_op_subq_A0_8();
gen_op_st_T0_A0[OT_QUAD + s->mem_index]();
if (s->dflag) {
gen_op_subq_A0_8();
gen_op_st_T0_A0[OT_QUAD + s->mem_index]();
} else {
gen_op_subq_A0_2();
gen_op_st_T0_A0[OT_WORD + s->mem_index]();
}
gen_op_movq_ESP_A0();
} else
#endif
@@ -1985,10 +1996,14 @@ static void gen_push_T1(DisasContext *s)
{
#ifdef TARGET_X86_64
if (CODE64(s)) {
/* XXX: check 16 bit behaviour */
gen_op_movq_A0_reg[R_ESP]();
gen_op_subq_A0_8();
gen_op_st_T1_A0[OT_QUAD + s->mem_index]();
if (s->dflag) {
gen_op_subq_A0_8();
gen_op_st_T1_A0[OT_QUAD + s->mem_index]();
} else {
gen_op_subq_A0_2();
gen_op_st_T0_A0[OT_WORD + s->mem_index]();
}
gen_op_movq_ESP_A0();
} else
#endif
@@ -2020,9 +2035,8 @@ static void gen_pop_T0(DisasContext *s)
{
#ifdef TARGET_X86_64
if (CODE64(s)) {
/* XXX: check 16 bit behaviour */
gen_op_movq_A0_reg[R_ESP]();
gen_op_ld_T0_A0[OT_QUAD + s->mem_index]();
gen_op_ld_T0_A0[(s->dflag ? OT_QUAD : OT_WORD) + s->mem_index]();
} else
#endif
{
@@ -2041,7 +2055,7 @@ static void gen_pop_T0(DisasContext *s)
static void gen_pop_update(DisasContext *s)
{
#ifdef TARGET_X86_64
if (CODE64(s)) {
if (CODE64(s) && s->dflag) {
gen_stack_update(s, 8);
} else
#endif
@@ -2105,26 +2119,48 @@ static void gen_enter(DisasContext *s, int esp_addend, int level)
{
int ot, opsize;
ot = s->dflag + OT_WORD;
level &= 0x1f;
opsize = 2 << s->dflag;
#ifdef TARGET_X86_64
if (CODE64(s)) {
ot = s->dflag ? OT_QUAD : OT_WORD;
opsize = 1 << ot;
gen_op_movl_A0_ESP();
gen_op_addq_A0_im(-opsize);
gen_op_movl_T1_A0();
gen_op_movl_A0_ESP();
gen_op_addl_A0_im(-opsize);
if (!s->ss32)
gen_op_andl_A0_ffff();
gen_op_movl_T1_A0();
if (s->addseg)
gen_op_addl_A0_seg(offsetof(CPUX86State,segs[R_SS].base));
/* push bp */
gen_op_mov_TN_reg[OT_LONG][0][R_EBP]();
gen_op_st_T0_A0[ot + s->mem_index]();
if (level) {
gen_op_enter_level(level, s->dflag);
/* push bp */
gen_op_mov_TN_reg[OT_LONG][0][R_EBP]();
gen_op_st_T0_A0[ot + s->mem_index]();
if (level) {
gen_op_enter64_level(level, (ot == OT_QUAD));
}
gen_op_mov_reg_T1[ot][R_EBP]();
gen_op_addl_T1_im( -esp_addend + (-opsize * level) );
gen_op_mov_reg_T1[OT_QUAD][R_ESP]();
} else
#endif
{
ot = s->dflag + OT_WORD;
opsize = 2 << s->dflag;
gen_op_movl_A0_ESP();
gen_op_addl_A0_im(-opsize);
if (!s->ss32)
gen_op_andl_A0_ffff();
gen_op_movl_T1_A0();
if (s->addseg)
gen_op_addl_A0_seg(offsetof(CPUX86State,segs[R_SS].base));
/* push bp */
gen_op_mov_TN_reg[OT_LONG][0][R_EBP]();
gen_op_st_T0_A0[ot + s->mem_index]();
if (level) {
gen_op_enter_level(level, s->dflag);
}
gen_op_mov_reg_T1[ot][R_EBP]();
gen_op_addl_T1_im( -esp_addend + (-opsize * level) );
gen_op_mov_reg_T1[OT_WORD + s->ss32][R_ESP]();
}
gen_op_mov_reg_T1[ot][R_EBP]();
gen_op_addl_T1_im( -esp_addend + (-opsize * level) );
gen_op_mov_reg_T1[OT_WORD + s->ss32][R_ESP]();
}
static void gen_exception(DisasContext *s, int trapno, target_ulong cur_eip)
@@ -2901,7 +2937,7 @@ static void gen_sse(DisasContext *s, int b, target_ulong pc_start, int rex_r)
if (mod != 3)
goto illegal_op;
#ifdef TARGET_X86_64
if (CODE64(s)) {
if (s->aflag == 2) {
gen_op_movq_A0_reg[R_EDI]();
} else
#endif
@@ -3697,7 +3733,6 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
break;
case 0xc8: /* enter */
{
/* XXX: long mode support */
int level;
val = lduw_code(s->pc);
s->pc += 2;
@@ -3707,7 +3742,6 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
break;
case 0xc9: /* leave */
/* XXX: exception not precise (ESP is updated before potential exception) */
/* XXX: may be invalid for 16 bit in long mode */
if (CODE64(s)) {
gen_op_mov_TN_reg[OT_QUAD][0][R_EBP]();
gen_op_mov_reg_T0[OT_QUAD][R_ESP]();
@@ -3926,7 +3960,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
else
ot = dflag + OT_WORD;
#ifdef TARGET_X86_64
if (CODE64(s)) {
if (s->aflag == 2) {
offset_addr = ldq_code(s->pc);
s->pc += 8;
if (offset_addr == (int32_t)offset_addr)
@@ -3955,7 +3989,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
break;
case 0xd7: /* xlat */
#ifdef TARGET_X86_64
if (CODE64(s)) {
if (s->aflag == 2) {
gen_op_movq_A0_reg[R_EBX]();
gen_op_addq_A0_AL();
} else
@@ -4779,6 +4813,8 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
val = ldsw_code(s->pc);
s->pc += 2;
gen_pop_T0(s);
if (CODE64(s) && s->dflag)
s->dflag = 2;
gen_stack_update(s, val + (2 << s->dflag));
if (s->dflag == 0)
gen_op_andl_T0_ffff();
@@ -5782,14 +5818,30 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
break;
case 5: /* lfence */
case 6: /* mfence */
case 7: /* sfence */
if ((modrm & 0xc7) != 0xc0 || !(s->cpuid_features & CPUID_SSE))
goto illegal_op;
break;
case 7: /* sfence / clflush */
if ((modrm & 0xc7) == 0xc0) {
/* sfence */
if (!(s->cpuid_features & CPUID_SSE))
goto illegal_op;
} else {
/* clflush */
if (!(s->cpuid_features & CPUID_CLFLUSH))
goto illegal_op;
gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
}
break;
default:
goto illegal_op;
}
break;
case 0x10d: /* prefetch */
modrm = ldub_code(s->pc++);
gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
/* ignore for now */
break;
case 0x110 ... 0x117:
case 0x128 ... 0x12f:
case 0x150 ... 0x177:
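The (modrm & 0xc7) == 0xc0 test in the sfence/clflush hunk above keeps only the mod field (bits 7:6) and rm field (bits 2:0) of the ModRM byte: mod == 11b with rm == 0 selects the register form (sfence), and everything else is handled as the memory form (clflush m8). A tiny stand-alone sketch of that dispatch, not QEMU code, assuming the reg field has already been decoded as /7:

#include <stdio.h>

/* ModRM layout: mod = bits 7:6, reg = bits 5:3, rm = bits 2:0. */
static const char *decode_0fae_7(unsigned char modrm)
{
    if ((modrm & 0xc7) == 0xc0)
        return "sfence";        /* register form: mod == 11b, rm == 0 */
    return "clflush m8";        /* any other encoding takes the memory path */
}

int main(void)
{
    printf("0xf8 -> %s\n", decode_0fae_7(0xf8));   /* 0F AE F8 = sfence */
    printf("0x38 -> %s\n", decode_0fae_7(0x38));   /* mod=00, rm=000: clflush [rax] */
    return 0;
}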