diff --git a/target-arm/exec.h b/target-arm/exec.h
index d95309811d..af1818171a 100644
--- a/target-arm/exec.h
+++ b/target-arm/exec.h
@@ -61,13 +61,6 @@ static inline int cpu_halted(CPUState *env) {
 /* In op_helper.c */
-void helper_set_cp(CPUState *, uint32_t, uint32_t);
-uint32_t helper_get_cp(CPUState *, uint32_t);
-void helper_set_cp15(CPUState *, uint32_t, uint32_t);
-uint32_t helper_get_cp15(CPUState *, uint32_t);
-uint32_t helper_v7m_mrs(CPUState *env, int reg);
-void helper_v7m_msr(CPUState *env, int reg, uint32_t val);
-
 void helper_mark_exclusive(CPUARMState *, uint32_t addr);
 int helper_test_exclusive(CPUARMState *, uint32_t addr);
 void helper_clrex(CPUARMState *env);
diff --git a/target-arm/helper.c b/target-arm/helper.c
index 257960a945..6438882913 100644
--- a/target-arm/helper.c
+++ b/target-arm/helper.c
@@ -470,38 +470,38 @@ target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
 }
 /* These should probably raise undefined insn exceptions.  */
-void helper_set_cp(CPUState *env, uint32_t insn, uint32_t val)
+void HELPER(set_cp)(CPUState *env, uint32_t insn, uint32_t val)
 {
     int op1 = (insn >> 8) & 0xf;
     cpu_abort(env, "cp%i insn %08x\n", op1, insn);
     return;
 }
-uint32_t helper_get_cp(CPUState *env, uint32_t insn)
+uint32_t HELPER(get_cp)(CPUState *env, uint32_t insn)
 {
     int op1 = (insn >> 8) & 0xf;
     cpu_abort(env, "cp%i insn %08x\n", op1, insn);
     return 0;
 }
-void helper_set_cp15(CPUState *env, uint32_t insn, uint32_t val)
+void HELPER(set_cp15)(CPUState *env, uint32_t insn, uint32_t val)
 {
     cpu_abort(env, "cp15 insn %08x\n", insn);
 }
-uint32_t helper_get_cp15(CPUState *env, uint32_t insn)
+uint32_t HELPER(get_cp15)(CPUState *env, uint32_t insn)
 {
     cpu_abort(env, "cp15 insn %08x\n", insn);
     return 0;
 }
 /* These should probably raise undefined insn exceptions.  */
-void helper_v7m_msr(CPUState *env, int reg, uint32_t val)
+void HELPER(v7m_msr)(CPUState *env, uint32_t reg, uint32_t val)
 {
     cpu_abort(env, "v7m_mrs %d\n", reg);
 }
-uint32_t helper_v7m_mrs(CPUState *env, int reg)
+uint32_t HELPER(v7m_mrs)(CPUState *env, uint32_t reg)
 {
     cpu_abort(env, "v7m_mrs %d\n", reg);
     return 0;
@@ -1191,7 +1191,7 @@ void helper_clrex(CPUState *env)
     env->mmon_addr = -1;
 }
-void helper_set_cp(CPUState *env, uint32_t insn, uint32_t val)
+void HELPER(set_cp)(CPUState *env, uint32_t insn, uint32_t val)
 {
     int cp_num = (insn >> 8) & 0xf;
     int cp_info = (insn >> 5) & 7;
@@ -1203,7 +1203,7 @@ void helper_set_cp(CPUState *env, uint32_t insn, uint32_t val)
               cp_info, src, operand, val);
 }
-uint32_t helper_get_cp(CPUState *env, uint32_t insn)
+uint32_t HELPER(get_cp)(CPUState *env, uint32_t insn)
 {
     int cp_num = (insn >> 8) & 0xf;
     int cp_info = (insn >> 5) & 7;
@@ -1246,7 +1246,7 @@ static uint32_t extended_mpu_ap_bits(uint32_t val)
     return ret;
 }
-void helper_set_cp15(CPUState *env, uint32_t insn, uint32_t val)
+void HELPER(set_cp15)(CPUState *env, uint32_t insn, uint32_t val)
 {
     int op1;
     int op2;
@@ -1530,7 +1530,7 @@ bad_reg:
               (insn >> 16) & 0xf, crm, op1, op2);
 }
-uint32_t helper_get_cp15(CPUState *env, uint32_t insn)
+uint32_t HELPER(get_cp15)(CPUState *env, uint32_t insn)
 {
     int op1;
     int op2;
@@ -1803,7 +1803,7 @@ uint32_t HELPER(get_r13_banked)(CPUState *env, uint32_t mode)
     return env->banked_r13[bank_number(mode)];
 }
-uint32_t helper_v7m_mrs(CPUState *env, int reg)
+uint32_t HELPER(v7m_mrs)(CPUState *env, uint32_t reg)
 {
     switch (reg) {
     case 0: /* APSR */
@@ -1840,7 +1840,7 @@ uint32_t helper_v7m_mrs(CPUState *env, int reg)
     }
 }
-void helper_v7m_msr(CPUState *env, int reg, uint32_t val)
+void HELPER(v7m_msr)(CPUState *env, uint32_t reg, uint32_t val)
 {
     switch (reg) {
     case 0: /* APSR */
diff --git a/target-arm/helpers.h b/target-arm/helpers.h
index 4a22ef3e8c..08eb590114 100644
--- a/target-arm/helpers.h
+++ b/target-arm/helpers.h
@@ -118,6 +118,15 @@ DEF_HELPER_0_0(wfi, void, (void))
 DEF_HELPER_0_2(cpsr_write, void, (uint32_t, uint32_t))
 DEF_HELPER_1_0(cpsr_read, uint32_t, (void))
+DEF_HELPER_0_3(v7m_msr, void, (CPUState *, uint32_t, uint32_t))
+DEF_HELPER_1_2(v7m_mrs, uint32_t, (CPUState *, uint32_t))
+
+DEF_HELPER_0_3(set_cp15, void, (CPUState *, uint32_t, uint32_t))
+DEF_HELPER_1_2(get_cp15, uint32_t, (CPUState *, uint32_t))
+
+DEF_HELPER_0_3(set_cp, void, (CPUState *, uint32_t, uint32_t))
+DEF_HELPER_1_2(get_cp, uint32_t, (CPUState *, uint32_t))
+
 DEF_HELPER_1_2(get_r13_banked, uint32_t, (CPUState *, uint32_t))
 DEF_HELPER_0_3(set_r13_banked, void, (CPUState *, uint32_t, uint32_t))
@@ -187,6 +196,20 @@ DEF_HELPER_1_2(rsqrte_f32, float32, (float32, CPUState *))
 DEF_HELPER_1_2(recpe_u32, uint32_t, (uint32_t, CPUState *))
 DEF_HELPER_1_2(rsqrte_u32, uint32_t, (uint32_t, CPUState *))
+DEF_HELPER_1_2(add_cc, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(adc_cc, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(sub_cc, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(sbc_cc, uint32_t, (uint32_t, uint32_t))
+
+DEF_HELPER_1_2(shl, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(shr, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(sar, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(ror, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(shl_cc, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(shr_cc, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(sar_cc, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(ror_cc, uint32_t, (uint32_t, uint32_t))
+
 #undef DEF_HELPER
 #undef DEF_HELPER_0_0
 #undef DEF_HELPER_0_1
diff --git a/target-arm/op.c b/target-arm/op.c
index 7c6d6a16aa..57086e349c 100644
--- a/target-arm/op.c
+++ b/target-arm/op.c
@@ -20,66 +20,6 @@
  */
 #include "exec.h"
-void OPPROTO op_addl_T0_T1_cc(void)
-{
-    unsigned int src1;
-    src1 = T0;
-    T0 += T1;
-    env->NZF = T0;
-    env->CF = T0 < src1;
-    env->VF = (src1 ^ T1 ^ -1) & (src1 ^ T0);
-}
-
-void OPPROTO op_adcl_T0_T1_cc(void)
-{
-    unsigned int src1;
-    src1 = T0;
-    if (!env->CF) {
-        T0 += T1;
-        env->CF = T0 < src1;
-    } else {
-        T0 += T1 + 1;
-        env->CF = T0 <= src1;
-    }
-    env->VF = (src1 ^ T1 ^ -1) & (src1 ^ T0);
-    env->NZF = T0;
-    FORCE_RET();
-}
-
-#define OPSUB(sub, sbc, res, T0, T1) \
- \
-void OPPROTO op_ ## sub ## l_T0_T1_cc(void) \
-{ \
-    unsigned int src1; \
-    src1 = T0; \
-    T0 -= T1; \
-    env->NZF = T0; \
-    env->CF = src1 >= T1; \
-    env->VF = (src1 ^ T1) & (src1 ^ T0); \
-    res = T0; \
-} \
- \
-void OPPROTO op_ ## sbc ## l_T0_T1_cc(void) \
-{ \
-    unsigned int src1; \
-    src1 = T0; \
-    if (!env->CF) { \
-        T0 = T0 - T1 - 1; \
-        env->CF = src1 > T1; \
-    } else { \
-        T0 = T0 - T1; \
-        env->CF = src1 >= T1; \
-    } \
-    env->VF = (src1 ^ T1) & (src1 ^ T0); \
-    env->NZF = T0; \
-    res = T0; \
-    FORCE_RET(); \
-}
-
-OPSUB(sub, sbc, T0, T0, T1)
-
-OPSUB(rsb, rsc, T0, T1, T0)
-
 /* memory access */
 #define MEMSUFFIX _raw
@@ -92,164 +32,6 @@ OPSUB(rsb, rsc, T0, T1, T0)
 #include "op_mem.h"
 #endif
-void OPPROTO op_clrex(void)
-{
-    cpu_lock();
-    helper_clrex(env);
-    cpu_unlock();
-}
-
-/* T1 based, use T0 as shift count */
-
-void OPPROTO op_shll_T1_T0(void)
-{
-    int shift;
-    shift = T0 & 0xff;
-    if (shift >= 32)
-        T1 = 0;
-    else
-        T1 = T1 << shift;
-    FORCE_RET();
-}
-
-void OPPROTO op_shrl_T1_T0(void)
-{
-    int shift;
-    shift = T0 & 0xff;
-    if (shift >= 32)
-        T1 = 0;
-    else
-        T1 = (uint32_t)T1 >> shift;
-    FORCE_RET();
-}
-
-void OPPROTO op_sarl_T1_T0(void)
-{
-    int shift;
-    shift = T0 & 0xff;
-    if (shift >= 32)
-        shift = 31;
-    T1 = (int32_t)T1 >> shift;
-}
-
-void OPPROTO op_rorl_T1_T0(void)
-{
-    int shift;
-    shift = T0 & 0x1f;
-    if (shift) {
-        T1 = ((uint32_t)T1 >> shift) | (T1 << (32 - shift));
-    }
-    FORCE_RET();
-}
-
-/* T1 based, use T0 as shift count and compute CF */
-
-void OPPROTO op_shll_T1_T0_cc(void)
-{
-    int shift;
-    shift = T0 & 0xff;
-    if (shift >= 32) {
-        if (shift == 32)
-            env->CF = T1 & 1;
-        else
-            env->CF = 0;
-        T1 = 0;
-    } else if (shift != 0) {
-        env->CF = (T1 >> (32 - shift)) & 1;
-        T1 = T1 << shift;
-    }
-    FORCE_RET();
-}
-
-void OPPROTO op_shrl_T1_T0_cc(void)
-{
-    int shift;
-    shift = T0 & 0xff;
-    if (shift >= 32) {
-        if (shift == 32)
-            env->CF = (T1 >> 31) & 1;
-        else
-            env->CF = 0;
-        T1 = 0;
-    } else if (shift != 0) {
-        env->CF = (T1 >> (shift - 1)) & 1;
-        T1 = (uint32_t)T1 >> shift;
-    }
-    FORCE_RET();
-}
-
-void OPPROTO op_sarl_T1_T0_cc(void)
-{
-    int shift;
-    shift = T0 & 0xff;
-    if (shift >= 32) {
-        env->CF = (T1 >> 31) & 1;
-        T1 = (int32_t)T1 >> 31;
-    } else if (shift != 0) {
-        env->CF = (T1 >> (shift - 1)) & 1;
-        T1 = (int32_t)T1 >> shift;
-    }
-    FORCE_RET();
-}
-
-void OPPROTO op_rorl_T1_T0_cc(void)
-{
-    int shift1, shift;
-    shift1 = T0 & 0xff;
-    shift = shift1 & 0x1f;
-    if (shift == 0) {
-        if (shift1 != 0)
-            env->CF = (T1 >> 31) & 1;
-    } else {
-        env->CF = (T1 >> (shift - 1)) & 1;
-        T1 = ((uint32_t)T1 >> shift) | (T1 << (32 - shift));
-    }
-    FORCE_RET();
-}
-
-void OPPROTO op_movl_cp_T0(void)
-{
-    helper_set_cp(env, PARAM1, T0);
-    FORCE_RET();
-}
-
-void OPPROTO op_movl_T0_cp(void)
-{
-    T0 = helper_get_cp(env, PARAM1);
-    FORCE_RET();
-}
-
-void OPPROTO op_movl_cp15_T0(void)
-{
-    helper_set_cp15(env, PARAM1, T0);
-    FORCE_RET();
-}
-
-void OPPROTO op_movl_T0_cp15(void)
-{
-    T0 = helper_get_cp15(env, PARAM1);
-    FORCE_RET();
-}
-
-void OPPROTO op_v7m_mrs_T0(void)
-{
-    T0 = helper_v7m_mrs(env, PARAM1);
-}
-
-void OPPROTO op_v7m_msr_T0(void)
-{
-    helper_v7m_msr(env, PARAM1, T0);
-}
-
-void OPPROTO op_movl_T0_sp(void)
-{
-    if (PARAM1 == env->v7m.current_sp)
-        T0 = env->regs[13];
-    else
-        T0 = env->v7m.other_sp;
-    FORCE_RET();
-}
-
 #include "op_neon.h"
 /* iwMMXt support */
diff --git a/target-arm/op_helper.c b/target-arm/op_helper.c
index 6748b06058..79b3f1a456 100644
--- a/target-arm/op_helper.c
+++ b/target-arm/op_helper.c
@@ -304,3 +304,151 @@ void HELPER(set_user_reg)(uint32_t regno, uint32_t val)
     }
 }
+/* ??? Flag setting arithmetic is awkward because we need to do comparisons.
+   The only way to do that in TCG is a conditional branch, which clobbers
+   all our temporaries.  For now implement these as helper functions.  */
+
+uint32_t HELPER(add_cc)(uint32_t a, uint32_t b)
+{
+    uint32_t result;
+    result = a + b;
+    env->NZF = result;
+    env->CF = result < a;
+    env->VF = (a ^ b ^ -1) & (a ^ result);
+    return result;
+}
+
+uint32_t HELPER(adc_cc)(uint32_t a, uint32_t b)
+{
+    uint32_t result;
+    if (!env->CF) {
+        result = a + b;
+        env->CF = result < a;
+    } else {
+        result = a + b + 1;
+        env->CF = result <= a;
+    }
+    env->VF = (a ^ b ^ -1) & (a ^ result);
+    env->NZF = result;
+    return result;
+}
+
+uint32_t HELPER(sub_cc)(uint32_t a, uint32_t b)
+{
+    uint32_t result;
+    result = a - b;
+    env->NZF = result;
+    env->CF = a >= b;
+    env->VF = (a ^ b) & (a ^ result);
+    return result;
+}
+
+uint32_t HELPER(sbc_cc)(uint32_t a, uint32_t b)
+{
+    uint32_t result;
+    if (!env->CF) {
+        result = a - b - 1;
+        env->CF = a > b;
+    } else {
+        result = a - b;
+        env->CF = a >= b;
+    }
+    env->VF = (a ^ b) & (a ^ result);
+    env->NZF = result;
+    return result;
+}
+
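The add_cc/adc_cc/sub_cc/sbc_cc helpers above encode the ARM NZCV rules directly in C: carry out of an addition shows up as an unsigned wrap (result < a), subtraction sets C when there is no borrow (a >= b), and signed overflow is bit 31 of (a ^ b ^ -1) & (a ^ result). The standalone sketch below is an editorial illustration only, not part of the patch; the names in it are made up, it simply re-checks those formulas against a few obvious cases.

/* Editor's sketch: verify the carry/overflow expressions used by the
   flag-setting helpers on plain uint32_t values. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* ADD: carry out means the 32-bit sum wrapped, i.e. result < a. */
    uint32_t a = 0xffffffffu, b = 1u;
    uint32_t result = a + b;
    uint32_t cf = result < a;
    uint32_t vf = (a ^ b ^ -1) & (a ^ result);  /* bit 31 set on signed overflow */
    assert(cf == 1 && (vf >> 31) == 0);         /* unsigned wrap, no signed overflow */

    /* 0x7fffffff + 1 overflows as a signed addition but produces no carry. */
    a = 0x7fffffffu; b = 1u;
    result = a + b;
    cf = result < a;
    vf = (a ^ b ^ -1) & (a ^ result);
    assert(cf == 0 && (vf >> 31) == 1);

    /* SUB: ARM's C flag means "no borrow", i.e. a >= b. */
    a = 5u; b = 7u;
    result = a - b;
    cf = a >= b;
    vf = (a ^ b) & (a ^ result);
    assert(cf == 0 && (vf >> 31) == 0);         /* 5 - 7 borrows but does not overflow */

    printf("flag formulas behave as expected\n");
    return 0;
}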
+/* Similarly for variable shift instructions.  */
+
+uint32_t HELPER(shl)(uint32_t x, uint32_t i)
+{
+    int shift = i & 0xff;
+    if (shift >= 32)
+        return 0;
+    return x << shift;
+}
+
+uint32_t HELPER(shr)(uint32_t x, uint32_t i)
+{
+    int shift = i & 0xff;
+    if (shift >= 32)
+        return 0;
+    return (uint32_t)x >> shift;
+}
+
+uint32_t HELPER(sar)(uint32_t x, uint32_t i)
+{
+    int shift = i & 0xff;
+    if (shift >= 32)
+        shift = 31;
+    return (int32_t)x >> shift;
+}
+
+uint32_t HELPER(ror)(uint32_t x, uint32_t i)
+{
+    int shift = i & 0xff;
+    if (shift == 0)
+        return x;
+    return (x >> shift) | (x << (32 - shift));
+}
+
+uint32_t HELPER(shl_cc)(uint32_t x, uint32_t i)
+{
+    int shift = i & 0xff;
+    if (shift >= 32) {
+        if (shift == 32)
+            env->CF = x & 1;
+        else
+            env->CF = 0;
+        return 0;
+    } else if (shift != 0) {
+        env->CF = (x >> (32 - shift)) & 1;
+        return x << shift;
+    }
+    return x;
+}
+
+uint32_t HELPER(shr_cc)(uint32_t x, uint32_t i)
+{
+    int shift = i & 0xff;
+    if (shift >= 32) {
+        if (shift == 32)
+            env->CF = (x >> 31) & 1;
+        else
+            env->CF = 0;
+        return 0;
+    } else if (shift != 0) {
+        env->CF = (x >> (shift - 1)) & 1;
+        return x >> shift;
+    }
+    return x;
+}
+
+uint32_t HELPER(sar_cc)(uint32_t x, uint32_t i)
+{
+    int shift = i & 0xff;
+    if (shift >= 32) {
+        env->CF = (x >> 31) & 1;
+        return (int32_t)x >> 31;
+    } else if (shift != 0) {
+        env->CF = (x >> (shift - 1)) & 1;
+        return (int32_t)x >> shift;
+    }
+    return x;
+}
+
+uint32_t HELPER(ror_cc)(uint32_t x, uint32_t i)
+{
+    int shift1, shift;
+    shift1 = i & 0xff;
+    shift = shift1 & 0x1f;
+    if (shift == 0) {
+        if (shift1 != 0)
+            env->CF = (x >> 31) & 1;
+        return x;
+    } else {
+        env->CF = (x >> (shift - 1)) & 1;
+        return ((uint32_t)x >> shift) | (x << (32 - shift));
+    }
+}
+
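The variable-shift helpers above implement ARM's register-controlled shift rules rather than C's shift semantics: only the bottom byte of the shift register is used, LSL/LSR by 32 or more produce 0, ASR by 32 or more replicates the sign bit, and ROR looks only at the low five bits. The sketch below is an editorial illustration, not QEMU code; it assumes the compiler performs arithmetic right shifts on signed values, exactly as the helpers themselves do, and exercises the edge cases.

/* Editor's sketch: ARM register-controlled shift semantics on plain values. */
#include <assert.h>
#include <stdint.h>

static uint32_t arm_lsl(uint32_t x, uint32_t r)
{
    int shift = r & 0xff;            /* only the low byte of the shift register matters */
    return shift >= 32 ? 0 : x << shift;
}

static uint32_t arm_asr(uint32_t x, uint32_t r)
{
    int shift = r & 0xff;
    if (shift >= 32)
        shift = 31;                  /* every bit becomes a copy of the sign bit */
    return (uint32_t)((int32_t)x >> shift);
}

int main(void)
{
    assert(arm_lsl(0x80000001u, 1) == 0x00000002u);  /* top bit shifted out */
    assert(arm_lsl(0xffffffffu, 32) == 0);           /* well defined here, unlike C's << 32 */
    assert(arm_lsl(0x00000001u, 0x101) == 2);        /* only the low byte (1) of the amount is used */
    assert(arm_asr(0x80000000u, 40) == 0xffffffffu); /* sign bit replicated */
    assert(arm_asr(0x7fffffffu, 40) == 0);
    return 0;
}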
diff --git a/target-arm/op_mem.h b/target-arm/op_mem.h
index 0e2e7b65ef..519544d0ef 100644
--- a/target-arm/op_mem.h
+++ b/target-arm/op_mem.h
@@ -1,24 +1,5 @@
 /* ARM memory operations.  */
-/* Swap T0 with memory at address T1.  */
-/* ??? Is this exception safe?  */
-#define MEM_SWP_OP(name, lname) \
-void OPPROTO glue(op_swp##name,MEMSUFFIX)(void) \
-{ \
-    uint32_t tmp; \
-    cpu_lock(); \
-    tmp = glue(ld##lname,MEMSUFFIX)(T1); \
-    glue(st##name,MEMSUFFIX)(T1, T0); \
-    T0 = tmp; \
-    cpu_unlock(); \
-    FORCE_RET(); \
-}
-
-MEM_SWP_OP(b, ub)
-MEM_SWP_OP(l, l)
-
-#undef MEM_SWP_OP
-
 /* Load-locked, store exclusive.  */
 #define EXCLUSIVE_OP(suffix, ldsuffix) \
 void OPPROTO glue(op_ld##suffix##ex,MEMSUFFIX)(void) \
diff --git a/target-arm/translate.c b/target-arm/translate.c
index 369dfabe96..0220d6ac53 100644
--- a/target-arm/translate.c
+++ b/target-arm/translate.c
@@ -201,6 +201,13 @@ static void store_reg(DisasContext *s, int reg, TCGv var)
 #define gen_op_subl_T0_T1() tcg_gen_sub_i32(cpu_T[0], cpu_T[0], cpu_T[1])
 #define gen_op_rsbl_T0_T1() tcg_gen_sub_i32(cpu_T[0], cpu_T[1], cpu_T[0])
+#define gen_op_addl_T0_T1_cc() gen_helper_add_cc(cpu_T[0], cpu_T[0], cpu_T[1])
+#define gen_op_adcl_T0_T1_cc() gen_helper_adc_cc(cpu_T[0], cpu_T[0], cpu_T[1])
+#define gen_op_subl_T0_T1_cc() gen_helper_sub_cc(cpu_T[0], cpu_T[0], cpu_T[1])
+#define gen_op_sbcl_T0_T1_cc() gen_helper_sbc_cc(cpu_T[0], cpu_T[0], cpu_T[1])
+#define gen_op_rsbl_T0_T1_cc() gen_helper_sub_cc(cpu_T[0], cpu_T[1], cpu_T[0])
+#define gen_op_rscl_T0_T1_cc() gen_helper_sbc_cc(cpu_T[0], cpu_T[1], cpu_T[0])
+
 #define gen_op_andl_T0_T1() tcg_gen_and_i32(cpu_T[0], cpu_T[0], cpu_T[1])
 #define gen_op_xorl_T0_T1() tcg_gen_xor_i32(cpu_T[0], cpu_T[0], cpu_T[1])
 #define gen_op_orl_T0_T1() tcg_gen_or_i32(cpu_T[0], cpu_T[0], cpu_T[1])
@@ -538,6 +545,27 @@ static inline void gen_arm_shift_im(TCGv var, int shiftop, int shift, int flags)
     }
 };
+static inline void gen_arm_shift_reg(TCGv var, int shiftop,
+                                     TCGv shift, int flags)
+{
+    if (flags) {
+        switch (shiftop) {
+        case 0: gen_helper_shl_cc(var, var, shift); break;
+        case 1: gen_helper_shr_cc(var, var, shift); break;
+        case 2: gen_helper_sar_cc(var, var, shift); break;
+        case 3: gen_helper_ror_cc(var, var, shift); break;
+        }
+    } else {
+        switch (shiftop) {
+        case 0: gen_helper_shl(var, var, shift); break;
+        case 1: gen_helper_shr(var, var, shift); break;
+        case 2: gen_helper_sar(var, var, shift); break;
+        case 3: gen_helper_ror(var, var, shift); break;
+        }
+    }
+    dead_tmp(shift);
+}
+
 #define PAS_OP(pfx) \
     switch (op2) {  \
     case 0: gen_pas_helper(glue(pfx,add16)); break; \
@@ -746,20 +774,6 @@ const uint8_t table_logic_cc[16] = {
     1, /* mvn */
 };
-static GenOpFunc *gen_shift_T1_T0[4] = {
-    gen_op_shll_T1_T0,
-    gen_op_shrl_T1_T0,
-    gen_op_sarl_T1_T0,
-    gen_op_rorl_T1_T0,
-};
-
-static GenOpFunc *gen_shift_T1_T0_cc[4] = {
-    gen_op_shll_T1_T0_cc,
-    gen_op_shrl_T1_T0_cc,
-    gen_op_sarl_T1_T0_cc,
-    gen_op_rorl_T1_T0_cc,
-};
-
 /* Set PC and Thumb state from an immediate address.  */
 static inline void gen_bx_im(DisasContext *s, uint32_t addr)
 {
@@ -2249,6 +2263,7 @@ static int disas_dsp_insn(CPUState *env, DisasContext *s, uint32_t insn)
    instruction is not defined.  */
 static int disas_cp_insn(CPUState *env, DisasContext *s, uint32_t insn)
 {
+    TCGv tmp;
     uint32_t rd = (insn >> 12) & 0xf;
     uint32_t cp = (insn >> 8) & 0xf;
     if (IS_USER(s)) {
@@ -2258,17 +2273,16 @@ static int disas_cp_insn(CPUState *env, DisasContext *s, uint32_t insn)
     if (insn & ARM_CP_RW_BIT) {
         if (!env->cp[cp].cp_read)
             return 1;
-        gen_op_movl_T0_im((uint32_t) s->pc);
-        gen_set_pc_T0();
-        gen_op_movl_T0_cp(insn);
-        gen_movl_reg_T0(s, rd);
+        gen_set_pc_im(s->pc);
+        tmp = new_tmp();
+        gen_helper_get_cp(tmp, cpu_env, tcg_const_i32(insn));
+        store_reg(s, rd, tmp);
     } else {
         if (!env->cp[cp].cp_write)
             return 1;
-        gen_op_movl_T0_im((uint32_t) s->pc);
-        gen_set_pc_T0();
-        gen_movl_T0_reg(s, rd);
-        gen_op_movl_cp_T0(insn);
+        gen_set_pc_im(s->pc);
+        tmp = load_reg(s, rd);
+        gen_helper_set_cp(cpu_env, tcg_const_i32(insn), tmp);
     }
     return 0;
 }
@@ -2298,6 +2312,7 @@ static int cp15_user_ok(uint32_t insn)
 static int disas_cp15_insn(CPUState *env, DisasContext *s, uint32_t insn)
 {
     uint32_t rd;
+    TCGv tmp;
     /* M profile cores use memory mapped registers instead of cp15.  */
     if (arm_feature(env, ARM_FEATURE_M))
@@ -2321,20 +2336,23 @@ static int disas_cp15_insn(CPUState *env, DisasContext *s, uint32_t insn)
     if ((insn & 0x0fff0fff) == 0x0e070f90 || (insn & 0x0fff0fff) == 0x0e070f58) {
         /* Wait for interrupt.  */
-        gen_op_movl_T0_im((long)s->pc);
-        gen_set_pc_T0();
+        gen_set_pc_im(s->pc);
         s->is_jmp = DISAS_WFI;
         return 0;
     }
     rd = (insn >> 12) & 0xf;
     if (insn & ARM_CP_RW_BIT) {
-        gen_op_movl_T0_cp15(insn);
+        tmp = new_tmp();
+        gen_helper_get_cp15(tmp, cpu_env, tcg_const_i32(insn));
         /* If the destination register is r15 then sets condition codes.  */
         if (rd != 15)
-            gen_movl_reg_T0(s, rd);
+            store_reg(s, rd, tmp);
+        else
+            dead_tmp(tmp);
     } else {
-        gen_movl_T0_reg(s, rd);
-        gen_op_movl_cp15_T0(insn);
+        tmp = load_reg(s, rd);
+        gen_helper_set_cp15(cpu_env, tcg_const_i32(insn), tmp);
+        dead_tmp(tmp);
         /* Normally we would always end the TB here, but Linux
          * arch/arm/mach-pxa/sleep.S expects two instructions following
          * an MMU enable to execute from cache.  Imitate this behaviour.  */
@@ -3052,12 +3070,10 @@ static inline void gen_goto_tb(DisasContext *s, int n, uint32_t dest)
     tb = s->tb;
     if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
         tcg_gen_goto_tb(n);
-        gen_op_movl_T0_im(dest);
-        gen_set_pc_T0();
+        gen_set_pc_im(dest);
         tcg_gen_exit_tb((long)tb + n);
     } else {
-        gen_op_movl_T0_im(dest);
-        gen_set_pc_T0();
+        gen_set_pc_im(dest);
         tcg_gen_exit_tb(0);
     }
 }
@@ -3173,8 +3189,7 @@ static void gen_nop_hint(DisasContext *s, int val)
 {
     switch (val) {
     case 3: /* wfi */
-        gen_op_movl_T0_im((long)s->pc);
-        gen_set_pc_T0();
+        gen_set_pc_im(s->pc);
         s->is_jmp = DISAS_WFI;
         break;
     case 2: /* wfe */
@@ -5770,12 +5785,8 @@ static void disas_arm_insn(CPUState * env, DisasContext *s)
                 gen_arm_shift_im(cpu_T[1], shiftop, shift, logic_cc);
             } else {
                 rs = (insn >> 8) & 0xf;
-                gen_movl_T0_reg(s, rs);
-                if (logic_cc) {
-                    gen_shift_T1_T0_cc[shiftop]();
-                } else {
-                    gen_shift_T1_T0[shiftop]();
-                }
+                tmp = load_reg(s, rs);
+                gen_arm_shift_reg(cpu_T[1], shiftop, tmp, logic_cc);
             }
         }
         if (op1 != 0x0f && op1 != 0x0d) {
@@ -5977,14 +5988,20 @@ static void disas_arm_insn(CPUState * env, DisasContext *s)
                 /* SWP instruction */
                 rm = (insn) & 0xf;
-                gen_movl_T0_reg(s, rm);
-                gen_movl_T1_reg(s, rn);
+                /* ??? This is not really atomic.  However we know
+                   we never have multiple CPUs running in parallel,
+                   so it is good enough.  */
+                addr = load_reg(s, rn);
+                tmp = load_reg(s, rm);
                 if (insn & (1 << 22)) {
-                    gen_ldst(swpb, s);
+                    tmp2 = gen_ld8u(addr, IS_USER(s));
+                    gen_st8(tmp, addr, IS_USER(s));
                 } else {
-                    gen_ldst(swpl, s);
+                    tmp2 = gen_ld32(addr, IS_USER(s));
+                    gen_st32(tmp, addr, IS_USER(s));
                 }
-                gen_movl_reg_T0(s, rd);
+                dead_tmp(addr);
+                store_reg(s, rd, tmp2);
             }
         }
     } else {
@@ -6903,18 +6920,16 @@ static int disas_thumb2_insn(CPUState *env, DisasContext *s, uint16_t insn_hw1)
             goto illegal_op;
         switch (op) {
         case 0: /* Register controlled shift.  */
-            gen_movl_T0_reg(s, rm);
-            gen_movl_T1_reg(s, rn);
+            tmp = load_reg(s, rn);
+            tmp2 = load_reg(s, rm);
             if ((insn & 0x70) != 0)
                 goto illegal_op;
             op = (insn >> 21) & 3;
-            if (insn & (1 << 20)) {
-                gen_shift_T1_T0_cc[op]();
-                gen_op_logic_T1_cc();
-            } else {
-                gen_shift_T1_T0[op]();
-            }
-            gen_movl_reg_T1(s, rd);
+            logic_cc = (insn & (1 << 20)) != 0;
+            gen_arm_shift_reg(tmp, op, tmp2, logic_cc);
+            if (logic_cc)
+                gen_logic_CC(tmp);
+            store_reg(s, rd, tmp);
             break;
         case 1: /* Sign/zero extend.  */
             tmp = load_reg(s, rm);
@@ -7208,8 +7223,9 @@ static int disas_thumb2_insn(CPUState *env, DisasContext *s, uint16_t insn_hw1)
             switch (op) {
             case 0: /* msr cpsr.  */
                 if (IS_M(env)) {
-                    gen_op_v7m_msr_T0(insn & 0xff);
-                    gen_movl_reg_T0(s, rn);
+                    tmp = load_reg(s, rn);
+                    addr = tcg_const_i32(insn & 0xff);
+                    gen_helper_v7m_msr(cpu_env, addr, tmp);
                     gen_lookup_tb(s);
                     break;
                 }
@@ -7276,12 +7292,14 @@ static int disas_thumb2_insn(CPUState *env, DisasContext *s, uint16_t insn_hw1)
                 /* Unpredictable in user mode.  */
                 goto illegal_op;
             case 6: /* mrs cpsr.  */
+                tmp = new_tmp();
                 if (IS_M(env)) {
-                    gen_op_v7m_mrs_T0(insn & 0xff);
+                    addr = tcg_const_i32(insn & 0xff);
+                    gen_helper_v7m_mrs(tmp, cpu_env, addr);
                 } else {
-                    gen_helper_cpsr_read(cpu_T[0]);
+                    gen_helper_cpsr_read(tmp);
                 }
-                gen_movl_reg_T0(s, rd);
+                store_reg(s, rd, tmp);
                 break;
             case 7: /* mrs spsr.  */
                 /* Not accessible in user mode.  */
*/ @@ -7753,25 +7771,25 @@ static void disas_thumb_insn(CPUState *env, DisasContext *s) break; case 0x2: /* lsl */ if (s->condexec_mask) { - gen_op_shll_T1_T0(); + gen_helper_shl(cpu_T[1], cpu_T[1], cpu_T[0]); } else { - gen_op_shll_T1_T0_cc(); + gen_helper_shl_cc(cpu_T[1], cpu_T[1], cpu_T[0]); gen_op_logic_T1_cc(); } break; case 0x3: /* lsr */ if (s->condexec_mask) { - gen_op_shrl_T1_T0(); + gen_helper_shr(cpu_T[1], cpu_T[1], cpu_T[0]); } else { - gen_op_shrl_T1_T0_cc(); + gen_helper_shr_cc(cpu_T[1], cpu_T[1], cpu_T[0]); gen_op_logic_T1_cc(); } break; case 0x4: /* asr */ if (s->condexec_mask) { - gen_op_sarl_T1_T0(); + gen_helper_sar(cpu_T[1], cpu_T[1], cpu_T[0]); } else { - gen_op_sarl_T1_T0_cc(); + gen_helper_sar_cc(cpu_T[1], cpu_T[1], cpu_T[0]); gen_op_logic_T1_cc(); } break; @@ -7789,9 +7807,9 @@ static void disas_thumb_insn(CPUState *env, DisasContext *s) break; case 0x7: /* ror */ if (s->condexec_mask) { - gen_op_rorl_T1_T0(); + gen_helper_ror(cpu_T[1], cpu_T[1], cpu_T[0]); } else { - gen_op_rorl_T1_T0_cc(); + gen_helper_ror_cc(cpu_T[1], cpu_T[1], cpu_T[0]); gen_op_logic_T1_cc(); } break; @@ -8118,15 +8136,17 @@ static void disas_thumb_insn(CPUState *env, DisasContext *s) if (IS_USER(s)) break; if (IS_M(env)) { - val = (insn & (1 << 4)) != 0; - gen_op_movl_T0_im(val); + tmp = tcg_const_i32((insn & (1 << 4)) != 0); /* PRIMASK */ - if (insn & 1) - gen_op_v7m_msr_T0(16); + if (insn & 1) { + addr = tcg_const_i32(16); + gen_helper_v7m_msr(cpu_env, addr, tmp); + } /* FAULTMASK */ - if (insn & 2) - gen_op_v7m_msr_T0(17); - + if (insn & 2) { + addr = tcg_const_i32(17); + gen_helper_v7m_msr(cpu_env, addr, tmp); + } gen_lookup_tb(s); } else { if (insn & (1 << 4))