target/arm: Merge gen_aa32_frob64 into gen_aa32_ld_i64

This is the only caller.  Adjust some commentary to talk
about SCTLR_B instead of the vanishing function.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210419202257.161730-13-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Richard Henderson, 2021-04-19 13:22:38 -07:00; committed by Peter Maydell
parent 9d486b40e8
commit 37bf7a055f

@@ -975,20 +975,17 @@ static void gen_aa32_st_i32(DisasContext *s, TCGv_i32 val, TCGv_i32 a32,
         gen_aa32_st_i32(s, val, a32, index, OPC);                       \
     }
 
-static inline void gen_aa32_frob64(DisasContext *s, TCGv_i64 val)
-{
-    /* Not needed for user-mode BE32, where we use MO_BE instead. */
-    if (!IS_USER_ONLY && s->sctlr_b) {
-        tcg_gen_rotri_i64(val, val, 32);
-    }
-}
-
 static void gen_aa32_ld_i64(DisasContext *s, TCGv_i64 val, TCGv_i32 a32,
                             int index, MemOp opc)
 {
     TCGv addr = gen_aa32_addr(s, a32, opc);
     tcg_gen_qemu_ld_i64(val, addr, index, opc);
-    gen_aa32_frob64(s, val);
+
+    /* Not needed for user-mode BE32, where we use MO_BE instead. */
+    if (!IS_USER_ONLY && s->sctlr_b) {
+        tcg_gen_rotri_i64(val, val, 32);
+    }
+
     tcg_temp_free(addr);
 }
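
Aside: the fixup folded in above works because rotating a 64-bit value by
32 bits swaps its two 32-bit halves. A minimal host-side sketch of that
property (plain C, not QEMU code; rotr32_64 and the test value are invented
for illustration):

#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>

/* Rotate right by 32: the analogue of tcg_gen_rotri_i64(val, val, 32). */
static uint64_t rotr32_64(uint64_t v)
{
    return (v >> 32) | (v << 32);
}

int main(void)
{
    uint64_t v = 0x1111111122222222ULL;       /* high word : low word */

    /* The former high word becomes the low word and vice versa: this is
     * the SCTLR_B half-swap applied after the 64-bit load. */
    printf("%016" PRIx64 "\n", rotr32_64(v)); /* 2222222211111111 */
    return 0;
}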
@@ -4987,16 +4984,13 @@ static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
         TCGv_i32 tmp2 = tcg_temp_new_i32();
         TCGv_i64 t64 = tcg_temp_new_i64();
 
-        /* For AArch32, architecturally the 32-bit word at the lowest
+        /*
+         * For AArch32, architecturally the 32-bit word at the lowest
          * address is always Rt and the one at addr+4 is Rt2, even if
          * the CPU is big-endian. That means we don't want to do a
-         * gen_aa32_ld_i64(), which invokes gen_aa32_frob64() as if
-         * for an architecturally 64-bit access, but instead do a
-         * 64-bit access using MO_BE if appropriate and then split
-         * the two halves.
-         * This only makes a difference for BE32 user-mode, where
-         * frob64() must not flip the two halves of the 64-bit data
-         * but this code must treat BE32 user-mode like BE32 system.
+         * gen_aa32_ld_i64(), which checks SCTLR_B as if for an
+         * architecturally 64-bit access, but instead do a 64-bit access
+         * using MO_BE if appropriate and then split the two halves.
          */
         TCGv taddr = gen_aa32_addr(s, addr, opc);
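
Aside: a minimal host-side sketch of the load strategy the comment describes
(plain C, not QEMU code; load64_be and the memory image are invented for
illustration). One big-endian 64-bit load, then a split, leaves the word at
the lowest address in Rt:

#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>

/* Big-endian 64-bit load: the byte at the lowest address is most
 * significant, mirroring a tcg_gen_qemu_ld_i64 with MO_BE. */
static uint64_t load64_be(const uint8_t *p)
{
    uint64_t v = 0;
    for (int i = 0; i < 8; i++) {
        v = (v << 8) | p[i];
    }
    return v;
}

int main(void)
{
    /* BE memory image: word 0xaabbccdd at addr, 0x11223344 at addr + 4. */
    const uint8_t mem[8] = { 0xaa, 0xbb, 0xcc, 0xdd, 0x11, 0x22, 0x33, 0x44 };

    uint64_t t64 = load64_be(mem);
    uint32_t rt  = t64 >> 32;        /* word at the lowest address -> Rt  */
    uint32_t rt2 = (uint32_t)t64;    /* word at addr + 4           -> Rt2 */

    printf("Rt=%08" PRIx32 " Rt2=%08" PRIx32 "\n", rt, rt2);
    return 0;
}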
@@ -5056,14 +5050,15 @@ static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
             TCGv_i64 n64 = tcg_temp_new_i64();
 
             t2 = load_reg(s, rt2);
-            /* For AArch32, architecturally the 32-bit word at the lowest
+
+            /*
+             * For AArch32, architecturally the 32-bit word at the lowest
              * address is always Rt and the one at addr+4 is Rt2, even if
              * the CPU is big-endian. Since we're going to treat this as a
              * single 64-bit BE store, we need to put the two halves in the
              * opposite order for BE to LE, so that they end up in the right
-             * places.
-             * We don't want gen_aa32_frob64() because that does the wrong
-             * thing for BE32 usermode.
+             * places. We don't want gen_aa32_st_i64, because that checks
+             * SCTLR_B as if for an architectural 64-bit access.
              */
             if (s->be_data == MO_BE) {
                 tcg_gen_concat_i32_i64(n64, t2, t1);
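
Aside: the store side, sketched the same way (plain C, not QEMU code;
store64_be and the register values are invented). Modelling
tcg_gen_concat_i32_i64(n64, t2, t1) as placing t1 in the high half, the
single big-endian store puts Rt's bytes at the lowest address:

#include <stdint.h>
#include <stdio.h>

/* Big-endian 64-bit store: most significant byte at the lowest address. */
static void store64_be(uint8_t *p, uint64_t v)
{
    for (int i = 0; i < 8; i++) {
        p[i] = v >> (56 - 8 * i);
    }
}

int main(void)
{
    uint32_t t1 = 0xaabbccdd;   /* Rt  */
    uint32_t t2 = 0x11223344;   /* Rt2 */
    uint8_t mem[8];

    /* concat(n64, t2, t1): t2 becomes the low half, t1 the high half. */
    uint64_t n64 = ((uint64_t)t1 << 32) | t2;

    store64_be(mem, n64);
    /* Rt's bytes land at the lowest address, Rt2's at addr + 4. */
    printf("%02x%02x%02x%02x %02x%02x%02x%02x\n",
           mem[0], mem[1], mem[2], mem[3],
           mem[4], mem[5], mem[6], mem[7]);  /* aabbccdd 11223344 */
    return 0;
}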