Fixes for 3 tcg bugs

-----BEGIN PGP SIGNATURE-----
 
 iQFRBAABCgA7FiEEekgeeIaLTbaoWgXAZN846K9+IV8FAl0rDcUdHHJpY2hhcmQu
 aGVuZGVyc29uQGxpbmFyby5vcmcACgkQZN846K9+IV+lhwf5AXr70c2OT2lXG+Jo
 EzGhNTfEp5+NHW5Pq9YzyS9XgI2JarvsdANiK+BQPG3m1vgKmbU5wyluMPEdJ734
 pmZT6uie9z4t0U2xTwwJvw5zrDNcZB07pTg4X4AdSUn3UnqLGVR1yuk1Tisf8fBD
 46O5JDJQkXjctWEJYyqPU0wCiOTrDiwmI+EJjvOAupabAcYLJSFrg6cv7a/oUTjY
 D4lckL2Hc0RyAFRnCnxcXVd+ivYkVdyVyzz0p8oGlWeQvmHZ8K77vJJ3YD1jz8S0
 6c/MJMyCsTEzV68D7UZijFWACCPI/htoScTFqChwtdckSy9N0qWrKtSUQd+eIsV1
 UkGv6g==
 =zmEG
 -----END PGP SIGNATURE-----

Merge remote-tracking branch 'remotes/rth/tags/pull-tcg-20190714' into staging

Fixes for 3 tcg bugs

# gpg: Signature made Sun 14 Jul 2019 12:11:01 BST
# gpg:                using RSA key 7A481E78868B4DB6A85A05C064DF38E8AF7E215F
# gpg:                issuer "richard.henderson@linaro.org"
# gpg: Good signature from "Richard Henderson <richard.henderson@linaro.org>" [full]
# Primary key fingerprint: 7A48 1E78 868B 4DB6 A85A  05C0 64DF 38E8 AF7E 215F

* remotes/rth/tags/pull-tcg-20190714:
  tcg: Release mmap_lock on translation fault
  tcg: Remove duplicate #if !defined(CODE_ACCESS)
  tcg: Remove cpu_ld*_code_ra
  tcg: Introduce set/clear_helper_retaddr
  include/qemu/atomic.h: Add signal_barrier
  tcg/aarch64: Fix output of extract2 opcodes
  tcg: Fix constant folding of INDEX_op_extract2_i32

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
commit 298ad7b5a4
Peter Maydell <peter.maydell@linaro.org>, 2019-07-15 09:46:12 +01:00
8 changed files with 139 additions and 66 deletions
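
Several of the patches in this pull revolve around one pattern: user-mode helpers
that touch guest memory through host pointers publish their return address before
the access and clear it afterwards, so that a SIGSEGV taken on that access can be
unwound to the correct guest pc. A minimal standalone sketch of the pattern, for
orientation only (do_guest_load() and its types are illustrative, not QEMU API;
the real definitions appear in the cpu_ldst.h hunk below):

    #include <stdint.h>

    /* Thread-local slot inspected by the SIGSEGV handler (handle_cpu_signal). */
    static __thread uintptr_t helper_retaddr;

    static inline void set_helper_retaddr(uintptr_t ra)
    {
        helper_retaddr = ra;
        /* Keep the store visible to a signal handler on this same thread. */
        __atomic_signal_fence(__ATOMIC_SEQ_CST);
    }

    static inline void clear_helper_retaddr(void)
    {
        /* Let the (possibly faulting) access complete before hiding it. */
        __atomic_signal_fence(__ATOMIC_SEQ_CST);
        helper_retaddr = 0;
    }

    /* Illustrative helper: bracket the host access with set/clear. */
    uint32_t do_guest_load(const uint32_t *host_addr, uintptr_t retaddr)
    {
        uint32_t val;

        set_helper_retaddr(retaddr);
        val = *host_addr;            /* may SIGSEGV; the handler sees retaddr */
        clear_helper_retaddr();
        return val;
    }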

accel/tcg/user-exec.c

@@ -64,27 +64,56 @@ static inline int handle_cpu_signal(uintptr_t pc, siginfo_t *info,
CPUState *cpu = current_cpu;
CPUClass *cc;
unsigned long address = (unsigned long)info->si_addr;
MMUAccessType access_type;
MMUAccessType access_type = is_write ? MMU_DATA_STORE : MMU_DATA_LOAD;
/* We must handle PC addresses from two different sources:
* a call return address and a signal frame address.
*
* Within cpu_restore_state_from_tb we assume the former and adjust
* the address by -GETPC_ADJ so that the address is within the call
* insn so that addr does not accidentally match the beginning of the
* next guest insn.
*
* However, when the PC comes from the signal frame, it points to
* the actual faulting host insn and not a call insn. Subtracting
* GETPC_ADJ in that case may accidentally match the previous guest insn.
*
* So for the latter case, adjust forward to compensate for what
* will be done later by cpu_restore_state_from_tb.
*/
if (helper_retaddr) {
switch (helper_retaddr) {
default:
/*
* Fault during host memory operation within a helper function.
* The helper's host return address, saved here, gives us a
* pointer into the generated code that will unwind to the
* correct guest pc.
*/
pc = helper_retaddr;
} else {
break;
case 0:
/*
* Fault during host memory operation within generated code.
* (Or, an unrelated bug within qemu, but we can't tell from here).
*
* We take the host pc from the signal frame. However, we cannot
* use that value directly. Within cpu_restore_state_from_tb, we
* assume PC comes from GETPC(), as used by the helper functions,
* so we adjust the address by -GETPC_ADJ to form an address that
* is within the call insn, so that the address does not accidentally
* match the beginning of the next guest insn. However, when the
* pc comes from the signal frame it points to the actual faulting
* host memory insn and not the return from a call insn.
*
* Therefore, adjust to compensate for what will be done later
* by cpu_restore_state_from_tb.
*/
pc += GETPC_ADJ;
break;
case 1:
/*
* Fault during host read for translation, or loosely, "execution".
*
* The guest pc is already pointing to the start of the TB for which
* code is being generated. If the guest translator manages the
* page crossings correctly, this is exactly the correct address
* (and if the translator doesn't handle page boundaries correctly
* there's little we can do about that here). Therefore, do not
* trigger the unwinder.
*
* Like tb_gen_code, release the memory lock before cpu_loop_exit.
*/
pc = 0;
access_type = MMU_INST_FETCH;
mmap_unlock();
break;
}
/* For synchronous signals we expect to be coming from the vCPU
@@ -134,7 +163,7 @@ static inline int handle_cpu_signal(uintptr_t pc, siginfo_t *info,
* currently executing TB was modified and must be exited
* immediately. Clear helper_retaddr for next execution.
*/
helper_retaddr = 0;
clear_helper_retaddr();
cpu_exit_tb_from_sighandler(cpu, old_set);
/* NORETURN */
@@ -152,10 +181,9 @@ static inline int handle_cpu_signal(uintptr_t pc, siginfo_t *info,
* an exception. Undo signal and retaddr state prior to longjmp.
*/
sigprocmask(SIG_SETMASK, old_set, NULL);
helper_retaddr = 0;
clear_helper_retaddr();
cc = CPU_GET_CLASS(cpu);
access_type = is_write ? MMU_DATA_STORE : MMU_DATA_LOAD;
cc->tlb_fill(cpu, address, 0, access_type, MMU_USER_IDX, false, pc);
g_assert_not_reached();
}
@@ -682,14 +710,15 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
if (unlikely(addr & (size - 1))) {
cpu_loop_exit_atomic(env_cpu(env), retaddr);
}
helper_retaddr = retaddr;
return g2h(addr);
void *ret = g2h(addr);
set_helper_retaddr(retaddr);
return ret;
}
/* Macro to call the above, with local variables from the use context. */
#define ATOMIC_MMU_DECLS do {} while (0)
#define ATOMIC_MMU_LOOKUP atomic_mmu_lookup(env, addr, DATA_SIZE, GETPC())
#define ATOMIC_MMU_CLEANUP do { helper_retaddr = 0; } while (0)
#define ATOMIC_MMU_CLEANUP do { clear_helper_retaddr(); } while (0)
#define ATOMIC_NAME(X) HELPER(glue(glue(atomic_ ## X, SUFFIX), END))
#define EXTRA_ARGS
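
The switch in handle_cpu_signal above boils down to a small decision table. A
distilled, standalone restatement of how the unwind pc and access type are derived
from helper_retaddr (classify_fault() and its types are made up for illustration;
GETPC_ADJ stands in for QEMU's definition):

    #include <stdint.h>

    #define GETPC_ADJ 2             /* stand-in for QEMU's definition */

    enum fault_kind { FAULT_DATA, FAULT_FETCH };

    struct fault_info {
        uintptr_t unwind_pc;        /* 0 means: do not run the unwinder */
        enum fault_kind kind;
    };

    /* sigframe_pc: host pc taken from the signal frame.
     * helper_ra:   current value of helper_retaddr. */
    struct fault_info classify_fault(uintptr_t sigframe_pc, uintptr_t helper_ra)
    {
        struct fault_info fi = { 0, FAULT_DATA };

        switch (helper_ra) {
        default:
            /* Fault inside a helper: its saved return address is already a
             * GETPC()-style value, so unwind from it directly. */
            fi.unwind_pc = helper_ra;
            break;
        case 0:
            /* Fault in generated code: pre-adjust the signal-frame pc so the
             * later -GETPC_ADJ in cpu_restore_state_from_tb still lands inside
             * the faulting host insn. */
            fi.unwind_pc = sigframe_pc + GETPC_ADJ;
            break;
        case 1:
            /* Fault while fetching guest code for translation: report an
             * instruction fetch at the current guest pc, with no unwinding
             * (the real code also drops the mmap lock here). */
            fi.unwind_pc = 0;
            fi.kind = FAULT_FETCH;
            break;
        }
        return fi;
    }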

include/exec/cpu_ldst.h

@@ -89,6 +89,26 @@ typedef target_ulong abi_ptr;
extern __thread uintptr_t helper_retaddr;
static inline void set_helper_retaddr(uintptr_t ra)
{
helper_retaddr = ra;
/*
* Ensure that this write is visible to the SIGSEGV handler that
* may be invoked due to a subsequent invalid memory operation.
*/
signal_barrier();
}
static inline void clear_helper_retaddr(void)
{
/*
* Ensure that previous memory operations have succeeded before
* removing the data visible to the signal handler.
*/
signal_barrier();
helper_retaddr = 0;
}
/* In user-only mode we provide only the _code and _data accessors. */
#define MEMSUFFIX _data

include/exec/cpu_ldst_useronly_template.h

@@ -64,61 +64,75 @@
static inline RES_TYPE
glue(glue(cpu_ld, USUFFIX), MEMSUFFIX)(CPUArchState *env, abi_ptr ptr)
{
#if !defined(CODE_ACCESS)
#ifdef CODE_ACCESS
RES_TYPE ret;
set_helper_retaddr(1);
ret = glue(glue(ld, USUFFIX), _p)(g2h(ptr));
clear_helper_retaddr();
return ret;
#else
trace_guest_mem_before_exec(
env_cpu(env), ptr,
trace_mem_build_info(SHIFT, false, MO_TE, false));
#endif
return glue(glue(ld, USUFFIX), _p)(g2h(ptr));
#endif
}
#ifndef CODE_ACCESS
static inline RES_TYPE
glue(glue(glue(cpu_ld, USUFFIX), MEMSUFFIX), _ra)(CPUArchState *env,
abi_ptr ptr,
uintptr_t retaddr)
{
RES_TYPE ret;
helper_retaddr = retaddr;
set_helper_retaddr(retaddr);
ret = glue(glue(cpu_ld, USUFFIX), MEMSUFFIX)(env, ptr);
helper_retaddr = 0;
clear_helper_retaddr();
return ret;
}
#endif
#if DATA_SIZE <= 2
static inline int
glue(glue(cpu_lds, SUFFIX), MEMSUFFIX)(CPUArchState *env, abi_ptr ptr)
{
#if !defined(CODE_ACCESS)
#ifdef CODE_ACCESS
int ret;
set_helper_retaddr(1);
ret = glue(glue(lds, SUFFIX), _p)(g2h(ptr));
clear_helper_retaddr();
return ret;
#else
trace_guest_mem_before_exec(
env_cpu(env), ptr,
trace_mem_build_info(SHIFT, true, MO_TE, false));
#endif
return glue(glue(lds, SUFFIX), _p)(g2h(ptr));
#endif
}
#ifndef CODE_ACCESS
static inline int
glue(glue(glue(cpu_lds, SUFFIX), MEMSUFFIX), _ra)(CPUArchState *env,
abi_ptr ptr,
uintptr_t retaddr)
{
int ret;
helper_retaddr = retaddr;
set_helper_retaddr(retaddr);
ret = glue(glue(cpu_lds, SUFFIX), MEMSUFFIX)(env, ptr);
helper_retaddr = 0;
clear_helper_retaddr();
return ret;
}
#endif
#endif /* CODE_ACCESS */
#endif /* DATA_SIZE <= 2 */
#ifndef CODE_ACCESS
static inline void
glue(glue(cpu_st, SUFFIX), MEMSUFFIX)(CPUArchState *env, abi_ptr ptr,
RES_TYPE v)
{
#if !defined(CODE_ACCESS)
trace_guest_mem_before_exec(
env_cpu(env), ptr,
trace_mem_build_info(SHIFT, false, MO_TE, true));
#endif
glue(glue(st, SUFFIX), _p)(g2h(ptr), v);
}
@@ -128,9 +142,9 @@ glue(glue(glue(cpu_st, SUFFIX), MEMSUFFIX), _ra)(CPUArchState *env,
RES_TYPE v,
uintptr_t retaddr)
{
helper_retaddr = retaddr;
set_helper_retaddr(retaddr);
glue(glue(cpu_st, SUFFIX), MEMSUFFIX)(env, ptr, v);
helper_retaddr = 0;
clear_helper_retaddr();
}
#endif

include/qemu/atomic.h

@@ -88,6 +88,13 @@
#define smp_read_barrier_depends() barrier()
#endif
/*
* A signal barrier forces all pending local memory ops to be observed before
* a SIGSEGV is delivered to the *same* thread. In practice this is exactly
* the same as barrier(), but since we have the correct builtin, use it.
*/
#define signal_barrier() __atomic_signal_fence(__ATOMIC_SEQ_CST)
/* Sanity check that the size of an atomic operation isn't "overly large".
* Despite the fact that e.g. i686 has 64-bit atomic operations, we do not
* want to use them because we ought not need them, and this lets us do a
@@ -308,6 +315,10 @@
#define smp_read_barrier_depends() barrier()
#endif
#ifndef signal_barrier
#define signal_barrier() barrier()
#endif
/* These will only be atomic if the processor does the fetch or store
* in a single issue memory operation
*/

target/arm/helper-a64.c

@@ -554,7 +554,7 @@ uint64_t HELPER(paired_cmpxchg64_le)(CPUARMState *env, uint64_t addr,
/* ??? Enforce alignment. */
uint64_t *haddr = g2h(addr);
helper_retaddr = ra;
set_helper_retaddr(ra);
o0 = ldq_le_p(haddr + 0);
o1 = ldq_le_p(haddr + 1);
oldv = int128_make128(o0, o1);
@@ -564,7 +564,7 @@ uint64_t HELPER(paired_cmpxchg64_le)(CPUARMState *env, uint64_t addr,
stq_le_p(haddr + 0, int128_getlo(newv));
stq_le_p(haddr + 1, int128_gethi(newv));
}
helper_retaddr = 0;
clear_helper_retaddr();
#else
int mem_idx = cpu_mmu_index(env, false);
TCGMemOpIdx oi0 = make_memop_idx(MO_LEQ | MO_ALIGN_16, mem_idx);
@@ -624,7 +624,7 @@ uint64_t HELPER(paired_cmpxchg64_be)(CPUARMState *env, uint64_t addr,
/* ??? Enforce alignment. */
uint64_t *haddr = g2h(addr);
helper_retaddr = ra;
set_helper_retaddr(ra);
o1 = ldq_be_p(haddr + 0);
o0 = ldq_be_p(haddr + 1);
oldv = int128_make128(o0, o1);
@@ -634,7 +634,7 @@ uint64_t HELPER(paired_cmpxchg64_be)(CPUARMState *env, uint64_t addr,
stq_be_p(haddr + 0, int128_gethi(newv));
stq_be_p(haddr + 1, int128_getlo(newv));
}
helper_retaddr = 0;
clear_helper_retaddr();
#else
int mem_idx = cpu_mmu_index(env, false);
TCGMemOpIdx oi0 = make_memop_idx(MO_BEQ | MO_ALIGN_16, mem_idx);

target/arm/sve_helper.c

@@ -4125,12 +4125,11 @@ static intptr_t max_for_page(target_ulong base, intptr_t mem_off,
return MIN(split, mem_max - mem_off) + mem_off;
}
static inline void set_helper_retaddr(uintptr_t ra)
{
#ifdef CONFIG_USER_ONLY
helper_retaddr = ra;
#ifndef CONFIG_USER_ONLY
/* These are normally defined only for CONFIG_USER_ONLY in <exec/cpu_ldst.h> */
static inline void set_helper_retaddr(uintptr_t ra) { }
static inline void clear_helper_retaddr(void) { }
#endif
}
/*
* The result of tlb_vaddr_to_host for user-only is just g2h(x),
@@ -4188,7 +4187,7 @@ static void sve_ld1_r(CPUARMState *env, void *vg, const target_ulong addr,
if (test_host_page(host)) {
mem_off = host_fn(vd, vg, host - mem_off, mem_off, mem_max);
tcg_debug_assert(mem_off == mem_max);
set_helper_retaddr(0);
clear_helper_retaddr();
/* After having taken any fault, zero leading inactive elements. */
swap_memzero(vd, reg_off);
return;
@@ -4239,7 +4238,7 @@ static void sve_ld1_r(CPUARMState *env, void *vg, const target_ulong addr,
}
#endif
set_helper_retaddr(0);
clear_helper_retaddr();
memcpy(vd, &scratch, reg_max);
}
@@ -4312,7 +4311,7 @@ static void sve_ld2_r(CPUARMState *env, void *vg, target_ulong addr,
addr += 2 * size;
} while (i & 15);
}
set_helper_retaddr(0);
clear_helper_retaddr();
/* Wait until all exceptions have been raised to write back. */
memcpy(&env->vfp.zregs[rd], &scratch[0], oprsz);
@@ -4341,7 +4340,7 @@ static void sve_ld3_r(CPUARMState *env, void *vg, target_ulong addr,
addr += 3 * size;
} while (i & 15);
}
set_helper_retaddr(0);
clear_helper_retaddr();
/* Wait until all exceptions have been raised to write back. */
memcpy(&env->vfp.zregs[rd], &scratch[0], oprsz);
@@ -4372,7 +4371,7 @@ static void sve_ld4_r(CPUARMState *env, void *vg, target_ulong addr,
addr += 4 * size;
} while (i & 15);
}
set_helper_retaddr(0);
clear_helper_retaddr();
/* Wait until all exceptions have been raised to write back. */
memcpy(&env->vfp.zregs[rd], &scratch[0], oprsz);
@@ -4494,7 +4493,7 @@ static void sve_ldff1_r(CPUARMState *env, void *vg, const target_ulong addr,
if (test_host_page(host)) {
mem_off = host_fn(vd, vg, host - mem_off, mem_off, mem_max);
tcg_debug_assert(mem_off == mem_max);
set_helper_retaddr(0);
clear_helper_retaddr();
/* After any fault, zero any leading inactive elements. */
swap_memzero(vd, reg_off);
return;
@@ -4537,7 +4536,7 @@ static void sve_ldff1_r(CPUARMState *env, void *vg, const target_ulong addr,
}
#endif
set_helper_retaddr(0);
clear_helper_retaddr();
record_fault(env, reg_off, reg_max);
}
@@ -4740,7 +4739,7 @@ static void sve_st1_r(CPUARMState *env, void *vg, target_ulong addr,
addr += msize;
} while (i & 15);
}
set_helper_retaddr(0);
clear_helper_retaddr();
}
static void sve_st2_r(CPUARMState *env, void *vg, target_ulong addr,
@@ -4766,7 +4765,7 @@ static void sve_st2_r(CPUARMState *env, void *vg, target_ulong addr,
addr += 2 * msize;
} while (i & 15);
}
set_helper_retaddr(0);
clear_helper_retaddr();
}
static void sve_st3_r(CPUARMState *env, void *vg, target_ulong addr,
@@ -4794,7 +4793,7 @@ static void sve_st3_r(CPUARMState *env, void *vg, target_ulong addr,
addr += 3 * msize;
} while (i & 15);
}
set_helper_retaddr(0);
clear_helper_retaddr();
}
static void sve_st4_r(CPUARMState *env, void *vg, target_ulong addr,
@@ -4824,7 +4823,7 @@ static void sve_st4_r(CPUARMState *env, void *vg, target_ulong addr,
addr += 4 * msize;
} while (i & 15);
}
set_helper_retaddr(0);
clear_helper_retaddr();
}
#define DO_STN_1(N, NAME, ESIZE) \
@@ -4932,7 +4931,7 @@ static void sve_ld1_zs(CPUARMState *env, void *vd, void *vg, void *vm,
i += 4, pg >>= 4;
} while (i & 15);
}
set_helper_retaddr(0);
clear_helper_retaddr();
/* Wait until all exceptions have been raised to write back. */
memcpy(vd, &scratch, oprsz);
@@ -4955,7 +4954,7 @@ static void sve_ld1_zd(CPUARMState *env, void *vd, void *vg, void *vm,
tlb_fn(env, &scratch, i * 8, base + (off << scale), oi, ra);
}
}
set_helper_retaddr(0);
clear_helper_retaddr();
/* Wait until all exceptions have been raised to write back. */
memcpy(vd, &scratch, oprsz * 8);
@@ -5133,7 +5132,7 @@ static inline void sve_ldff1_zs(CPUARMState *env, void *vd, void *vg, void *vm,
tlb_fn(env, vd, reg_off, addr, oi, ra);
/* The rest of the reads will be non-faulting. */
set_helper_retaddr(0);
clear_helper_retaddr();
}
/* After any fault, zero the leading predicated false elements. */
@@ -5175,7 +5174,7 @@ static inline void sve_ldff1_zd(CPUARMState *env, void *vd, void *vg, void *vm,
tlb_fn(env, vd, reg_off, addr, oi, ra);
/* The rest of the reads will be non-faulting. */
set_helper_retaddr(0);
clear_helper_retaddr();
}
/* After any fault, zero the leading predicated false elements. */
@@ -5299,7 +5298,7 @@ static void sve_st1_zs(CPUARMState *env, void *vd, void *vg, void *vm,
i += 4, pg >>= 4;
} while (i & 15);
}
set_helper_retaddr(0);
clear_helper_retaddr();
}
static void sve_st1_zd(CPUARMState *env, void *vd, void *vg, void *vm,
@@ -5318,7 +5317,7 @@ static void sve_st1_zd(CPUARMState *env, void *vd, void *vg, void *vm,
tlb_fn(env, vd, i * 8, base + (off << scale), oi, ra);
}
}
set_helper_retaddr(0);
clear_helper_retaddr();
}
#define DO_ST1_ZPZ_S(MEM, OFS) \

tcg/aarch64/tcg-target.inc.c

@@ -2226,7 +2226,7 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
case INDEX_op_extract2_i64:
case INDEX_op_extract2_i32:
tcg_out_extr(s, ext, a0, a1, a2, args[3]);
tcg_out_extr(s, ext, a0, REG0(2), REG0(1), args[3]);
break;
case INDEX_op_add2_i32:
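
For reference on the operand order: extract2_i32 d, lo, hi, pos yields the 32 bits
starting at bit pos of the 64-bit concatenation hi:lo, and AArch64's
EXTR Wd, Wn, Wm, #pos performs the same extraction from Wn:Wm. The high input
therefore belongs in the Rn slot and the low input in the Rm slot, which is what
the corrected call passes (with REG0() substituting the zero register for a
constant-zero input). A small reference model, assuming 0 < pos < 32 as guaranteed
to the backend:

    #include <stdint.h>

    /* Reference semantics of TCG extract2_i32 (and of AArch64
     * EXTR Wd, Wn = hi, Wm = lo, #pos), valid for 0 < pos < 32. */
    uint32_t extract2_i32_ref(uint32_t lo, uint32_t hi, unsigned pos)
    {
        return (lo >> pos) | (hi << (32 - pos));
    }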

tcg/optimize.c

@@ -1213,8 +1213,8 @@ void tcg_optimize(TCGContext *s)
if (opc == INDEX_op_extract2_i64) {
tmp = (v1 >> op->args[3]) | (v2 << (64 - op->args[3]));
} else {
tmp = (v1 >> op->args[3]) | (v2 << (32 - op->args[3]));
tmp = (int32_t)tmp;
tmp = (int32_t)(((uint32_t)v1 >> op->args[3]) |
((uint32_t)v2 << (32 - op->args[3])));
}
tcg_opt_gen_movi(s, op, op->args[0], tmp);
break;
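
The constant-folding bug being fixed is specific to extract2_i32 on a 64-bit host:
the folded inputs live in 64-bit variables and may carry sign-extended high bits,
so doing the shifts in 64 bits lets those high bits leak into the 32-bit result.
Truncating to uint32_t first, as the new code does, avoids that. A small
self-contained illustration with made-up sample values:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* 32-bit constants as they may sit in 64-bit slots on a 64-bit host;
         * v1 carries sign-extended high bits. */
        uint64_t v1 = 0xffffffff89abcdefULL;
        uint64_t v2 = 0x0000000001234567ULL;
        unsigned pos = 8;

        /* Old folding: 64-bit shifts let v1's high bits leak into the result. */
        int32_t old = (int32_t)((v1 >> pos) | (v2 << (32 - pos)));

        /* New folding: truncate the inputs to 32 bits before shifting. */
        int32_t fixed = (int32_t)(((uint32_t)v1 >> pos) |
                                  ((uint32_t)v2 << (32 - pos)));

        printf("old   = 0x%08x\n", (uint32_t)old);    /* 0xff89abcd (wrong) */
        printf("fixed = 0x%08x\n", (uint32_t)fixed);  /* 0x6789abcd */
        return 0;
    }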