tcg/s390x: Add host vector framework

Add registers and function stubs.  The functionality
is disabled via squashing s390_facilities[2] to 0.

We must still include results for the mandatory opcodes in
tcg_target_op_def, as all opcodes are checked during tcg init.

Reviewed-by: David Hildenbrand <david@redhat.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Richard Henderson 2020-09-14 16:33:14 -07:00
parent eee6251b48
commit 34ef767609
5 changed files with 184 additions and 5 deletions
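
For context, a minimal standalone sketch (not part of the patch) of the gating mechanism the commit message describes: the facility bits live in an array of 64-bit words numbered MSB-first, so facility 129 (VECTOR) falls in word 2, and clearing that word makes HAVE_FACILITY(VECTOR) evaluate to 0. The macro and the FACILITY_VECTOR value are copied from the diff below; the main() wrapper is illustrative only.

#include <stdint.h>
#include <stdio.h>

/* Facility words, as filled in by query_s390_facilities(). */
static uint64_t s390_facilities[3];

/*
 * STFLE numbering: bit 0 is the most significant bit of word 0,
 * so facility 129 selects word 129 / 64 = 2, bit 63 - (129 % 64) = 62.
 */
#define FACILITY_VECTOR  129
#define HAVE_FACILITY(X) \
    ((s390_facilities[FACILITY_##X / 64] >> (63 - FACILITY_##X % 64)) & 1)

int main(void)
{
    /* Pretend the hardware reported the vector facility. */
    s390_facilities[2] = 1ull << (63 - FACILITY_VECTOR % 64);
    printf("vector: %d\n", (int)HAVE_FACILITY(VECTOR));  /* prints 1 */

    /* The patch unconditionally clears the third word, disabling it. */
    s390_facilities[2] = 0;
    printf("vector: %d\n", (int)HAVE_FACILITY(VECTOR));  /* prints 0 */
    return 0;
}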


@@ -13,13 +13,17 @@ C_O0_I1(r)
C_O0_I2(L, L)
C_O0_I2(r, r)
C_O0_I2(r, ri)
C_O0_I2(v, r)
C_O1_I1(r, L)
C_O1_I1(r, r)
C_O1_I1(v, r)
C_O1_I1(v, vr)
C_O1_I2(r, 0, ri)
C_O1_I2(r, 0, rI)
C_O1_I2(r, 0, rJ)
C_O1_I2(r, r, ri)
C_O1_I2(r, rZ, r)
C_O1_I2(v, v, v)
C_O1_I4(r, r, ri, r, 0)
C_O1_I4(r, r, ri, rI, 0)
C_O2_I2(b, a, 0, r)


@@ -10,6 +10,7 @@
*/
REGS('r', ALL_GENERAL_REGS)
REGS('L', ALL_GENERAL_REGS & ~SOFTMMU_RESERVE_REGS)
REGS('v', ALL_VECTOR_REGS)
/*
* A (single) even/odd pair for division.
* TODO: Add something to the register allocator to allow


@@ -43,6 +43,8 @@
#define TCG_CT_CONST_ZERO 0x800
#define ALL_GENERAL_REGS MAKE_64BIT_MASK(0, 16)
#define ALL_VECTOR_REGS MAKE_64BIT_MASK(32, 32)
/*
* For softmmu, we need to avoid conflicts with the first 3
* argument registers to perform the tlb lookup, and to call
@@ -268,8 +270,13 @@ typedef enum S390Opcode {
#ifdef CONFIG_DEBUG_TCG
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
"%r0", "%r1", "%r2", "%r3", "%r4", "%r5", "%r6", "%r7",
"%r8", "%r9", "%r10" "%r11" "%r12" "%r13" "%r14" "%r15"
"%r0", "%r1", "%r2", "%r3", "%r4", "%r5", "%r6", "%r7",
"%r8", "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%r15",
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
"%v0", "%v1", "%v2", "%v3", "%v4", "%v5", "%v6", "%v7",
"%v8", "%v9", "%v10", "%v11", "%v12", "%v13", "%v14", "%v15",
"%v16", "%v17", "%v18", "%v19", "%v20", "%v21", "%v22", "%v23",
"%v24", "%v25", "%v26", "%v27", "%v28", "%v29", "%v30", "%v31",
};
#endif
@@ -295,6 +302,32 @@ static const int tcg_target_reg_alloc_order[] = {
TCG_REG_R4,
TCG_REG_R3,
TCG_REG_R2,
/* V8-V15 are call saved, and omitted. */
TCG_REG_V0,
TCG_REG_V1,
TCG_REG_V2,
TCG_REG_V3,
TCG_REG_V4,
TCG_REG_V5,
TCG_REG_V6,
TCG_REG_V7,
TCG_REG_V16,
TCG_REG_V17,
TCG_REG_V18,
TCG_REG_V19,
TCG_REG_V20,
TCG_REG_V21,
TCG_REG_V22,
TCG_REG_V23,
TCG_REG_V24,
TCG_REG_V25,
TCG_REG_V26,
TCG_REG_V27,
TCG_REG_V28,
TCG_REG_V29,
TCG_REG_V30,
TCG_REG_V31,
};
static const int tcg_target_call_iarg_regs[] = {
@@ -377,7 +410,7 @@ static void * const qemu_st_helpers[(MO_SIZE | MO_BSWAP) + 1] = {
#endif
static const tcg_insn_unit *tb_ret_addr;
uint64_t s390_facilities[1];
uint64_t s390_facilities[3];
static bool patch_reloc(tcg_insn_unit *src_rw, int type,
intptr_t value, intptr_t addend)
@@ -2293,6 +2326,42 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
}
}
static bool tcg_out_dup_vec(TCGContext *s, TCGType type, unsigned vece,
TCGReg dst, TCGReg src)
{
g_assert_not_reached();
}
static bool tcg_out_dupm_vec(TCGContext *s, TCGType type, unsigned vece,
TCGReg dst, TCGReg base, intptr_t offset)
{
g_assert_not_reached();
}
static void tcg_out_dupi_vec(TCGContext *s, TCGType type, unsigned vece,
TCGReg dst, int64_t val)
{
g_assert_not_reached();
}
static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
unsigned vecl, unsigned vece,
const TCGArg *args, const int *const_args)
{
g_assert_not_reached();
}
int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece)
{
return 0;
}
void tcg_expand_vec_op(TCGOpcode opc, TCGType type, unsigned vece,
TCGArg a0, ...)
{
g_assert_not_reached();
}
static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
{
switch (op) {
@@ -2433,11 +2502,34 @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
? C_O2_I4(r, r, 0, 1, rA, r)
: C_O2_I4(r, r, 0, 1, r, r));
case INDEX_op_st_vec:
return C_O0_I2(v, r);
case INDEX_op_ld_vec:
case INDEX_op_dupm_vec:
return C_O1_I1(v, r);
case INDEX_op_dup_vec:
return C_O1_I1(v, vr);
case INDEX_op_add_vec:
case INDEX_op_sub_vec:
case INDEX_op_and_vec:
case INDEX_op_or_vec:
case INDEX_op_xor_vec:
case INDEX_op_cmp_vec:
return C_O1_I2(v, v, v);
default:
g_assert_not_reached();
}
}
/*
* Mainline glibc added HWCAP_S390_VX before it was kernel abi.
* Some distros have fixed this up locally, others have not.
*/
#ifndef HWCAP_S390_VXRS
#define HWCAP_S390_VXRS 2048
#endif
static void query_s390_facilities(void)
{
unsigned long hwcap = qemu_getauxval(AT_HWCAP);
@@ -2452,6 +2544,16 @@ static void query_s390_facilities(void)
asm volatile(".word 0xb2b0,0x1000"
: "=r"(r0) : "r"(r0), "r"(r1) : "memory", "cc");
}
/*
* Use of vector registers requires os support beyond the facility bit.
* If the kernel does not advertise support, disable the facility bits.
* There is nothing else we currently care about in the 3rd word, so
* disable VECTOR with one store.
*/
if (1 || !(hwcap & HWCAP_S390_VXRS)) {
s390_facilities[2] = 0;
}
}
static void tcg_target_init(TCGContext *s)
@@ -2460,6 +2562,10 @@ static void tcg_target_init(TCGContext *s)
tcg_target_available_regs[TCG_TYPE_I32] = 0xffff;
tcg_target_available_regs[TCG_TYPE_I64] = 0xffff;
if (HAVE_FACILITY(VECTOR)) {
tcg_target_available_regs[TCG_TYPE_V64] = 0xffffffff00000000ull;
tcg_target_available_regs[TCG_TYPE_V128] = 0xffffffff00000000ull;
}
tcg_target_call_clobber_regs = 0;
tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R0);
@@ -2474,6 +2580,31 @@ static void tcg_target_init(TCGContext *s)
/* The return register can be considered call-clobbered. */
tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R14);
tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V0);
tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V1);
tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V2);
tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V3);
tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V4);
tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V5);
tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V6);
tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V7);
tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V16);
tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V17);
tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V18);
tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V19);
tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V20);
tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V21);
tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V22);
tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V23);
tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V24);
tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V25);
tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V26);
tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V27);
tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V28);
tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V29);
tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V30);
tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V31);
s->reserved_regs = 0;
tcg_regset_set_reg(s->reserved_regs, TCG_TMP0);
/* XXX many insns can't be used with R0, so we better avoid it for now */


@@ -37,11 +37,20 @@ typedef enum TCGReg {
TCG_REG_R8, TCG_REG_R9, TCG_REG_R10, TCG_REG_R11,
TCG_REG_R12, TCG_REG_R13, TCG_REG_R14, TCG_REG_R15,
TCG_REG_V0 = 32, TCG_REG_V1, TCG_REG_V2, TCG_REG_V3,
TCG_REG_V4, TCG_REG_V5, TCG_REG_V6, TCG_REG_V7,
TCG_REG_V8, TCG_REG_V9, TCG_REG_V10, TCG_REG_V11,
TCG_REG_V12, TCG_REG_V13, TCG_REG_V14, TCG_REG_V15,
TCG_REG_V16, TCG_REG_V17, TCG_REG_V18, TCG_REG_V19,
TCG_REG_V20, TCG_REG_V21, TCG_REG_V22, TCG_REG_V23,
TCG_REG_V24, TCG_REG_V25, TCG_REG_V26, TCG_REG_V27,
TCG_REG_V28, TCG_REG_V29, TCG_REG_V30, TCG_REG_V31,
TCG_AREG0 = TCG_REG_R10,
TCG_REG_CALL_STACK = TCG_REG_R15
} TCGReg;
#define TCG_TARGET_NB_REGS 16
#define TCG_TARGET_NB_REGS 64
/* A list of relevant facilities used by this translator. Some of these
are required for proper operation, and these are checked at startup. */
@@ -54,8 +63,9 @@ typedef enum TCGReg {
#define FACILITY_FAST_BCR_SER FACILITY_LOAD_ON_COND
#define FACILITY_DISTINCT_OPS FACILITY_LOAD_ON_COND
#define FACILITY_LOAD_ON_COND2 53
#define FACILITY_VECTOR 129
extern uint64_t s390_facilities[1];
extern uint64_t s390_facilities[3];
#define HAVE_FACILITY(X) \
((s390_facilities[FACILITY_##X / 64] >> (63 - FACILITY_##X % 64)) & 1)
@@ -128,6 +138,27 @@ extern uint64_t s390_facilities[1];
#define TCG_TARGET_HAS_muluh_i64 0
#define TCG_TARGET_HAS_mulsh_i64 0
#define TCG_TARGET_HAS_v64 HAVE_FACILITY(VECTOR)
#define TCG_TARGET_HAS_v128 HAVE_FACILITY(VECTOR)
#define TCG_TARGET_HAS_v256 0
#define TCG_TARGET_HAS_andc_vec 0
#define TCG_TARGET_HAS_orc_vec 0
#define TCG_TARGET_HAS_not_vec 0
#define TCG_TARGET_HAS_neg_vec 0
#define TCG_TARGET_HAS_abs_vec 0
#define TCG_TARGET_HAS_roti_vec 0
#define TCG_TARGET_HAS_rots_vec 0
#define TCG_TARGET_HAS_rotv_vec 0
#define TCG_TARGET_HAS_shi_vec 0
#define TCG_TARGET_HAS_shs_vec 0
#define TCG_TARGET_HAS_shv_vec 0
#define TCG_TARGET_HAS_mul_vec 0
#define TCG_TARGET_HAS_sat_vec 0
#define TCG_TARGET_HAS_minmax_vec 0
#define TCG_TARGET_HAS_bitsel_vec 0
#define TCG_TARGET_HAS_cmpsel_vec 0
/* used for function call generation */
#define TCG_TARGET_STACK_ALIGN 8
#define TCG_TARGET_CALL_STACK_OFFSET 160


@@ -0,0 +1,12 @@
/*
* Copyright (c) 2021 Linaro
*
* This work is licensed under the terms of the GNU GPL, version 2 or
* (at your option) any later version.
*
* See the COPYING file in the top-level directory for details.
*
* Target-specific opcodes for host vector expansion. These will be
* emitted by tcg_expand_vec_op. For those familiar with GCC internals,
* consider these to be UNSPEC with names.
*/