tcg updates

-----BEGIN PGP SIGNATURE-----
 Version: GnuPG v1
 
 iQEcBAABAgAGBQJUKau8AAoJEK0ScMxN0CebBNcIALqzJdKqQqYNvDdgQHhS8L+m
 UUNIpGHik1qFGxOJlDmgVDj5xei5k45ueaX/35VjYOh5I0jl2jTW7ChpRD1nJs+0
 0oRxORZmENLznserwZxBe4J5DGeqXYU+MOualx+QGCq8vPZsBk558oH65sGV/qLg
 BSFUMS2sBypUbFt3Iu17qF2jujLBP3R39FQXrTFhwzj8Hqlsbm4HUPlBdJhjno0h
 pO74zsoyYbHJVEaZvBqg804f5XjxqbiGSpSZq8HqOrnrfoGe437D0N5BLL2V8i9l
 Rhz9mbw+ALaIhISiXoV39zSD2WFbP0kkvLxo6sc9/DaQwo43PSOhy9vtglexQg0=
 =a0so
 -----END PGP SIGNATURE-----

Merge remote-tracking branch 'remotes/rth/tags/tcg-next-20140929' into staging

tcg updates

# gpg: Signature made Mon 29 Sep 2014 19:58:04 BST using RSA key ID 4DD0279B
# gpg: Good signature from "Richard Henderson <rth7680@gmail.com>"
# gpg:                 aka "Richard Henderson <rth@redhat.com>"
# gpg:                 aka "Richard Henderson <rth@twiddle.net>"

* remotes/rth/tags/tcg-next-20140929:
  tcg: Always enable TCGv type checking
  qemu/compiler: Define QEMU_ARTIFICIAL
  tcg-aarch64: Use 32-bit loads for qemu_ld_i32
  tcg-sparc: Use UMULXHI instruction
  tcg-sparc: Rename ADDX/SUBX insns
  tcg-sparc: Use ADDXC in setcond_i64
  tcg-sparc: Fix setcond_i32 uninitialized value
  tcg-sparc: Use ADDXC in addsub2_i64
  tcg-sparc: Support addsub2_i64

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Peter Maydell 2014-09-30 11:52:41 +01:00
commit 45c270b1ea
8 changed files with 224 additions and 113 deletions

disas/sparc.c

@@ -1175,15 +1175,11 @@ static const struct sparc_opcode sparc_opcodes[] = {
{ "subcc", F3(2, 0x14, 0), F3(~2, ~0x14, ~0)|ASI(~0), "1,2,d", 0, v6 },
{ "subcc", F3(2, 0x14, 1), F3(~2, ~0x14, ~1), "1,i,d", 0, v6 },
{ "subx", F3(2, 0x0c, 0), F3(~2, ~0x0c, ~0)|ASI(~0), "1,2,d", 0, v6notv9 },
{ "subx", F3(2, 0x0c, 1), F3(~2, ~0x0c, ~1), "1,i,d", 0, v6notv9 },
{ "subc", F3(2, 0x0c, 0), F3(~2, ~0x0c, ~0)|ASI(~0), "1,2,d", 0, v9 },
{ "subc", F3(2, 0x0c, 1), F3(~2, ~0x0c, ~1), "1,i,d", 0, v9 },
{ "subc", F3(2, 0x0c, 0), F3(~2, ~0x0c, ~0)|ASI(~0), "1,2,d", 0, v6 },
{ "subc", F3(2, 0x0c, 1), F3(~2, ~0x0c, ~1), "1,i,d", 0, v6 },
{ "subxcc", F3(2, 0x1c, 0), F3(~2, ~0x1c, ~0)|ASI(~0), "1,2,d", 0, v6notv9 },
{ "subxcc", F3(2, 0x1c, 1), F3(~2, ~0x1c, ~1), "1,i,d", 0, v6notv9 },
{ "subccc", F3(2, 0x1c, 0), F3(~2, ~0x1c, ~0)|ASI(~0), "1,2,d", 0, v9 },
{ "subccc", F3(2, 0x1c, 1), F3(~2, ~0x1c, ~1), "1,i,d", 0, v9 },
{ "subccc", F3(2, 0x1c, 0), F3(~2, ~0x1c, ~0)|ASI(~0), "1,2,d", 0, v6 },
{ "subccc", F3(2, 0x1c, 1), F3(~2, ~0x1c, ~1), "1,i,d", 0, v6 },
{ "and", F3(2, 0x01, 0), F3(~2, ~0x01, ~0)|ASI(~0), "1,2,d", 0, v6 },
{ "and", F3(2, 0x01, 1), F3(~2, ~0x01, ~1), "1,i,d", 0, v6 },
@@ -1215,19 +1211,13 @@ static const struct sparc_opcode sparc_opcodes[] = {
{ "addcc", F3(2, 0x10, 1), F3(~2, ~0x10, ~1), "1,i,d", 0, v6 },
{ "addcc", F3(2, 0x10, 1), F3(~2, ~0x10, ~1), "i,1,d", 0, v6 },
{ "addx", F3(2, 0x08, 0), F3(~2, ~0x08, ~0)|ASI(~0), "1,2,d", 0, v6notv9 },
{ "addx", F3(2, 0x08, 1), F3(~2, ~0x08, ~1), "1,i,d", 0, v6notv9 },
{ "addx", F3(2, 0x08, 1), F3(~2, ~0x08, ~1), "i,1,d", 0, v6notv9 },
{ "addc", F3(2, 0x08, 0), F3(~2, ~0x08, ~0)|ASI(~0), "1,2,d", 0, v9 },
{ "addc", F3(2, 0x08, 1), F3(~2, ~0x08, ~1), "1,i,d", 0, v9 },
{ "addc", F3(2, 0x08, 1), F3(~2, ~0x08, ~1), "i,1,d", 0, v9 },
{ "addc", F3(2, 0x08, 0), F3(~2, ~0x08, ~0)|ASI(~0), "1,2,d", 0, v6 },
{ "addc", F3(2, 0x08, 1), F3(~2, ~0x08, ~1), "1,i,d", 0, v6 },
{ "addc", F3(2, 0x08, 1), F3(~2, ~0x08, ~1), "i,1,d", 0, v6 },
{ "addxcc", F3(2, 0x18, 0), F3(~2, ~0x18, ~0)|ASI(~0), "1,2,d", 0, v6notv9 },
{ "addxcc", F3(2, 0x18, 1), F3(~2, ~0x18, ~1), "1,i,d", 0, v6notv9 },
{ "addxcc", F3(2, 0x18, 1), F3(~2, ~0x18, ~1), "i,1,d", 0, v6notv9 },
{ "addccc", F3(2, 0x18, 0), F3(~2, ~0x18, ~0)|ASI(~0), "1,2,d", 0, v9 },
{ "addccc", F3(2, 0x18, 1), F3(~2, ~0x18, ~1), "1,i,d", 0, v9 },
{ "addccc", F3(2, 0x18, 1), F3(~2, ~0x18, ~1), "i,1,d", 0, v9 },
{ "addccc", F3(2, 0x18, 0), F3(~2, ~0x18, ~0)|ASI(~0), "1,2,d", 0, v6 },
{ "addccc", F3(2, 0x18, 1), F3(~2, ~0x18, ~1), "1,i,d", 0, v6 },
{ "addccc", F3(2, 0x18, 1), F3(~2, ~0x18, ~1), "i,1,d", 0, v6 },
{ "smul", F3(2, 0x0b, 0), F3(~2, ~0x0b, ~0)|ASI(~0), "1,2,d", 0, v8 },
{ "smul", F3(2, 0x0b, 1), F3(~2, ~0x0b, ~1), "1,i,d", 0, v8 },
@@ -2042,6 +2032,10 @@ IMPDEP ("impdep2", 0x37),
#undef IMPDEP
{ "addxc", F3F(2, 0x36, 0x011), F3F(~2, ~0x36, ~0x011), "1,2,d", 0, v9b },
{ "addxccc", F3F(2, 0x36, 0x013), F3F(~2, ~0x36, ~0x013), "1,2,d", 0, v9b },
{ "umulxhi", F3F(2, 0x36, 0x016), F3F(~2, ~0x36, ~0x016), "1,2,d", 0, v9b },
};
static const int sparc_num_opcodes = ((sizeof sparc_opcodes)/(sizeof sparc_opcodes[0]));

include/elf.h

@@ -473,14 +473,35 @@ typedef struct {
#define PPC_FEATURE_TRUE_LE 0x00000002
#define PPC_FEATURE_PPC_LE 0x00000001
/* Bits present in AT_HWCAP, primarily for Sparc32. */
/* Bits present in AT_HWCAP for Sparc. */
#define HWCAP_SPARC_FLUSH 1 /* CPU supports flush instruction. */
#define HWCAP_SPARC_STBAR 2
#define HWCAP_SPARC_SWAP 4
#define HWCAP_SPARC_MULDIV 8
#define HWCAP_SPARC_V9 16
#define HWCAP_SPARC_ULTRA3 32
#define HWCAP_SPARC_FLUSH 0x00000001
#define HWCAP_SPARC_STBAR 0x00000002
#define HWCAP_SPARC_SWAP 0x00000004
#define HWCAP_SPARC_MULDIV 0x00000008
#define HWCAP_SPARC_V9 0x00000010
#define HWCAP_SPARC_ULTRA3 0x00000020
#define HWCAP_SPARC_BLKINIT 0x00000040
#define HWCAP_SPARC_N2 0x00000080
#define HWCAP_SPARC_MUL32 0x00000100
#define HWCAP_SPARC_DIV32 0x00000200
#define HWCAP_SPARC_FSMULD 0x00000400
#define HWCAP_SPARC_V8PLUS 0x00000800
#define HWCAP_SPARC_POPC 0x00001000
#define HWCAP_SPARC_VIS 0x00002000
#define HWCAP_SPARC_VIS2 0x00004000
#define HWCAP_SPARC_ASI_BLK_INIT 0x00008000
#define HWCAP_SPARC_FMAF 0x00010000
#define HWCAP_SPARC_VIS3 0x00020000
#define HWCAP_SPARC_HPC 0x00040000
#define HWCAP_SPARC_RANDOM 0x00080000
#define HWCAP_SPARC_TRANS 0x00100000
#define HWCAP_SPARC_FJFMAU 0x00200000
#define HWCAP_SPARC_IMA 0x00400000
#define HWCAP_SPARC_ASI_CACHE_SPARING 0x00800000
#define HWCAP_SPARC_PAUSE 0x01000000
#define HWCAP_SPARC_CBCOND 0x02000000
#define HWCAP_SPARC_CRYPTO 0x04000000
/* Bits present in AT_HWCAP for s390. */
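
Each HWCAP_SPARC_* value above is a single bit, so a host capability test is one auxiliary-vector lookup plus a mask. A minimal sketch, assuming a Linux host with glibc's getauxval (host_has_vis3 is a hypothetical helper, not part of this series):

#include <stdbool.h>
#include <sys/auxv.h>

#define HWCAP_SPARC_VIS3 0x00020000

/* True when the SPARC host advertises the VIS3 extension. */
static bool host_has_vis3(void)
{
    unsigned long hwcap = getauxval(AT_HWCAP);
    return (hwcap & HWCAP_SPARC_VIS3) != 0;
}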

include/qemu/compiler.h

@@ -24,6 +24,12 @@
#define QEMU_WARN_UNUSED_RESULT
#endif
#if QEMU_GNUC_PREREQ(4, 3)
#define QEMU_ARTIFICIAL __attribute__((always_inline, artificial))
#else
#define QEMU_ARTIFICIAL
#endif
#if defined(_WIN32)
# define QEMU_PACKED __attribute__((gcc_struct, packed))
#else
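
GCC's artificial attribute makes a tiny wrapper behave like a macro under a debugger (stepping goes straight to the caller), while always_inline removes call overhead even at -O0. A sketch of the intended use, mirroring the tcg.h accessors later in this series; the raw __GNUC__ check stands in for QEMU_GNUC_PREREQ(4, 3), and get_low32 is a hypothetical example:

#include <stdint.h>

#if defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 3))
#define QEMU_ARTIFICIAL __attribute__((always_inline, artificial))
#else
#define QEMU_ARTIFICIAL
#endif

/* Trivial accessor: inlined away, and invisible when single-stepping. */
static inline uint32_t QEMU_ARTIFICIAL get_low32(uint64_t x)
{
    return (uint32_t)x;
}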

tcg/aarch64/tcg-target.c

@@ -1007,7 +1007,7 @@ static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
tcg_out_adr(s, TCG_REG_X3, lb->raddr);
tcg_out_call(s, qemu_ld_helpers[opc & ~MO_SIGN]);
if (opc & MO_SIGN) {
tcg_out_sxt(s, TCG_TYPE_I64, size, lb->datalo_reg, TCG_REG_X0);
tcg_out_sxt(s, lb->type, size, lb->datalo_reg, TCG_REG_X0);
} else {
tcg_out_mov(s, size == MO_64, lb->datalo_reg, TCG_REG_X0);
}
@@ -1032,7 +1032,7 @@ static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
}
static void add_qemu_ldst_label(TCGContext *s, bool is_ld, TCGMemOp opc,
TCGReg data_reg, TCGReg addr_reg,
TCGType ext, TCGReg data_reg, TCGReg addr_reg,
int mem_index, tcg_insn_unit *raddr,
tcg_insn_unit *label_ptr)
{
@@ -1040,6 +1040,7 @@ static void add_qemu_ldst_label(TCGContext *s, bool is_ld, TCGMemOp opc,
label->is_ld = is_ld;
label->opc = opc;
label->type = ext;
label->datalo_reg = data_reg;
label->addrlo_reg = addr_reg;
label->mem_index = mem_index;
@@ -1108,7 +1109,7 @@ static void tcg_out_tlb_read(TCGContext *s, TCGReg addr_reg, TCGMemOp s_bits,
#endif /* CONFIG_SOFTMMU */
static void tcg_out_qemu_ld_direct(TCGContext *s, TCGMemOp memop,
static void tcg_out_qemu_ld_direct(TCGContext *s, TCGMemOp memop, TCGType ext,
TCGReg data_r, TCGReg addr_r, TCGReg off_r)
{
const TCGMemOp bswap = memop & MO_BSWAP;
@@ -1118,7 +1119,8 @@ static void tcg_out_qemu_ld_direct(TCGContext *s, TCGMemOp memop,
tcg_out_ldst_r(s, I3312_LDRB, data_r, addr_r, off_r);
break;
case MO_SB:
tcg_out_ldst_r(s, I3312_LDRSBX, data_r, addr_r, off_r);
tcg_out_ldst_r(s, ext ? I3312_LDRSBX : I3312_LDRSBW,
data_r, addr_r, off_r);
break;
case MO_UW:
tcg_out_ldst_r(s, I3312_LDRH, data_r, addr_r, off_r);
@@ -1130,9 +1132,10 @@ static void tcg_out_qemu_ld_direct(TCGContext *s, TCGMemOp memop,
if (bswap) {
tcg_out_ldst_r(s, I3312_LDRH, data_r, addr_r, off_r);
tcg_out_rev16(s, data_r, data_r);
tcg_out_sxt(s, TCG_TYPE_I64, MO_16, data_r, data_r);
tcg_out_sxt(s, ext, MO_16, data_r, data_r);
} else {
tcg_out_ldst_r(s, I3312_LDRSHX, data_r, addr_r, off_r);
tcg_out_ldst_r(s, ext ? I3312_LDRSHX : I3312_LDRSHW,
data_r, addr_r, off_r);
}
break;
case MO_UL:
@@ -1197,18 +1200,18 @@ static void tcg_out_qemu_st_direct(TCGContext *s, TCGMemOp memop,
}
static void tcg_out_qemu_ld(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
TCGMemOp memop, int mem_index)
TCGMemOp memop, TCGType ext, int mem_index)
{
#ifdef CONFIG_SOFTMMU
TCGMemOp s_bits = memop & MO_SIZE;
tcg_insn_unit *label_ptr;
tcg_out_tlb_read(s, addr_reg, s_bits, &label_ptr, mem_index, 1);
tcg_out_qemu_ld_direct(s, memop, data_reg, addr_reg, TCG_REG_X1);
add_qemu_ldst_label(s, true, memop, data_reg, addr_reg,
tcg_out_qemu_ld_direct(s, memop, ext, data_reg, addr_reg, TCG_REG_X1);
add_qemu_ldst_label(s, true, memop, ext, data_reg, addr_reg,
mem_index, s->code_ptr, label_ptr);
#else /* !CONFIG_SOFTMMU */
tcg_out_qemu_ld_direct(s, memop, data_reg, addr_reg,
tcg_out_qemu_ld_direct(s, memop, ext, data_reg, addr_reg,
GUEST_BASE ? TCG_REG_GUEST_BASE : TCG_REG_XZR);
#endif /* CONFIG_SOFTMMU */
}
@@ -1222,7 +1225,7 @@ static void tcg_out_qemu_st(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
tcg_out_tlb_read(s, addr_reg, s_bits, &label_ptr, mem_index, 0);
tcg_out_qemu_st_direct(s, memop, data_reg, addr_reg, TCG_REG_X1);
add_qemu_ldst_label(s, false, memop, data_reg, addr_reg,
add_qemu_ldst_label(s, false, memop, s_bits == MO_64, data_reg, addr_reg,
mem_index, s->code_ptr, label_ptr);
#else /* !CONFIG_SOFTMMU */
tcg_out_qemu_st_direct(s, memop, data_reg, addr_reg,
@@ -1515,7 +1518,7 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
case INDEX_op_qemu_ld_i32:
case INDEX_op_qemu_ld_i64:
tcg_out_qemu_ld(s, a0, a1, a2, args[3]);
tcg_out_qemu_ld(s, a0, a1, a2, ext, args[3]);
break;
case INDEX_op_qemu_st_i32:
case INDEX_op_qemu_st_i64:
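
The point of threading a TCGType through these helpers: a signed 8- or 16-bit guest load for qemu_ld_i32 must widen only to 32 bits (LDRSBW/LDRSHW) instead of always to 64 (LDRSBX/LDRSHX). In C terms, the ext flag just selects the width of the destination the value is sign-extended into; a sketch of the semantics, not QEMU code:

#include <stdint.h>

/* ext == TCG_TYPE_I32: sign-extend the byte to 32 bits, i.e. LDRSBW. */
static int32_t ld_sb_i32(const int8_t *p) { return *p; }

/* ext == TCG_TYPE_I64: sign-extend the byte to 64 bits, i.e. LDRSBX. */
static int64_t ld_sb_i64(const int8_t *p) { return *p; }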

tcg/sparc/tcg-target.c

@@ -197,8 +197,8 @@ static const int tcg_target_call_oarg_regs[] = {
#define ARITH_XOR (INSN_OP(2) | INSN_OP3(0x03))
#define ARITH_SUB (INSN_OP(2) | INSN_OP3(0x04))
#define ARITH_SUBCC (INSN_OP(2) | INSN_OP3(0x14))
#define ARITH_ADDX (INSN_OP(2) | INSN_OP3(0x08))
#define ARITH_SUBX (INSN_OP(2) | INSN_OP3(0x0c))
#define ARITH_ADDC (INSN_OP(2) | INSN_OP3(0x08))
#define ARITH_SUBC (INSN_OP(2) | INSN_OP3(0x0c))
#define ARITH_UMUL (INSN_OP(2) | INSN_OP3(0x0a))
#define ARITH_SMUL (INSN_OP(2) | INSN_OP3(0x0b))
#define ARITH_UDIV (INSN_OP(2) | INSN_OP3(0x0e))
@@ -209,6 +209,9 @@ static const int tcg_target_call_oarg_regs[] = {
#define ARITH_MOVCC (INSN_OP(2) | INSN_OP3(0x2c))
#define ARITH_MOVR (INSN_OP(2) | INSN_OP3(0x2f))
#define ARITH_ADDXC (INSN_OP(2) | INSN_OP3(0x36) | INSN_OPF(0x11))
#define ARITH_UMULXHI (INSN_OP(2) | INSN_OP3(0x36) | INSN_OPF(0x16))
#define SHIFT_SLL (INSN_OP(2) | INSN_OP3(0x25))
#define SHIFT_SRL (INSN_OP(2) | INSN_OP3(0x26))
#define SHIFT_SRA (INSN_OP(2) | INSN_OP3(0x27))
@@ -262,6 +265,10 @@ static const int tcg_target_call_oarg_regs[] = {
#define STW_LE (STWA | INSN_ASI(ASI_PRIMARY_LITTLE))
#define STX_LE (STXA | INSN_ASI(ASI_PRIMARY_LITTLE))
#ifndef use_vis3_instructions
bool use_vis3_instructions;
#endif
static inline int check_fit_i64(int64_t val, unsigned int bits)
{
return val == sextract64(val, 0, bits);
@@ -657,7 +664,7 @@ static void tcg_out_movcond_i64(TCGContext *s, TCGCond cond, TCGReg ret,
static void tcg_out_setcond_i32(TCGContext *s, TCGCond cond, TCGReg ret,
TCGReg c1, int32_t c2, int c2const)
{
/* For 32-bit comparisons, we can play games with ADDX/SUBX. */
/* For 32-bit comparisons, we can play games with ADDC/SUBC. */
switch (cond) {
case TCG_COND_LTU:
case TCG_COND_GEU:
@@ -668,9 +675,12 @@ static void tcg_out_setcond_i32(TCGContext *s, TCGCond cond, TCGReg ret,
case TCG_COND_NE:
/* For equality, we can transform to inequality vs zero. */
if (c2 != 0) {
tcg_out_arithc(s, ret, c1, c2, c2const, ARITH_XOR);
tcg_out_arithc(s, TCG_REG_T1, c1, c2, c2const, ARITH_XOR);
c2 = TCG_REG_T1;
} else {
c2 = c1;
}
c1 = TCG_REG_G0, c2 = ret, c2const = 0;
c1 = TCG_REG_G0, c2const = 0;
cond = (cond == TCG_COND_EQ ? TCG_COND_GEU : TCG_COND_LTU);
break;
@@ -698,15 +708,32 @@ static void tcg_out_setcond_i32(TCGContext *s, TCGCond cond, TCGReg ret,
tcg_out_cmp(s, c1, c2, c2const);
if (cond == TCG_COND_LTU) {
tcg_out_arithi(s, ret, TCG_REG_G0, 0, ARITH_ADDX);
tcg_out_arithi(s, ret, TCG_REG_G0, 0, ARITH_ADDC);
} else {
tcg_out_arithi(s, ret, TCG_REG_G0, -1, ARITH_SUBX);
tcg_out_arithi(s, ret, TCG_REG_G0, -1, ARITH_SUBC);
}
}
static void tcg_out_setcond_i64(TCGContext *s, TCGCond cond, TCGReg ret,
TCGReg c1, int32_t c2, int c2const)
{
if (use_vis3_instructions) {
switch (cond) {
case TCG_COND_NE:
if (c2 != 0) {
break;
}
c2 = c1, c2const = 0, c1 = TCG_REG_G0;
/* FALLTHRU */
case TCG_COND_LTU:
tcg_out_cmp(s, c1, c2, c2const);
tcg_out_arith(s, ret, TCG_REG_G0, TCG_REG_G0, ARITH_ADDXC);
return;
default:
break;
}
}
/* For 64-bit signed comparisons vs zero, we can avoid the compare
if the input does not overlap the output. */
if (c2 == 0 && !is_unsigned_cond(cond) && c1 != ret) {
@@ -719,9 +746,9 @@ static void tcg_out_setcond_i64(TCGContext *s, TCGCond cond, TCGReg ret,
}
}
static void tcg_out_addsub2(TCGContext *s, TCGReg rl, TCGReg rh,
TCGReg al, TCGReg ah, int32_t bl, int blconst,
int32_t bh, int bhconst, int opl, int oph)
static void tcg_out_addsub2_i32(TCGContext *s, TCGReg rl, TCGReg rh,
TCGReg al, TCGReg ah, int32_t bl, int blconst,
int32_t bh, int bhconst, int opl, int oph)
{
TCGReg tmp = TCG_REG_T1;
@@ -735,6 +762,54 @@ static void tcg_out_addsub2(TCGContext *s, TCGReg rl, TCGReg rh,
tcg_out_mov(s, TCG_TYPE_I32, rl, tmp);
}
static void tcg_out_addsub2_i64(TCGContext *s, TCGReg rl, TCGReg rh,
TCGReg al, TCGReg ah, int32_t bl, int blconst,
int32_t bh, int bhconst, bool is_sub)
{
TCGReg tmp = TCG_REG_T1;
/* Note that the low parts are fully consumed before tmp is set. */
if (rl != ah && (bhconst || rl != bh)) {
tmp = rl;
}
tcg_out_arithc(s, tmp, al, bl, blconst, is_sub ? ARITH_SUBCC : ARITH_ADDCC);
if (use_vis3_instructions && !is_sub) {
/* Note that ADDXC doesn't accept immediates. */
if (bhconst && bh != 0) {
tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_T2, bh);
bh = TCG_REG_T2;
}
tcg_out_arith(s, rh, ah, bh, ARITH_ADDXC);
} else if (bh == TCG_REG_G0) {
/* If we have a zero, we can perform the operation in two insns,
with the arithmetic first, and a conditional move into place. */
if (rh == ah) {
tcg_out_arithi(s, TCG_REG_T2, ah, 1,
is_sub ? ARITH_SUB : ARITH_ADD);
tcg_out_movcc(s, TCG_COND_LTU, MOVCC_XCC, rh, TCG_REG_T2, 0);
} else {
tcg_out_arithi(s, rh, ah, 1, is_sub ? ARITH_SUB : ARITH_ADD);
tcg_out_movcc(s, TCG_COND_GEU, MOVCC_XCC, rh, ah, 0);
}
} else {
/* Otherwise adjust BH as if there is carry into T2 ... */
if (bhconst) {
tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_T2, bh + (is_sub ? -1 : 1));
} else {
tcg_out_arithi(s, TCG_REG_T2, bh, 1,
is_sub ? ARITH_SUB : ARITH_ADD);
}
/* ... smoosh T2 back to original BH if carry is clear ... */
tcg_out_movcc(s, TCG_COND_GEU, MOVCC_XCC, TCG_REG_T2, bh, bhconst);
/* ... and finally perform the arithmetic with the new operand. */
tcg_out_arith(s, rh, ah, TCG_REG_T2, is_sub ? ARITH_SUB : ARITH_ADD);
}
tcg_out_mov(s, TCG_TYPE_I64, rl, tmp);
}
static void tcg_out_call_nodelay(TCGContext *s, tcg_insn_unit *dest)
{
ptrdiff_t disp = tcg_pcrel_diff(s, dest);
@@ -1264,12 +1339,14 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
break;
case INDEX_op_add2_i32:
tcg_out_addsub2(s, a0, a1, a2, args[3], args[4], const_args[4],
args[5], const_args[5], ARITH_ADDCC, ARITH_ADDX);
tcg_out_addsub2_i32(s, args[0], args[1], args[2], args[3],
args[4], const_args[4], args[5], const_args[5],
ARITH_ADDCC, ARITH_ADDC);
break;
case INDEX_op_sub2_i32:
tcg_out_addsub2(s, a0, a1, a2, args[3], args[4], const_args[4],
args[5], const_args[5], ARITH_SUBCC, ARITH_SUBX);
tcg_out_addsub2_i32(s, args[0], args[1], args[2], args[3],
args[4], const_args[4], args[5], const_args[5],
ARITH_SUBCC, ARITH_SUBC);
break;
case INDEX_op_mulu2_i32:
c = ARITH_UMUL;
@@ -1351,6 +1428,17 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
case INDEX_op_movcond_i64:
tcg_out_movcond_i64(s, args[5], a0, a1, a2, c2, args[3], const_args[3]);
break;
case INDEX_op_add2_i64:
tcg_out_addsub2_i64(s, args[0], args[1], args[2], args[3], args[4],
const_args[4], args[5], const_args[5], false);
break;
case INDEX_op_sub2_i64:
tcg_out_addsub2_i64(s, args[0], args[1], args[2], args[3], args[4],
const_args[4], args[5], const_args[5], true);
break;
case INDEX_op_muluh_i64:
tcg_out_arith(s, args[0], args[1], args[2], ARITH_UMULXHI);
break;
gen_arith:
tcg_out_arithc(s, a0, a1, a2, c2, c);
@@ -1449,6 +1537,10 @@ static const TCGTargetOpDef sparc_op_defs[] = {
{ INDEX_op_setcond_i64, { "R", "RZ", "RJ" } },
{ INDEX_op_movcond_i64, { "R", "RZ", "RJ", "RI", "0" } },
{ INDEX_op_add2_i64, { "R", "R", "RZ", "RZ", "RJ", "RI" } },
{ INDEX_op_sub2_i64, { "R", "R", "RZ", "RZ", "RJ", "RI" } },
{ INDEX_op_muluh_i64, { "R", "RZ", "RZ" } },
{ INDEX_op_qemu_ld_i32, { "r", "A" } },
{ INDEX_op_qemu_ld_i64, { "R", "A" } },
{ INDEX_op_qemu_st_i32, { "sZ", "A" } },
@@ -1459,6 +1551,15 @@ static const TCGTargetOpDef sparc_op_defs[] = {
static void tcg_target_init(TCGContext *s)
{
/* Only probe for the platform and capabilities if we haven't already
determined maximum values at compile time. */
#ifndef use_vis3_instructions
{
unsigned long hwcap = qemu_getauxval(AT_HWCAP);
use_vis3_instructions = (hwcap & HWCAP_SPARC_VIS3) != 0;
}
#endif
tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I32], 0, 0xffffffff);
tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I64], 0, ALL_64);
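
Both the setcond changes and the new addsub2_i64 lean on one SPARC idiom: SUBCC (cmp) or ADDCC leaves the unsigned carry in %icc/%xcc, and ADDC/ADDXC folds that carry back into a result register. Modeled in portable C (a sketch of the semantics, not QEMU code):

#include <stdint.h>

/* setcond LTU: "subcc c1, c2" sets carry iff c1 < c2 unsigned;
   "addc %g0, 0, ret" then materializes the flag as 0 or 1. */
static uint32_t setcond_ltu(uint32_t c1, uint32_t c2)
{
    return c1 < c2;
}

/* add2_i64: a 128-bit add built from 64-bit halves; the carry out of
   the low half feeds the high half (ADDXC consumes it directly on
   VIS3 hosts, the movcc sequence emulates it elsewhere). */
static void add2_i64(uint64_t *rl, uint64_t *rh,
                     uint64_t al, uint64_t ah,
                     uint64_t bl, uint64_t bh)
{
    uint64_t lo = al + bl;
    *rl = lo;
    *rh = ah + bh + (lo < al);  /* (lo < al) is the carry out of the low add */
}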

tcg/sparc/tcg-target.h

@@ -85,6 +85,12 @@ typedef enum {
#define TCG_TARGET_EXTEND_ARGS 1
#endif
#if defined(__VIS__) && __VIS__ >= 0x300
#define use_vis3_instructions 1
#else
extern bool use_vis3_instructions;
#endif
/* optional instructions */
#define TCG_TARGET_HAS_div_i32 1
#define TCG_TARGET_HAS_rem_i32 0
@@ -133,11 +139,11 @@ typedef enum {
#define TCG_TARGET_HAS_nor_i64 0
#define TCG_TARGET_HAS_deposit_i64 0
#define TCG_TARGET_HAS_movcond_i64 1
#define TCG_TARGET_HAS_add2_i64 0
#define TCG_TARGET_HAS_sub2_i64 0
#define TCG_TARGET_HAS_add2_i64 1
#define TCG_TARGET_HAS_sub2_i64 1
#define TCG_TARGET_HAS_mulu2_i64 0
#define TCG_TARGET_HAS_muls2_i64 0
#define TCG_TARGET_HAS_muluh_i64 0
#define TCG_TARGET_HAS_muluh_i64 use_vis3_instructions
#define TCG_TARGET_HAS_mulsh_i64 0
#define TCG_AREG0 TCG_REG_I0
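
The use_vis3_instructions gate is a common compile-time/runtime hybrid: when the compiler already targets the feature (__VIS__ >= 0x300) the flag is the constant 1 and the fallback paths are dead-stripped; otherwise it is a runtime bool that tcg_target_init() fills in from AT_HWCAP. The same shape with a hypothetical feature "foo":

#include <stdbool.h>

#ifdef __FOO__                    /* hypothetical compiler-defined macro */
#define use_foo_instructions 1    /* constant: fallback branches fold away */
#else
extern bool use_foo_instructions; /* probed once at startup via getauxval */
#endif

static inline bool can_use_foo(void)
{
    return use_foo_instructions;
}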

tcg/tcg-be-ldst.h

@@ -24,8 +24,9 @@
#define TCG_MAX_QEMU_LDST 640
typedef struct TCGLabelQemuLdst {
bool is_ld:1; /* qemu_ld: true, qemu_st: false */
TCGMemOp opc:4;
bool is_ld; /* qemu_ld: true, qemu_st: false */
TCGMemOp opc;
TCGType type; /* result type of a load */
TCGReg addrlo_reg; /* reg index for low word of guest virtual addr */
TCGReg addrhi_reg; /* reg index for high word of guest virtual addr */
TCGReg datalo_reg; /* reg index for low word to be loaded or stored */

tcg/tcg.h

@@ -274,75 +274,54 @@ typedef enum TCGMemOp {
typedef tcg_target_ulong TCGArg;
/* Define a type and accessor macros for variables. Using a struct is
nice because it gives some level of type safety. Ideally the compiler would
be able to see through all this. However in practice this is not true,
especially on targets with braindamaged ABIs (e.g. i386).
We use plain int by default to avoid this runtime overhead.
Users of tcg_gen_* don't need to know about any of this, and should
treat TCGv as an opaque type.
/* Define a type and accessor macros for variables. Using pointer types
is nice because it gives some level of type safety. Converting to and
from intptr_t rather than int reduces the number of sign-extension
instructions that get implied on 64-bit hosts. Users of tcg_gen_* don't
need to know about any of this, and should treat TCGv as an opaque type.
In addition we do typechecking for different types of variables. TCGv_i32
and TCGv_i64 are 32/64-bit variables respectively. TCGv and TCGv_ptr
are aliases for target_ulong and host pointer sized values respectively.
*/
are aliases for target_ulong and host pointer sized values respectively. */
#ifdef CONFIG_DEBUG_TCG
#define DEBUG_TCGV 1
#endif
typedef struct TCGv_i32_d *TCGv_i32;
typedef struct TCGv_i64_d *TCGv_i64;
typedef struct TCGv_ptr_d *TCGv_ptr;
#ifdef DEBUG_TCGV
typedef struct
static inline TCGv_i32 QEMU_ARTIFICIAL MAKE_TCGV_I32(intptr_t i)
{
int i32;
} TCGv_i32;
return (TCGv_i32)i;
}
typedef struct
static inline TCGv_i64 QEMU_ARTIFICIAL MAKE_TCGV_I64(intptr_t i)
{
int i64;
} TCGv_i64;
return (TCGv_i64)i;
}
typedef struct {
int iptr;
} TCGv_ptr;
static inline TCGv_ptr QEMU_ARTIFICIAL MAKE_TCGV_PTR(intptr_t i)
{
return (TCGv_ptr)i;
}
static inline intptr_t QEMU_ARTIFICIAL GET_TCGV_I32(TCGv_i32 t)
{
return (intptr_t)t;
}
static inline intptr_t QEMU_ARTIFICIAL GET_TCGV_I64(TCGv_i64 t)
{
return (intptr_t)t;
}
static inline intptr_t QEMU_ARTIFICIAL GET_TCGV_PTR(TCGv_ptr t)
{
return (intptr_t)t;
}
#define MAKE_TCGV_I32(i) __extension__ \
({ TCGv_i32 make_tcgv_tmp = {i}; make_tcgv_tmp;})
#define MAKE_TCGV_I64(i) __extension__ \
({ TCGv_i64 make_tcgv_tmp = {i}; make_tcgv_tmp;})
#define MAKE_TCGV_PTR(i) __extension__ \
({ TCGv_ptr make_tcgv_tmp = {i}; make_tcgv_tmp; })
#define GET_TCGV_I32(t) ((t).i32)
#define GET_TCGV_I64(t) ((t).i64)
#define GET_TCGV_PTR(t) ((t).iptr)
#if TCG_TARGET_REG_BITS == 32
#define TCGV_LOW(t) MAKE_TCGV_I32(GET_TCGV_I64(t))
#define TCGV_HIGH(t) MAKE_TCGV_I32(GET_TCGV_I64(t) + 1)
#endif
#else /* !DEBUG_TCGV */
typedef int TCGv_i32;
typedef int TCGv_i64;
#if TCG_TARGET_REG_BITS == 32
#define TCGv_ptr TCGv_i32
#else
#define TCGv_ptr TCGv_i64
#endif
#define MAKE_TCGV_I32(x) (x)
#define MAKE_TCGV_I64(x) (x)
#define MAKE_TCGV_PTR(x) (x)
#define GET_TCGV_I32(t) (t)
#define GET_TCGV_I64(t) (t)
#define GET_TCGV_PTR(t) (t)
#if TCG_TARGET_REG_BITS == 32
#define TCGV_LOW(t) (t)
#define TCGV_HIGH(t) ((t) + 1)
#endif
#endif /* DEBUG_TCGV */
#define TCGV_EQUAL_I32(a, b) (GET_TCGV_I32(a) == GET_TCGV_I32(b))
#define TCGV_EQUAL_I64(a, b) (GET_TCGV_I64(a) == GET_TCGV_I64(b))
#define TCGV_EQUAL_PTR(a, b) (GET_TCGV_PTR(a) == GET_TCGV_PTR(b))
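
With the pointer typedefs now unconditional, passing a 32-bit temporary where a 64-bit one is expected is a compile-time type error instead of a silently accepted int, while the accessors still reduce to plain integer moves. A self-contained sketch (takes_i64 and example are hypothetical, not QEMU code):

#include <stdint.h>

typedef struct TCGv_i32_d *TCGv_i32;  /* opaque: never dereferenced */
typedef struct TCGv_i64_d *TCGv_i64;

static inline TCGv_i32 MAKE_TCGV_I32(intptr_t i) { return (TCGv_i32)i; }
static inline intptr_t GET_TCGV_I32(TCGv_i32 t) { return (intptr_t)t; }

void takes_i64(TCGv_i64 t);           /* hypothetical consumer */

void example(void)
{
    TCGv_i32 r = MAKE_TCGV_I32(3);
    /* takes_i64(r);  <- now rejected by the compiler; with the old
       plain-int typedefs this compiled without complaint. */
    (void)GET_TCGV_I32(r);
}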