target-ppc: Fix add and subf carry generation in narrow mode

The set of computations used in b5a73f8d8a is only valid if the current
word size == target_long size.  This failed to take ppc64 in 32-bit
(narrow) mode into account.

Add a NARROW_MODE macro to avoid conditional compilation.

Signed-off-by: Richard Henderson <rth@twiddle.net>
Signed-off-by: Alexander Graf <agraf@suse.de>
Richard Henderson authored on 2013-03-21 10:01:45 +00:00; committed by Alexander Graf
parent b632a148b6
commit 79482e5ab3
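In brief, the narrow-mode path added below computes the 32-bit carry by
doing the arithmetic in a 64-bit temporary and reading CA back out of
bit 32. A minimal standalone C sketch of the same idea (illustrative
names only, not QEMU code):

    #include <stdint.h>
    #include <stdio.h>

    /* Narrow-mode carry: perform the 32-bit add inside a 64-bit value
       and read the carry out of bit 32.  Two 32-bit operands plus a
       carry-in fit in 33 bits, so the 64-bit sum cannot overflow. */
    static uint32_t add32_ca(uint32_t a, uint32_t b, int ca_in, int *ca_out)
    {
        uint64_t t = (uint64_t)a + (uint64_t)b + ca_in;
        *ca_out = (int)(t >> 32);   /* carry out of bit 31 */
        return (uint32_t)t;         /* low 32 bits are the result */
    }

    int main(void)
    {
        int ca;
        uint32_t r = add32_ca(0xffffffffu, 1, 0, &ca);
        printf("r=%u ca=%d\n", r, ca);   /* prints r=0 ca=1 */
        return 0;
    }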

@@ -204,6 +204,13 @@ typedef struct DisasContext {
     int singlestep_enabled;
 } DisasContext;
 
+/* True when active word size < size of target_long. */
+#ifdef TARGET_PPC64
+# define NARROW_MODE(C) (!(C)->sf_mode)
+#else
+# define NARROW_MODE(C) 0
+#endif
+
 struct opc_handler_t {
     /* invalid bits for instruction 1 (Rc(opcode) == 0) */
     uint32_t inval1;
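Because the 32-bit definition expands to the constant 0, an
if (NARROW_MODE(ctx)) guard is compiled in both configurations but
folded away as dead code on 32-bit targets, which is how the patch
keeps #ifdef TARGET_PPC64 out of the function bodies while both paths
stay type-checked.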
@@ -778,14 +785,26 @@ static inline void gen_op_arith_add(DisasContext *ctx, TCGv ret, TCGv arg1,
     }
 
     if (compute_ca) {
-        TCGv zero = tcg_const_tl(0);
-        if (add_ca) {
-            tcg_gen_add2_tl(t0, cpu_ca, arg1, zero, cpu_ca, zero);
-            tcg_gen_add2_tl(t0, cpu_ca, t0, cpu_ca, arg2, zero);
+        if (NARROW_MODE(ctx)) {
+            TCGv t1 = tcg_temp_new();
+            tcg_gen_ext32u_tl(t1, arg2);
+            tcg_gen_ext32u_tl(t0, arg1);
+            tcg_gen_add_tl(t0, t0, t1);
+            tcg_temp_free(t1);
+            if (add_ca) {
+                tcg_gen_add_tl(t0, t0, cpu_ca);
+            }
+            tcg_gen_shri_tl(cpu_ca, t0, 32);
         } else {
-            tcg_gen_add2_tl(t0, cpu_ca, arg1, zero, arg2, zero);
+            TCGv zero = tcg_const_tl(0);
+            if (add_ca) {
+                tcg_gen_add2_tl(t0, cpu_ca, arg1, zero, cpu_ca, zero);
+                tcg_gen_add2_tl(t0, cpu_ca, t0, cpu_ca, arg2, zero);
+            } else {
+                tcg_gen_add2_tl(t0, cpu_ca, arg1, zero, arg2, zero);
+            }
+            tcg_temp_free(zero);
         }
-        tcg_temp_free(zero);
     } else {
         tcg_gen_add_tl(t0, arg1, arg2);
         if (add_ca) {
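For comparison, the unchanged wide path lets tcg_gen_add2_tl produce
the carry directly: both high halves are seeded with zero, so after the
double-word add cpu_ca holds exactly the carry-out. What the two add2
steps compute, as a standalone C sketch (illustrative only, and
assuming the carry-in is 0 or 1):

    #include <assert.h>
    #include <stdint.h>

    /* Wide-path carry: a two-word add with the high words seeded to
       zero, mirroring the two tcg_gen_add2_tl steps above; hi ends up
       holding the carry-out.  Assumes ca_in is 0 or 1. */
    static uint64_t add64_ca(uint64_t a, uint64_t b, uint64_t ca_in,
                             int add_ca, uint64_t *ca_out)
    {
        uint64_t lo = a, hi = 0;
        if (add_ca) {
            lo += ca_in;
            hi += (lo < ca_in);   /* carry out of a + ca_in */
        }
        lo += b;
        hi += (lo < b);           /* carry out of (a + ca_in) + b */
        *ca_out = hi;
        return lo;
    }

    int main(void)
    {
        uint64_t ca;
        add64_ca(~(uint64_t)0, 1, 0, 0, &ca);
        assert(ca == 1);          /* all-ones + 1 carries out */
        add64_ca(~(uint64_t)0, 0, 1, 1, &ca);
        assert(ca == 1);          /* the carry-in alone can carry out */
        return 0;
    }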
@@ -1114,14 +1133,25 @@ static inline void gen_op_arith_subf(DisasContext *ctx, TCGv ret, TCGv arg1,
 {
     TCGv t0 = ret;
 
-    if (((add_ca && compute_ca) || compute_ov)
-        && (TCGV_EQUAL(ret, arg1) || TCGV_EQUAL(ret, arg2))) {
+    if (compute_ov && (TCGV_EQUAL(ret, arg1) || TCGV_EQUAL(ret, arg2))) {
         t0 = tcg_temp_new();
     }
 
-    if (add_ca) {
-        /* dest = ~arg1 + arg2 + ca.  */
-        if (compute_ca) {
+    if (compute_ca) {
+        /* dest = ~arg1 + arg2 [+ ca].  */
+        if (NARROW_MODE(ctx)) {
+            TCGv inv1 = tcg_temp_new();
+            tcg_gen_not_tl(inv1, arg1);
+            tcg_gen_ext32u_tl(t0, arg2);
+            tcg_gen_ext32u_tl(inv1, inv1);
+            if (add_ca) {
+                tcg_gen_add_tl(t0, t0, cpu_ca);
+            } else {
+                tcg_gen_addi_tl(t0, t0, 1);
+            }
+            tcg_gen_add_tl(t0, t0, inv1);
+            tcg_gen_shri_tl(cpu_ca, t0, 32);
+        } else if (add_ca) {
             TCGv zero, inv1 = tcg_temp_new();
             tcg_gen_not_tl(inv1, arg1);
             zero = tcg_const_tl(0);
@@ -1130,14 +1160,16 @@ static inline void gen_op_arith_subf(DisasContext *ctx, TCGv ret, TCGv arg1,
             tcg_temp_free(zero);
             tcg_temp_free(inv1);
         } else {
-            tcg_gen_sub_tl(t0, arg2, arg1);
-            tcg_gen_add_tl(t0, t0, cpu_ca);
-            tcg_gen_subi_tl(t0, t0, 1);
-        }
-    } else {
-        if (compute_ca) {
-            tcg_gen_setcond_tl(TCG_COND_GEU, cpu_ca, arg2, arg1);
-        }
+            tcg_gen_setcond_tl(TCG_COND_GEU, cpu_ca, arg2, arg1);
+            tcg_gen_sub_tl(t0, arg2, arg1);
+        }
+    } else if (add_ca) {
+        /* Since we're ignoring carry-out, we can simplify the
+           standard ~arg1 + arg2 + ca to arg2 - arg1 + ca - 1.  */
+        tcg_gen_sub_tl(t0, arg2, arg1);
+        tcg_gen_add_tl(t0, t0, cpu_ca);
+        tcg_gen_subi_tl(t0, t0, 1);
+    } else {
         tcg_gen_sub_tl(t0, arg2, arg1);
     }
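The subf rewrite leans on the identity b - a = ~a + b + 1. In narrow
mode the whole expression is evaluated on zero-extended 32-bit operands
inside a 64-bit value, with the carry-in (or the +1) folded in before
reading CA from bit 32; in the wide no-carry-in case the carry-out of
~a + b + 1 is exactly (b >= a), which is what setcond(TCG_COND_GEU)
encodes. A standalone C check of that equivalence (illustrative only):

    #include <assert.h>
    #include <stdint.h>

    /* subfc-style carry for b - a, computed as ~a + b + 1 in 64 bits
       and read out of bit 32; by the identity above it equals (b >= a). */
    static int subf_ca(uint32_t a, uint32_t b)
    {
        uint64_t t = (uint64_t)(uint32_t)~a + (uint64_t)b + 1;
        return (int)(t >> 32);
    }

    int main(void)
    {
        assert(subf_ca(1, 1) == 1);            /* b >= a */
        assert(subf_ca(2, 1) == 0);            /* b <  a */
        assert(subf_ca(0, 0xffffffffu) == 1);
        assert(subf_ca(0xffffffffu, 0) == 0);
        return 0;
    }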