tcg/optimize: Split out fold_const{1,2}

Split out a whole bunch of placeholder functions, which are
currently identical.  That won't last as more code gets moved.

Use CASE_OP_32_64_VEC for some logical operators that previously
missed the addition of vectors.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Author: Richard Henderson <richard.henderson@linaro.org>
Date:   2021-08-25 12:03:48 -07:00
Parent: 3eefdf2b58
Commit: 2f9f08ba43
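For context on the CASE_OP_32_64_VEC change mentioned above: these case-label helpers are assumed to be defined near the top of tcg/optimize.c roughly as sketched below (their definitions are not part of this diff). The _VEC variant also emits the case label for the vector opcode, which is why switching the logical operators to it picks up their vector forms.

/* Sketch of the case-label helpers assumed to exist in tcg/optimize.c.
 * glue() is QEMU's token-pasting macro.  CASE_OP_32_64 expands to the
 * _i32 and _i64 case labels; CASE_OP_32_64_VEC adds the _vec label,
 * so one switch entry also dispatches the vector form of the opcode. */
#define CASE_OP_32_64(x)                        \
        glue(glue(case INDEX_op_, x), _i32):    \
        glue(glue(case INDEX_op_, x), _i64)

#define CASE_OP_32_64_VEC(x)                    \
        glue(glue(case INDEX_op_, x), _i32):    \
        glue(glue(case INDEX_op_, x), _i64):    \
        glue(glue(case INDEX_op_, x), _vec)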

@@ -660,6 +660,60 @@ static void finish_folding(OptContext *ctx, TCGOp *op)
    }
}

/*
 * The fold_* functions return true when processing is complete,
 * usually by folding the operation to a constant or to a copy,
 * and calling tcg_opt_gen_{mov,movi}. They may do other things,
 * like collect information about the value produced, for use in
 * optimizing a subsequent operation.
 *
 * These first fold_* functions are all helpers, used by other
 * folders for more specific operations.
 */

static bool fold_const1(OptContext *ctx, TCGOp *op)
{
    if (arg_is_const(op->args[1])) {
        uint64_t t;

        t = arg_info(op->args[1])->val;
        t = do_constant_folding(op->opc, t, 0);
        return tcg_opt_gen_movi(ctx, op, op->args[0], t);
    }
    return false;
}

static bool fold_const2(OptContext *ctx, TCGOp *op)
{
    if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
        uint64_t t1 = arg_info(op->args[1])->val;
        uint64_t t2 = arg_info(op->args[2])->val;

        t1 = do_constant_folding(op->opc, t1, t2);
        return tcg_opt_gen_movi(ctx, op, op->args[0], t1);
    }
    return false;
}

/*
 * These outermost fold_<op> functions are sorted alphabetically.
 */

static bool fold_add(OptContext *ctx, TCGOp *op)
{
    return fold_const2(ctx, op);
}

static bool fold_and(OptContext *ctx, TCGOp *op)
{
    return fold_const2(ctx, op);
}

static bool fold_andc(OptContext *ctx, TCGOp *op)
{
    return fold_const2(ctx, op);
}

static bool fold_call(OptContext *ctx, TCGOp *op)
{
    TCGContext *s = ctx->tcg;
@@ -692,6 +746,31 @@ static bool fold_call(OptContext *ctx, TCGOp *op)
    return true;
}

static bool fold_ctpop(OptContext *ctx, TCGOp *op)
{
    return fold_const1(ctx, op);
}

static bool fold_divide(OptContext *ctx, TCGOp *op)
{
    return fold_const2(ctx, op);
}

static bool fold_eqv(OptContext *ctx, TCGOp *op)
{
    return fold_const2(ctx, op);
}

static bool fold_exts(OptContext *ctx, TCGOp *op)
{
    return fold_const1(ctx, op);
}

static bool fold_extu(OptContext *ctx, TCGOp *op)
{
    return fold_const1(ctx, op);
}

static bool fold_mb(OptContext *ctx, TCGOp *op)
{
    /* Eliminate duplicate and redundant fence instructions. */
@@ -716,6 +795,46 @@ static bool fold_mb(OptContext *ctx, TCGOp *op)
    return true;
}

static bool fold_mul(OptContext *ctx, TCGOp *op)
{
    return fold_const2(ctx, op);
}

static bool fold_mul_highpart(OptContext *ctx, TCGOp *op)
{
    return fold_const2(ctx, op);
}

static bool fold_nand(OptContext *ctx, TCGOp *op)
{
    return fold_const2(ctx, op);
}

static bool fold_neg(OptContext *ctx, TCGOp *op)
{
    return fold_const1(ctx, op);
}

static bool fold_nor(OptContext *ctx, TCGOp *op)
{
    return fold_const2(ctx, op);
}

static bool fold_not(OptContext *ctx, TCGOp *op)
{
    return fold_const1(ctx, op);
}

static bool fold_or(OptContext *ctx, TCGOp *op)
{
    return fold_const2(ctx, op);
}

static bool fold_orc(OptContext *ctx, TCGOp *op)
{
    return fold_const2(ctx, op);
}

static bool fold_qemu_ld(OptContext *ctx, TCGOp *op)
{
    /* Opcodes that touch guest memory stop the mb optimization. */
@@ -730,6 +849,26 @@ static bool fold_qemu_st(OptContext *ctx, TCGOp *op)
    return false;
}

static bool fold_remainder(OptContext *ctx, TCGOp *op)
{
    return fold_const2(ctx, op);
}

static bool fold_shift(OptContext *ctx, TCGOp *op)
{
    return fold_const2(ctx, op);
}

static bool fold_sub(OptContext *ctx, TCGOp *op)
{
    return fold_const2(ctx, op);
}

static bool fold_xor(OptContext *ctx, TCGOp *op)
{
    return fold_const2(ctx, op);
}

/* Propagate constants and copies, fold constant expressions. */
void tcg_optimize(TCGContext *s)
{
@@ -1276,26 +1415,6 @@ void tcg_optimize(TCGContext *s)
            }
            break;

        CASE_OP_32_64(not):
        CASE_OP_32_64(neg):
        CASE_OP_32_64(ext8s):
        CASE_OP_32_64(ext8u):
        CASE_OP_32_64(ext16s):
        CASE_OP_32_64(ext16u):
        CASE_OP_32_64(ctpop):
        case INDEX_op_ext32s_i64:
        case INDEX_op_ext32u_i64:
        case INDEX_op_ext_i32_i64:
        case INDEX_op_extu_i32_i64:
        case INDEX_op_extrl_i64_i32:
        case INDEX_op_extrh_i64_i32:
            if (arg_is_const(op->args[1])) {
                tmp = do_constant_folding(opc, arg_info(op->args[1])->val, 0);
                tcg_opt_gen_movi(&ctx, op, op->args[0], tmp);
                continue;
            }
            break;

        CASE_OP_32_64(bswap16):
        CASE_OP_32_64(bswap32):
        case INDEX_op_bswap64_i64:
@@ -1307,36 +1426,6 @@ void tcg_optimize(TCGContext *s)
            }
            break;

        CASE_OP_32_64(add):
        CASE_OP_32_64(sub):
        CASE_OP_32_64(mul):
        CASE_OP_32_64(or):
        CASE_OP_32_64(and):
        CASE_OP_32_64(xor):
        CASE_OP_32_64(shl):
        CASE_OP_32_64(shr):
        CASE_OP_32_64(sar):
        CASE_OP_32_64(rotl):
        CASE_OP_32_64(rotr):
        CASE_OP_32_64(andc):
        CASE_OP_32_64(orc):
        CASE_OP_32_64(eqv):
        CASE_OP_32_64(nand):
        CASE_OP_32_64(nor):
        CASE_OP_32_64(muluh):
        CASE_OP_32_64(mulsh):
        CASE_OP_32_64(div):
        CASE_OP_32_64(divu):
        CASE_OP_32_64(rem):
        CASE_OP_32_64(remu):
            if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
                tmp = do_constant_folding(opc, arg_info(op->args[1])->val,
                                          arg_info(op->args[2])->val);
                tcg_opt_gen_movi(&ctx, op, op->args[0], tmp);
                continue;
            }
            break;

        CASE_OP_32_64(clz):
        CASE_OP_32_64(ctz):
            if (arg_is_const(op->args[1])) {
@@ -1637,9 +1726,73 @@ void tcg_optimize(TCGContext *s)
            }
            break;

        default:
            break;

        /* ---------------------------------------------------------- */
        /* Sorted alphabetically by opcode as much as possible. */

        CASE_OP_32_64_VEC(add):
            done = fold_add(&ctx, op);
            break;
        CASE_OP_32_64_VEC(and):
            done = fold_and(&ctx, op);
            break;
        CASE_OP_32_64_VEC(andc):
            done = fold_andc(&ctx, op);
            break;
        CASE_OP_32_64(ctpop):
            done = fold_ctpop(&ctx, op);
            break;
        CASE_OP_32_64(div):
        CASE_OP_32_64(divu):
            done = fold_divide(&ctx, op);
            break;
        CASE_OP_32_64(eqv):
            done = fold_eqv(&ctx, op);
            break;
        CASE_OP_32_64(ext8s):
        CASE_OP_32_64(ext16s):
        case INDEX_op_ext32s_i64:
        case INDEX_op_ext_i32_i64:
            done = fold_exts(&ctx, op);
            break;
        CASE_OP_32_64(ext8u):
        CASE_OP_32_64(ext16u):
        case INDEX_op_ext32u_i64:
        case INDEX_op_extu_i32_i64:
        case INDEX_op_extrl_i64_i32:
        case INDEX_op_extrh_i64_i32:
            done = fold_extu(&ctx, op);
            break;
        case INDEX_op_mb:
            done = fold_mb(&ctx, op);
            break;
        CASE_OP_32_64(mul):
            done = fold_mul(&ctx, op);
            break;
        CASE_OP_32_64(mulsh):
        CASE_OP_32_64(muluh):
            done = fold_mul_highpart(&ctx, op);
            break;
        CASE_OP_32_64(nand):
            done = fold_nand(&ctx, op);
            break;
        CASE_OP_32_64(neg):
            done = fold_neg(&ctx, op);
            break;
        CASE_OP_32_64(nor):
            done = fold_nor(&ctx, op);
            break;
        CASE_OP_32_64_VEC(not):
            done = fold_not(&ctx, op);
            break;
        CASE_OP_32_64_VEC(or):
            done = fold_or(&ctx, op);
            break;
        CASE_OP_32_64_VEC(orc):
            done = fold_orc(&ctx, op);
            break;
        case INDEX_op_qemu_ld_i32:
        case INDEX_op_qemu_ld_i64:
            done = fold_qemu_ld(&ctx, op);
@@ -1649,8 +1802,22 @@
        case INDEX_op_qemu_st_i64:
            done = fold_qemu_st(&ctx, op);
            break;
        default:
        CASE_OP_32_64(rem):
        CASE_OP_32_64(remu):
            done = fold_remainder(&ctx, op);
            break;
        CASE_OP_32_64(rotl):
        CASE_OP_32_64(rotr):
        CASE_OP_32_64(sar):
        CASE_OP_32_64(shl):
        CASE_OP_32_64(shr):
            done = fold_shift(&ctx, op);
            break;
        CASE_OP_32_64_VEC(sub):
            done = fold_sub(&ctx, op);
            break;
        CASE_OP_32_64_VEC(xor):
            done = fold_xor(&ctx, op);
            break;
        }
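
As the commit message notes, these placeholder bodies won't stay identical; later steps in the series are expected to move opcode-specific simplifications into them. A hypothetical sketch of the shape such a folder could take, layered on fold_const2 and reusing helpers visible in this diff (tcg_opt_gen_movi, arg_is_const, arg_info) plus tcg_opt_gen_mov and args_are_copies from the same file; this is illustration only, not code from the actual follow-up patches:

/* Hypothetical follow-up shape, not part of this commit: once more logic
 * moves out of tcg_optimize(), a folder can add algebraic rules on top
 * of the generic constant folding. */
static bool fold_xor(OptContext *ctx, TCGOp *op)
{
    /* Generic constant folding first: both inputs constant. */
    if (fold_const2(ctx, op)) {
        return true;
    }
    /* x ^ x -> 0 */
    if (args_are_copies(op->args[1], op->args[2])) {
        return tcg_opt_gen_movi(ctx, op, op->args[0], 0);
    }
    /* x ^ 0 -> x */
    if (arg_is_const(op->args[2]) && arg_info(op->args[2])->val == 0) {
        return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
    }
    return false;
}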