diff --git a/target/riscv/cpu.h b/target/riscv/cpu.h
index 6c740b92c1..fe58ccaeae 100644
--- a/target/riscv/cpu.h
+++ b/target/riscv/cpu.h
@@ -491,6 +491,11 @@ static inline RISCVMXL cpu_recompute_xl(CPURISCVState *env)
 }
 #endif
 
+static inline int riscv_cpu_xlen(CPURISCVState *env)
+{
+    return 16 << env->xl;
+}
+
 /*
  * Encode LMUL to lmul as follows:
  *     LMUL    vlmul    lmul
diff --git a/target/riscv/vector_helper.c b/target/riscv/vector_helper.c
index a9484c22ea..8b7c9ec890 100644
--- a/target/riscv/vector_helper.c
+++ b/target/riscv/vector_helper.c
@@ -36,8 +36,11 @@ target_ulong HELPER(vsetvl)(CPURISCVState *env, target_ulong s1,
     uint64_t lmul = FIELD_EX64(s2, VTYPE, VLMUL);
     uint16_t sew = 8 << FIELD_EX64(s2, VTYPE, VSEW);
     uint8_t ediv = FIELD_EX64(s2, VTYPE, VEDIV);
-    bool vill = FIELD_EX64(s2, VTYPE, VILL);
-    target_ulong reserved = FIELD_EX64(s2, VTYPE, RESERVED);
+    int xlen = riscv_cpu_xlen(env);
+    bool vill = (s2 >> (xlen - 1)) & 0x1;
+    target_ulong reserved = s2 &
+                            MAKE_64BIT_MASK(R_VTYPE_RESERVED_SHIFT,
+                                            xlen - 1 - R_VTYPE_RESERVED_SHIFT);
     if (lmul & 4) {
         /* Fractional LMUL. */
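
For reference, a minimal standalone sketch of the new vtype decoding above. It assumes MAKE_64BIT_MASK(shift, length) builds a length-bit mask starting at bit shift (QEMU's definition is copied in here for self-containment) and uses a placeholder value for R_VTYPE_RESERVED_SHIFT, which depends on the vtype field layout in the tree this patch targets. The point it illustrates is why the fixed FIELD_EX64-based extraction had to go: once XLEN is a runtime property of the CPU, the position of vill (bit XLEN-1) and the width of the reserved field both move with XLEN.

/* Standalone sketch of the XLEN-dependent vtype decoding; not QEMU code. */
#include <stdbool.h>
#include <stdint.h>
#include <inttypes.h>
#include <stdio.h>

/* As in QEMU's include/qemu/bitops.h, copied for self-containment. */
#define MAKE_64BIT_MASK(shift, length) \
    (((~0ULL) >> (64 - (length))) << (shift))

/* Placeholder: first bit of vtype's reserved field (layout-dependent). */
#define R_VTYPE_RESERVED_SHIFT 7

/*
 * vill is the most significant bit of vtype, i.e. bit XLEN-1; the
 * reserved field spans from R_VTYPE_RESERVED_SHIFT up to bit XLEN-2.
 */
static void decode_vtype(uint64_t s2, int xlen)
{
    bool vill = (s2 >> (xlen - 1)) & 0x1;
    uint64_t reserved = s2 &
                        MAKE_64BIT_MASK(R_VTYPE_RESERVED_SHIFT,
                                        xlen - 1 - R_VTYPE_RESERVED_SHIFT);
    printf("xlen=%d vill=%d reserved=0x%" PRIx64 "\n",
           xlen, (int)vill, reserved);
}

int main(void)
{
    uint64_t vtype = 1ULL << 31;  /* only bit 31 set */
    decode_vtype(vtype, 32);      /* bit 31 is vill on RV32 */
    decode_vtype(vtype, 64);      /* bit 31 is a reserved bit on RV64 */
    return 0;
}

Running this with only bit 31 set reports vill=1 on a 32-bit XLEN but vill=0 with a nonzero reserved field on a 64-bit XLEN, which is exactly the distinction a fixed, target_ulong-sized field layout cannot express.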