Merge remote-tracking branch 'remotes/alistair/tags/pull-riscv-to-apply-20200702-1' into staging

This PR contains two patches to improve PLIC support in QEMU.

It also contains one patch that fixes CLINT accesses for RISC-V. This
fixes a regression for most RISC-V boards.

The rest of the PR adds support for the v0.7.1 RISC-V vector
extensions. This support is experimental, as the vector extensions are
still in a draft state.

This is a v2 pull request that fixes the build failure on big-endian
machines.

# gpg: Signature made Thu 02 Jul 2020 17:21:54 BST
# gpg:                using RSA key F6C4AC46D4934868D3B8CE8F21E10D29DF977054
# gpg: Good signature from "Alistair Francis <alistair@alistair23.me>" [full]
# Primary key fingerprint: F6C4 AC46 D493 4868 D3B8  CE8F 21E1 0D29 DF97 7054

* remotes/alistair/tags/pull-riscv-to-apply-20200702-1: (64 commits)
  target/riscv: configure and turn on vector extension from command line
  target/riscv: vector compress instruction
  target/riscv: vector register gather instruction
  target/riscv: vector slide instructions
  target/riscv: floating-point scalar move instructions
  target/riscv: integer scalar move instruction
  target/riscv: integer extract instruction
  target/riscv: vector element index instruction
  target/riscv: vector iota instruction
  target/riscv: set-X-first mask bit
  target/riscv: vmfirst find-first-set mask bit
  target/riscv: vector mask population count vmpopc
  target/riscv: vector mask-register logical instructions
  target/riscv: vector widening floating-point reduction instructions
  target/riscv: vector single-width floating-point reduction instructions
  target/riscv: vector widening integer reduction instructions
  target/riscv: vector single-width integer reduction instructions
  target/riscv: narrowing floating-point/integer type-convert instructions
  target/riscv: widening floating-point/integer type-convert instructions
  target/riscv: vector floating-point/integer type-convert instructions
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Commit 5f42c3375d by Peter Maydell, 2020-07-03 16:58:38 +01:00
15 changed files with 9536 additions and 52 deletions


@ -181,7 +181,7 @@ static const MemoryRegionOps sifive_clint_ops = {
.endianness = DEVICE_LITTLE_ENDIAN,
.valid = {
.min_access_size = 4,
.max_access_size = 4
.max_access_size = 8
}
};


@ -166,6 +166,9 @@ static void sifive_plic_update(SiFivePLICState *plic)
static uint32_t sifive_plic_claim(SiFivePLICState *plic, uint32_t addrid)
{
int i, j;
uint32_t max_irq = 0;
uint32_t max_prio = plic->target_priority[addrid];
for (i = 0; i < plic->bitfield_words; i++) {
uint32_t pending_enabled_not_claimed =
(plic->pending[i] & ~plic->claimed[i]) &
@ -177,14 +180,18 @@ static uint32_t sifive_plic_claim(SiFivePLICState *plic, uint32_t addrid)
int irq = (i << 5) + j;
uint32_t prio = plic->source_priority[irq];
int enabled = pending_enabled_not_claimed & (1 << j);
if (enabled && prio > plic->target_priority[addrid]) {
sifive_plic_set_pending(plic, irq, false);
sifive_plic_set_claimed(plic, irq, true);
return irq;
if (enabled && prio > max_prio) {
max_irq = irq;
max_prio = prio;
}
}
}
return 0;
if (max_irq) {
sifive_plic_set_pending(plic, max_irq, false);
sifive_plic_set_claimed(plic, max_irq, true);
}
return max_irq;
}
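
Editor's note on the change above: a claim now returns the highest-priority
source among the pending, enabled and not-yet-claimed interrupts above the
hart's threshold, rather than the first such source found in bitfield order.
Stripped of the PLIC bookkeeping, the selection is a simple argmax scan,
sketched below with hypothetical names (not code from the patch):

    /* Pick the eligible source with the highest priority strictly above
     * the threshold; ties keep the lowest-numbered source because the
     * comparison is '>', matching the patched sifive_plic_claim(). */
    static int claim_best_irq(const unsigned *prio, const unsigned char *eligible,
                              int nr_sources, unsigned threshold)
    {
        int best_irq = 0;            /* 0 means "nothing to claim" */
        unsigned best_prio = threshold;
        for (int irq = 1; irq < nr_sources; irq++) {
            if (eligible[irq] && prio[irq] > best_prio) {
                best_irq = irq;
                best_prio = prio[irq];
            }
        }
        return best_irq;
    }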
static uint64_t sifive_plic_read(void *opaque, hwaddr addr, unsigned size)
@ -248,8 +255,8 @@ static uint64_t sifive_plic_read(void *opaque, hwaddr addr, unsigned size)
plic->addr_config[addrid].hartid,
mode_to_char(plic->addr_config[addrid].mode),
value);
sifive_plic_print_state(plic);
}
sifive_plic_update(plic);
return value;
}
}
@ -280,6 +287,7 @@ static void sifive_plic_write(void *opaque, hwaddr addr, uint64_t value,
qemu_log("plic: write priority: irq=%d priority=%d\n",
irq, plic->source_priority[irq]);
}
sifive_plic_update(plic);
return;
} else if (addr >= plic->pending_base && /* 1 bit per source */
addr < plic->pending_base + (plic->num_sources >> 3))


@ -1,4 +1,4 @@
obj-y += translate.o op_helper.o cpu_helper.o cpu.o csr.o fpu_helper.o gdbstub.o
obj-y += translate.o op_helper.o cpu_helper.o cpu.o csr.o fpu_helper.o vector_helper.o gdbstub.o
obj-$(CONFIG_SOFTMMU) += pmp.o
ifeq ($(CONFIG_SOFTMMU),y)


@ -106,6 +106,11 @@ static void set_priv_version(CPURISCVState *env, int priv_ver)
env->priv_ver = priv_ver;
}
static void set_vext_version(CPURISCVState *env, int vext_ver)
{
env->vext_ver = vext_ver;
}
static void set_feature(CPURISCVState *env, int feature)
{
env->features |= (1ULL << feature);
@ -334,6 +339,7 @@ static void riscv_cpu_realize(DeviceState *dev, Error **errp)
CPURISCVState *env = &cpu->env;
RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(dev);
int priv_version = PRIV_VERSION_1_11_0;
int vext_version = VEXT_VERSION_0_07_1;
target_ulong target_misa = 0;
Error *local_err = NULL;
@ -357,6 +363,7 @@ static void riscv_cpu_realize(DeviceState *dev, Error **errp)
}
set_priv_version(env, priv_version);
set_vext_version(env, vext_version);
if (cpu->cfg.mmu) {
set_feature(env, RISCV_FEATURE_MMU);
@ -423,6 +430,45 @@ static void riscv_cpu_realize(DeviceState *dev, Error **errp)
if (cpu->cfg.ext_h) {
target_misa |= RVH;
}
if (cpu->cfg.ext_v) {
target_misa |= RVV;
if (!is_power_of_2(cpu->cfg.vlen)) {
error_setg(errp,
"Vector extension VLEN must be power of 2");
return;
}
if (cpu->cfg.vlen > RV_VLEN_MAX || cpu->cfg.vlen < 128) {
error_setg(errp,
"Vector extension implementation only supports VLEN "
"in the range [128, %d]", RV_VLEN_MAX);
return;
}
if (!is_power_of_2(cpu->cfg.elen)) {
error_setg(errp,
"Vector extension ELEN must be power of 2");
return;
}
if (cpu->cfg.elen > 64 || cpu->cfg.vlen < 8) {
error_setg(errp,
"Vector extension implementation only supports ELEN "
"in the range [8, 64]");
return;
}
if (cpu->cfg.vext_spec) {
if (!g_strcmp0(cpu->cfg.vext_spec, "v0.7.1")) {
vext_version = VEXT_VERSION_0_07_1;
} else {
error_setg(errp,
"Unsupported vector spec version '%s'",
cpu->cfg.vext_spec);
return;
}
} else {
qemu_log("vector verison is not specified, "
"use the default value v0.7.1\n");
}
set_vext_version(env, vext_version);
}
set_misa(env, RVXLEN | target_misa);
}
@ -462,10 +508,14 @@ static Property riscv_cpu_properties[] = {
DEFINE_PROP_BOOL("u", RISCVCPU, cfg.ext_u, true),
/* This is experimental so mark with 'x-' */
DEFINE_PROP_BOOL("x-h", RISCVCPU, cfg.ext_h, false),
DEFINE_PROP_BOOL("x-v", RISCVCPU, cfg.ext_v, false),
DEFINE_PROP_BOOL("Counters", RISCVCPU, cfg.ext_counters, true),
DEFINE_PROP_BOOL("Zifencei", RISCVCPU, cfg.ext_ifencei, true),
DEFINE_PROP_BOOL("Zicsr", RISCVCPU, cfg.ext_icsr, true),
DEFINE_PROP_STRING("priv_spec", RISCVCPU, cfg.priv_spec),
DEFINE_PROP_STRING("vext_spec", RISCVCPU, cfg.vext_spec),
DEFINE_PROP_UINT16("vlen", RISCVCPU, cfg.vlen, 128),
DEFINE_PROP_UINT16("elen", RISCVCPU, cfg.elen, 64),
DEFINE_PROP_BOOL("mmu", RISCVCPU, cfg.mmu, true),
DEFINE_PROP_BOOL("pmp", RISCVCPU, cfg.pmp, true),
DEFINE_PROP_END_OF_LIST(),


@ -21,6 +21,7 @@
#define RISCV_CPU_H
#include "hw/core/cpu.h"
#include "hw/registerfields.h"
#include "exec/cpu-defs.h"
#include "fpu/softfloat-types.h"
@ -59,6 +60,7 @@
#define RVA RV('A')
#define RVF RV('F')
#define RVD RV('D')
#define RVV RV('V')
#define RVC RV('C')
#define RVS RV('S')
#define RVU RV('U')
@ -77,6 +79,8 @@ enum {
#define PRIV_VERSION_1_10_0 0x00011000
#define PRIV_VERSION_1_11_0 0x00011100
#define VEXT_VERSION_0_07_1 0x00000701
#define TRANSLATE_PMP_FAIL 2
#define TRANSLATE_FAIL 1
#define TRANSLATE_SUCCESS 0
@ -88,9 +92,26 @@ typedef struct CPURISCVState CPURISCVState;
#include "pmp.h"
#define RV_VLEN_MAX 256
FIELD(VTYPE, VLMUL, 0, 2)
FIELD(VTYPE, VSEW, 2, 3)
FIELD(VTYPE, VEDIV, 5, 2)
FIELD(VTYPE, RESERVED, 7, sizeof(target_ulong) * 8 - 9)
FIELD(VTYPE, VILL, sizeof(target_ulong) * 8 - 2, 1)
struct CPURISCVState {
target_ulong gpr[32];
uint64_t fpr[32]; /* assume both F and D extensions */
/* vector coprocessor state. */
uint64_t vreg[32 * RV_VLEN_MAX / 64] QEMU_ALIGNED(16);
target_ulong vxrm;
target_ulong vxsat;
target_ulong vl;
target_ulong vstart;
target_ulong vtype;
target_ulong pc;
target_ulong load_res;
target_ulong load_val;
@ -101,6 +122,7 @@ struct CPURISCVState {
target_ulong guest_phys_fault_addr;
target_ulong priv_ver;
target_ulong vext_ver;
target_ulong misa;
target_ulong misa_mask;
@ -257,12 +279,16 @@ typedef struct RISCVCPU {
bool ext_s;
bool ext_u;
bool ext_h;
bool ext_v;
bool ext_counters;
bool ext_ifencei;
bool ext_icsr;
char *priv_spec;
char *user_spec;
char *vext_spec;
uint16_t vlen;
uint16_t elen;
bool mmu;
bool pmp;
} cfg;
@ -335,19 +361,62 @@ void riscv_cpu_set_fflags(CPURISCVState *env, target_ulong);
#define TB_FLAGS_MMU_MASK 3
#define TB_FLAGS_MSTATUS_FS MSTATUS_FS
static inline void cpu_get_tb_cpu_state(CPURISCVState *env, target_ulong *pc,
target_ulong *cs_base, uint32_t *flags)
typedef CPURISCVState CPUArchState;
typedef RISCVCPU ArchCPU;
#include "exec/cpu-all.h"
FIELD(TB_FLAGS, VL_EQ_VLMAX, 2, 1)
FIELD(TB_FLAGS, LMUL, 3, 2)
FIELD(TB_FLAGS, SEW, 5, 3)
FIELD(TB_FLAGS, VILL, 8, 1)
/*
* A simplification for VLMAX
* = (1 << LMUL) * VLEN / (8 * (1 << SEW))
* = (VLEN << LMUL) / (8 << SEW)
* = (VLEN << LMUL) >> (SEW + 3)
* = VLEN >> (SEW + 3 - LMUL)
*/
static inline uint32_t vext_get_vlmax(RISCVCPU *cpu, target_ulong vtype)
{
uint8_t sew, lmul;
sew = FIELD_EX64(vtype, VTYPE, VSEW);
lmul = FIELD_EX64(vtype, VTYPE, VLMUL);
return cpu->cfg.vlen >> (sew + 3 - lmul);
}
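
As a quick check of the simplification in the comment above (an editor's
worked example, not part of the patch), take VLEN = 128 with a SEW encoding
of 2 (32-bit elements) and an LMUL encoding of 1 (a group of two registers):

    (1 << LMUL) * VLEN / (8 * (1 << SEW)) = 2 * 128 / 32        = 8
    VLEN >> (SEW + 3 - LMUL)              = 128 >> (2 + 3 - 1)  = 8

so vext_get_vlmax reports eight 32-bit elements for that vtype, as expected
for two 128-bit registers grouped together.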
static inline void cpu_get_tb_cpu_state(CPURISCVState *env, target_ulong *pc,
target_ulong *cs_base, uint32_t *pflags)
{
uint32_t flags = 0;
*pc = env->pc;
*cs_base = 0;
if (riscv_has_ext(env, RVV)) {
uint32_t vlmax = vext_get_vlmax(env_archcpu(env), env->vtype);
bool vl_eq_vlmax = (env->vstart == 0) && (vlmax == env->vl);
flags = FIELD_DP32(flags, TB_FLAGS, VILL,
FIELD_EX64(env->vtype, VTYPE, VILL));
flags = FIELD_DP32(flags, TB_FLAGS, SEW,
FIELD_EX64(env->vtype, VTYPE, VSEW));
flags = FIELD_DP32(flags, TB_FLAGS, LMUL,
FIELD_EX64(env->vtype, VTYPE, VLMUL));
flags = FIELD_DP32(flags, TB_FLAGS, VL_EQ_VLMAX, vl_eq_vlmax);
} else {
flags = FIELD_DP32(flags, TB_FLAGS, VILL, 1);
}
#ifdef CONFIG_USER_ONLY
*flags = TB_FLAGS_MSTATUS_FS;
flags |= TB_FLAGS_MSTATUS_FS;
#else
*flags = cpu_mmu_index(env, 0);
flags |= cpu_mmu_index(env, 0);
if (riscv_cpu_fp_enabled(env)) {
*flags |= env->mstatus & MSTATUS_FS;
flags |= env->mstatus & MSTATUS_FS;
}
#endif
*pflags = flags;
}
int riscv_csrrw(CPURISCVState *env, int csrno, target_ulong *ret_value,
@ -388,9 +457,4 @@ void riscv_set_csr_ops(int csrno, riscv_csr_operations *ops);
void riscv_cpu_register_gdb_regs_for_features(CPUState *cs);
typedef CPURISCVState CPUArchState;
typedef RISCVCPU ArchCPU;
#include "exec/cpu-all.h"
#endif /* RISCV_CPU_H */


@ -29,6 +29,14 @@
#define FSR_NXA (FPEXC_NX << FSR_AEXC_SHIFT)
#define FSR_AEXC (FSR_NVA | FSR_OFA | FSR_UFA | FSR_DZA | FSR_NXA)
/* Vector Fixed-Point round model */
#define FSR_VXRM_SHIFT 9
#define FSR_VXRM (0x3 << FSR_VXRM_SHIFT)
/* Vector Fixed-Point saturation flag */
#define FSR_VXSAT_SHIFT 8
#define FSR_VXSAT (0x1 << FSR_VXSAT_SHIFT)
/* Control and Status Registers */
/* User Trap Setup */
@ -48,6 +56,13 @@
#define CSR_FRM 0x002
#define CSR_FCSR 0x003
/* User Vector CSRs */
#define CSR_VSTART 0x008
#define CSR_VXSAT 0x009
#define CSR_VXRM 0x00a
#define CSR_VL 0xc20
#define CSR_VTYPE 0xc21
/* User Timers and Counters */
#define CSR_CYCLE 0xc00
#define CSR_TIME 0xc01


@ -46,6 +46,10 @@ void riscv_set_csr_ops(int csrno, riscv_csr_operations *ops)
static int fs(CPURISCVState *env, int csrno)
{
#if !defined(CONFIG_USER_ONLY)
/* loose check condition for fcsr in vector extension */
if ((csrno == CSR_FCSR) && (env->misa & RVV)) {
return 0;
}
if (!env->debugger && !riscv_cpu_fp_enabled(env)) {
return -1;
}
@ -53,6 +57,14 @@ static int fs(CPURISCVState *env, int csrno)
return 0;
}
static int vs(CPURISCVState *env, int csrno)
{
if (env->misa & RVV) {
return 0;
}
return -1;
}
static int ctr(CPURISCVState *env, int csrno)
{
#if !defined(CONFIG_USER_ONLY)
@ -154,6 +166,10 @@ static int read_fcsr(CPURISCVState *env, int csrno, target_ulong *val)
#endif
*val = (riscv_cpu_get_fflags(env) << FSR_AEXC_SHIFT)
| (env->frm << FSR_RD_SHIFT);
if (vs(env, csrno) >= 0) {
*val |= (env->vxrm << FSR_VXRM_SHIFT)
| (env->vxsat << FSR_VXSAT_SHIFT);
}
return 0;
}
@ -166,10 +182,62 @@ static int write_fcsr(CPURISCVState *env, int csrno, target_ulong val)
env->mstatus |= MSTATUS_FS;
#endif
env->frm = (val & FSR_RD) >> FSR_RD_SHIFT;
if (vs(env, csrno) >= 0) {
env->vxrm = (val & FSR_VXRM) >> FSR_VXRM_SHIFT;
env->vxsat = (val & FSR_VXSAT) >> FSR_VXSAT_SHIFT;
}
riscv_cpu_set_fflags(env, (val & FSR_AEXC) >> FSR_AEXC_SHIFT);
return 0;
}
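
Taken together, fcsr now aliases the vector fixed-point state alongside the
floating-point fields. Assuming the pre-existing shifts not shown in this
hunk (FSR_AEXC_SHIFT = 0 for the accrued exception flags and FSR_RD_SHIFT = 5
for the rounding mode), a read of fcsr on a vector-enabled hart packs the
fields roughly like this illustrative layout, which is not text from the
patch:

    bits [4:0]   fflags  (accrued exceptions, FSR_AEXC)
    bits [7:5]   frm     (dynamic rounding mode, FSR_RD)
    bit  [8]     vxsat   (vector fixed-point saturation, FSR_VXSAT_SHIFT = 8)
    bits [10:9]  vxrm    (vector fixed-point rounding mode, FSR_VXRM_SHIFT = 9)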
static int read_vtype(CPURISCVState *env, int csrno, target_ulong *val)
{
*val = env->vtype;
return 0;
}
static int read_vl(CPURISCVState *env, int csrno, target_ulong *val)
{
*val = env->vl;
return 0;
}
static int read_vxrm(CPURISCVState *env, int csrno, target_ulong *val)
{
*val = env->vxrm;
return 0;
}
static int write_vxrm(CPURISCVState *env, int csrno, target_ulong val)
{
env->vxrm = val;
return 0;
}
static int read_vxsat(CPURISCVState *env, int csrno, target_ulong *val)
{
*val = env->vxsat;
return 0;
}
static int write_vxsat(CPURISCVState *env, int csrno, target_ulong val)
{
env->vxsat = val;
return 0;
}
static int read_vstart(CPURISCVState *env, int csrno, target_ulong *val)
{
*val = env->vstart;
return 0;
}
static int write_vstart(CPURISCVState *env, int csrno, target_ulong val)
{
env->vstart = val;
return 0;
}
/* User Timers and Counters */
static int read_instret(CPURISCVState *env, int csrno, target_ulong *val)
{
@ -1183,7 +1251,12 @@ static riscv_csr_operations csr_ops[CSR_TABLE_SIZE] = {
[CSR_FFLAGS] = { fs, read_fflags, write_fflags },
[CSR_FRM] = { fs, read_frm, write_frm },
[CSR_FCSR] = { fs, read_fcsr, write_fcsr },
/* Vector CSRs */
[CSR_VSTART] = { vs, read_vstart, write_vstart },
[CSR_VXSAT] = { vs, read_vxsat, write_vxsat },
[CSR_VXRM] = { vs, read_vxrm, write_vxrm },
[CSR_VL] = { vs, read_vl },
[CSR_VTYPE] = { vs, read_vtype },
/* User Timers and Counters */
[CSR_CYCLE] = { ctr, read_instret },
[CSR_INSTRET] = { ctr, read_instret },


@ -22,6 +22,7 @@
#include "exec/exec-all.h"
#include "exec/helper-proto.h"
#include "fpu/softfloat.h"
#include "internals.h"
target_ulong riscv_cpu_get_fflags(CPURISCVState *env)
{
@ -230,21 +231,7 @@ uint64_t helper_fcvt_s_lu(CPURISCVState *env, uint64_t rs1)
target_ulong helper_fclass_s(uint64_t frs1)
{
float32 f = frs1;
bool sign = float32_is_neg(f);
if (float32_is_infinity(f)) {
return sign ? 1 << 0 : 1 << 7;
} else if (float32_is_zero(f)) {
return sign ? 1 << 3 : 1 << 4;
} else if (float32_is_zero_or_denormal(f)) {
return sign ? 1 << 2 : 1 << 5;
} else if (float32_is_any_nan(f)) {
float_status s = { }; /* for snan_bit_is_one */
return float32_is_quiet_nan(f, &s) ? 1 << 9 : 1 << 8;
} else {
return sign ? 1 << 1 : 1 << 6;
}
return fclass_s(frs1);
}
uint64_t helper_fadd_d(CPURISCVState *env, uint64_t frs1, uint64_t frs2)
@ -353,19 +340,5 @@ uint64_t helper_fcvt_d_lu(CPURISCVState *env, uint64_t rs1)
target_ulong helper_fclass_d(uint64_t frs1)
{
float64 f = frs1;
bool sign = float64_is_neg(f);
if (float64_is_infinity(f)) {
return sign ? 1 << 0 : 1 << 7;
} else if (float64_is_zero(f)) {
return sign ? 1 << 3 : 1 << 4;
} else if (float64_is_zero_or_denormal(f)) {
return sign ? 1 << 2 : 1 << 5;
} else if (float64_is_any_nan(f)) {
float_status s = { }; /* for snan_bit_is_one */
return float64_is_quiet_nan(f, &s) ? 1 << 9 : 1 << 8;
} else {
return sign ? 1 << 1 : 1 << 6;
}
return fclass_d(frs1);
}

File diff suppressed because it is too large.


@ -57,6 +57,17 @@ amomax_d 10100 . . ..... ..... 011 ..... 0101111 @atom_st
amominu_d 11000 . . ..... ..... 011 ..... 0101111 @atom_st
amomaxu_d 11100 . . ..... ..... 011 ..... 0101111 @atom_st
#*** Vector AMO operations (in addition to Zvamo) ***
vamoswapd_v 00001 . . ..... ..... 111 ..... 0101111 @r_wdvm
vamoaddd_v 00000 . . ..... ..... 111 ..... 0101111 @r_wdvm
vamoxord_v 00100 . . ..... ..... 111 ..... 0101111 @r_wdvm
vamoandd_v 01100 . . ..... ..... 111 ..... 0101111 @r_wdvm
vamoord_v 01000 . . ..... ..... 111 ..... 0101111 @r_wdvm
vamomind_v 10000 . . ..... ..... 111 ..... 0101111 @r_wdvm
vamomaxd_v 10100 . . ..... ..... 111 ..... 0101111 @r_wdvm
vamominud_v 11000 . . ..... ..... 111 ..... 0101111 @r_wdvm
vamomaxud_v 11100 . . ..... ..... 111 ..... 0101111 @r_wdvm
# *** RV64F Standard Extension (in addition to RV32F) ***
fcvt_l_s 1100000 00010 ..... ... ..... 1010011 @r2_rm
fcvt_lu_s 1100000 00011 ..... ... ..... 1010011 @r2_rm


@ -25,6 +25,7 @@
%sh10 20:10
%csr 20:12
%rm 12:3
%nf 29:3 !function=ex_plus_1
# immediates:
%imm_i 20:s12
@ -43,6 +44,11 @@
&u imm rd
&shift shamt rs1 rd
&atomic aq rl rs2 rs1 rd
&rmrr vm rd rs1 rs2
&rmr vm rd rs2
&rwdvm vm wd rd rs1 rs2
&r2nfvm vm rd rs1 nf
&rnfvm vm rd rs1 rs2 nf
# Formats 32:
@r ....... ..... ..... ... ..... ....... &r %rs2 %rs1 %rd
@ -62,6 +68,16 @@
@r_rm ....... ..... ..... ... ..... ....... %rs2 %rs1 %rm %rd
@r2_rm ....... ..... ..... ... ..... ....... %rs1 %rm %rd
@r2 ....... ..... ..... ... ..... ....... %rs1 %rd
@r2_nfvm ... ... vm:1 ..... ..... ... ..... ....... &r2nfvm %nf %rs1 %rd
@r2_vm ...... vm:1 ..... ..... ... ..... ....... &rmr %rs2 %rd
@r1_vm ...... vm:1 ..... ..... ... ..... ....... %rd
@r_nfvm ... ... vm:1 ..... ..... ... ..... ....... &rnfvm %nf %rs2 %rs1 %rd
@r2rd ....... ..... ..... ... ..... ....... %rs2 %rd
@r_vm ...... vm:1 ..... ..... ... ..... ....... &rmrr %rs2 %rs1 %rd
@r_vm_1 ...... . ..... ..... ... ..... ....... &rmrr vm=1 %rs2 %rs1 %rd
@r_vm_0 ...... . ..... ..... ... ..... ....... &rmrr vm=0 %rs2 %rs1 %rd
@r_wdvm ..... wd:1 vm:1 ..... ..... ... ..... ....... &rwdvm %rs2 %rs1 %rd
@r2_zimm . zimm:11 ..... ... ..... ....... %rs1 %rd
@hfence_gvma ....... ..... ..... ... ..... ....... %rs2 %rs1
@hfence_vvma ....... ..... ..... ... ..... ....... %rs2 %rs1
@ -209,3 +225,359 @@ fcvt_d_wu 1101001 00001 ..... ... ..... 1010011 @r2_rm
# *** RV32H Base Instruction Set ***
hfence_gvma 0110001 ..... ..... 000 00000 1110011 @hfence_gvma
hfence_vvma 0010001 ..... ..... 000 00000 1110011 @hfence_vvma
# *** RV32V Extension ***
# *** Vector loads and stores are encoded within LOADFP/STORE-FP ***
vlb_v ... 100 . 00000 ..... 000 ..... 0000111 @r2_nfvm
vlh_v ... 100 . 00000 ..... 101 ..... 0000111 @r2_nfvm
vlw_v ... 100 . 00000 ..... 110 ..... 0000111 @r2_nfvm
vle_v ... 000 . 00000 ..... 111 ..... 0000111 @r2_nfvm
vlbu_v ... 000 . 00000 ..... 000 ..... 0000111 @r2_nfvm
vlhu_v ... 000 . 00000 ..... 101 ..... 0000111 @r2_nfvm
vlwu_v ... 000 . 00000 ..... 110 ..... 0000111 @r2_nfvm
vlbff_v ... 100 . 10000 ..... 000 ..... 0000111 @r2_nfvm
vlhff_v ... 100 . 10000 ..... 101 ..... 0000111 @r2_nfvm
vlwff_v ... 100 . 10000 ..... 110 ..... 0000111 @r2_nfvm
vleff_v ... 000 . 10000 ..... 111 ..... 0000111 @r2_nfvm
vlbuff_v ... 000 . 10000 ..... 000 ..... 0000111 @r2_nfvm
vlhuff_v ... 000 . 10000 ..... 101 ..... 0000111 @r2_nfvm
vlwuff_v ... 000 . 10000 ..... 110 ..... 0000111 @r2_nfvm
vsb_v ... 000 . 00000 ..... 000 ..... 0100111 @r2_nfvm
vsh_v ... 000 . 00000 ..... 101 ..... 0100111 @r2_nfvm
vsw_v ... 000 . 00000 ..... 110 ..... 0100111 @r2_nfvm
vse_v ... 000 . 00000 ..... 111 ..... 0100111 @r2_nfvm
vlsb_v ... 110 . ..... ..... 000 ..... 0000111 @r_nfvm
vlsh_v ... 110 . ..... ..... 101 ..... 0000111 @r_nfvm
vlsw_v ... 110 . ..... ..... 110 ..... 0000111 @r_nfvm
vlse_v ... 010 . ..... ..... 111 ..... 0000111 @r_nfvm
vlsbu_v ... 010 . ..... ..... 000 ..... 0000111 @r_nfvm
vlshu_v ... 010 . ..... ..... 101 ..... 0000111 @r_nfvm
vlswu_v ... 010 . ..... ..... 110 ..... 0000111 @r_nfvm
vssb_v ... 010 . ..... ..... 000 ..... 0100111 @r_nfvm
vssh_v ... 010 . ..... ..... 101 ..... 0100111 @r_nfvm
vssw_v ... 010 . ..... ..... 110 ..... 0100111 @r_nfvm
vsse_v ... 010 . ..... ..... 111 ..... 0100111 @r_nfvm
vlxb_v ... 111 . ..... ..... 000 ..... 0000111 @r_nfvm
vlxh_v ... 111 . ..... ..... 101 ..... 0000111 @r_nfvm
vlxw_v ... 111 . ..... ..... 110 ..... 0000111 @r_nfvm
vlxe_v ... 011 . ..... ..... 111 ..... 0000111 @r_nfvm
vlxbu_v ... 011 . ..... ..... 000 ..... 0000111 @r_nfvm
vlxhu_v ... 011 . ..... ..... 101 ..... 0000111 @r_nfvm
vlxwu_v ... 011 . ..... ..... 110 ..... 0000111 @r_nfvm
# Vector ordered-indexed and unordered-indexed store insns.
vsxb_v ... -11 . ..... ..... 000 ..... 0100111 @r_nfvm
vsxh_v ... -11 . ..... ..... 101 ..... 0100111 @r_nfvm
vsxw_v ... -11 . ..... ..... 110 ..... 0100111 @r_nfvm
vsxe_v ... -11 . ..... ..... 111 ..... 0100111 @r_nfvm
#*** Vector AMO operations are encoded under the standard AMO major opcode ***
vamoswapw_v 00001 . . ..... ..... 110 ..... 0101111 @r_wdvm
vamoaddw_v 00000 . . ..... ..... 110 ..... 0101111 @r_wdvm
vamoxorw_v 00100 . . ..... ..... 110 ..... 0101111 @r_wdvm
vamoandw_v 01100 . . ..... ..... 110 ..... 0101111 @r_wdvm
vamoorw_v 01000 . . ..... ..... 110 ..... 0101111 @r_wdvm
vamominw_v 10000 . . ..... ..... 110 ..... 0101111 @r_wdvm
vamomaxw_v 10100 . . ..... ..... 110 ..... 0101111 @r_wdvm
vamominuw_v 11000 . . ..... ..... 110 ..... 0101111 @r_wdvm
vamomaxuw_v 11100 . . ..... ..... 110 ..... 0101111 @r_wdvm
# *** new major opcode OP-V ***
vadd_vv 000000 . ..... ..... 000 ..... 1010111 @r_vm
vadd_vx 000000 . ..... ..... 100 ..... 1010111 @r_vm
vadd_vi 000000 . ..... ..... 011 ..... 1010111 @r_vm
vsub_vv 000010 . ..... ..... 000 ..... 1010111 @r_vm
vsub_vx 000010 . ..... ..... 100 ..... 1010111 @r_vm
vrsub_vx 000011 . ..... ..... 100 ..... 1010111 @r_vm
vrsub_vi 000011 . ..... ..... 011 ..... 1010111 @r_vm
vwaddu_vv 110000 . ..... ..... 010 ..... 1010111 @r_vm
vwaddu_vx 110000 . ..... ..... 110 ..... 1010111 @r_vm
vwadd_vv 110001 . ..... ..... 010 ..... 1010111 @r_vm
vwadd_vx 110001 . ..... ..... 110 ..... 1010111 @r_vm
vwsubu_vv 110010 . ..... ..... 010 ..... 1010111 @r_vm
vwsubu_vx 110010 . ..... ..... 110 ..... 1010111 @r_vm
vwsub_vv 110011 . ..... ..... 010 ..... 1010111 @r_vm
vwsub_vx 110011 . ..... ..... 110 ..... 1010111 @r_vm
vwaddu_wv 110100 . ..... ..... 010 ..... 1010111 @r_vm
vwaddu_wx 110100 . ..... ..... 110 ..... 1010111 @r_vm
vwadd_wv 110101 . ..... ..... 010 ..... 1010111 @r_vm
vwadd_wx 110101 . ..... ..... 110 ..... 1010111 @r_vm
vwsubu_wv 110110 . ..... ..... 010 ..... 1010111 @r_vm
vwsubu_wx 110110 . ..... ..... 110 ..... 1010111 @r_vm
vwsub_wv 110111 . ..... ..... 010 ..... 1010111 @r_vm
vwsub_wx 110111 . ..... ..... 110 ..... 1010111 @r_vm
vadc_vvm 010000 1 ..... ..... 000 ..... 1010111 @r_vm_1
vadc_vxm 010000 1 ..... ..... 100 ..... 1010111 @r_vm_1
vadc_vim 010000 1 ..... ..... 011 ..... 1010111 @r_vm_1
vmadc_vvm 010001 1 ..... ..... 000 ..... 1010111 @r_vm_1
vmadc_vxm 010001 1 ..... ..... 100 ..... 1010111 @r_vm_1
vmadc_vim 010001 1 ..... ..... 011 ..... 1010111 @r_vm_1
vsbc_vvm 010010 1 ..... ..... 000 ..... 1010111 @r_vm_1
vsbc_vxm 010010 1 ..... ..... 100 ..... 1010111 @r_vm_1
vmsbc_vvm 010011 1 ..... ..... 000 ..... 1010111 @r_vm_1
vmsbc_vxm 010011 1 ..... ..... 100 ..... 1010111 @r_vm_1
vand_vv 001001 . ..... ..... 000 ..... 1010111 @r_vm
vand_vx 001001 . ..... ..... 100 ..... 1010111 @r_vm
vand_vi 001001 . ..... ..... 011 ..... 1010111 @r_vm
vor_vv 001010 . ..... ..... 000 ..... 1010111 @r_vm
vor_vx 001010 . ..... ..... 100 ..... 1010111 @r_vm
vor_vi 001010 . ..... ..... 011 ..... 1010111 @r_vm
vxor_vv 001011 . ..... ..... 000 ..... 1010111 @r_vm
vxor_vx 001011 . ..... ..... 100 ..... 1010111 @r_vm
vxor_vi 001011 . ..... ..... 011 ..... 1010111 @r_vm
vsll_vv 100101 . ..... ..... 000 ..... 1010111 @r_vm
vsll_vx 100101 . ..... ..... 100 ..... 1010111 @r_vm
vsll_vi 100101 . ..... ..... 011 ..... 1010111 @r_vm
vsrl_vv 101000 . ..... ..... 000 ..... 1010111 @r_vm
vsrl_vx 101000 . ..... ..... 100 ..... 1010111 @r_vm
vsrl_vi 101000 . ..... ..... 011 ..... 1010111 @r_vm
vsra_vv 101001 . ..... ..... 000 ..... 1010111 @r_vm
vsra_vx 101001 . ..... ..... 100 ..... 1010111 @r_vm
vsra_vi 101001 . ..... ..... 011 ..... 1010111 @r_vm
vnsrl_vv 101100 . ..... ..... 000 ..... 1010111 @r_vm
vnsrl_vx 101100 . ..... ..... 100 ..... 1010111 @r_vm
vnsrl_vi 101100 . ..... ..... 011 ..... 1010111 @r_vm
vnsra_vv 101101 . ..... ..... 000 ..... 1010111 @r_vm
vnsra_vx 101101 . ..... ..... 100 ..... 1010111 @r_vm
vnsra_vi 101101 . ..... ..... 011 ..... 1010111 @r_vm
vmseq_vv 011000 . ..... ..... 000 ..... 1010111 @r_vm
vmseq_vx 011000 . ..... ..... 100 ..... 1010111 @r_vm
vmseq_vi 011000 . ..... ..... 011 ..... 1010111 @r_vm
vmsne_vv 011001 . ..... ..... 000 ..... 1010111 @r_vm
vmsne_vx 011001 . ..... ..... 100 ..... 1010111 @r_vm
vmsne_vi 011001 . ..... ..... 011 ..... 1010111 @r_vm
vmsltu_vv 011010 . ..... ..... 000 ..... 1010111 @r_vm
vmsltu_vx 011010 . ..... ..... 100 ..... 1010111 @r_vm
vmslt_vv 011011 . ..... ..... 000 ..... 1010111 @r_vm
vmslt_vx 011011 . ..... ..... 100 ..... 1010111 @r_vm
vmsleu_vv 011100 . ..... ..... 000 ..... 1010111 @r_vm
vmsleu_vx 011100 . ..... ..... 100 ..... 1010111 @r_vm
vmsleu_vi 011100 . ..... ..... 011 ..... 1010111 @r_vm
vmsle_vv 011101 . ..... ..... 000 ..... 1010111 @r_vm
vmsle_vx 011101 . ..... ..... 100 ..... 1010111 @r_vm
vmsle_vi 011101 . ..... ..... 011 ..... 1010111 @r_vm
vmsgtu_vx 011110 . ..... ..... 100 ..... 1010111 @r_vm
vmsgtu_vi 011110 . ..... ..... 011 ..... 1010111 @r_vm
vmsgt_vx 011111 . ..... ..... 100 ..... 1010111 @r_vm
vmsgt_vi 011111 . ..... ..... 011 ..... 1010111 @r_vm
vminu_vv 000100 . ..... ..... 000 ..... 1010111 @r_vm
vminu_vx 000100 . ..... ..... 100 ..... 1010111 @r_vm
vmin_vv 000101 . ..... ..... 000 ..... 1010111 @r_vm
vmin_vx 000101 . ..... ..... 100 ..... 1010111 @r_vm
vmaxu_vv 000110 . ..... ..... 000 ..... 1010111 @r_vm
vmaxu_vx 000110 . ..... ..... 100 ..... 1010111 @r_vm
vmax_vv 000111 . ..... ..... 000 ..... 1010111 @r_vm
vmax_vx 000111 . ..... ..... 100 ..... 1010111 @r_vm
vmul_vv 100101 . ..... ..... 010 ..... 1010111 @r_vm
vmul_vx 100101 . ..... ..... 110 ..... 1010111 @r_vm
vmulh_vv 100111 . ..... ..... 010 ..... 1010111 @r_vm
vmulh_vx 100111 . ..... ..... 110 ..... 1010111 @r_vm
vmulhu_vv 100100 . ..... ..... 010 ..... 1010111 @r_vm
vmulhu_vx 100100 . ..... ..... 110 ..... 1010111 @r_vm
vmulhsu_vv 100110 . ..... ..... 010 ..... 1010111 @r_vm
vmulhsu_vx 100110 . ..... ..... 110 ..... 1010111 @r_vm
vdivu_vv 100000 . ..... ..... 010 ..... 1010111 @r_vm
vdivu_vx 100000 . ..... ..... 110 ..... 1010111 @r_vm
vdiv_vv 100001 . ..... ..... 010 ..... 1010111 @r_vm
vdiv_vx 100001 . ..... ..... 110 ..... 1010111 @r_vm
vremu_vv 100010 . ..... ..... 010 ..... 1010111 @r_vm
vremu_vx 100010 . ..... ..... 110 ..... 1010111 @r_vm
vrem_vv 100011 . ..... ..... 010 ..... 1010111 @r_vm
vrem_vx 100011 . ..... ..... 110 ..... 1010111 @r_vm
vwmulu_vv 111000 . ..... ..... 010 ..... 1010111 @r_vm
vwmulu_vx 111000 . ..... ..... 110 ..... 1010111 @r_vm
vwmulsu_vv 111010 . ..... ..... 010 ..... 1010111 @r_vm
vwmulsu_vx 111010 . ..... ..... 110 ..... 1010111 @r_vm
vwmul_vv 111011 . ..... ..... 010 ..... 1010111 @r_vm
vwmul_vx 111011 . ..... ..... 110 ..... 1010111 @r_vm
vmacc_vv 101101 . ..... ..... 010 ..... 1010111 @r_vm
vmacc_vx 101101 . ..... ..... 110 ..... 1010111 @r_vm
vnmsac_vv 101111 . ..... ..... 010 ..... 1010111 @r_vm
vnmsac_vx 101111 . ..... ..... 110 ..... 1010111 @r_vm
vmadd_vv 101001 . ..... ..... 010 ..... 1010111 @r_vm
vmadd_vx 101001 . ..... ..... 110 ..... 1010111 @r_vm
vnmsub_vv 101011 . ..... ..... 010 ..... 1010111 @r_vm
vnmsub_vx 101011 . ..... ..... 110 ..... 1010111 @r_vm
vwmaccu_vv 111100 . ..... ..... 010 ..... 1010111 @r_vm
vwmaccu_vx 111100 . ..... ..... 110 ..... 1010111 @r_vm
vwmacc_vv 111101 . ..... ..... 010 ..... 1010111 @r_vm
vwmacc_vx 111101 . ..... ..... 110 ..... 1010111 @r_vm
vwmaccsu_vv 111110 . ..... ..... 010 ..... 1010111 @r_vm
vwmaccsu_vx 111110 . ..... ..... 110 ..... 1010111 @r_vm
vwmaccus_vx 111111 . ..... ..... 110 ..... 1010111 @r_vm
vmv_v_v 010111 1 00000 ..... 000 ..... 1010111 @r2
vmv_v_x 010111 1 00000 ..... 100 ..... 1010111 @r2
vmv_v_i 010111 1 00000 ..... 011 ..... 1010111 @r2
vmerge_vvm 010111 0 ..... ..... 000 ..... 1010111 @r_vm_0
vmerge_vxm 010111 0 ..... ..... 100 ..... 1010111 @r_vm_0
vmerge_vim 010111 0 ..... ..... 011 ..... 1010111 @r_vm_0
vsaddu_vv 100000 . ..... ..... 000 ..... 1010111 @r_vm
vsaddu_vx 100000 . ..... ..... 100 ..... 1010111 @r_vm
vsaddu_vi 100000 . ..... ..... 011 ..... 1010111 @r_vm
vsadd_vv 100001 . ..... ..... 000 ..... 1010111 @r_vm
vsadd_vx 100001 . ..... ..... 100 ..... 1010111 @r_vm
vsadd_vi 100001 . ..... ..... 011 ..... 1010111 @r_vm
vssubu_vv 100010 . ..... ..... 000 ..... 1010111 @r_vm
vssubu_vx 100010 . ..... ..... 100 ..... 1010111 @r_vm
vssub_vv 100011 . ..... ..... 000 ..... 1010111 @r_vm
vssub_vx 100011 . ..... ..... 100 ..... 1010111 @r_vm
vaadd_vv 100100 . ..... ..... 000 ..... 1010111 @r_vm
vaadd_vx 100100 . ..... ..... 100 ..... 1010111 @r_vm
vaadd_vi 100100 . ..... ..... 011 ..... 1010111 @r_vm
vasub_vv 100110 . ..... ..... 000 ..... 1010111 @r_vm
vasub_vx 100110 . ..... ..... 100 ..... 1010111 @r_vm
vsmul_vv 100111 . ..... ..... 000 ..... 1010111 @r_vm
vsmul_vx 100111 . ..... ..... 100 ..... 1010111 @r_vm
vwsmaccu_vv 111100 . ..... ..... 000 ..... 1010111 @r_vm
vwsmaccu_vx 111100 . ..... ..... 100 ..... 1010111 @r_vm
vwsmacc_vv 111101 . ..... ..... 000 ..... 1010111 @r_vm
vwsmacc_vx 111101 . ..... ..... 100 ..... 1010111 @r_vm
vwsmaccsu_vv 111110 . ..... ..... 000 ..... 1010111 @r_vm
vwsmaccsu_vx 111110 . ..... ..... 100 ..... 1010111 @r_vm
vwsmaccus_vx 111111 . ..... ..... 100 ..... 1010111 @r_vm
vssrl_vv 101010 . ..... ..... 000 ..... 1010111 @r_vm
vssrl_vx 101010 . ..... ..... 100 ..... 1010111 @r_vm
vssrl_vi 101010 . ..... ..... 011 ..... 1010111 @r_vm
vssra_vv 101011 . ..... ..... 000 ..... 1010111 @r_vm
vssra_vx 101011 . ..... ..... 100 ..... 1010111 @r_vm
vssra_vi 101011 . ..... ..... 011 ..... 1010111 @r_vm
vnclipu_vv 101110 . ..... ..... 000 ..... 1010111 @r_vm
vnclipu_vx 101110 . ..... ..... 100 ..... 1010111 @r_vm
vnclipu_vi 101110 . ..... ..... 011 ..... 1010111 @r_vm
vnclip_vv 101111 . ..... ..... 000 ..... 1010111 @r_vm
vnclip_vx 101111 . ..... ..... 100 ..... 1010111 @r_vm
vnclip_vi 101111 . ..... ..... 011 ..... 1010111 @r_vm
vfadd_vv 000000 . ..... ..... 001 ..... 1010111 @r_vm
vfadd_vf 000000 . ..... ..... 101 ..... 1010111 @r_vm
vfsub_vv 000010 . ..... ..... 001 ..... 1010111 @r_vm
vfsub_vf 000010 . ..... ..... 101 ..... 1010111 @r_vm
vfrsub_vf 100111 . ..... ..... 101 ..... 1010111 @r_vm
vfwadd_vv 110000 . ..... ..... 001 ..... 1010111 @r_vm
vfwadd_vf 110000 . ..... ..... 101 ..... 1010111 @r_vm
vfwadd_wv 110100 . ..... ..... 001 ..... 1010111 @r_vm
vfwadd_wf 110100 . ..... ..... 101 ..... 1010111 @r_vm
vfwsub_vv 110010 . ..... ..... 001 ..... 1010111 @r_vm
vfwsub_vf 110010 . ..... ..... 101 ..... 1010111 @r_vm
vfwsub_wv 110110 . ..... ..... 001 ..... 1010111 @r_vm
vfwsub_wf 110110 . ..... ..... 101 ..... 1010111 @r_vm
vfmul_vv 100100 . ..... ..... 001 ..... 1010111 @r_vm
vfmul_vf 100100 . ..... ..... 101 ..... 1010111 @r_vm
vfdiv_vv 100000 . ..... ..... 001 ..... 1010111 @r_vm
vfdiv_vf 100000 . ..... ..... 101 ..... 1010111 @r_vm
vfrdiv_vf 100001 . ..... ..... 101 ..... 1010111 @r_vm
vfwmul_vv 111000 . ..... ..... 001 ..... 1010111 @r_vm
vfwmul_vf 111000 . ..... ..... 101 ..... 1010111 @r_vm
vfmacc_vv 101100 . ..... ..... 001 ..... 1010111 @r_vm
vfnmacc_vv 101101 . ..... ..... 001 ..... 1010111 @r_vm
vfnmacc_vf 101101 . ..... ..... 101 ..... 1010111 @r_vm
vfmacc_vf 101100 . ..... ..... 101 ..... 1010111 @r_vm
vfmsac_vv 101110 . ..... ..... 001 ..... 1010111 @r_vm
vfmsac_vf 101110 . ..... ..... 101 ..... 1010111 @r_vm
vfnmsac_vv 101111 . ..... ..... 001 ..... 1010111 @r_vm
vfnmsac_vf 101111 . ..... ..... 101 ..... 1010111 @r_vm
vfmadd_vv 101000 . ..... ..... 001 ..... 1010111 @r_vm
vfmadd_vf 101000 . ..... ..... 101 ..... 1010111 @r_vm
vfnmadd_vv 101001 . ..... ..... 001 ..... 1010111 @r_vm
vfnmadd_vf 101001 . ..... ..... 101 ..... 1010111 @r_vm
vfmsub_vv 101010 . ..... ..... 001 ..... 1010111 @r_vm
vfmsub_vf 101010 . ..... ..... 101 ..... 1010111 @r_vm
vfnmsub_vv 101011 . ..... ..... 001 ..... 1010111 @r_vm
vfnmsub_vf 101011 . ..... ..... 101 ..... 1010111 @r_vm
vfwmacc_vv 111100 . ..... ..... 001 ..... 1010111 @r_vm
vfwmacc_vf 111100 . ..... ..... 101 ..... 1010111 @r_vm
vfwnmacc_vv 111101 . ..... ..... 001 ..... 1010111 @r_vm
vfwnmacc_vf 111101 . ..... ..... 101 ..... 1010111 @r_vm
vfwmsac_vv 111110 . ..... ..... 001 ..... 1010111 @r_vm
vfwmsac_vf 111110 . ..... ..... 101 ..... 1010111 @r_vm
vfwnmsac_vv 111111 . ..... ..... 001 ..... 1010111 @r_vm
vfwnmsac_vf 111111 . ..... ..... 101 ..... 1010111 @r_vm
vfsqrt_v 100011 . ..... 00000 001 ..... 1010111 @r2_vm
vfmin_vv 000100 . ..... ..... 001 ..... 1010111 @r_vm
vfmin_vf 000100 . ..... ..... 101 ..... 1010111 @r_vm
vfmax_vv 000110 . ..... ..... 001 ..... 1010111 @r_vm
vfmax_vf 000110 . ..... ..... 101 ..... 1010111 @r_vm
vfsgnj_vv 001000 . ..... ..... 001 ..... 1010111 @r_vm
vfsgnj_vf 001000 . ..... ..... 101 ..... 1010111 @r_vm
vfsgnjn_vv 001001 . ..... ..... 001 ..... 1010111 @r_vm
vfsgnjn_vf 001001 . ..... ..... 101 ..... 1010111 @r_vm
vfsgnjx_vv 001010 . ..... ..... 001 ..... 1010111 @r_vm
vfsgnjx_vf 001010 . ..... ..... 101 ..... 1010111 @r_vm
vmfeq_vv 011000 . ..... ..... 001 ..... 1010111 @r_vm
vmfeq_vf 011000 . ..... ..... 101 ..... 1010111 @r_vm
vmfne_vv 011100 . ..... ..... 001 ..... 1010111 @r_vm
vmfne_vf 011100 . ..... ..... 101 ..... 1010111 @r_vm
vmflt_vv 011011 . ..... ..... 001 ..... 1010111 @r_vm
vmflt_vf 011011 . ..... ..... 101 ..... 1010111 @r_vm
vmfle_vv 011001 . ..... ..... 001 ..... 1010111 @r_vm
vmfle_vf 011001 . ..... ..... 101 ..... 1010111 @r_vm
vmfgt_vf 011101 . ..... ..... 101 ..... 1010111 @r_vm
vmfge_vf 011111 . ..... ..... 101 ..... 1010111 @r_vm
vmford_vv 011010 . ..... ..... 001 ..... 1010111 @r_vm
vmford_vf 011010 . ..... ..... 101 ..... 1010111 @r_vm
vfclass_v 100011 . ..... 10000 001 ..... 1010111 @r2_vm
vfmerge_vfm 010111 0 ..... ..... 101 ..... 1010111 @r_vm_0
vfmv_v_f 010111 1 00000 ..... 101 ..... 1010111 @r2
vfcvt_xu_f_v 100010 . ..... 00000 001 ..... 1010111 @r2_vm
vfcvt_x_f_v 100010 . ..... 00001 001 ..... 1010111 @r2_vm
vfcvt_f_xu_v 100010 . ..... 00010 001 ..... 1010111 @r2_vm
vfcvt_f_x_v 100010 . ..... 00011 001 ..... 1010111 @r2_vm
vfwcvt_xu_f_v 100010 . ..... 01000 001 ..... 1010111 @r2_vm
vfwcvt_x_f_v 100010 . ..... 01001 001 ..... 1010111 @r2_vm
vfwcvt_f_xu_v 100010 . ..... 01010 001 ..... 1010111 @r2_vm
vfwcvt_f_x_v 100010 . ..... 01011 001 ..... 1010111 @r2_vm
vfwcvt_f_f_v 100010 . ..... 01100 001 ..... 1010111 @r2_vm
vfncvt_xu_f_v 100010 . ..... 10000 001 ..... 1010111 @r2_vm
vfncvt_x_f_v 100010 . ..... 10001 001 ..... 1010111 @r2_vm
vfncvt_f_xu_v 100010 . ..... 10010 001 ..... 1010111 @r2_vm
vfncvt_f_x_v 100010 . ..... 10011 001 ..... 1010111 @r2_vm
vfncvt_f_f_v 100010 . ..... 10100 001 ..... 1010111 @r2_vm
vredsum_vs 000000 . ..... ..... 010 ..... 1010111 @r_vm
vredand_vs 000001 . ..... ..... 010 ..... 1010111 @r_vm
vredor_vs 000010 . ..... ..... 010 ..... 1010111 @r_vm
vredxor_vs 000011 . ..... ..... 010 ..... 1010111 @r_vm
vredminu_vs 000100 . ..... ..... 010 ..... 1010111 @r_vm
vredmin_vs 000101 . ..... ..... 010 ..... 1010111 @r_vm
vredmaxu_vs 000110 . ..... ..... 010 ..... 1010111 @r_vm
vredmax_vs 000111 . ..... ..... 010 ..... 1010111 @r_vm
vwredsumu_vs 110000 . ..... ..... 000 ..... 1010111 @r_vm
vwredsum_vs 110001 . ..... ..... 000 ..... 1010111 @r_vm
# Vector ordered and unordered reduction sum
vfredsum_vs 0000-1 . ..... ..... 001 ..... 1010111 @r_vm
vfredmin_vs 000101 . ..... ..... 001 ..... 1010111 @r_vm
vfredmax_vs 000111 . ..... ..... 001 ..... 1010111 @r_vm
# Vector widening ordered and unordered float reduction sum
vfwredsum_vs 1100-1 . ..... ..... 001 ..... 1010111 @r_vm
vmand_mm 011001 - ..... ..... 010 ..... 1010111 @r
vmnand_mm 011101 - ..... ..... 010 ..... 1010111 @r
vmandnot_mm 011000 - ..... ..... 010 ..... 1010111 @r
vmxor_mm 011011 - ..... ..... 010 ..... 1010111 @r
vmor_mm 011010 - ..... ..... 010 ..... 1010111 @r
vmnor_mm 011110 - ..... ..... 010 ..... 1010111 @r
vmornot_mm 011100 - ..... ..... 010 ..... 1010111 @r
vmxnor_mm 011111 - ..... ..... 010 ..... 1010111 @r
vmpopc_m 010100 . ..... ----- 010 ..... 1010111 @r2_vm
vmfirst_m 010101 . ..... ----- 010 ..... 1010111 @r2_vm
vmsbf_m 010110 . ..... 00001 010 ..... 1010111 @r2_vm
vmsif_m 010110 . ..... 00011 010 ..... 1010111 @r2_vm
vmsof_m 010110 . ..... 00010 010 ..... 1010111 @r2_vm
viota_m 010110 . ..... 10000 010 ..... 1010111 @r2_vm
vid_v 010110 . 00000 10001 010 ..... 1010111 @r1_vm
vext_x_v 001100 1 ..... ..... 010 ..... 1010111 @r
vmv_s_x 001101 1 00000 ..... 110 ..... 1010111 @r2
vfmv_f_s 001100 1 ..... 00000 001 ..... 1010111 @r2rd
vfmv_s_f 001101 1 00000 ..... 101 ..... 1010111 @r2
vslideup_vx 001110 . ..... ..... 100 ..... 1010111 @r_vm
vslideup_vi 001110 . ..... ..... 011 ..... 1010111 @r_vm
vslide1up_vx 001110 . ..... ..... 110 ..... 1010111 @r_vm
vslidedown_vx 001111 . ..... ..... 100 ..... 1010111 @r_vm
vslidedown_vi 001111 . ..... ..... 011 ..... 1010111 @r_vm
vslide1down_vx 001111 . ..... ..... 110 ..... 1010111 @r_vm
vrgather_vv 001100 . ..... ..... 000 ..... 1010111 @r_vm
vrgather_vx 001100 . ..... ..... 100 ..... 1010111 @r_vm
vrgather_vi 001100 . ..... ..... 011 ..... 1010111 @r_vm
vcompress_vm 010111 - ..... ..... 010 ..... 1010111 @r
vsetvli 0 ........... ..... 111 ..... 1010111 @r2_zimm
vsetvl 1000000 ..... ..... 111 ..... 1010111 @r

File diff suppressed because it is too large.

target/riscv/internals.h (new file, 41 lines)

@ -0,0 +1,41 @@
/*
* QEMU RISC-V CPU -- internal functions and types
*
* Copyright (c) 2020 T-Head Semiconductor Co., Ltd. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2 or later, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef RISCV_CPU_INTERNALS_H
#define RISCV_CPU_INTERNALS_H
#include "hw/registerfields.h"
/* share data between vector helpers and decode code */
FIELD(VDATA, MLEN, 0, 8)
FIELD(VDATA, VM, 8, 1)
FIELD(VDATA, LMUL, 9, 2)
FIELD(VDATA, NF, 11, 4)
FIELD(VDATA, WD, 11, 1)
/* float point classify helpers */
target_ulong fclass_h(uint64_t frs1);
target_ulong fclass_s(uint64_t frs1);
target_ulong fclass_d(uint64_t frs1);
#define SEW8 0
#define SEW16 1
#define SEW32 2
#define SEW64 3
#endif


@ -32,7 +32,7 @@
#include "instmap.h"
/* global register indices */
static TCGv cpu_gpr[32], cpu_pc;
static TCGv cpu_gpr[32], cpu_pc, cpu_vl;
static TCGv_i64 cpu_fpr[32]; /* assume F and D extensions */
static TCGv load_res;
static TCGv load_val;
@ -56,6 +56,13 @@ typedef struct DisasContext {
to reset this known value. */
int frm;
bool ext_ifencei;
/* vector extension */
bool vill;
uint8_t lmul;
uint8_t sew;
uint16_t vlen;
uint16_t mlen;
bool vl_eq_vlmax;
} DisasContext;
#ifdef TARGET_RISCV64
@ -542,6 +549,11 @@ static void decode_RV32_64C(DisasContext *ctx, uint16_t opcode)
}
}
static int ex_plus_1(DisasContext *ctx, int nf)
{
return nf + 1;
}
#define EX_SH(amount) \
static int ex_shift_##amount(DisasContext *ctx, int imm) \
{ \
@ -712,6 +724,7 @@ static bool gen_shift(DisasContext *ctx, arg_r *a,
#include "insn_trans/trans_rvf.inc.c"
#include "insn_trans/trans_rvd.inc.c"
#include "insn_trans/trans_rvh.inc.c"
#include "insn_trans/trans_rvv.inc.c"
#include "insn_trans/trans_privileged.inc.c"
/* Include the auto-generated decoder for 16 bit insn */
@ -746,10 +759,11 @@ static void riscv_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
DisasContext *ctx = container_of(dcbase, DisasContext, base);
CPURISCVState *env = cs->env_ptr;
RISCVCPU *cpu = RISCV_CPU(cs);
uint32_t tb_flags = ctx->base.tb->flags;
ctx->pc_succ_insn = ctx->base.pc_first;
ctx->mem_idx = ctx->base.tb->flags & TB_FLAGS_MMU_MASK;
ctx->mstatus_fs = ctx->base.tb->flags & TB_FLAGS_MSTATUS_FS;
ctx->mem_idx = tb_flags & TB_FLAGS_MMU_MASK;
ctx->mstatus_fs = tb_flags & TB_FLAGS_MSTATUS_FS;
ctx->priv_ver = env->priv_ver;
#if !defined(CONFIG_USER_ONLY)
if (riscv_has_ext(env, RVH)) {
@ -773,6 +787,12 @@ static void riscv_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
ctx->misa = env->misa;
ctx->frm = -1; /* unknown rounding mode */
ctx->ext_ifencei = cpu->cfg.ext_ifencei;
ctx->vlen = cpu->cfg.vlen;
ctx->vill = FIELD_EX32(tb_flags, TB_FLAGS, VILL);
ctx->sew = FIELD_EX32(tb_flags, TB_FLAGS, SEW);
ctx->lmul = FIELD_EX32(tb_flags, TB_FLAGS, LMUL);
ctx->mlen = 1 << (ctx->sew + 3 - ctx->lmul);
ctx->vl_eq_vlmax = FIELD_EX32(tb_flags, TB_FLAGS, VL_EQ_VLMAX);
}
static void riscv_tr_tb_start(DisasContextBase *db, CPUState *cpu)
@ -887,6 +907,7 @@ void riscv_translate_init(void)
}
cpu_pc = tcg_global_mem_new(cpu_env, offsetof(CPURISCVState, pc), "pc");
cpu_vl = tcg_global_mem_new(cpu_env, offsetof(CPURISCVState, vl), "vl");
load_res = tcg_global_mem_new(cpu_env, offsetof(CPURISCVState, load_res),
"load_res");
load_val = tcg_global_mem_new(cpu_env, offsetof(CPURISCVState, load_val),

target/riscv/vector_helper.c (new file, 4899 lines)

File diff suppressed because it is too large.