qemu-patch-raspberry4/target/ppc/power8-pmu-regs.c.inc
Commit a6f91249e0 by Daniel Henrique Barboza: target/ppc: PMU: update counters on MMCR1 write
MMCR1 determines the events to be sampled by the PMU. Updating the
counters at every MMCR1 write ensures that we're not sampling more
or fewer events than we should by looking only at MMCR0 and the PMCs.

It is worth noting that both the Book3S PowerPC PMU and the IBM
Power8+ PMU that we're modeling also use MMCRA, MMCR2 and MMCR3 to
control the PMU. These three registers aren't being handled in this
initial implementation, so for now we're controlling all the PMU
aspects using MMCR0, MMCR1 and the PMCs.

Reviewed-by: David Gibson <david@gibson.dropbear.id.au>
Signed-off-by: Daniel Henrique Barboza <danielhb413@gmail.com>
Message-Id: <20211201151734.654994-5-danielhb413@gmail.com>
Signed-off-by: Cédric Le Goater <clg@kaod.org>
2021-12-17 17:57:18 +01:00
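The translation code in this file only emits a call into the MMCR1 store helper (gen_helper_store_mmcr1); the helper body itself lives in target/ppc/power8-pmu.c and is not part of this file. As a rough, hypothetical sketch of the behavior the commit message describes, it could look like the following, where the exact signature and the pmu_update_cycles() bookkeeping routine are assumptions rather than text taken from this file:

/* Hypothetical sketch only: the real helper lives in power8-pmu.c */
void helper_store_mmcr1(CPUPPCState *env, target_ulong value)
{
    /* Fold the counting done under the old MMCR1 setting into the PMCs */
    pmu_update_cycles(env);            /* assumed bookkeeping routine */

    /* Only then let the new event selection take effect */
    env->spr[SPR_POWER_MMCR1] = value;
}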


/*
 * PMU register read/write functions for TCG IBM POWER chips
 *
 * Copyright IBM Corp. 2021
 *
 * Authors:
 *  Daniel Henrique Barboza <danielhb413@gmail.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#if defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY)

/*
 * Checks whether the Group A SPRs (MMCR0, MMCR2, MMCRA, and the
 * PMCs) have problem state read access.
 *
 * Read access is granted for all PMCC values but 0b01, where a
 * Facility Unavailable Interrupt will occur.
 */
static bool spr_groupA_read_allowed(DisasContext *ctx)
{
    if (!ctx->mmcr0_pmcc0 && ctx->mmcr0_pmcc1) {
        gen_hvpriv_exception(ctx, POWERPC_EXCP_FU);
        return false;
    }

    return true;
}

/*
 * Checks whether the Group A SPRs (MMCR0, MMCR2, MMCRA, and the
 * PMCs) have problem state write access.
 *
 * Write access is granted for PMCC values 0b10 and 0b11. Userspace
 * writing with PMCC 0b00 will generate a Hypervisor Emulation
 * Assistance Interrupt. Userspace writing with PMCC 0b01 will
 * generate a Facility Unavailable Interrupt.
 */
static bool spr_groupA_write_allowed(DisasContext *ctx)
{
    if (ctx->mmcr0_pmcc0) {
        return true;
    }

    if (ctx->mmcr0_pmcc1) {
        /* PMCC = 0b01 */
        gen_hvpriv_exception(ctx, POWERPC_EXCP_FU);
    } else {
        /* PMCC = 0b00 */
        gen_hvpriv_exception(ctx, POWERPC_EXCP_INVAL_SPR);
    }

    return false;
}

/*
 * Helper function to avoid code repetition between MMCR0 and
 * MMCR2 problem state write functions.
 *
 * 'ret' must be tcg_temp_freed() by the caller.
 */
static TCGv masked_gprn_for_spr_write(int gprn, int sprn,
                                      uint64_t spr_mask)
{
    TCGv ret = tcg_temp_new();
    TCGv t0 = tcg_temp_new();

    /* 'ret' starts with all mask bits cleared */
    gen_load_spr(ret, sprn);
    tcg_gen_andi_tl(ret, ret, ~(spr_mask));

    /* Apply the mask into 'gprn' in a temp var */
    tcg_gen_andi_tl(t0, cpu_gpr[gprn], spr_mask);

    /* Add the masked gprn bits into 'ret' */
    tcg_gen_or_tl(ret, ret, t0);

    tcg_temp_free(t0);

    return ret;
}

void spr_read_MMCR0_ureg(DisasContext *ctx, int gprn, int sprn)
{
    TCGv t0;

    if (!spr_groupA_read_allowed(ctx)) {
        return;
    }

    t0 = tcg_temp_new();

    /*
     * Filter out all bits but FC, PMAO, and PMAE, according
     * to ISA v3.1, in 10.4.4 Monitor Mode Control Register 0,
     * fourth paragraph.
     */
    gen_load_spr(t0, SPR_POWER_MMCR0);
    tcg_gen_andi_tl(t0, t0, MMCR0_UREG_MASK);
    tcg_gen_mov_tl(cpu_gpr[gprn], t0);

    tcg_temp_free(t0);
}

static void write_MMCR0_common(DisasContext *ctx, TCGv val)
{
    /*
     * helper_store_mmcr0 performs clock-based operations that
     * will cause 'bad icount read' errors if we do not execute
     * gen_icount_io_start() beforehand.
     */
    gen_icount_io_start(ctx);
    gen_helper_store_mmcr0(cpu_env, val);
}

void spr_write_MMCR0_ureg(DisasContext *ctx, int sprn, int gprn)
{
    TCGv masked_gprn;

    if (!spr_groupA_write_allowed(ctx)) {
        return;
    }

    /*
     * Filter out all bits but FC, PMAO, and PMAE, according
     * to ISA v3.1, in 10.4.4 Monitor Mode Control Register 0,
     * fourth paragraph.
     */
    masked_gprn = masked_gprn_for_spr_write(gprn, SPR_POWER_MMCR0,
                                            MMCR0_UREG_MASK);
    write_MMCR0_common(ctx, masked_gprn);

    tcg_temp_free(masked_gprn);
}

void spr_read_MMCR2_ureg(DisasContext *ctx, int gprn, int sprn)
{
    TCGv t0;

    if (!spr_groupA_read_allowed(ctx)) {
        return;
    }

    t0 = tcg_temp_new();

    /*
     * On read, filter out all bits that are not FCnP0 bits.
     * When MMCR0[PMCC] is set to 0b10 or 0b11, providing
     * problem state programs read/write access to MMCR2,
     * only the FCnP0 bits can be accessed. All other bits are
     * not changed when mtspr is executed in problem state, and
     * all other bits return 0s when mfspr is executed in problem
     * state, according to ISA v3.1, section 10.4.6 Monitor Mode
     * Control Register 2, p. 1316, third paragraph.
     */
    gen_load_spr(t0, SPR_POWER_MMCR2);
    tcg_gen_andi_tl(t0, t0, MMCR2_UREG_MASK);
    tcg_gen_mov_tl(cpu_gpr[gprn], t0);

    tcg_temp_free(t0);
}

void spr_write_MMCR2_ureg(DisasContext *ctx, int sprn, int gprn)
{
    TCGv masked_gprn;

    if (!spr_groupA_write_allowed(ctx)) {
        return;
    }

    /*
     * Filter the bits that can be written using MMCR2_UREG_MASK,
     * similar to what is done in spr_write_MMCR0_ureg().
     */
    masked_gprn = masked_gprn_for_spr_write(gprn, SPR_POWER_MMCR2,
                                            MMCR2_UREG_MASK);
    gen_store_spr(SPR_POWER_MMCR2, masked_gprn);

    tcg_temp_free(masked_gprn);
}

void spr_read_PMC(DisasContext *ctx, int gprn, int sprn)
{
    TCGv_i32 t_sprn = tcg_const_i32(sprn);

    gen_icount_io_start(ctx);
    gen_helper_read_pmc(cpu_gpr[gprn], cpu_env, t_sprn);

    tcg_temp_free_i32(t_sprn);
}

void spr_read_PMC14_ureg(DisasContext *ctx, int gprn, int sprn)
{
    if (!spr_groupA_read_allowed(ctx)) {
        return;
    }

    spr_read_PMC(ctx, gprn, sprn + 0x10);
}

void spr_read_PMC56_ureg(DisasContext *ctx, int gprn, int sprn)
{
    /*
     * If PMCC = 0b11, PMC5 and PMC6 aren't included in the Performance
     * Monitor, and a read attempt results in a Facility Unavailable
     * Interrupt.
     */
    if (ctx->mmcr0_pmcc0 && ctx->mmcr0_pmcc1) {
        gen_hvpriv_exception(ctx, POWERPC_EXCP_FU);
        return;
    }

    /* The remaining steps are similar to PMCs 1-4 userspace read */
    spr_read_PMC14_ureg(ctx, gprn, sprn);
}

void spr_write_PMC(DisasContext *ctx, int sprn, int gprn)
{
    TCGv_i32 t_sprn = tcg_const_i32(sprn);

    gen_icount_io_start(ctx);
    gen_helper_store_pmc(cpu_env, t_sprn, cpu_gpr[gprn]);

    tcg_temp_free_i32(t_sprn);
}

void spr_write_PMC14_ureg(DisasContext *ctx, int sprn, int gprn)
{
    if (!spr_groupA_write_allowed(ctx)) {
        return;
    }

    spr_write_PMC(ctx, sprn + 0x10, gprn);
}

void spr_write_PMC56_ureg(DisasContext *ctx, int sprn, int gprn)
{
    /*
     * If PMCC = 0b11, PMC5 and PMC6 aren't included in the Performance
     * Monitor, and a write attempt results in a Facility Unavailable
     * Interrupt.
     */
    if (ctx->mmcr0_pmcc0 && ctx->mmcr0_pmcc1) {
        gen_hvpriv_exception(ctx, POWERPC_EXCP_FU);
        return;
    }

    /* The remaining steps are similar to PMCs 1-4 userspace write */
    spr_write_PMC14_ureg(ctx, sprn, gprn);
}

void spr_write_MMCR0(DisasContext *ctx, int sprn, int gprn)
{
    write_MMCR0_common(ctx, cpu_gpr[gprn]);
}

void spr_write_MMCR1(DisasContext *ctx, int sprn, int gprn)
{
    gen_icount_io_start(ctx);
    gen_helper_store_mmcr1(cpu_env, cpu_gpr[gprn]);
}

#else
void spr_read_MMCR0_ureg(DisasContext *ctx, int gprn, int sprn)
{
    spr_read_ureg(ctx, gprn, sprn);
}

void spr_write_MMCR0_ureg(DisasContext *ctx, int sprn, int gprn)
{
    spr_noaccess(ctx, gprn, sprn);
}

void spr_read_MMCR2_ureg(DisasContext *ctx, int gprn, int sprn)
{
    spr_read_ureg(ctx, gprn, sprn);
}

void spr_write_MMCR2_ureg(DisasContext *ctx, int sprn, int gprn)
{
    spr_noaccess(ctx, gprn, sprn);
}

void spr_read_PMC14_ureg(DisasContext *ctx, int gprn, int sprn)
{
    spr_read_ureg(ctx, gprn, sprn);
}

void spr_read_PMC56_ureg(DisasContext *ctx, int gprn, int sprn)
{
    spr_read_ureg(ctx, gprn, sprn);
}

void spr_write_PMC14_ureg(DisasContext *ctx, int sprn, int gprn)
{
    spr_noaccess(ctx, gprn, sprn);
}

void spr_write_PMC56_ureg(DisasContext *ctx, int sprn, int gprn)
{
    spr_noaccess(ctx, gprn, sprn);
}

void spr_write_MMCR0(DisasContext *ctx, int sprn, int gprn)
{
    spr_write_generic(ctx, sprn, gprn);
}

void spr_write_MMCR1(DisasContext *ctx, int sprn, int gprn)
{
    spr_write_generic(ctx, sprn, gprn);
}

void spr_write_PMC(DisasContext *ctx, int sprn, int gprn)
{
    spr_write_generic(ctx, sprn, gprn);
}
#endif /* defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY) */