diff --git a/hw/ppc/spapr.c b/hw/ppc/spapr.c
index 9c1c2c1858..7f33a1b2b5 100644
--- a/hw/ppc/spapr.c
+++ b/hw/ppc/spapr.c
@@ -1771,6 +1771,13 @@ static void ppc_spapr_init(MachineState *machine)
             spapr->vrma_adjust = 1;
             spapr->rma_size = MIN(spapr->rma_size, 0x10000000);
         }
+
+        /* Actually we don't support unbounded RMA anymore since we
+         * added proper emulation of HV mode. The max we can get is
+         * 16G which also happens to be what we configure for PAPR
+         * mode so make sure we don't do anything bigger than that
+         */
+        spapr->rma_size = MIN(spapr->rma_size, 0x400000000ull);
     }
 
     if (spapr->rma_size > node0_size) {
diff --git a/target-ppc/cpu.h b/target-ppc/cpu.h
index af73bced9f..2666a3f80d 100644
--- a/target-ppc/cpu.h
+++ b/target-ppc/cpu.h
@@ -1047,6 +1047,8 @@ struct CPUPPCState {
     uint64_t insns_flags2;
 
 #if defined(TARGET_PPC64)
     struct ppc_segment_page_sizes sps;
+    ppc_slb_t vrma_slb;
+    target_ulong rmls;
     bool ci_large_pages;
 #endif
diff --git a/target-ppc/mmu-hash64.c b/target-ppc/mmu-hash64.c
index 7c1b169676..7f314442ca 100644
--- a/target-ppc/mmu-hash64.c
+++ b/target-ppc/mmu-hash64.c
@@ -681,11 +681,52 @@ int ppc_hash64_handle_mmu_fault(PowerPCCPU *cpu, vaddr eaddr,
 
     assert((rwx == 0) || (rwx == 1) || (rwx == 2));
 
+    /* Note on LPCR usage: 970 uses HID4, but our special variant
+     * of store_spr copies relevant fields into env->spr[SPR_LPCR].
+     * Similarly we filter unimplemented bits when storing into
+     * LPCR depending on the MMU version. This code can thus just
+     * use the LPCR "as-is".
+     */
+
     /* 1. Handle real mode accesses */
     if (((rwx == 2) && (msr_ir == 0)) || ((rwx != 2) && (msr_dr == 0))) {
-        /* Translation is off */
-        /* In real mode the top 4 effective address bits are ignored */
+        /* Translation is supposedly "off" */
+        /* In real mode the top 4 effective address bits are (mostly) ignored */
         raddr = eaddr & 0x0FFFFFFFFFFFFFFFULL;
+
+        /* In HV mode, add HRMOR if top EA bit is clear */
+        if (msr_hv || !env->has_hv_mode) {
+            if (!(eaddr >> 63)) {
+                raddr |= env->spr[SPR_HRMOR];
+            }
+        } else {
+            /* Otherwise, check VPM for RMA vs VRMA */
+            if (env->spr[SPR_LPCR] & LPCR_VPM0) {
+                slb = &env->vrma_slb;
+                if (slb->sps) {
+                    goto skip_slb_search;
+                }
+                /* Not much else to do here */
+                cs->exception_index = POWERPC_EXCP_MCHECK;
+                env->error_code = 0;
+                return 1;
+            } else if (raddr < env->rmls) {
+                /* RMA. Check bounds in RMLS */
+                raddr |= env->spr[SPR_RMOR];
+            } else {
+                /* The access failed, generate the appropriate interrupt */
+                if (rwx == 2) {
+                    ppc_hash64_set_isi(cs, env, 0x08000000);
+                } else {
+                    dsisr = 0x08000000;
+                    if (rwx == 1) {
+                        dsisr |= 0x02000000;
+                    }
+                    ppc_hash64_set_dsi(cs, env, eaddr, dsisr);
+                }
+                return 1;
+            }
+        }
         tlb_set_page(cs, eaddr & TARGET_PAGE_MASK, raddr & TARGET_PAGE_MASK,
                      PAGE_READ | PAGE_WRITE | PAGE_EXEC, mmu_idx,
                      TARGET_PAGE_SIZE);
@@ -694,7 +735,6 @@ int ppc_hash64_handle_mmu_fault(PowerPCCPU *cpu, vaddr eaddr,
 
     /* 2. Translation is on, so look up the SLB */
     slb = slb_lookup(cpu, eaddr);
-
     if (!slb) {
         if (rwx == 2) {
             cs->exception_index = POWERPC_EXCP_ISEG;
@@ -707,6 +747,8 @@ int ppc_hash64_handle_mmu_fault(PowerPCCPU *cpu, vaddr eaddr,
         return 1;
     }
 
+skip_slb_search:
+
     /* 3. Check for segment level no-execute violation */
     if ((rwx == 2) && (slb->vsid & SLB_VSID_N)) {
         ppc_hash64_set_isi(cs, env, 0x10000000);
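
Note: as a reading aid for the new real-mode path above, the control flow reduces to a small decision tree. Below is a minimal standalone C sketch of that tree, not QEMU code; cpu_t and its fields are simplified stand-ins, msr_hv and has_hv_mode are folded into a single hv flag, and the LPCR_VPM0 bit position (LPCR bit 0, i.e. bit 63 of the 64-bit value) is an assumption taken from QEMU's cpu.h layout.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define LPCR_VPM0 (1ull << 63)      /* assumed: LPCR bit 0 (MSB) */

typedef struct {
    bool hv;            /* effective hypervisor state (MSR[HV], or no HV mode) */
    uint64_t hrmor;     /* SPR_HRMOR */
    uint64_t rmor;      /* SPR_RMOR */
    uint64_t rmls;      /* decoded real mode limit, in bytes */
    uint64_t lpcr;      /* SPR_LPCR */
    bool vrma_valid;    /* stands in for env->vrma_slb.sps != NULL */
} cpu_t;

/* Returns true if the access translates; false means a fault
 * (ISI/DSI, or a machine check when VRMA is enabled but unusable). */
static bool real_mode_xlate(const cpu_t *cpu, uint64_t ea, uint64_t *ra)
{
    uint64_t raddr = ea & 0x0FFFFFFFFFFFFFFFull;   /* top 4 EA bits dropped */

    if (cpu->hv) {
        /* Hypervisor real mode: HRMOR applies when EA bit 0 is clear */
        if (!(ea >> 63)) {
            raddr |= cpu->hrmor;
        }
        *ra = raddr;
        return true;
    }
    if (cpu->lpcr & LPCR_VPM0) {
        /* VRMA: the patch translates via the fake SLB entry instead
         * (goto skip_slb_search), so no *ra is produced here */
        return cpu->vrma_valid;
    }
    if (raddr < cpu->rmls) {
        /* RMA: offset by RMOR, bounded by the RMLS-decoded limit */
        *ra = raddr | cpu->rmor;
        return true;
    }
    return false;   /* outside the RMA: DSISR/SRR1 bit 0x08000000 */
}

int main(void)
{
    cpu_t cpu = { .hv = false, .rmls = 0x8000000ull };   /* 128MB RMA */
    uint64_t ra = 0;

    printf("EA 0x1000    -> %s\n",
           real_mode_xlate(&cpu, 0x1000, &ra) ? "ok" : "fault");
    printf("EA 0x9000000 -> %s\n",
           real_mode_xlate(&cpu, 0x9000000ull, &ra) ? "ok" : "fault");
    return 0;
}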
@@ -789,18 +831,37 @@ hwaddr ppc_hash64_get_phys_page_debug(PowerPCCPU *cpu, target_ulong addr)
 {
     CPUPPCState *env = &cpu->env;
     ppc_slb_t *slb;
-    hwaddr pte_offset;
+    hwaddr pte_offset, raddr;
     ppc_hash_pte64_t pte;
     unsigned apshift;
 
+    /* Handle real mode */
     if (msr_dr == 0) {
         /* In real mode the top 4 effective address bits are ignored */
-        return addr & 0x0FFFFFFFFFFFFFFFULL;
-    }
+        raddr = addr & 0x0FFFFFFFFFFFFFFFULL;
 
-    slb = slb_lookup(cpu, addr);
-    if (!slb) {
-        return -1;
+        /* In HV mode, add HRMOR if top EA bit is clear */
+        if ((msr_hv || !env->has_hv_mode) && !(addr >> 63)) {
+            return raddr | env->spr[SPR_HRMOR];
+        }
+
+        /* Otherwise, check VPM for RMA vs VRMA */
+        if (env->spr[SPR_LPCR] & LPCR_VPM0) {
+            slb = &env->vrma_slb;
+            if (!slb->sps) {
+                return -1;
+            }
+        } else if (raddr < env->rmls) {
+            /* RMA. Check bounds in RMLS */
+            return raddr | env->spr[SPR_RMOR];
+        } else {
+            return -1;
+        }
+    } else {
+        slb = slb_lookup(cpu, addr);
+        if (!slb) {
+            return -1;
+        }
     }
 
     pte_offset = ppc_hash64_htab_lookup(cpu, slb, addr, &pte, &apshift);
@@ -846,6 +907,90 @@ void ppc_hash64_tlb_flush_hpte(PowerPCCPU *cpu,
     tlb_flush(CPU(cpu), 1);
 }
 
+void ppc_hash64_update_rmls(CPUPPCState *env)
+{
+    uint64_t lpcr = env->spr[SPR_LPCR];
+
+    /*
+     * This is the full 4 bits encoding of POWER8. Previous
+     * CPUs only support a subset of these but the filtering
+     * is done when writing LPCR
+     */
+    switch ((lpcr & LPCR_RMLS) >> LPCR_RMLS_SHIFT) {
+    case 0x8: /* 32MB */
+        env->rmls = 0x2000000ull;
+        break;
+    case 0x3: /* 64MB */
+        env->rmls = 0x4000000ull;
+        break;
+    case 0x7: /* 128MB */
+        env->rmls = 0x8000000ull;
+        break;
+    case 0x4: /* 256MB */
+        env->rmls = 0x10000000ull;
+        break;
+    case 0x2: /* 1GB */
+        env->rmls = 0x40000000ull;
+        break;
+    case 0x1: /* 16GB */
+        env->rmls = 0x400000000ull;
+        break;
+    default:
+        /* What to do here ??? */
+        env->rmls = 0;
+    }
+}
+
+void ppc_hash64_update_vrma(CPUPPCState *env)
+{
+    const struct ppc_one_seg_page_size *sps = NULL;
+    target_ulong esid, vsid, lpcr;
+    ppc_slb_t *slb = &env->vrma_slb;
+    uint32_t vrmasd;
+    int i;
+
+    /* First clear it */
+    slb->esid = slb->vsid = 0;
+    slb->sps = NULL;
+
+    /* Is VRMA enabled ? */
+    lpcr = env->spr[SPR_LPCR];
+    if (!(lpcr & LPCR_VPM0)) {
+        return;
+    }
+
+    /* Make one up. Mostly ignore the ESID which will not be
+     * needed for translation
+     */
+    vsid = SLB_VSID_VRMA;
+    vrmasd = (lpcr & LPCR_VRMASD) >> LPCR_VRMASD_SHIFT;
+    vsid |= (vrmasd << 4) & (SLB_VSID_L | SLB_VSID_LP);
+    esid = SLB_ESID_V;
+
+    for (i = 0; i < PPC_PAGE_SIZES_MAX_SZ; i++) {
+        const struct ppc_one_seg_page_size *sps1 = &env->sps.sps[i];
+
+        if (!sps1->page_shift) {
+            break;
+        }
+
+        if ((vsid & SLB_VSID_LLP_MASK) == sps1->slb_enc) {
+            sps = sps1;
+            break;
+        }
+    }
+
+    if (!sps) {
+        error_report("Bad page size encoding esid 0x"TARGET_FMT_lx
+                     " vsid 0x"TARGET_FMT_lx, esid, vsid);
+        return;
+    }
+
+    slb->vsid = vsid;
+    slb->esid = esid;
+    slb->sps = sps;
+}
+
 void helper_store_lpcr(CPUPPCState *env, target_ulong val)
 {
     uint64_t lpcr = 0;
@@ -901,4 +1046,6 @@ void helper_store_lpcr(CPUPPCState *env, target_ulong val)
         ;
     }
     env->spr[SPR_LPCR] = lpcr;
+    ppc_hash64_update_rmls(env);
+    ppc_hash64_update_vrma(env);
 }
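
Note: the RMLS switch in ppc_hash64_update_rmls() above is a pure encoding-to-size mapping. For reference, here is the same table in a compact, testable form — a sketch only, where rmls_to_bytes is a made-up helper name and the encodings and sizes are copied straight from the hunk.

#include <stdint.h>
#include <stdio.h>

/* index = 4-bit RMLS encoding, value = limit in bytes (0 = unsupported) */
static uint64_t rmls_to_bytes(unsigned rmls)
{
    static const uint64_t limits[16] = {
        [0x8] = 0x2000000ull,   /*  32MB */
        [0x3] = 0x4000000ull,   /*  64MB */
        [0x7] = 0x8000000ull,   /* 128MB */
        [0x4] = 0x10000000ull,  /* 256MB */
        [0x2] = 0x40000000ull,  /*   1GB */
        [0x1] = 0x400000000ull, /*  16GB */
    };
    return limits[rmls & 0xf];
}

int main(void)
{
    for (unsigned enc = 0; enc < 16; enc++) {
        uint64_t bytes = rmls_to_bytes(enc);
        if (bytes) {
            printf("RMLS 0x%x -> %5llu MB\n",
                   enc, (unsigned long long)(bytes >> 20));
        }
    }
    return 0;
}

This also makes it visible that 0x1 is the largest encoding (16GB), which is why the translate_init.c hunk below programs 1ull << LPCR_RMLS_SHIFT as the PAPR-mode default.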
diff --git a/target-ppc/mmu-hash64.h b/target-ppc/mmu-hash64.h
index 154a306997..3a7476b30a 100644
--- a/target-ppc/mmu-hash64.h
+++ b/target-ppc/mmu-hash64.h
@@ -18,6 +18,8 @@ void ppc_hash64_tlb_flush_hpte(PowerPCCPU *cpu,
                                target_ulong pte0, target_ulong pte1);
 unsigned ppc_hash64_hpte_page_shift_noslb(PowerPCCPU *cpu,
                                           uint64_t pte0, uint64_t pte1);
+void ppc_hash64_update_vrma(CPUPPCState *env);
+void ppc_hash64_update_rmls(CPUPPCState *env);
 #endif
 
 /*
@@ -36,6 +38,7 @@ unsigned ppc_hash64_hpte_page_shift_noslb(PowerPCCPU *cpu,
 #define SLB_VSID_B_256M         0x0000000000000000ULL
 #define SLB_VSID_B_1T           0x4000000000000000ULL
 #define SLB_VSID_VSID           0x3FFFFFFFFFFFF000ULL
+#define SLB_VSID_VRMA           (0x0001FFFFFF000000ULL | SLB_VSID_B_1T)
 #define SLB_VSID_PTEM           (SLB_VSID_B | SLB_VSID_VSID)
 #define SLB_VSID_KS             0x0000000000000800ULL
 #define SLB_VSID_KP             0x0000000000000400ULL
diff --git a/target-ppc/translate_init.c b/target-ppc/translate_init.c
index a06bf50b65..8f257fb74a 100644
--- a/target-ppc/translate_init.c
+++ b/target-ppc/translate_init.c
@@ -8791,11 +8791,19 @@ void cpu_ppc_set_papr(PowerPCCPU *cpu)
     /* Set emulated LPCR to not send interrupts to hypervisor. Note that
      * under KVM, the actual HW LPCR will be set differently by KVM itself,
      * the settings below ensure proper operations with TCG in absence of
-     * a real hypervisor
+     * a real hypervisor.
+     *
+     * Clearing VPM0 will also cause us to use RMOR in mmu-hash64.c for
+     * real mode accesses, which thankfully defaults to 0 and isn't
+     * accessible in guest mode.
      */
     lpcr->default_value &= ~(LPCR_VPM0 | LPCR_VPM1 | LPCR_ISL | LPCR_KBV);
     lpcr->default_value |= LPCR_LPES0 | LPCR_LPES1;
 
+    /* Set RMLS to the max (ie, 16G) */
+    lpcr->default_value &= ~LPCR_RMLS;
+    lpcr->default_value |= 1ull << LPCR_RMLS_SHIFT;
+
     /* P7 and P8 has slightly different PECE bits, mostly because P8 adds
      * bit 47 and 48 which are reserved on P7. Here we set them all, which
      * will work as expected for both implementations
@@ -8811,6 +8819,10 @@ void cpu_ppc_set_papr(PowerPCCPU *cpu)
     /* Set a full AMOR so guest can use the AMR as it sees fit */
     env->spr[SPR_AMOR] = amor->default_value = 0xffffffffffffffffull;
 
+    /* Update some env bits based on new LPCR value */
+    ppc_hash64_update_rmls(env);
+    ppc_hash64_update_vrma(env);
+
     /* Tell KVM that we're in PAPR mode */
     if (kvm_enabled()) {
         kvmppc_set_papr(cpu);
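
Note: to see what ppc_hash64_update_vrma() actually builds out of the new SLB_VSID_VRMA define, here is a standalone computation of the fake VSID. The SLB_VSID_VRMA and SLB_VSID_B_1T values are the ones from the mmu-hash64.h hunk; the SLB_VSID_L/SLB_VSID_LP values and the LPCR_VRMASD position follow my reading of QEMU's headers and should be treated as assumptions.

#include <stdint.h>
#include <stdio.h>

#define SLB_VSID_B_1T      0x4000000000000000ull
#define SLB_VSID_VRMA      (0x0001FFFFFF000000ull | SLB_VSID_B_1T)
#define SLB_VSID_L         0x0000000000000100ull   /* assumed */
#define SLB_VSID_LP        0x0000000000000030ull   /* assumed */
#define LPCR_VRMASD_SHIFT  (63 - 16)               /* assumed */
#define LPCR_VRMASD        (0x1full << LPCR_VRMASD_SHIFT)

int main(void)
{
    /* illustrative VRMASD value only; real ones come from the guest's LPCR */
    uint64_t lpcr = 0x10ull << LPCR_VRMASD_SHIFT;

    /* same folding as the patch: VRMASD lands in the L/LP page-size bits */
    uint32_t vrmasd = (lpcr & LPCR_VRMASD) >> LPCR_VRMASD_SHIFT;
    uint64_t vsid = SLB_VSID_VRMA
                  | (((uint64_t)vrmasd << 4) & (SLB_VSID_L | SLB_VSID_LP));

    printf("VRMASD 0x%02x -> VRMA vsid 0x%016llx\n",
           vrmasd, (unsigned long long)vsid);
    return 0;
}

The lookup loop in the patch then matches these L/LP bits against env->sps to find the backing segment page size, failing with error_report() when the CPU model doesn't support the encoding.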