Merge remote-tracking branch 'remotes/borntraeger/tags/s390x-20150218' into staging

Features for s390x/kvm

1. guest reIPL changes (Fan Zhang)
Implements subcodes 5 and 6 of diag 0x308. This allows the use of
/sys/firmware/[re]ipl/ccw/* and the chreipl and lsreipl tools in
Linux. In addition to the normal "change the disk" case, this also
allows switching from booting an external kernel to rebooting
from a disk (see the first sketch after this list).

2. Memory page table walking (Thomas Huth)
Fixes several page table walking functions that are used in several
places, such as the gdb server and instruction handling, and makes
several I/O-related functions use them (see the second sketch below).
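
The following is a minimal, self-contained C model of what the two new diag 0x308 subcodes do, not QEMU's actual handler: subcode 5 stores a guest-supplied IPL parameter block for the next re-IPL, subcode 6 hands it back (or reports "no configuration"). The struct layout and return codes are taken from hw/s390x/ipl.h and target-s390x/misc_helper.c in the diff below; the diag308_set()/diag308_store() names, the direct struct copies and main() are stand-ins for QEMU's dispatch and guest-memory accesses.

/*
 * Simplified model of DIAG 0x308 subcodes 5 (SET) and 6 (STORE).
 * Struct layout and RC values mirror the diff below; everything
 * else is illustrative only.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef struct IplParameterBlock {
    uint8_t  reserved1[110];
    uint16_t devno;
    uint8_t  reserved2[88];
} IplParameterBlock;

#define DIAG_308_RC_OK      0x0001
#define DIAG_308_RC_NO_CONF 0x0102

static IplParameterBlock stored_iplb;
static bool iplb_valid;

/* Subcode 5: remember the block the guest wants to re-IPL from. */
static unsigned diag308_set(const IplParameterBlock *guest_iplb)
{
    stored_iplb = *guest_iplb;
    iplb_valid = true;
    return DIAG_308_RC_OK;
}

/* Subcode 6: copy the stored block back, or report "no configuration". */
static unsigned diag308_store(IplParameterBlock *guest_iplb)
{
    if (!iplb_valid) {
        return DIAG_308_RC_NO_CONF;
    }
    *guest_iplb = stored_iplb;
    return DIAG_308_RC_OK;
}

int main(void)
{
    IplParameterBlock iplb = { .devno = 0x1234 }, out = { 0 };
    unsigned rc;

    printf("store before set: rc=0x%04x\n", diag308_store(&out));
    printf("set:              rc=0x%04x\n", diag308_set(&iplb));
    rc = diag308_store(&out);
    printf("store after set:  rc=0x%04x, devno=0x%04x\n",
           rc, (unsigned)out.devno);
    return 0;
}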
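
And here is a minimal, self-contained sketch of the page-wise copy loop at the heart of the new s390_cpu_virt_mem_rw() helper (target-s390x/mmu_helper.c in the diff below): a transfer given by a logical address is split at page boundaries and every page is translated before it is copied. The identity translate_page() and the flat guest_ram[] array are stand-ins for mmu_translate() and cpu_physical_memory_rw(); where QEMU would inject a program check, this sketch just returns -1.

/*
 * Simplified model of the s390_cpu_virt_mem_rw() copy loop:
 * split at page boundaries, translate each page, then copy.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define PAGE_SIZE 4096
#define PAGE_MASK (~(uint64_t)(PAGE_SIZE - 1))
#define GUEST_RAM (64 * 1024)

static uint8_t guest_ram[GUEST_RAM];

/* Stand-in for mmu_translate(): map a logical page to an "absolute" one. */
static int translate_page(uint64_t laddr, uint64_t *raddr)
{
    if ((laddr & PAGE_MASK) + PAGE_SIZE > GUEST_RAM) {
        return -1;              /* QEMU would inject an exception here */
    }
    *raddr = laddr & PAGE_MASK; /* identity mapping for the demo */
    return 0;
}

static int virt_mem_rw(uint64_t laddr, void *hostbuf, int len, bool is_write)
{
    uint8_t *buf = hostbuf;

    while (len > 0) {
        uint64_t raddr, offs = laddr & ~PAGE_MASK;
        int chunk = PAGE_SIZE - offs;

        chunk = chunk < len ? chunk : len;
        if (translate_page(laddr, &raddr)) {
            return -1;
        }
        if (is_write) {
            memcpy(&guest_ram[raddr + offs], buf, chunk);
        } else {
            memcpy(buf, &guest_ram[raddr + offs], chunk);
        }
        laddr += chunk;
        buf += chunk;
        len -= chunk;
    }
    return 0;
}

int main(void)
{
    char msg[] = "crossing", out[16] = { 0 };

    /* Write across a page boundary, then read the data back. */
    virt_mem_rw(PAGE_SIZE - 4, msg, sizeof(msg), true);
    virt_mem_rw(PAGE_SIZE - 4, out, sizeof(msg), false);
    printf("%s\n", out);
    return 0;
}

Translating page by page is what lets the helper fault on exactly the failing page; the reworked I/O instruction handlers in the diff (msch, ssch, tsch, stcrw, chsc, tpi and the zpci instructions) simply bail out when s390_cpu_virt_mem_read()/write() reports such a fault.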

# gpg: Signature made Wed Feb 18 09:13:22 2015 GMT using RSA key ID B5A61C7C
# gpg: Good signature from "Christian Borntraeger (IBM) <borntraeger@de.ibm.com>"

* remotes/borntraeger/tags/s390x-20150218: (29 commits)
  s390x/helper: Remove s390_cpu_physical_memory_map
  s390x/pci: Rework memory access in zpci instruction
  s390x/ioinst: Rework memory access in TPI instruction
  s390x/ioinst: Rework memory access in CHSC instruction
  s390x/ioinst: Rework memory access in STCRW instruction
  s390x/ioinst: Rework memory access in TSCH instruction
  s390x/ioinst: Set condition code in ioinst_handle_tsch() handler
  s390x/ioinst: Rework memory access in STSCH instruction
  s390x/ioinst: Rework memory access in SSCH instruction
  s390x/ioinst: Rework memory access in MSCH instruction
  s390x/css: Make schib parameter of css_do_msch const
  s390x/mmu: Add function for accessing guest memory
  s390x/kvm: Add function for injecting pgm access exceptions
  s390x/mmu: Clean up mmu_translate_asc()
  s390x/mmu: Check bit 52 in page table entry
  s390x/mmu: Renaming related to the ASCE confusion
  s390x/mmu: Add support for read-only regions
  s390x/mmu: Fix the exception codes for illegal table entries
  s390x/mmu: Fix exception types when checking the ASCEs
  s390x/mmu: Fix translation exception code in lowcore
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Peter Maydell committed on 2015-02-26 09:08:54 +00:00 (commit 11d39a1310)
16 changed files with 923 additions and 556 deletions


@ -584,7 +584,7 @@ static void copy_schib_from_guest(SCHIB *dest, const SCHIB *src)
}
}
int css_do_msch(SubchDev *sch, SCHIB *orig_schib)
int css_do_msch(SubchDev *sch, const SCHIB *orig_schib)
{
SCSW *s = &sch->curr_status.scsw;
PMCW *p = &sch->curr_status.pmcw;
@ -801,7 +801,8 @@ out:
return ret;
}
static void copy_irb_to_guest(IRB *dest, const IRB *src, PMCW *pmcw)
static void copy_irb_to_guest(IRB *dest, const IRB *src, PMCW *pmcw,
int *irb_len)
{
int i;
uint16_t stctl = src->scsw.ctrl & SCSW_CTRL_MASK_STCTL;
@ -815,6 +816,8 @@ static void copy_irb_to_guest(IRB *dest, const IRB *src, PMCW *pmcw)
for (i = 0; i < ARRAY_SIZE(dest->ecw); i++) {
dest->ecw[i] = cpu_to_be32(src->ecw[i]);
}
*irb_len = sizeof(*dest) - sizeof(dest->emw);
/* extended measurements enabled? */
if ((src->scsw.flags & SCSW_FLAGS_MASK_ESWF) ||
!(pmcw->flags & PMCW_FLAGS_MASK_TF) ||
@ -832,26 +835,21 @@ static void copy_irb_to_guest(IRB *dest, const IRB *src, PMCW *pmcw)
dest->emw[i] = cpu_to_be32(src->emw[i]);
}
}
*irb_len = sizeof(*dest);
}
int css_do_tsch(SubchDev *sch, IRB *target_irb)
int css_do_tsch_get_irb(SubchDev *sch, IRB *target_irb, int *irb_len)
{
SCSW *s = &sch->curr_status.scsw;
PMCW *p = &sch->curr_status.pmcw;
uint16_t stctl;
uint16_t fctl;
uint16_t actl;
IRB irb;
int ret;
if (!(p->flags & (PMCW_FLAGS_MASK_DNV | PMCW_FLAGS_MASK_ENA))) {
ret = 3;
goto out;
return 3;
}
stctl = s->ctrl & SCSW_CTRL_MASK_STCTL;
fctl = s->ctrl & SCSW_CTRL_MASK_FCTL;
actl = s->ctrl & SCSW_CTRL_MASK_ACTL;
/* Prepare the irb for the guest. */
memset(&irb, 0, sizeof(IRB));
@ -876,7 +874,22 @@ int css_do_tsch(SubchDev *sch, IRB *target_irb)
}
}
/* Store the irb to the guest. */
copy_irb_to_guest(target_irb, &irb, p);
copy_irb_to_guest(target_irb, &irb, p, irb_len);
return ((stctl & SCSW_STCTL_STATUS_PEND) == 0);
}
void css_do_tsch_update_subch(SubchDev *sch)
{
SCSW *s = &sch->curr_status.scsw;
PMCW *p = &sch->curr_status.pmcw;
uint16_t stctl;
uint16_t fctl;
uint16_t actl;
stctl = s->ctrl & SCSW_CTRL_MASK_STCTL;
fctl = s->ctrl & SCSW_CTRL_MASK_FCTL;
actl = s->ctrl & SCSW_CTRL_MASK_ACTL;
/* Clear conditions on subchannel, if applicable. */
if (stctl & SCSW_STCTL_STATUS_PEND) {
@ -913,11 +926,6 @@ int css_do_tsch(SubchDev *sch, IRB *target_irb)
memset(sch->sense_data, 0 , sizeof(sch->sense_data));
}
}
ret = ((stctl & SCSW_STCTL_STATUS_PEND) == 0);
out:
return ret;
}
static void copy_crw_to_guest(CRW *dest, const CRW *src)
@ -947,6 +955,26 @@ int css_do_stcrw(CRW *crw)
return ret;
}
static void copy_crw_from_guest(CRW *dest, const CRW *src)
{
dest->flags = be16_to_cpu(src->flags);
dest->rsid = be16_to_cpu(src->rsid);
}
void css_undo_stcrw(CRW *crw)
{
CrwContainer *crw_cont;
crw_cont = g_try_malloc0(sizeof(CrwContainer));
if (!crw_cont) {
channel_subsys->crws_lost = true;
return;
}
copy_crw_from_guest(&crw_cont->crw, crw);
QTAILQ_INSERT_HEAD(&channel_subsys->pending_crws, crw_cont, sibling);
}
int css_do_tpi(IOIntCode *int_code, int lowcore)
{
/* No pending interrupts for !KVM. */


@ -18,6 +18,7 @@
#include "hw/sysbus.h"
#include "hw/s390x/virtio-ccw.h"
#include "hw/s390x/css.h"
#include "ipl.h"
#define KERN_IMAGE_START 0x010000UL
#define KERN_PARM_AREA 0x010480UL
@ -50,14 +51,49 @@ typedef struct S390IPLState {
/*< private >*/
SysBusDevice parent_obj;
uint64_t start_addr;
uint64_t bios_start_addr;
bool enforce_bios;
IplParameterBlock iplb;
bool iplb_valid;
bool reipl_requested;
/*< public >*/
char *kernel;
char *initrd;
char *cmdline;
char *firmware;
uint8_t cssid;
uint8_t ssid;
uint16_t devno;
} S390IPLState;
static const VMStateDescription vmstate_iplb = {
.name = "ipl/iplb",
.version_id = 0,
.minimum_version_id = 0,
.fields = (VMStateField[]) {
VMSTATE_UINT8_ARRAY(reserved1, IplParameterBlock, 110),
VMSTATE_UINT16(devno, IplParameterBlock),
VMSTATE_UINT8_ARRAY(reserved2, IplParameterBlock, 88),
VMSTATE_END_OF_LIST()
}
};
static const VMStateDescription vmstate_ipl = {
.name = "ipl",
.version_id = 0,
.minimum_version_id = 0,
.fields = (VMStateField[]) {
VMSTATE_UINT64(start_addr, S390IPLState),
VMSTATE_UINT64(bios_start_addr, S390IPLState),
VMSTATE_STRUCT(iplb, S390IPLState, 0, vmstate_iplb, IplParameterBlock),
VMSTATE_BOOL(iplb_valid, S390IPLState),
VMSTATE_UINT8(cssid, S390IPLState),
VMSTATE_UINT8(ssid, S390IPLState),
VMSTATE_UINT16(devno, S390IPLState),
VMSTATE_END_OF_LIST()
}
};
static int s390_ipl_init(SysBusDevice *dev)
{
@ -65,11 +101,14 @@ static int s390_ipl_init(SysBusDevice *dev)
uint64_t pentry = KERN_IMAGE_START;
int kernel_size;
if (!ipl->kernel) {
int bios_size;
char *bios_filename;
int bios_size;
char *bios_filename;
/* Load zipl bootloader */
/*
* Always load the bios if it was enforced,
* even if an external kernel has been defined.
*/
if (!ipl->kernel || ipl->enforce_bios) {
if (bios_name == NULL) {
bios_name = ipl->firmware;
}
@ -79,12 +118,12 @@ static int s390_ipl_init(SysBusDevice *dev)
hw_error("could not find stage1 bootloader\n");
}
bios_size = load_elf(bios_filename, NULL, NULL, &ipl->start_addr, NULL,
NULL, 1, ELF_MACHINE, 0);
bios_size = load_elf(bios_filename, NULL, NULL, &ipl->bios_start_addr,
NULL, NULL, 1, ELF_MACHINE, 0);
if (bios_size < 0) {
bios_size = load_image_targphys(bios_filename, ZIPL_IMAGE_START,
4096);
ipl->start_addr = ZIPL_IMAGE_START;
ipl->bios_start_addr = ZIPL_IMAGE_START;
if (bios_size > 4096) {
hw_error("stage1 bootloader is > 4k\n");
}
@ -94,52 +133,59 @@ static int s390_ipl_init(SysBusDevice *dev)
if (bios_size == -1) {
hw_error("could not load bootloader '%s'\n", bios_name);
}
return 0;
/* default boot target is the bios */
ipl->start_addr = ipl->bios_start_addr;
}
kernel_size = load_elf(ipl->kernel, NULL, NULL, &pentry, NULL,
NULL, 1, ELF_MACHINE, 0);
if (kernel_size < 0) {
kernel_size = load_image_targphys(ipl->kernel, 0, ram_size);
}
if (kernel_size < 0) {
fprintf(stderr, "could not load kernel '%s'\n", ipl->kernel);
return -1;
}
/*
* Is it a Linux kernel (starting at 0x10000)? If yes, we fill in the
* kernel parameters here as well. Note: For old kernels (up to 3.2)
* we can not rely on the ELF entry point - it was 0x800 (the SALIPL
* loader) and it won't work. For this case we force it to 0x10000, too.
*/
if (pentry == KERN_IMAGE_START || pentry == 0x800) {
ipl->start_addr = KERN_IMAGE_START;
/* Overwrite parameters in the kernel image, which are "rom" */
strcpy(rom_ptr(KERN_PARM_AREA), ipl->cmdline);
} else {
ipl->start_addr = pentry;
}
if (ipl->initrd) {
ram_addr_t initrd_offset;
int initrd_size;
initrd_offset = INITRD_START;
while (kernel_size + 0x100000 > initrd_offset) {
initrd_offset += 0x100000;
if (ipl->kernel) {
kernel_size = load_elf(ipl->kernel, NULL, NULL, &pentry, NULL,
NULL, 1, ELF_MACHINE, 0);
if (kernel_size < 0) {
kernel_size = load_image_targphys(ipl->kernel, 0, ram_size);
}
initrd_size = load_image_targphys(ipl->initrd, initrd_offset,
ram_size - initrd_offset);
if (initrd_size == -1) {
fprintf(stderr, "qemu: could not load initrd '%s'\n", ipl->initrd);
exit(1);
if (kernel_size < 0) {
fprintf(stderr, "could not load kernel '%s'\n", ipl->kernel);
return -1;
}
/*
* Is it a Linux kernel (starting at 0x10000)? If yes, we fill in the
* kernel parameters here as well. Note: For old kernels (up to 3.2)
* we can not rely on the ELF entry point - it was 0x800 (the SALIPL
* loader) and it won't work. For this case we force it to 0x10000, too.
*/
if (pentry == KERN_IMAGE_START || pentry == 0x800) {
ipl->start_addr = KERN_IMAGE_START;
/* Overwrite parameters in the kernel image, which are "rom" */
strcpy(rom_ptr(KERN_PARM_AREA), ipl->cmdline);
} else {
ipl->start_addr = pentry;
}
/* we have to overwrite values in the kernel image, which are "rom" */
stq_p(rom_ptr(INITRD_PARM_START), initrd_offset);
stq_p(rom_ptr(INITRD_PARM_SIZE), initrd_size);
}
if (ipl->initrd) {
ram_addr_t initrd_offset;
int initrd_size;
initrd_offset = INITRD_START;
while (kernel_size + 0x100000 > initrd_offset) {
initrd_offset += 0x100000;
}
initrd_size = load_image_targphys(ipl->initrd, initrd_offset,
ram_size - initrd_offset);
if (initrd_size == -1) {
fprintf(stderr, "qemu: could not load initrd '%s'\n",
ipl->initrd);
exit(1);
}
/*
* we have to overwrite values in the kernel image,
* which are "rom"
*/
stq_p(rom_ptr(INITRD_PARM_START), initrd_offset);
stq_p(rom_ptr(INITRD_PARM_SIZE), initrd_size);
}
}
return 0;
}
@ -148,9 +194,82 @@ static Property s390_ipl_properties[] = {
DEFINE_PROP_STRING("initrd", S390IPLState, initrd),
DEFINE_PROP_STRING("cmdline", S390IPLState, cmdline),
DEFINE_PROP_STRING("firmware", S390IPLState, firmware),
DEFINE_PROP_BOOL("enforce_bios", S390IPLState, enforce_bios, false),
DEFINE_PROP_END_OF_LIST(),
};
/*
* In addition to updating the iplstate, this function returns:
* - 0 if the system was IPLed with an external kernel
* - -1 if no valid boot device was found
* - the ccw id of the boot device otherwise
*/
static uint64_t s390_update_iplstate(CPUS390XState *env, S390IPLState *ipl)
{
DeviceState *dev_st;
if (ipl->iplb_valid) {
ipl->cssid = 0;
ipl->ssid = 0;
ipl->devno = ipl->iplb.devno;
goto out;
}
if (ipl->kernel) {
return 0;
}
dev_st = get_boot_device(0);
if (dev_st) {
VirtioCcwDevice *ccw_dev = (VirtioCcwDevice *) object_dynamic_cast(
OBJECT(qdev_get_parent_bus(dev_st)->parent),
TYPE_VIRTIO_CCW_DEVICE);
if (ccw_dev) {
ipl->cssid = ccw_dev->sch->cssid;
ipl->ssid = ccw_dev->sch->ssid;
ipl->devno = ccw_dev->sch->devno;
goto out;
}
}
return -1;
out:
return ipl->cssid << 24 | ipl->ssid << 16 | ipl->devno;
}
int s390_ipl_update_diag308(IplParameterBlock *iplb)
{
S390IPLState *ipl;
ipl = S390_IPL(object_resolve_path(TYPE_S390_IPL, NULL));
if (ipl) {
ipl->iplb = *iplb;
ipl->iplb_valid = true;
return 0;
}
return -1;
}
IplParameterBlock *s390_ipl_get_iplb(void)
{
S390IPLState *ipl;
ipl = S390_IPL(object_resolve_path(TYPE_S390_IPL, NULL));
if (!ipl || !ipl->iplb_valid) {
return NULL;
}
return &ipl->iplb;
}
void s390_reipl_request(void)
{
S390IPLState *ipl;
ipl = S390_IPL(object_resolve_path(TYPE_S390_IPL, NULL));
ipl->reipl_requested = true;
qemu_system_reset_request();
}
static void s390_ipl_reset(DeviceState *dev)
{
S390IPLState *ipl = S390_IPL(dev);
@ -160,21 +279,14 @@ static void s390_ipl_reset(DeviceState *dev)
env->psw.addr = ipl->start_addr;
env->psw.mask = IPL_PSW_MASK;
if (!ipl->kernel) {
/* Tell firmware, if there is a preferred boot device */
env->regs[7] = -1;
DeviceState *dev_st = get_boot_device(0);
if (dev_st) {
VirtioCcwDevice *ccw_dev = (VirtioCcwDevice *) object_dynamic_cast(
OBJECT(qdev_get_parent_bus(dev_st)->parent),
TYPE_VIRTIO_CCW_DEVICE);
if (!ipl->reipl_requested) {
ipl->iplb_valid = false;
}
ipl->reipl_requested = false;
if (ccw_dev) {
env->regs[7] = ccw_dev->sch->cssid << 24 |
ccw_dev->sch->ssid << 16 |
ccw_dev->sch->devno;
}
}
if (!ipl->kernel || ipl->iplb_valid) {
env->psw.addr = ipl->bios_start_addr;
env->regs[7] = s390_update_iplstate(env, ipl);
}
s390_cpu_set_state(CPU_STATE_OPERATING, cpu);
@ -188,6 +300,7 @@ static void s390_ipl_class_init(ObjectClass *klass, void *data)
k->init = s390_ipl_init;
dc->props = s390_ipl_properties;
dc->reset = s390_ipl_reset;
dc->vmsd = &vmstate_ipl;
}
static const TypeInfo s390_ipl_info = {

hw/s390x/ipl.h (new file, 25 lines)

@ -0,0 +1,25 @@
/*
* s390 IPL device
*
* Copyright 2015 IBM Corp.
* Author(s): Zhang Fan <bjfanzh@cn.ibm.com>
*
* This work is licensed under the terms of the GNU GPL, version 2 or (at
* your option) any later version. See the COPYING file in the top-level
* directory.
*/
#ifndef HW_S390_IPL_H
#define HW_S390_IPL_H
typedef struct IplParameterBlock {
uint8_t reserved1[110];
uint16_t devno;
uint8_t reserved2[88];
} IplParameterBlock;
int s390_ipl_update_diag308(IplParameterBlock *iplb);
IplParameterBlock *s390_ipl_get_iplb(void);
void s390_reipl_request(void);
#endif


@ -155,7 +155,9 @@ int clp_service_call(S390CPU *cpu, uint8_t r2)
return 0;
}
cpu_physical_memory_read(env->regs[r2], buffer, sizeof(*reqh));
if (s390_cpu_virt_mem_read(cpu, env->regs[r2], buffer, sizeof(*reqh))) {
return 0;
}
reqh = (ClpReqHdr *)buffer;
req_len = lduw_p(&reqh->len);
if (req_len < 16 || req_len > 8184 || (req_len % 8 != 0)) {
@ -163,7 +165,10 @@ int clp_service_call(S390CPU *cpu, uint8_t r2)
return 0;
}
cpu_physical_memory_read(env->regs[r2], buffer, req_len + sizeof(*resh));
if (s390_cpu_virt_mem_read(cpu, env->regs[r2], buffer,
req_len + sizeof(*resh))) {
return 0;
}
resh = (ClpRspHdr *)(buffer + req_len);
res_len = lduw_p(&resh->len);
if (res_len < 8 || res_len > 8176 || (res_len % 8 != 0)) {
@ -175,7 +180,10 @@ int clp_service_call(S390CPU *cpu, uint8_t r2)
return 0;
}
cpu_physical_memory_read(env->regs[r2], buffer, req_len + res_len);
if (s390_cpu_virt_mem_read(cpu, env->regs[r2], buffer,
req_len + res_len)) {
return 0;
}
if (req_len != 32) {
stw_p(&resh->rsp, CLP_RC_LEN);
@ -269,7 +277,10 @@ int clp_service_call(S390CPU *cpu, uint8_t r2)
}
out:
cpu_physical_memory_write(env->regs[r2], buffer, req_len + res_len);
if (s390_cpu_virt_mem_write(cpu, env->regs[r2], buffer,
req_len + res_len)) {
return 0;
}
setcc(cpu, cc);
return 0;
}
@ -539,10 +550,10 @@ int pcistb_service_call(S390CPU *cpu, uint8_t r1, uint8_t r3, uint64_t gaddr)
S390PCIBusDevice *pbdev;
MemoryRegion *mr;
int i;
uint64_t val;
uint32_t fh;
uint8_t pcias;
uint8_t len;
uint8_t buffer[128];
if (env->psw.mask & PSW_MASK_PSTATE) {
program_interrupt(env, PGM_PRIVILEGED, 6);
@ -590,9 +601,12 @@ int pcistb_service_call(S390CPU *cpu, uint8_t r1, uint8_t r3, uint64_t gaddr)
return 0;
}
if (s390_cpu_virt_mem_read(cpu, gaddr, buffer, len)) {
return 0;
}
for (i = 0; i < len / 8; i++) {
val = ldq_phys(&address_space_memory, gaddr + i * 8);
io_mem_write(mr, env->regs[r3] + i * 8, val, 8);
io_mem_write(mr, env->regs[r3] + i * 8, ldq_p(buffer + i * 8), 8);
}
setcc(cpu, ZPCI_PCI_LS_OK);
@ -709,7 +723,9 @@ int mpcifc_service_call(S390CPU *cpu, uint8_t r1, uint64_t fiba)
return 0;
}
cpu_physical_memory_read(fiba, (uint8_t *)&fib, sizeof(fib));
if (s390_cpu_virt_mem_read(cpu, fiba, (uint8_t *)&fib, sizeof(fib))) {
return 0;
}
switch (oc) {
case ZPCI_MOD_FC_REG_INT:
@ -809,7 +825,10 @@ int stpcifc_service_call(S390CPU *cpu, uint8_t r1, uint64_t fiba)
fib.fc |= 0x10;
}
cpu_physical_memory_write(fiba, (uint8_t *)&fib, sizeof(fib));
if (s390_cpu_virt_mem_write(cpu, fiba, (uint8_t *)&fib, sizeof(fib))) {
return 0;
}
setcc(cpu, cc);
return 0;
}


@ -126,7 +126,7 @@ static void ccw_init(MachineState *machine)
css_bus = virtual_css_bus_init();
s390_sclp_init();
s390_init_ipl_dev(machine->kernel_filename, machine->kernel_cmdline,
machine->initrd_filename, "s390-ccw.img");
machine->initrd_filename, "s390-ccw.img", true);
s390_flic_init();
dev = qdev_create(NULL, TYPE_S390_PCI_HOST_BRIDGE);


@ -128,7 +128,8 @@ static void s390_virtio_register_hcalls(void)
void s390_init_ipl_dev(const char *kernel_filename,
const char *kernel_cmdline,
const char *initrd_filename,
const char *firmware)
const char *firmware,
bool enforce_bios)
{
DeviceState *dev;
@ -141,6 +142,9 @@ void s390_init_ipl_dev(const char *kernel_filename,
}
qdev_prop_set_string(dev, "cmdline", kernel_cmdline);
qdev_prop_set_string(dev, "firmware", firmware);
qdev_prop_set_bit(dev, "enforce_bios", enforce_bios);
object_property_add_child(qdev_get_machine(), "s390-ipl",
OBJECT(dev), NULL);
qdev_init_nofail(dev);
}
@ -221,7 +225,7 @@ static void s390_init(MachineState *machine)
s390_bus = s390_virtio_bus_init(&my_ram_size);
s390_sclp_init();
s390_init_ipl_dev(machine->kernel_filename, machine->kernel_cmdline,
machine->initrd_filename, ZIPL_FILENAME);
machine->initrd_filename, ZIPL_FILENAME, false);
s390_flic_init();
/* register hypercalls */


@ -26,7 +26,8 @@ void s390_init_cpus(const char *cpu_model, uint8_t *storage_keys);
void s390_init_ipl_dev(const char *kernel_filename,
const char *kernel_cmdline,
const char *initrd_filename,
const char *firmware);
const char *firmware,
bool enforce_bios);
void s390_create_virtio_net(BusState *bus, const char *name);
void s390_nmi(NMIState *n, int cpu_index, Error **errp);
#endif


@ -1,5 +1,5 @@
obj-y += translate.o helper.o cpu.o interrupt.o
obj-y += int_helper.o fpu_helper.o cc_helper.o mem_helper.o misc_helper.o
obj-y += gdbstub.o
obj-$(CONFIG_SOFTMMU) += machine.o ioinst.o arch_dump.o
obj-$(CONFIG_SOFTMMU) += machine.o ioinst.o arch_dump.o mmu_helper.o
obj-$(CONFIG_KVM) += kvm.o


@ -287,6 +287,7 @@ typedef struct CPUS390XState {
#define FLAG_MASK_32 0x00001000
/* Control register 0 bits */
#define CR0_LOWPROT 0x0000000010000000ULL
#define CR0_EDAT 0x0000000000800000ULL
static inline int cpu_mmu_index (CPUS390XState *env)
@ -331,6 +332,7 @@ static inline int get_ilen(uint8_t opc)
to re-compute the length by examining the insn in memory. */
#define ILEN_LATER 0x20
#define ILEN_LATER_INC 0x21
void trigger_pgm_exception(CPUS390XState *env, uint32_t code, uint32_t ilen);
#endif
S390CPU *cpu_s390x_init(const char *cpu_model);
@ -348,10 +350,6 @@ int s390_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int rw,
#include "ioinst.h"
#ifndef CONFIG_USER_ONLY
void *s390_cpu_physical_memory_map(CPUS390XState *env, hwaddr addr, hwaddr *len,
int is_write);
void s390_cpu_physical_memory_unmap(CPUS390XState *env, void *addr, hwaddr len,
int is_write);
static inline hwaddr decode_basedisp_s(CPUS390XState *env, uint32_t ipb)
{
hwaddr addr = 0;
@ -396,6 +394,7 @@ void kvm_s390_service_interrupt(uint32_t parm);
void kvm_s390_vcpu_interrupt(S390CPU *cpu, struct kvm_s390_irq *irq);
void kvm_s390_floating_interrupt(struct kvm_s390_irq *irq);
int kvm_s390_inject_flic(struct kvm_s390_irq *irq);
void kvm_s390_access_exception(S390CPU *cpu, uint16_t code, uint64_t te_code);
#else
static inline void kvm_s390_virtio_irq(int config_change, uint64_t token)
{
@ -403,6 +402,10 @@ static inline void kvm_s390_virtio_irq(int config_change, uint64_t token)
static inline void kvm_s390_service_interrupt(uint32_t parm)
{
}
static inline void kvm_s390_access_exception(S390CPU *cpu, uint16_t code,
uint64_t te_code)
{
}
#endif
S390CPU *s390_cpu_addr2state(uint16_t cpu_addr);
unsigned int s390_cpu_halt(S390CPU *cpu);
@ -443,13 +446,15 @@ bool css_subch_visible(SubchDev *sch);
void css_conditional_io_interrupt(SubchDev *sch);
int css_do_stsch(SubchDev *sch, SCHIB *schib);
bool css_schid_final(int m, uint8_t cssid, uint8_t ssid, uint16_t schid);
int css_do_msch(SubchDev *sch, SCHIB *schib);
int css_do_msch(SubchDev *sch, const SCHIB *schib);
int css_do_xsch(SubchDev *sch);
int css_do_csch(SubchDev *sch);
int css_do_hsch(SubchDev *sch);
int css_do_ssch(SubchDev *sch, ORB *orb);
int css_do_tsch(SubchDev *sch, IRB *irb);
int css_do_tsch_get_irb(SubchDev *sch, IRB *irb, int *irb_len);
void css_do_tsch_update_subch(SubchDev *sch);
int css_do_stcrw(CRW *crw);
void css_undo_stcrw(CRW *crw);
int css_do_tpi(IOIntCode *int_code, int lowcore);
int css_collect_chp_desc(int m, uint8_t cssid, uint8_t f_chpid, uint8_t l_chpid,
int rfmt, void *buf);
@ -836,6 +841,8 @@ struct sysib_322 {
#define _ASCE_TABLE_LENGTH 0x03 /* region table length */
#define _REGION_ENTRY_ORIGIN ~0xfffULL /* region/segment table origin */
#define _REGION_ENTRY_RO 0x200 /* region/segment protection bit */
#define _REGION_ENTRY_TF 0xc0 /* region/segment table offset */
#define _REGION_ENTRY_INV 0x20 /* invalid region table entry */
#define _REGION_ENTRY_TYPE_MASK 0x0c /* region/segment table type mask */
#define _REGION_ENTRY_TYPE_R1 0x0c /* region first table type */
@ -850,6 +857,7 @@ struct sysib_322 {
#define _PAGE_RO 0x200 /* HW read-only bit */
#define _PAGE_INVALID 0x400 /* HW invalid bit */
#define _PAGE_RES0 0x800 /* bit must be zero */
#define SK_C (0x1 << 1)
#define SK_R (0x1 << 2)
@ -883,11 +891,21 @@ struct sysib_322 {
void load_psw(CPUS390XState *env, uint64_t mask, uint64_t addr);
int mmu_translate(CPUS390XState *env, target_ulong vaddr, int rw, uint64_t asc,
target_ulong *raddr, int *flags);
target_ulong *raddr, int *flags, bool exc);
int sclp_service_call(CPUS390XState *env, uint64_t sccb, uint32_t code);
uint32_t calc_cc(CPUS390XState *env, uint32_t cc_op, uint64_t src, uint64_t dst,
uint64_t vr);
int s390_cpu_virt_mem_rw(S390CPU *cpu, vaddr laddr, void *hostbuf, int len,
bool is_write);
#define s390_cpu_virt_mem_read(cpu, laddr, dest, len) \
s390_cpu_virt_mem_rw(cpu, laddr, dest, len, false)
#define s390_cpu_virt_mem_write(cpu, laddr, dest, len) \
s390_cpu_virt_mem_rw(cpu, laddr, dest, len, true)
#define s390_cpu_virt_mem_check_write(cpu, laddr, len) \
s390_cpu_virt_mem_rw(cpu, laddr, NULL, len, true)
/* The value of the TOD clock for 1.1.1970. */
#define TOD_UNIX_EPOCH 0x7d91048bca000000ULL


@ -27,7 +27,6 @@
#endif
//#define DEBUG_S390
//#define DEBUG_S390_PTE
//#define DEBUG_S390_STDOUT
#ifdef DEBUG_S390
@ -44,12 +43,6 @@
do { } while (0)
#endif
#ifdef DEBUG_S390_PTE
#define PTE_DPRINTF DPRINTF
#else
#define PTE_DPRINTF(fmt, ...) \
do { } while (0)
#endif
#ifndef CONFIG_USER_ONLY
void s390x_tod_timer(void *opaque)
@ -105,8 +98,7 @@ int s390_cpu_handle_mmu_fault(CPUState *cs, vaddr address,
#else /* !CONFIG_USER_ONLY */
/* Ensure to exit the TB after this call! */
static void trigger_pgm_exception(CPUS390XState *env, uint32_t code,
uint32_t ilen)
void trigger_pgm_exception(CPUS390XState *env, uint32_t code, uint32_t ilen)
{
CPUState *cs = CPU(s390_env_get_cpu(env));
@ -115,319 +107,6 @@ static void trigger_pgm_exception(CPUS390XState *env, uint32_t code,
env->int_pgm_ilen = ilen;
}
static int trans_bits(CPUS390XState *env, uint64_t mode)
{
S390CPU *cpu = s390_env_get_cpu(env);
int bits = 0;
switch (mode) {
case PSW_ASC_PRIMARY:
bits = 1;
break;
case PSW_ASC_SECONDARY:
bits = 2;
break;
case PSW_ASC_HOME:
bits = 3;
break;
default:
cpu_abort(CPU(cpu), "unknown asc mode\n");
break;
}
return bits;
}
static void trigger_prot_fault(CPUS390XState *env, target_ulong vaddr,
uint64_t mode)
{
CPUState *cs = CPU(s390_env_get_cpu(env));
int ilen = ILEN_LATER_INC;
int bits = trans_bits(env, mode) | 4;
DPRINTF("%s: vaddr=%016" PRIx64 " bits=%d\n", __func__, vaddr, bits);
stq_phys(cs->as,
env->psa + offsetof(LowCore, trans_exc_code), vaddr | bits);
trigger_pgm_exception(env, PGM_PROTECTION, ilen);
}
static void trigger_page_fault(CPUS390XState *env, target_ulong vaddr,
uint32_t type, uint64_t asc, int rw)
{
CPUState *cs = CPU(s390_env_get_cpu(env));
int ilen = ILEN_LATER;
int bits = trans_bits(env, asc);
/* Code accesses have an undefined ilc. */
if (rw == 2) {
ilen = 2;
}
DPRINTF("%s: vaddr=%016" PRIx64 " bits=%d\n", __func__, vaddr, bits);
stq_phys(cs->as,
env->psa + offsetof(LowCore, trans_exc_code), vaddr | bits);
trigger_pgm_exception(env, type, ilen);
}
/**
* Translate real address to absolute (= physical)
* address by taking care of the prefix mapping.
*/
static target_ulong mmu_real2abs(CPUS390XState *env, target_ulong raddr)
{
if (raddr < 0x2000) {
return raddr + env->psa; /* Map the lowcore. */
} else if (raddr >= env->psa && raddr < env->psa + 0x2000) {
return raddr - env->psa; /* Map the 0 page. */
}
return raddr;
}
/* Decode page table entry (normal 4KB page) */
static int mmu_translate_pte(CPUS390XState *env, target_ulong vaddr,
uint64_t asc, uint64_t asce,
target_ulong *raddr, int *flags, int rw)
{
if (asce & _PAGE_INVALID) {
DPRINTF("%s: PTE=0x%" PRIx64 " invalid\n", __func__, asce);
trigger_page_fault(env, vaddr, PGM_PAGE_TRANS, asc, rw);
return -1;
}
if (asce & _PAGE_RO) {
*flags &= ~PAGE_WRITE;
}
*raddr = asce & _ASCE_ORIGIN;
PTE_DPRINTF("%s: PTE=0x%" PRIx64 "\n", __func__, asce);
return 0;
}
/* Decode EDAT1 segment frame absolute address (1MB page) */
static int mmu_translate_sfaa(CPUS390XState *env, target_ulong vaddr,
uint64_t asc, uint64_t asce, target_ulong *raddr,
int *flags, int rw)
{
if (asce & _SEGMENT_ENTRY_INV) {
DPRINTF("%s: SEG=0x%" PRIx64 " invalid\n", __func__, asce);
trigger_page_fault(env, vaddr, PGM_SEGMENT_TRANS, asc, rw);
return -1;
}
if (asce & _SEGMENT_ENTRY_RO) {
*flags &= ~PAGE_WRITE;
}
*raddr = (asce & 0xfffffffffff00000ULL) | (vaddr & 0xfffff);
PTE_DPRINTF("%s: SEG=0x%" PRIx64 "\n", __func__, asce);
return 0;
}
static int mmu_translate_asce(CPUS390XState *env, target_ulong vaddr,
uint64_t asc, uint64_t asce, int level,
target_ulong *raddr, int *flags, int rw)
{
CPUState *cs = CPU(s390_env_get_cpu(env));
uint64_t offs = 0;
uint64_t origin;
uint64_t new_asce;
PTE_DPRINTF("%s: 0x%" PRIx64 "\n", __func__, asce);
if (((level != _ASCE_TYPE_SEGMENT) && (asce & _REGION_ENTRY_INV)) ||
((level == _ASCE_TYPE_SEGMENT) && (asce & _SEGMENT_ENTRY_INV))) {
/* XXX different regions have different faults */
DPRINTF("%s: invalid region\n", __func__);
trigger_page_fault(env, vaddr, PGM_SEGMENT_TRANS, asc, rw);
return -1;
}
if ((level <= _ASCE_TYPE_MASK) && ((asce & _ASCE_TYPE_MASK) != level)) {
trigger_page_fault(env, vaddr, PGM_TRANS_SPEC, asc, rw);
return -1;
}
if (asce & _ASCE_REAL_SPACE) {
/* direct mapping */
*raddr = vaddr;
return 0;
}
origin = asce & _ASCE_ORIGIN;
switch (level) {
case _ASCE_TYPE_REGION1 + 4:
offs = (vaddr >> 50) & 0x3ff8;
break;
case _ASCE_TYPE_REGION1:
offs = (vaddr >> 39) & 0x3ff8;
break;
case _ASCE_TYPE_REGION2:
offs = (vaddr >> 28) & 0x3ff8;
break;
case _ASCE_TYPE_REGION3:
offs = (vaddr >> 17) & 0x3ff8;
break;
case _ASCE_TYPE_SEGMENT:
offs = (vaddr >> 9) & 0x07f8;
origin = asce & _SEGMENT_ENTRY_ORIGIN;
break;
}
/* XXX region protection flags */
/* *flags &= ~PAGE_WRITE */
new_asce = ldq_phys(cs->as, origin + offs);
PTE_DPRINTF("%s: 0x%" PRIx64 " + 0x%" PRIx64 " => 0x%016" PRIx64 "\n",
__func__, origin, offs, new_asce);
if (level == _ASCE_TYPE_SEGMENT) {
/* 4KB page */
return mmu_translate_pte(env, vaddr, asc, new_asce, raddr, flags, rw);
} else if (level - 4 == _ASCE_TYPE_SEGMENT &&
(new_asce & _SEGMENT_ENTRY_FC) && (env->cregs[0] & CR0_EDAT)) {
/* 1MB page */
return mmu_translate_sfaa(env, vaddr, asc, new_asce, raddr, flags, rw);
} else {
/* yet another region */
return mmu_translate_asce(env, vaddr, asc, new_asce, level - 4, raddr,
flags, rw);
}
}
static int mmu_translate_asc(CPUS390XState *env, target_ulong vaddr,
uint64_t asc, target_ulong *raddr, int *flags,
int rw)
{
uint64_t asce = 0;
int level, new_level;
int r;
switch (asc) {
case PSW_ASC_PRIMARY:
PTE_DPRINTF("%s: asc=primary\n", __func__);
asce = env->cregs[1];
break;
case PSW_ASC_SECONDARY:
PTE_DPRINTF("%s: asc=secondary\n", __func__);
asce = env->cregs[7];
break;
case PSW_ASC_HOME:
PTE_DPRINTF("%s: asc=home\n", __func__);
asce = env->cregs[13];
break;
}
switch (asce & _ASCE_TYPE_MASK) {
case _ASCE_TYPE_REGION1:
break;
case _ASCE_TYPE_REGION2:
if (vaddr & 0xffe0000000000000ULL) {
DPRINTF("%s: vaddr doesn't fit 0x%16" PRIx64
" 0xffe0000000000000ULL\n", __func__, vaddr);
trigger_page_fault(env, vaddr, PGM_TRANS_SPEC, asc, rw);
return -1;
}
break;
case _ASCE_TYPE_REGION3:
if (vaddr & 0xfffffc0000000000ULL) {
DPRINTF("%s: vaddr doesn't fit 0x%16" PRIx64
" 0xfffffc0000000000ULL\n", __func__, vaddr);
trigger_page_fault(env, vaddr, PGM_TRANS_SPEC, asc, rw);
return -1;
}
break;
case _ASCE_TYPE_SEGMENT:
if (vaddr & 0xffffffff80000000ULL) {
DPRINTF("%s: vaddr doesn't fit 0x%16" PRIx64
" 0xffffffff80000000ULL\n", __func__, vaddr);
trigger_page_fault(env, vaddr, PGM_TRANS_SPEC, asc, rw);
return -1;
}
break;
}
/* fake level above current */
level = asce & _ASCE_TYPE_MASK;
new_level = level + 4;
asce = (asce & ~_ASCE_TYPE_MASK) | (new_level & _ASCE_TYPE_MASK);
r = mmu_translate_asce(env, vaddr, asc, asce, new_level, raddr, flags, rw);
if ((rw == 1) && !(*flags & PAGE_WRITE)) {
trigger_prot_fault(env, vaddr, asc);
return -1;
}
return r;
}
int mmu_translate(CPUS390XState *env, target_ulong vaddr, int rw, uint64_t asc,
target_ulong *raddr, int *flags)
{
int r = -1;
uint8_t *sk;
*flags = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
vaddr &= TARGET_PAGE_MASK;
if (!(env->psw.mask & PSW_MASK_DAT)) {
*raddr = vaddr;
r = 0;
goto out;
}
switch (asc) {
case PSW_ASC_PRIMARY:
case PSW_ASC_HOME:
r = mmu_translate_asc(env, vaddr, asc, raddr, flags, rw);
break;
case PSW_ASC_SECONDARY:
/*
* Instruction: Primary
* Data: Secondary
*/
if (rw == 2) {
r = mmu_translate_asc(env, vaddr, PSW_ASC_PRIMARY, raddr, flags,
rw);
*flags &= ~(PAGE_READ | PAGE_WRITE);
} else {
r = mmu_translate_asc(env, vaddr, PSW_ASC_SECONDARY, raddr, flags,
rw);
*flags &= ~(PAGE_EXEC);
}
break;
case PSW_ASC_ACCREG:
default:
hw_error("guest switched to unknown asc mode\n");
break;
}
out:
/* Convert real address -> absolute address */
*raddr = mmu_real2abs(env, *raddr);
if (*raddr <= ram_size) {
sk = &env->storage_keys[*raddr / TARGET_PAGE_SIZE];
if (*flags & PAGE_READ) {
*sk |= SK_R;
}
if (*flags & PAGE_WRITE) {
*sk |= SK_C;
}
}
return r;
}
int s390_cpu_handle_mmu_fault(CPUState *cs, vaddr orig_vaddr,
int rw, int mmu_idx)
{
@ -448,7 +127,7 @@ int s390_cpu_handle_mmu_fault(CPUState *cs, vaddr orig_vaddr,
vaddr &= 0x7fffffff;
}
if (mmu_translate(env, vaddr, rw, asc, &raddr, &prot)) {
if (mmu_translate(env, vaddr, rw, asc, &raddr, &prot, true)) {
/* Translation ended in exception */
return 1;
}
@ -475,8 +154,7 @@ hwaddr s390_cpu_get_phys_page_debug(CPUState *cs, vaddr vaddr)
S390CPU *cpu = S390_CPU(cs);
CPUS390XState *env = &cpu->env;
target_ulong raddr;
int prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
int old_exc = cs->exception_index;
int prot;
uint64_t asc = env->psw.mask & PSW_MASK_ASC;
/* 31-Bit mode */
@ -484,8 +162,7 @@ hwaddr s390_cpu_get_phys_page_debug(CPUState *cs, vaddr vaddr)
vaddr &= 0x7fffffff;
}
mmu_translate(env, vaddr, 2, asc, &raddr, &prot);
cs->exception_index = old_exc;
mmu_translate(env, vaddr, 2, asc, &raddr, &prot, false);
return raddr;
}
@ -552,31 +229,6 @@ static void cpu_unmap_lowcore(LowCore *lowcore)
cpu_physical_memory_unmap(lowcore, sizeof(LowCore), 1, sizeof(LowCore));
}
void *s390_cpu_physical_memory_map(CPUS390XState *env, hwaddr addr, hwaddr *len,
int is_write)
{
hwaddr start = addr;
/* Mind the prefix area. */
if (addr < 8192) {
/* Map the lowcore. */
start += env->psa;
*len = MIN(*len, 8192 - addr);
} else if ((addr >= env->psa) && (addr < env->psa + 8192)) {
/* Map the 0 page. */
start -= env->psa;
*len = MIN(*len, 8192 - start);
}
return cpu_physical_memory_map(start, len, is_write);
}
void s390_cpu_physical_memory_unmap(CPUS390XState *env, void *addr, hwaddr len,
int is_write)
{
cpu_physical_memory_unmap(addr, len, is_write, len);
}
static void do_svc_interrupt(CPUS390XState *env)
{
uint64_t mask, addr;


@ -1,7 +1,7 @@
/*
* I/O instructions for S/390
*
* Copyright 2012 IBM Corp.
* Copyright 2012, 2015 IBM Corp.
* Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
*
* This work is licensed under the terms of the GNU GPL, version 2 or (at
@ -144,11 +144,10 @@ void ioinst_handle_msch(S390CPU *cpu, uint64_t reg1, uint32_t ipb)
{
int cssid, ssid, schid, m;
SubchDev *sch;
SCHIB *schib;
SCHIB schib;
uint64_t addr;
int ret = -ENODEV;
int cc;
hwaddr len = sizeof(*schib);
CPUS390XState *env = &cpu->env;
addr = decode_basedisp_s(env, ipb);
@ -156,20 +155,18 @@ void ioinst_handle_msch(S390CPU *cpu, uint64_t reg1, uint32_t ipb)
program_interrupt(env, PGM_SPECIFICATION, 2);
return;
}
schib = s390_cpu_physical_memory_map(env, addr, &len, 0);
if (!schib || len != sizeof(*schib)) {
program_interrupt(env, PGM_ADDRESSING, 2);
goto out;
if (s390_cpu_virt_mem_read(cpu, addr, &schib, sizeof(schib))) {
return;
}
if (ioinst_disassemble_sch_ident(reg1, &m, &cssid, &ssid, &schid) ||
!ioinst_schib_valid(schib)) {
!ioinst_schib_valid(&schib)) {
program_interrupt(env, PGM_OPERAND, 2);
goto out;
return;
}
trace_ioinst_sch_id("msch", cssid, ssid, schid);
sch = css_find_subch(m, cssid, ssid, schid);
if (sch && css_subch_visible(sch)) {
ret = css_do_msch(sch, schib);
ret = css_do_msch(sch, &schib);
}
switch (ret) {
case -ENODEV:
@ -186,9 +183,6 @@ void ioinst_handle_msch(S390CPU *cpu, uint64_t reg1, uint32_t ipb)
break;
}
setcc(cpu, cc);
out:
s390_cpu_physical_memory_unmap(env, schib, len, 0);
}
static void copy_orb_from_guest(ORB *dest, const ORB *src)
@ -216,11 +210,10 @@ void ioinst_handle_ssch(S390CPU *cpu, uint64_t reg1, uint32_t ipb)
{
int cssid, ssid, schid, m;
SubchDev *sch;
ORB *orig_orb, orb;
ORB orig_orb, orb;
uint64_t addr;
int ret = -ENODEV;
int cc;
hwaddr len = sizeof(*orig_orb);
CPUS390XState *env = &cpu->env;
addr = decode_basedisp_s(env, ipb);
@ -228,16 +221,14 @@ void ioinst_handle_ssch(S390CPU *cpu, uint64_t reg1, uint32_t ipb)
program_interrupt(env, PGM_SPECIFICATION, 2);
return;
}
orig_orb = s390_cpu_physical_memory_map(env, addr, &len, 0);
if (!orig_orb || len != sizeof(*orig_orb)) {
program_interrupt(env, PGM_ADDRESSING, 2);
goto out;
if (s390_cpu_virt_mem_read(cpu, addr, &orig_orb, sizeof(orb))) {
return;
}
copy_orb_from_guest(&orb, orig_orb);
copy_orb_from_guest(&orb, &orig_orb);
if (ioinst_disassemble_sch_ident(reg1, &m, &cssid, &ssid, &schid) ||
!ioinst_orb_valid(&orb)) {
program_interrupt(env, PGM_OPERAND, 2);
goto out;
return;
}
trace_ioinst_sch_id("ssch", cssid, ssid, schid);
sch = css_find_subch(m, cssid, ssid, schid);
@ -259,17 +250,13 @@ void ioinst_handle_ssch(S390CPU *cpu, uint64_t reg1, uint32_t ipb)
break;
}
setcc(cpu, cc);
out:
s390_cpu_physical_memory_unmap(env, orig_orb, len, 0);
}
void ioinst_handle_stcrw(S390CPU *cpu, uint32_t ipb)
{
CRW *crw;
CRW crw;
uint64_t addr;
int cc;
hwaddr len = sizeof(*crw);
CPUS390XState *env = &cpu->env;
addr = decode_basedisp_s(env, ipb);
@ -277,17 +264,16 @@ void ioinst_handle_stcrw(S390CPU *cpu, uint32_t ipb)
program_interrupt(env, PGM_SPECIFICATION, 2);
return;
}
crw = s390_cpu_physical_memory_map(env, addr, &len, 1);
if (!crw || len != sizeof(*crw)) {
program_interrupt(env, PGM_ADDRESSING, 2);
goto out;
}
cc = css_do_stcrw(crw);
/* 0 - crw stored, 1 - zeroes stored */
setcc(cpu, cc);
out:
s390_cpu_physical_memory_unmap(env, crw, len, 1);
cc = css_do_stcrw(&crw);
/* 0 - crw stored, 1 - zeroes stored */
if (s390_cpu_virt_mem_write(cpu, addr, &crw, sizeof(crw)) == 0) {
setcc(cpu, cc);
} else if (cc == 0) {
/* Write failed: requeue CRW since STCRW is a suppressing instruction */
css_undo_stcrw(&crw);
}
}
void ioinst_handle_stsch(S390CPU *cpu, uint64_t reg1, uint32_t ipb)
@ -296,8 +282,7 @@ void ioinst_handle_stsch(S390CPU *cpu, uint64_t reg1, uint32_t ipb)
SubchDev *sch;
uint64_t addr;
int cc;
SCHIB *schib;
hwaddr len = sizeof(*schib);
SCHIB schib;
CPUS390XState *env = &cpu->env;
addr = decode_basedisp_s(env, ipb);
@ -305,21 +290,23 @@ void ioinst_handle_stsch(S390CPU *cpu, uint64_t reg1, uint32_t ipb)
program_interrupt(env, PGM_SPECIFICATION, 2);
return;
}
schib = s390_cpu_physical_memory_map(env, addr, &len, 1);
if (!schib || len != sizeof(*schib)) {
program_interrupt(env, PGM_ADDRESSING, 2);
goto out;
}
if (ioinst_disassemble_sch_ident(reg1, &m, &cssid, &ssid, &schid)) {
program_interrupt(env, PGM_OPERAND, 2);
goto out;
/*
* As operand exceptions have a lower priority than access exceptions,
* we check whether the memory area is writeable (injecting the
* access exception if it is not) first.
*/
if (!s390_cpu_virt_mem_check_write(cpu, addr, sizeof(schib))) {
program_interrupt(env, PGM_OPERAND, 2);
}
return;
}
trace_ioinst_sch_id("stsch", cssid, ssid, schid);
sch = css_find_subch(m, cssid, ssid, schid);
if (sch) {
if (css_subch_visible(sch)) {
css_do_stsch(sch, schib);
css_do_stsch(sch, &schib);
cc = 0;
} else {
/* Indicate no more subchannels in this css/ss */
@ -330,25 +317,31 @@ void ioinst_handle_stsch(S390CPU *cpu, uint64_t reg1, uint32_t ipb)
cc = 3; /* No more subchannels in this css/ss */
} else {
/* Store an empty schib. */
memset(schib, 0, sizeof(*schib));
memset(&schib, 0, sizeof(schib));
cc = 0;
}
}
if (cc != 3) {
if (s390_cpu_virt_mem_write(cpu, addr, &schib, sizeof(schib)) != 0) {
return;
}
} else {
/* Access exceptions have a higher priority than cc3 */
if (s390_cpu_virt_mem_check_write(cpu, addr, sizeof(schib)) != 0) {
return;
}
}
setcc(cpu, cc);
out:
s390_cpu_physical_memory_unmap(env, schib, len, 1);
}
int ioinst_handle_tsch(CPUS390XState *env, uint64_t reg1, uint32_t ipb)
int ioinst_handle_tsch(S390CPU *cpu, uint64_t reg1, uint32_t ipb)
{
CPUS390XState *env = &cpu->env;
int cssid, ssid, schid, m;
SubchDev *sch;
IRB *irb;
IRB irb;
uint64_t addr;
int ret = -ENODEV;
int cc;
hwaddr len = sizeof(*irb);
int cc, irb_len;
if (ioinst_disassemble_sch_ident(reg1, &m, &cssid, &ssid, &schid)) {
program_interrupt(env, PGM_OPERAND, 2);
@ -360,23 +353,29 @@ int ioinst_handle_tsch(CPUS390XState *env, uint64_t reg1, uint32_t ipb)
program_interrupt(env, PGM_SPECIFICATION, 2);
return -EIO;
}
irb = s390_cpu_physical_memory_map(env, addr, &len, 1);
if (!irb || len != sizeof(*irb)) {
program_interrupt(env, PGM_ADDRESSING, 2);
cc = -EIO;
goto out;
}
sch = css_find_subch(m, cssid, ssid, schid);
if (sch && css_subch_visible(sch)) {
ret = css_do_tsch(sch, irb);
/* 0 - status pending, 1 - not status pending */
cc = ret;
cc = css_do_tsch_get_irb(sch, &irb, &irb_len);
} else {
cc = 3;
}
out:
s390_cpu_physical_memory_unmap(env, irb, sizeof(*irb), 1);
return cc;
/* 0 - status pending, 1 - not status pending, 3 - not operational */
if (cc != 3) {
if (s390_cpu_virt_mem_write(cpu, addr, &irb, irb_len) != 0) {
return -EFAULT;
}
css_do_tsch_update_subch(sch);
} else {
irb_len = sizeof(irb) - sizeof(irb.emw);
/* Access exceptions have a higher priority than cc3 */
if (s390_cpu_virt_mem_check_write(cpu, addr, irb_len) != 0) {
return -EFAULT;
}
}
setcc(cpu, cc);
return 0;
}
typedef struct ChscReq {
@ -630,8 +629,8 @@ void ioinst_handle_chsc(S390CPU *cpu, uint32_t ipb)
int reg;
uint16_t len;
uint16_t command;
hwaddr map_size = TARGET_PAGE_SIZE;
CPUS390XState *env = &cpu->env;
uint8_t buf[TARGET_PAGE_SIZE];
trace_ioinst("chsc");
reg = (ipb >> 20) & 0x00f;
@ -641,16 +640,20 @@ void ioinst_handle_chsc(S390CPU *cpu, uint32_t ipb)
program_interrupt(env, PGM_SPECIFICATION, 2);
return;
}
req = s390_cpu_physical_memory_map(env, addr, &map_size, 1);
if (!req || map_size != TARGET_PAGE_SIZE) {
program_interrupt(env, PGM_ADDRESSING, 2);
goto out;
/*
* Reading sizeof(ChscReq) bytes is currently enough for all of our
* present CHSC sub-handlers ... if we ever need more, we should take
* care of req->len here first.
*/
if (s390_cpu_virt_mem_read(cpu, addr, buf, sizeof(ChscReq))) {
return;
}
req = (ChscReq *)buf;
len = be16_to_cpu(req->len);
/* Length field valid? */
if ((len < 16) || (len > 4088) || (len & 7)) {
program_interrupt(env, PGM_OPERAND, 2);
goto out;
return;
}
memset((char *)req + len, 0, TARGET_PAGE_SIZE - len);
res = (void *)((char *)req + len);
@ -674,17 +677,18 @@ void ioinst_handle_chsc(S390CPU *cpu, uint32_t ipb)
break;
}
setcc(cpu, 0); /* Command execution complete */
out:
s390_cpu_physical_memory_unmap(env, req, map_size, 1);
if (!s390_cpu_virt_mem_write(cpu, addr + len, res, be16_to_cpu(res->len))) {
setcc(cpu, 0); /* Command execution complete */
}
}
int ioinst_handle_tpi(CPUS390XState *env, uint32_t ipb)
int ioinst_handle_tpi(S390CPU *cpu, uint32_t ipb)
{
CPUS390XState *env = &cpu->env;
uint64_t addr;
int lowcore;
IOIntCode *int_code;
hwaddr len, orig_len;
IOIntCode int_code;
hwaddr len;
int ret;
trace_ioinst("tpi");
@ -696,16 +700,10 @@ int ioinst_handle_tpi(CPUS390XState *env, uint32_t ipb)
lowcore = addr ? 0 : 1;
len = lowcore ? 8 /* two words */ : 12 /* three words */;
orig_len = len;
int_code = s390_cpu_physical_memory_map(env, addr, &len, 1);
if (!int_code || (len != orig_len)) {
program_interrupt(env, PGM_ADDRESSING, 2);
ret = -EIO;
goto out;
ret = css_do_tpi(&int_code, lowcore);
if (ret == 1) {
s390_cpu_virt_mem_write(cpu, lowcore ? 184 : addr, &int_code, len);
}
ret = css_do_tpi(int_code, lowcore);
out:
s390_cpu_physical_memory_unmap(env, int_code, len, 1);
return ret;
}


@ -234,9 +234,9 @@ void ioinst_handle_msch(S390CPU *cpu, uint64_t reg1, uint32_t ipb);
void ioinst_handle_ssch(S390CPU *cpu, uint64_t reg1, uint32_t ipb);
void ioinst_handle_stcrw(S390CPU *cpu, uint32_t ipb);
void ioinst_handle_stsch(S390CPU *cpu, uint64_t reg1, uint32_t ipb);
int ioinst_handle_tsch(CPUS390XState *env, uint64_t reg1, uint32_t ipb);
int ioinst_handle_tsch(S390CPU *cpu, uint64_t reg1, uint32_t ipb);
void ioinst_handle_chsc(S390CPU *cpu, uint32_t ipb);
int ioinst_handle_tpi(CPUS390XState *env, uint32_t ipb);
int ioinst_handle_tpi(S390CPU *cpu, uint32_t ipb);
void ioinst_handle_schm(S390CPU *cpu, uint64_t reg1, uint64_t reg2,
uint32_t ipb);
void ioinst_handle_rsch(S390CPU *cpu, uint64_t reg1);


@ -42,6 +42,7 @@
#include "qapi-event.h"
#include "hw/s390x/s390-pci-inst.h"
#include "hw/s390x/s390-pci-bus.h"
#include "hw/s390x/ipl.h"
/* #define DEBUG_KVM */
@ -756,6 +757,18 @@ static void enter_pgmcheck(S390CPU *cpu, uint16_t code)
kvm_s390_vcpu_interrupt(cpu, &irq);
}
void kvm_s390_access_exception(S390CPU *cpu, uint16_t code, uint64_t te_code)
{
struct kvm_s390_irq irq = {
.type = KVM_S390_PROGRAM_INT,
.u.pgm.code = code,
.u.pgm.trans_exc_code = te_code,
.u.pgm.exc_access_id = te_code & 3,
};
kvm_s390_vcpu_interrupt(cpu, &irq);
}
static int kvm_sclp_service_call(S390CPU *cpu, struct kvm_run *run,
uint16_t ipbh0)
{
@ -1325,19 +1338,14 @@ static int handle_intercept(S390CPU *cpu)
static int handle_tsch(S390CPU *cpu)
{
CPUS390XState *env = &cpu->env;
CPUState *cs = CPU(cpu);
struct kvm_run *run = cs->kvm_run;
int ret;
cpu_synchronize_state(cs);
ret = ioinst_handle_tsch(env, env->regs[1], run->s390_tsch.ipb);
if (ret >= 0) {
/* Success; set condition code. */
setcc(cpu, ret);
ret = 0;
} else if (ret < -1) {
ret = ioinst_handle_tsch(cpu, cpu->env.regs[1], run->s390_tsch.ipb);
if (ret < 0) {
/*
* Failure.
* If an I/O interrupt had been dequeued, we have to reinject it.
@ -1397,7 +1405,7 @@ int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
ret = handle_intercept(cpu);
break;
case KVM_EXIT_S390_RESET:
qemu_system_reset_request();
s390_reipl_request();
break;
case KVM_EXIT_S390_TSCH:
ret = handle_tsch(cpu);


@ -65,7 +65,7 @@ static void mvc_fast_memset(CPUS390XState *env, uint32_t l, uint64_t dest,
uint64_t asc = env->psw.mask & PSW_MASK_ASC;
int flags;
if (mmu_translate(env, dest, 1, asc, &dest_phys, &flags)) {
if (mmu_translate(env, dest, 1, asc, &dest_phys, &flags, true)) {
cpu_stb_data(env, dest, byte);
cpu_abort(CPU(cpu), "should never reach here");
}
@ -90,13 +90,13 @@ static void mvc_fast_memmove(CPUS390XState *env, uint32_t l, uint64_t dest,
uint64_t asc = env->psw.mask & PSW_MASK_ASC;
int flags;
if (mmu_translate(env, dest, 1, asc, &dest_phys, &flags)) {
if (mmu_translate(env, dest, 1, asc, &dest_phys, &flags, true)) {
cpu_stb_data(env, dest, 0);
cpu_abort(CPU(cpu), "should never reach here");
}
dest_phys |= dest & ~TARGET_PAGE_MASK;
if (mmu_translate(env, src, 0, asc, &src_phys, &flags)) {
if (mmu_translate(env, src, 0, asc, &src_phys, &flags, true)) {
cpu_ldub_data(env, src);
cpu_abort(CPU(cpu), "should never reach here");
}
@ -967,12 +967,12 @@ static uint32_t mvc_asc(CPUS390XState *env, int64_t l, uint64_t a1,
cc = 3;
}
if (mmu_translate(env, a1 & TARGET_PAGE_MASK, 1, mode1, &dest, &flags)) {
if (mmu_translate(env, a1, 1, mode1, &dest, &flags, true)) {
cpu_loop_exit(CPU(s390_env_get_cpu(env)));
}
dest |= a1 & ~TARGET_PAGE_MASK;
if (mmu_translate(env, a2 & TARGET_PAGE_MASK, 0, mode2, &src, &flags)) {
if (mmu_translate(env, a2, 0, mode2, &src, &flags, true)) {
cpu_loop_exit(CPU(s390_env_get_cpu(env)));
}
src |= a2 & ~TARGET_PAGE_MASK;
@ -1088,7 +1088,7 @@ uint64_t HELPER(lra)(CPUS390XState *env, uint64_t addr)
}
cs->exception_index = old_exc;
if (mmu_translate(env, addr, 0, asc, &ret, &flags)) {
if (mmu_translate(env, addr, 0, asc, &ret, &flags, true)) {
cc = 3;
}
if (cs->exception_index == EXCP_PGM) {


@ -25,6 +25,7 @@
#include <string.h>
#include "sysemu/kvm.h"
#include "qemu/timer.h"
#include "exec/address-spaces.h"
#ifdef CONFIG_KVM
#include <linux/kvm.h>
#endif
@ -34,6 +35,7 @@
#include "sysemu/cpus.h"
#include "sysemu/sysemu.h"
#include "hw/s390x/ebcdic.h"
#include "hw/s390x/ipl.h"
#endif
/* #define DEBUG_HELPER */
@ -151,12 +153,15 @@ static int load_normal_reset(S390CPU *cpu)
return 0;
}
#define DIAG_308_RC_OK 0x0001
#define DIAG_308_RC_NO_CONF 0x0102
#define DIAG_308_RC_INVALID 0x0402
void handle_diag_308(CPUS390XState *env, uint64_t r1, uint64_t r3)
{
uint64_t addr = env->regs[r1];
uint64_t subcode = env->regs[r3];
IplParameterBlock *iplb;
if (env->psw.mask & PSW_MASK_PSTATE) {
program_interrupt(env, PGM_PRIVILEGED, ILEN_LATER_INC);
@ -180,14 +185,38 @@ void handle_diag_308(CPUS390XState *env, uint64_t r1, uint64_t r3)
program_interrupt(env, PGM_SPECIFICATION, ILEN_LATER_INC);
return;
}
env->regs[r1+1] = DIAG_308_RC_INVALID;
if (!address_space_access_valid(&address_space_memory, addr,
sizeof(IplParameterBlock), false)) {
program_interrupt(env, PGM_ADDRESSING, ILEN_LATER_INC);
return;
}
iplb = g_malloc0(sizeof(struct IplParameterBlock));
cpu_physical_memory_read(addr, iplb, sizeof(struct IplParameterBlock));
if (!s390_ipl_update_diag308(iplb)) {
env->regs[r1 + 1] = DIAG_308_RC_OK;
} else {
env->regs[r1 + 1] = DIAG_308_RC_INVALID;
}
g_free(iplb);
return;
case 6:
if ((r1 & 1) || (addr & 0x0fffULL)) {
program_interrupt(env, PGM_SPECIFICATION, ILEN_LATER_INC);
return;
}
env->regs[r1+1] = DIAG_308_RC_NO_CONF;
if (!address_space_access_valid(&address_space_memory, addr,
sizeof(IplParameterBlock), true)) {
program_interrupt(env, PGM_ADDRESSING, ILEN_LATER_INC);
return;
}
iplb = s390_ipl_get_iplb();
if (iplb) {
cpu_physical_memory_write(addr, iplb,
sizeof(struct IplParameterBlock));
env->regs[r1 + 1] = DIAG_308_RC_OK;
} else {
env->regs[r1 + 1] = DIAG_308_RC_NO_CONF;
}
return;
default:
hw_error("Unhandled diag308 subcode %" PRIx64, subcode);

target-s390x/mmu_helper.c (new file, 472 lines)

@ -0,0 +1,472 @@
/*
* S390x MMU related functions
*
* Copyright (c) 2011 Alexander Graf
* Copyright (c) 2015 Thomas Huth, IBM Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include "qemu/error-report.h"
#include "exec/address-spaces.h"
#include "sysemu/kvm.h"
#include "cpu.h"
/* #define DEBUG_S390 */
/* #define DEBUG_S390_PTE */
/* #define DEBUG_S390_STDOUT */
#ifdef DEBUG_S390
#ifdef DEBUG_S390_STDOUT
#define DPRINTF(fmt, ...) \
do { fprintf(stderr, fmt, ## __VA_ARGS__); \
qemu_log(fmt, ##__VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
do { qemu_log(fmt, ## __VA_ARGS__); } while (0)
#endif
#else
#define DPRINTF(fmt, ...) \
do { } while (0)
#endif
#ifdef DEBUG_S390_PTE
#define PTE_DPRINTF DPRINTF
#else
#define PTE_DPRINTF(fmt, ...) \
do { } while (0)
#endif
/* Fetch/store bits in the translation exception code: */
#define FS_READ 0x800
#define FS_WRITE 0x400
static void trigger_access_exception(CPUS390XState *env, uint32_t type,
uint32_t ilen, uint64_t tec)
{
S390CPU *cpu = s390_env_get_cpu(env);
if (kvm_enabled()) {
kvm_s390_access_exception(cpu, type, tec);
} else {
CPUState *cs = CPU(cpu);
stq_phys(cs->as, env->psa + offsetof(LowCore, trans_exc_code), tec);
trigger_pgm_exception(env, type, ilen);
}
}
static void trigger_prot_fault(CPUS390XState *env, target_ulong vaddr,
uint64_t asc, int rw, bool exc)
{
uint64_t tec;
tec = vaddr | (rw == 1 ? FS_WRITE : FS_READ) | 4 | asc >> 46;
DPRINTF("%s: trans_exc_code=%016" PRIx64 "\n", __func__, tec);
if (!exc) {
return;
}
trigger_access_exception(env, PGM_PROTECTION, ILEN_LATER_INC, tec);
}
static void trigger_page_fault(CPUS390XState *env, target_ulong vaddr,
uint32_t type, uint64_t asc, int rw, bool exc)
{
int ilen = ILEN_LATER;
uint64_t tec;
tec = vaddr | (rw == 1 ? FS_WRITE : FS_READ) | asc >> 46;
DPRINTF("%s: vaddr=%016" PRIx64 " bits=%d\n", __func__, vaddr, bits);
if (!exc) {
return;
}
/* Code accesses have an undefined ilc. */
if (rw == 2) {
ilen = 2;
}
trigger_access_exception(env, type, ilen, tec);
}
/**
* Translate real address to absolute (= physical)
* address by taking care of the prefix mapping.
*/
static target_ulong mmu_real2abs(CPUS390XState *env, target_ulong raddr)
{
if (raddr < 0x2000) {
return raddr + env->psa; /* Map the lowcore. */
} else if (raddr >= env->psa && raddr < env->psa + 0x2000) {
return raddr - env->psa; /* Map the 0 page. */
}
return raddr;
}
/* Decode page table entry (normal 4KB page) */
static int mmu_translate_pte(CPUS390XState *env, target_ulong vaddr,
uint64_t asc, uint64_t pt_entry,
target_ulong *raddr, int *flags, int rw, bool exc)
{
if (pt_entry & _PAGE_INVALID) {
DPRINTF("%s: PTE=0x%" PRIx64 " invalid\n", __func__, pt_entry);
trigger_page_fault(env, vaddr, PGM_PAGE_TRANS, asc, rw, exc);
return -1;
}
if (pt_entry & _PAGE_RES0) {
trigger_page_fault(env, vaddr, PGM_TRANS_SPEC, asc, rw, exc);
return -1;
}
if (pt_entry & _PAGE_RO) {
*flags &= ~PAGE_WRITE;
}
*raddr = pt_entry & _ASCE_ORIGIN;
PTE_DPRINTF("%s: PTE=0x%" PRIx64 "\n", __func__, pt_entry);
return 0;
}
#define VADDR_PX 0xff000 /* Page index bits */
/* Decode segment table entry */
static int mmu_translate_segment(CPUS390XState *env, target_ulong vaddr,
uint64_t asc, uint64_t st_entry,
target_ulong *raddr, int *flags, int rw,
bool exc)
{
CPUState *cs = CPU(s390_env_get_cpu(env));
uint64_t origin, offs, pt_entry;
if (st_entry & _SEGMENT_ENTRY_RO) {
*flags &= ~PAGE_WRITE;
}
if ((st_entry & _SEGMENT_ENTRY_FC) && (env->cregs[0] & CR0_EDAT)) {
/* Decode EDAT1 segment frame absolute address (1MB page) */
*raddr = (st_entry & 0xfffffffffff00000ULL) | (vaddr & 0xfffff);
PTE_DPRINTF("%s: SEG=0x%" PRIx64 "\n", __func__, st_entry);
return 0;
}
/* Look up 4KB page entry */
origin = st_entry & _SEGMENT_ENTRY_ORIGIN;
offs = (vaddr & VADDR_PX) >> 9;
pt_entry = ldq_phys(cs->as, origin + offs);
PTE_DPRINTF("%s: 0x%" PRIx64 " + 0x%" PRIx64 " => 0x%016" PRIx64 "\n",
__func__, origin, offs, pt_entry);
return mmu_translate_pte(env, vaddr, asc, pt_entry, raddr, flags, rw, exc);
}
/* Decode region table entries */
static int mmu_translate_region(CPUS390XState *env, target_ulong vaddr,
uint64_t asc, uint64_t entry, int level,
target_ulong *raddr, int *flags, int rw,
bool exc)
{
CPUState *cs = CPU(s390_env_get_cpu(env));
uint64_t origin, offs, new_entry;
const int pchks[4] = {
PGM_SEGMENT_TRANS, PGM_REG_THIRD_TRANS,
PGM_REG_SEC_TRANS, PGM_REG_FIRST_TRANS
};
PTE_DPRINTF("%s: 0x%" PRIx64 "\n", __func__, entry);
origin = entry & _REGION_ENTRY_ORIGIN;
offs = (vaddr >> (17 + 11 * level / 4)) & 0x3ff8;
new_entry = ldq_phys(cs->as, origin + offs);
PTE_DPRINTF("%s: 0x%" PRIx64 " + 0x%" PRIx64 " => 0x%016" PRIx64 "\n",
__func__, origin, offs, new_entry);
if ((new_entry & _REGION_ENTRY_INV) != 0) {
DPRINTF("%s: invalid region\n", __func__);
trigger_page_fault(env, vaddr, pchks[level / 4], asc, rw, exc);
return -1;
}
if ((new_entry & _REGION_ENTRY_TYPE_MASK) != level) {
trigger_page_fault(env, vaddr, PGM_TRANS_SPEC, asc, rw, exc);
return -1;
}
if (level == _ASCE_TYPE_SEGMENT) {
return mmu_translate_segment(env, vaddr, asc, new_entry, raddr, flags,
rw, exc);
}
/* Check region table offset and length */
offs = (vaddr >> (28 + 11 * (level - 4) / 4)) & 3;
if (offs < ((new_entry & _REGION_ENTRY_TF) >> 6)
|| offs > (new_entry & _REGION_ENTRY_LENGTH)) {
DPRINTF("%s: invalid offset or len (%lx)\n", __func__, new_entry);
trigger_page_fault(env, vaddr, pchks[level / 4 - 1], asc, rw, exc);
return -1;
}
if ((env->cregs[0] & CR0_EDAT) && (new_entry & _REGION_ENTRY_RO)) {
*flags &= ~PAGE_WRITE;
}
/* yet another region */
return mmu_translate_region(env, vaddr, asc, new_entry, level - 4,
raddr, flags, rw, exc);
}
static int mmu_translate_asce(CPUS390XState *env, target_ulong vaddr,
uint64_t asc, uint64_t asce, target_ulong *raddr,
int *flags, int rw, bool exc)
{
int level;
int r;
if (asce & _ASCE_REAL_SPACE) {
/* direct mapping */
*raddr = vaddr;
return 0;
}
level = asce & _ASCE_TYPE_MASK;
switch (level) {
case _ASCE_TYPE_REGION1:
if ((vaddr >> 62) > (asce & _ASCE_TABLE_LENGTH)) {
trigger_page_fault(env, vaddr, PGM_REG_FIRST_TRANS, asc, rw, exc);
return -1;
}
break;
case _ASCE_TYPE_REGION2:
if (vaddr & 0xffe0000000000000ULL) {
DPRINTF("%s: vaddr doesn't fit 0x%16" PRIx64
" 0xffe0000000000000ULL\n", __func__, vaddr);
trigger_page_fault(env, vaddr, PGM_ASCE_TYPE, asc, rw, exc);
return -1;
}
if ((vaddr >> 51 & 3) > (asce & _ASCE_TABLE_LENGTH)) {
trigger_page_fault(env, vaddr, PGM_REG_SEC_TRANS, asc, rw, exc);
return -1;
}
break;
case _ASCE_TYPE_REGION3:
if (vaddr & 0xfffffc0000000000ULL) {
DPRINTF("%s: vaddr doesn't fit 0x%16" PRIx64
" 0xfffffc0000000000ULL\n", __func__, vaddr);
trigger_page_fault(env, vaddr, PGM_ASCE_TYPE, asc, rw, exc);
return -1;
}
if ((vaddr >> 40 & 3) > (asce & _ASCE_TABLE_LENGTH)) {
trigger_page_fault(env, vaddr, PGM_REG_THIRD_TRANS, asc, rw, exc);
return -1;
}
break;
case _ASCE_TYPE_SEGMENT:
if (vaddr & 0xffffffff80000000ULL) {
DPRINTF("%s: vaddr doesn't fit 0x%16" PRIx64
" 0xffffffff80000000ULL\n", __func__, vaddr);
trigger_page_fault(env, vaddr, PGM_ASCE_TYPE, asc, rw, exc);
return -1;
}
if ((vaddr >> 29 & 3) > (asce & _ASCE_TABLE_LENGTH)) {
trigger_page_fault(env, vaddr, PGM_SEGMENT_TRANS, asc, rw, exc);
return -1;
}
break;
}
r = mmu_translate_region(env, vaddr, asc, asce, level, raddr, flags, rw,
exc);
if ((rw == 1) && !(*flags & PAGE_WRITE)) {
trigger_prot_fault(env, vaddr, asc, rw, exc);
return -1;
}
return r;
}
/**
* Translate a virtual (logical) address into a physical (absolute) address.
* @param vaddr the virtual address
* @param rw 0 = read, 1 = write, 2 = code fetch
* @param asc address space control (one of the PSW_ASC_* modes)
* @param raddr the translated address is stored to this pointer
* @param flags the PAGE_READ/WRITE/EXEC flags are stored to this pointer
* @param exc true = inject a program check if a fault occurs
* @return 0 if the translation was successful, -1 if a fault occurred
*/
int mmu_translate(CPUS390XState *env, target_ulong vaddr, int rw, uint64_t asc,
target_ulong *raddr, int *flags, bool exc)
{
int r = -1;
uint8_t *sk;
*flags = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
vaddr &= TARGET_PAGE_MASK;
if (!(env->psw.mask & PSW_MASK_DAT)) {
*raddr = vaddr;
r = 0;
goto out;
}
switch (asc) {
case PSW_ASC_PRIMARY:
PTE_DPRINTF("%s: asc=primary\n", __func__);
r = mmu_translate_asce(env, vaddr, asc, env->cregs[1], raddr, flags,
rw, exc);
break;
case PSW_ASC_HOME:
PTE_DPRINTF("%s: asc=home\n", __func__);
r = mmu_translate_asce(env, vaddr, asc, env->cregs[13], raddr, flags,
rw, exc);
break;
case PSW_ASC_SECONDARY:
PTE_DPRINTF("%s: asc=secondary\n", __func__);
/*
* Instruction: Primary
* Data: Secondary
*/
if (rw == 2) {
r = mmu_translate_asce(env, vaddr, PSW_ASC_PRIMARY, env->cregs[1],
raddr, flags, rw, exc);
*flags &= ~(PAGE_READ | PAGE_WRITE);
} else {
r = mmu_translate_asce(env, vaddr, PSW_ASC_SECONDARY, env->cregs[7],
raddr, flags, rw, exc);
*flags &= ~(PAGE_EXEC);
}
break;
case PSW_ASC_ACCREG:
default:
hw_error("guest switched to unknown asc mode\n");
break;
}
out:
/* Convert real address -> absolute address */
*raddr = mmu_real2abs(env, *raddr);
if (*raddr <= ram_size) {
sk = &env->storage_keys[*raddr / TARGET_PAGE_SIZE];
if (*flags & PAGE_READ) {
*sk |= SK_R;
}
if (*flags & PAGE_WRITE) {
*sk |= SK_C;
}
}
return r;
}
/**
* lowprot_enabled: Check whether low-address protection is enabled
*/
static bool lowprot_enabled(const CPUS390XState *env)
{
if (!(env->cregs[0] & CR0_LOWPROT)) {
return false;
}
if (!(env->psw.mask & PSW_MASK_DAT)) {
return true;
}
/* Check the private-space control bit */
switch (env->psw.mask & PSW_MASK_ASC) {
case PSW_ASC_PRIMARY:
return !(env->cregs[1] & _ASCE_PRIVATE_SPACE);
case PSW_ASC_SECONDARY:
return !(env->cregs[7] & _ASCE_PRIVATE_SPACE);
case PSW_ASC_HOME:
return !(env->cregs[13] & _ASCE_PRIVATE_SPACE);
default:
/* We don't support access register mode */
error_report("unsupported addressing mode");
exit(1);
}
}
/**
* translate_pages: Translate a set of consecutive logical page addresses
* to absolute addresses
*/
static int translate_pages(S390CPU *cpu, vaddr addr, int nr_pages,
target_ulong *pages, bool is_write)
{
bool lowprot = is_write && lowprot_enabled(&cpu->env);
uint64_t asc = cpu->env.psw.mask & PSW_MASK_ASC;
CPUS390XState *env = &cpu->env;
int ret, i, pflags;
for (i = 0; i < nr_pages; i++) {
/* Low-address protection? */
if (lowprot && (addr < 512 || (addr >= 4096 && addr < 4096 + 512))) {
trigger_access_exception(env, PGM_PROTECTION, ILEN_LATER_INC, 0);
return -EACCES;
}
ret = mmu_translate(env, addr, is_write, asc, &pages[i], &pflags, true);
if (ret) {
return ret;
}
if (!address_space_access_valid(&address_space_memory, pages[i],
TARGET_PAGE_SIZE, is_write)) {
program_interrupt(env, PGM_ADDRESSING, 0);
return -EFAULT;
}
addr += TARGET_PAGE_SIZE;
}
return 0;
}
/**
* s390_cpu_virt_mem_rw:
* @laddr: the logical start address
* @hostbuf: buffer in host memory. NULL = do only checks w/o copying
* @len: length that should be transferred
* @is_write: true = write, false = read
* Returns: 0 on success, non-zero if an exception occurred
*
* Copy from/to guest memory using logical addresses. Note that we inject a
* program interrupt in case there is an error while accessing the memory.
*/
int s390_cpu_virt_mem_rw(S390CPU *cpu, vaddr laddr, void *hostbuf,
int len, bool is_write)
{
int currlen, nr_pages, i;
target_ulong *pages;
int ret;
nr_pages = (((laddr & ~TARGET_PAGE_MASK) + len - 1) >> TARGET_PAGE_BITS)
+ 1;
pages = g_malloc(nr_pages * sizeof(*pages));
ret = translate_pages(cpu, laddr, nr_pages, pages, is_write);
if (ret == 0 && hostbuf != NULL) {
/* Copy data by stepping through the area page by page */
for (i = 0; i < nr_pages; i++) {
currlen = MIN(len, TARGET_PAGE_SIZE - (laddr % TARGET_PAGE_SIZE));
cpu_physical_memory_rw(pages[i] | (laddr & ~TARGET_PAGE_MASK),
hostbuf, currlen, is_write);
laddr += currlen;
hostbuf += currlen;
len -= currlen;
}
}
g_free(pages);
return ret;
}