Merge remote-tracking branch 'remotes/cohuck/tags/s390x-20171215-v2' into staging

s390x changes for 2.12:
- Lots of tcg improvements: ccw hotplug is now working and we can run
  a Linux kernel built for z12 under tcg
- zPCI improvements to get virtio-pci working
- get rid of the cssid restrictions for virtual and non-virtual channel
  devices
- we now support 8TB+ systems
- 2.12 compat machine
- fixes and cleanups

# gpg: Signature made Fri 15 Dec 2017 10:57:01 GMT
# gpg:                using RSA key 0xDECF6B93C6F02FAF
# gpg: Good signature from "Cornelia Huck <conny@cornelia-huck.de>"
# gpg:                 aka "Cornelia Huck <huckc@linux.vnet.ibm.com>"
# gpg:                 aka "Cornelia Huck <cornelia.huck@de.ibm.com>"
# gpg:                 aka "Cornelia Huck <cohuck@kernel.org>"
# gpg:                 aka "Cornelia Huck <cohuck@redhat.com>"
# Primary key fingerprint: C3D0 D66D C362 4FF6 A8C0  18CE DECF 6B93 C6F0 2FAF

* remotes/cohuck/tags/s390x-20171215-v2: (46 commits)
  s390-ccw-virtio: allow for systems larger that 7.999TB
  s390x: change the QEMU cpu model to a stripped down z12
  s390x/tcg: we already implement the Set-Program-Parameter facility
  s390x/tcg: implement extract-CPU-time facility
  s390x/tcg: Implement SIGNAL ADAPTER instruction
  s390x/tcg: Implement STORE CHANNEL PATH STATUS
  s390x/tcg: wire up SET CHANNEL MONITOR
  s390x/tcg: wire up SET ADDRESS LIMIT
  s390x/tcg: implement Interlocked-Access Facility 2
  s390x/tcg: ASI/ASGI/ALSI/ALSGI are atomic with Interlocked-acccess facility 1
  s390x/tcg: wire up STORE CHANNEL REPORT WORD
  s390x/tcg: indicate value of TODPR in STCKE
  s390x/tcg: implement SET CLOCK PROGRAMMABLE FIELD
  s390x/tcg: fix and cleanup mcck injection
  s390x/kvm: factor out build_channel_report_mcic() into cpu.h
  s390x/css: attach css bridge
  s390x: deprecate s390-squash-mcss machine prop
  s390x/css: unrestrict cssids
  s390x/pci: search for subregion inside the BARs
  s390x/pci: move the memory region write from pcistg
  ...

# Conflicts:
#	include/hw/compat.h

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
commit a7cf5391a4 by Peter Maydell, 2017-12-15 12:58:17 +00:00
36 changed files with 928 additions and 547 deletions


@ -104,7 +104,7 @@ static void emulated_ccw_3270_realize(DeviceState *ds, Error **errp)
SubchDev *sch;
Error *err = NULL;
sch = css_create_sch(cdev->devno, true, cbus->squash_mcss, errp);
sch = css_create_sch(cdev->devno, cbus->squash_mcss, errp);
if (!sch) {
return;
}


@ -99,6 +99,8 @@ VirtualCssBus *virtual_css_bus_init(void)
/* Create bridge device */
dev = qdev_create(NULL, TYPE_VIRTUAL_CSS_BRIDGE);
object_property_add_child(qdev_get_machine(), TYPE_VIRTUAL_CSS_BRIDGE,
OBJECT(dev), NULL);
qdev_init_nofail(dev);
/* Create bus on bridge device */
@ -123,6 +125,11 @@ static Property virtual_css_bridge_properties[] = {
DEFINE_PROP_END_OF_LIST(),
};
static bool prop_get_true(Object *obj, Error **errp)
{
return true;
}
static void virtual_css_bridge_class_init(ObjectClass *klass, void *data)
{
HotplugHandlerClass *hc = HOTPLUG_HANDLER_CLASS(klass);
@ -131,6 +138,12 @@ static void virtual_css_bridge_class_init(ObjectClass *klass, void *data)
hc->unplug = ccw_device_unplug;
set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories);
dc->props = virtual_css_bridge_properties;
object_class_property_add_bool(klass, "cssid-unrestricted",
prop_get_true, NULL, NULL);
object_class_property_set_description(klass, "cssid-unrestricted",
"A css device can use any cssid, regardless whether virtual"
" or not (read only, always true)",
NULL);
}
static const TypeInfo virtual_css_bridge_info = {


@ -1723,12 +1723,6 @@ void css_undo_stcrw(CRW *crw)
QTAILQ_INSERT_HEAD(&channel_subsys.pending_crws, crw_cont, sibling);
}
int css_do_tpi(IOIntCode *int_code, int lowcore)
{
/* No pending interrupts for !KVM. */
return 0;
}
int css_collect_chp_desc(int m, uint8_t cssid, uint8_t f_chpid, uint8_t l_chpid,
int rfmt, void *buf)
{
@ -2370,21 +2364,11 @@ const PropertyInfo css_devid_ro_propinfo = {
.get = get_css_devid,
};
SubchDev *css_create_sch(CssDevId bus_id, bool is_virtual, bool squash_mcss,
Error **errp)
SubchDev *css_create_sch(CssDevId bus_id, bool squash_mcss, Error **errp)
{
uint16_t schid = 0;
SubchDev *sch;
if (bus_id.valid) {
if (is_virtual != (bus_id.cssid == VIRTUAL_CSSID)) {
error_setg(errp, "cssid %hhx not valid for %s devices",
bus_id.cssid,
(is_virtual ? "virtual" : "non-virtual"));
return NULL;
}
}
if (bus_id.valid) {
if (squash_mcss) {
bus_id.cssid = channel_subsys.default_cssid;
@ -2396,19 +2380,8 @@ SubchDev *css_create_sch(CssDevId bus_id, bool is_virtual, bool squash_mcss,
bus_id.devid, &schid, errp)) {
return NULL;
}
} else if (squash_mcss || is_virtual) {
bus_id.cssid = channel_subsys.default_cssid;
if (!css_find_free_subch_and_devno(bus_id.cssid, &bus_id.ssid,
&bus_id.devid, &schid, errp)) {
return NULL;
}
} else {
for (bus_id.cssid = 0; bus_id.cssid < MAX_CSSID; ++bus_id.cssid) {
if (bus_id.cssid == VIRTUAL_CSSID) {
continue;
}
for (bus_id.cssid = channel_subsys.default_cssid;;) {
if (!channel_subsys.css[bus_id.cssid]) {
css_create_css_image(bus_id.cssid, false);
}
@ -2418,7 +2391,8 @@ SubchDev *css_create_sch(CssDevId bus_id, bool is_virtual, bool squash_mcss,
NULL)) {
break;
}
if (bus_id.cssid == MAX_CSSID) {
bus_id.cssid = (bus_id.cssid + 1) % MAX_CSSID;
if (bus_id.cssid == channel_subsys.default_cssid) {
error_setg(errp, "Virtual channel subsystem is full!");
return NULL;
}


@ -77,7 +77,7 @@ static void s390_ccw_realize(S390CCWDevice *cdev, char *sysfsdev, Error **errp)
goto out_err_propagate;
}
sch = css_create_sch(ccw_dev->devno, false, cbus->squash_mcss, &err);
sch = css_create_sch(ccw_dev->devno, cbus->squash_mcss, &err);
if (!sch) {
goto out_mdevid_free;
}


@ -284,6 +284,7 @@ struct S390PCIBusDevice {
uint64_t fmb_addr;
uint8_t isc;
uint16_t noi;
uint16_t maxstbl;
uint8_t sum;
S390MsixInfo msix;
AdapterRoutes routes;


@ -142,7 +142,7 @@ out:
return rc;
}
int clp_service_call(S390CPU *cpu, uint8_t r2)
int clp_service_call(S390CPU *cpu, uint8_t r2, uintptr_t ra)
{
ClpReqHdr *reqh;
ClpRspHdr *resh;
@ -158,37 +158,40 @@ int clp_service_call(S390CPU *cpu, uint8_t r2)
cpu_synchronize_state(CPU(cpu));
if (env->psw.mask & PSW_MASK_PSTATE) {
program_interrupt(env, PGM_PRIVILEGED, 4);
s390_program_interrupt(env, PGM_PRIVILEGED, 4, ra);
return 0;
}
if (s390_cpu_virt_mem_read(cpu, env->regs[r2], r2, buffer, sizeof(*reqh))) {
s390_cpu_virt_mem_handle_exc(cpu, ra);
return 0;
}
reqh = (ClpReqHdr *)buffer;
req_len = lduw_p(&reqh->len);
if (req_len < 16 || req_len > 8184 || (req_len % 8 != 0)) {
program_interrupt(env, PGM_OPERAND, 4);
s390_program_interrupt(env, PGM_OPERAND, 4, ra);
return 0;
}
if (s390_cpu_virt_mem_read(cpu, env->regs[r2], r2, buffer,
req_len + sizeof(*resh))) {
s390_cpu_virt_mem_handle_exc(cpu, ra);
return 0;
}
resh = (ClpRspHdr *)(buffer + req_len);
res_len = lduw_p(&resh->len);
if (res_len < 8 || res_len > 8176 || (res_len % 8 != 0)) {
program_interrupt(env, PGM_OPERAND, 4);
s390_program_interrupt(env, PGM_OPERAND, 4, ra);
return 0;
}
if ((req_len + res_len) > 8192) {
program_interrupt(env, PGM_OPERAND, 4);
s390_program_interrupt(env, PGM_OPERAND, 4, ra);
return 0;
}
if (s390_cpu_virt_mem_read(cpu, env->regs[r2], r2, buffer,
req_len + res_len)) {
s390_cpu_virt_mem_handle_exc(cpu, ra);
return 0;
}
@ -294,6 +297,7 @@ int clp_service_call(S390CPU *cpu, uint8_t r2)
stq_p(&resgrp->msia, ZPCI_MSI_ADDR);
stw_p(&resgrp->mui, 0);
stw_p(&resgrp->i, 128);
stw_p(&resgrp->maxstbl, 128);
resgrp->version = 0;
stw_p(&resgrp->hdr.rsp, CLP_RC_OK);
@ -308,19 +312,78 @@ int clp_service_call(S390CPU *cpu, uint8_t r2)
out:
if (s390_cpu_virt_mem_write(cpu, env->regs[r2], r2, buffer,
req_len + res_len)) {
s390_cpu_virt_mem_handle_exc(cpu, ra);
return 0;
}
setcc(cpu, cc);
return 0;
}
int pcilg_service_call(S390CPU *cpu, uint8_t r1, uint8_t r2)
/**
* Swap data contained in s390x big endian registers to little endian
* PCI bars.
*
* @ptr: a pointer to a uint64_t data field
* @len: the length of the valid data, must be 1,2,4 or 8
*/
static int zpci_endian_swap(uint64_t *ptr, uint8_t len)
{
uint64_t data = *ptr;
switch (len) {
case 1:
break;
case 2:
data = bswap16(data);
break;
case 4:
data = bswap32(data);
break;
case 8:
data = bswap64(data);
break;
default:
return -EINVAL;
}
*ptr = data;
return 0;
}
static MemoryRegion *s390_get_subregion(MemoryRegion *mr, uint64_t offset,
uint8_t len)
{
MemoryRegion *subregion;
uint64_t subregion_size;
QTAILQ_FOREACH(subregion, &mr->subregions, subregions_link) {
subregion_size = int128_get64(subregion->size);
if ((offset >= subregion->addr) &&
(offset + len) <= (subregion->addr + subregion_size)) {
mr = subregion;
break;
}
}
return mr;
}
static MemTxResult zpci_read_bar(S390PCIBusDevice *pbdev, uint8_t pcias,
uint64_t offset, uint64_t *data, uint8_t len)
{
MemoryRegion *mr;
mr = pbdev->pdev->io_regions[pcias].memory;
mr = s390_get_subregion(mr, offset, len);
offset -= mr->addr;
return memory_region_dispatch_read(mr, offset, data, len,
MEMTXATTRS_UNSPECIFIED);
}
int pcilg_service_call(S390CPU *cpu, uint8_t r1, uint8_t r2, uintptr_t ra)
{
CPUS390XState *env = &cpu->env;
S390PCIBusDevice *pbdev;
uint64_t offset;
uint64_t data;
MemoryRegion *mr;
MemTxResult result;
uint8_t len;
uint32_t fh;
@ -329,12 +392,12 @@ int pcilg_service_call(S390CPU *cpu, uint8_t r1, uint8_t r2)
cpu_synchronize_state(CPU(cpu));
if (env->psw.mask & PSW_MASK_PSTATE) {
program_interrupt(env, PGM_PRIVILEGED, 4);
s390_program_interrupt(env, PGM_PRIVILEGED, 4, ra);
return 0;
}
if (r2 & 0x1) {
program_interrupt(env, PGM_SPECIFICATION, 4);
s390_program_interrupt(env, PGM_SPECIFICATION, 4, ra);
return 0;
}
@ -343,6 +406,11 @@ int pcilg_service_call(S390CPU *cpu, uint8_t r1, uint8_t r2)
len = env->regs[r2] & 0xf;
offset = env->regs[r2 + 1];
if (!(fh & FH_MASK_ENABLE)) {
setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
return 0;
}
pbdev = s390_pci_find_dev_by_fh(s390_get_phb(), fh);
if (!pbdev) {
DPRINTF("pcilg no pci dev\n");
@ -351,12 +419,7 @@ int pcilg_service_call(S390CPU *cpu, uint8_t r1, uint8_t r2)
}
switch (pbdev->state) {
case ZPCI_FS_RESERVED:
case ZPCI_FS_STANDBY:
case ZPCI_FS_DISABLED:
case ZPCI_FS_PERMANENT_ERROR:
setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
return 0;
case ZPCI_FS_ERROR:
setcc(cpu, ZPCI_PCI_LS_ERR);
s390_set_status_code(env, r2, ZPCI_PCI_ST_BLOCKED);
@ -365,44 +428,33 @@ int pcilg_service_call(S390CPU *cpu, uint8_t r1, uint8_t r2)
break;
}
if (pcias < 6) {
if ((8 - (offset & 0x7)) < len) {
program_interrupt(env, PGM_OPERAND, 4);
switch (pcias) {
case ZPCI_IO_BAR_MIN...ZPCI_IO_BAR_MAX:
if (!len || (len > (8 - (offset & 0x7)))) {
s390_program_interrupt(env, PGM_OPERAND, 4, ra);
return 0;
}
mr = pbdev->pdev->io_regions[pcias].memory;
result = memory_region_dispatch_read(mr, offset, &data, len,
MEMTXATTRS_UNSPECIFIED);
result = zpci_read_bar(pbdev, pcias, offset, &data, len);
if (result != MEMTX_OK) {
program_interrupt(env, PGM_OPERAND, 4);
s390_program_interrupt(env, PGM_OPERAND, 4, ra);
return 0;
}
} else if (pcias == 15) {
if ((4 - (offset & 0x3)) < len) {
program_interrupt(env, PGM_OPERAND, 4);
break;
case ZPCI_CONFIG_BAR:
if (!len || (len > (4 - (offset & 0x3))) || len == 3) {
s390_program_interrupt(env, PGM_OPERAND, 4, ra);
return 0;
}
data = pci_host_config_read_common(
pbdev->pdev, offset, pci_config_size(pbdev->pdev), len);
switch (len) {
case 1:
break;
case 2:
data = bswap16(data);
break;
case 4:
data = bswap32(data);
break;
case 8:
data = bswap64(data);
break;
default:
program_interrupt(env, PGM_OPERAND, 4);
if (zpci_endian_swap(&data, len)) {
s390_program_interrupt(env, PGM_OPERAND, 4, ra);
return 0;
}
} else {
DPRINTF("invalid space\n");
break;
default:
DPRINTF("pcilg invalid space\n");
setcc(cpu, ZPCI_PCI_LS_ERR);
s390_set_status_code(env, r2, ZPCI_PCI_ST_INVAL_AS);
return 0;
@ -413,24 +465,23 @@ int pcilg_service_call(S390CPU *cpu, uint8_t r1, uint8_t r2)
return 0;
}
static int trap_msix(S390PCIBusDevice *pbdev, uint64_t offset, uint8_t pcias)
static MemTxResult zpci_write_bar(S390PCIBusDevice *pbdev, uint8_t pcias,
uint64_t offset, uint64_t data, uint8_t len)
{
if (pbdev->msix.available && pbdev->msix.table_bar == pcias &&
offset >= pbdev->msix.table_offset &&
offset < (pbdev->msix.table_offset +
pbdev->msix.entries * PCI_MSIX_ENTRY_SIZE)) {
return 1;
} else {
return 0;
}
MemoryRegion *mr;
mr = pbdev->pdev->io_regions[pcias].memory;
mr = s390_get_subregion(mr, offset, len);
offset -= mr->addr;
return memory_region_dispatch_write(mr, offset, data, len,
MEMTXATTRS_UNSPECIFIED);
}
int pcistg_service_call(S390CPU *cpu, uint8_t r1, uint8_t r2)
int pcistg_service_call(S390CPU *cpu, uint8_t r1, uint8_t r2, uintptr_t ra)
{
CPUS390XState *env = &cpu->env;
uint64_t offset, data;
S390PCIBusDevice *pbdev;
MemoryRegion *mr;
MemTxResult result;
uint8_t len;
uint32_t fh;
@ -439,12 +490,12 @@ int pcistg_service_call(S390CPU *cpu, uint8_t r1, uint8_t r2)
cpu_synchronize_state(CPU(cpu));
if (env->psw.mask & PSW_MASK_PSTATE) {
program_interrupt(env, PGM_PRIVILEGED, 4);
s390_program_interrupt(env, PGM_PRIVILEGED, 4, ra);
return 0;
}
if (r2 & 0x1) {
program_interrupt(env, PGM_SPECIFICATION, 4);
s390_program_interrupt(env, PGM_SPECIFICATION, 4, ra);
return 0;
}
@ -452,6 +503,12 @@ int pcistg_service_call(S390CPU *cpu, uint8_t r1, uint8_t r2)
pcias = (env->regs[r2] >> 16) & 0xf;
len = env->regs[r2] & 0xf;
offset = env->regs[r2 + 1];
data = env->regs[r1];
if (!(fh & FH_MASK_ENABLE)) {
setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
return 0;
}
pbdev = s390_pci_find_dev_by_fh(s390_get_phb(), fh);
if (!pbdev) {
@ -461,12 +518,10 @@ int pcistg_service_call(S390CPU *cpu, uint8_t r1, uint8_t r2)
}
switch (pbdev->state) {
case ZPCI_FS_RESERVED:
case ZPCI_FS_STANDBY:
case ZPCI_FS_DISABLED:
/* ZPCI_FS_RESERVED, ZPCI_FS_STANDBY and ZPCI_FS_DISABLED
* are already covered by the FH_MASK_ENABLE check above
*/
case ZPCI_FS_PERMANENT_ERROR:
setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
return 0;
case ZPCI_FS_ERROR:
setcc(cpu, ZPCI_PCI_LS_ERR);
s390_set_status_code(env, r2, ZPCI_PCI_ST_BLOCKED);
@ -475,52 +530,37 @@ int pcistg_service_call(S390CPU *cpu, uint8_t r1, uint8_t r2)
break;
}
data = env->regs[r1];
if (pcias < 6) {
if ((8 - (offset & 0x7)) < len) {
program_interrupt(env, PGM_OPERAND, 4);
switch (pcias) {
/* A ZPCI PCI card may use any BAR from BAR 0 to BAR 5 */
case ZPCI_IO_BAR_MIN...ZPCI_IO_BAR_MAX:
/* Check length:
* A length of 0 is invalid and length should not cross a double word
*/
if (!len || (len > (8 - (offset & 0x7)))) {
s390_program_interrupt(env, PGM_OPERAND, 4, ra);
return 0;
}
if (trap_msix(pbdev, offset, pcias)) {
offset = offset - pbdev->msix.table_offset;
mr = &pbdev->pdev->msix_table_mmio;
} else {
mr = pbdev->pdev->io_regions[pcias].memory;
}
result = memory_region_dispatch_write(mr, offset, data, len,
MEMTXATTRS_UNSPECIFIED);
result = zpci_write_bar(pbdev, pcias, offset, data, len);
if (result != MEMTX_OK) {
program_interrupt(env, PGM_OPERAND, 4);
s390_program_interrupt(env, PGM_OPERAND, 4, ra);
return 0;
}
} else if (pcias == 15) {
if ((4 - (offset & 0x3)) < len) {
program_interrupt(env, PGM_OPERAND, 4);
break;
case ZPCI_CONFIG_BAR:
/* ZPCI uses the pseudo BAR number 15 as configuration space */
/* possible access lengths are 1,2,4 and must not cross a word */
if (!len || (len > (4 - (offset & 0x3))) || len == 3) {
s390_program_interrupt(env, PGM_OPERAND, 4, ra);
return 0;
}
switch (len) {
case 1:
break;
case 2:
data = bswap16(data);
break;
case 4:
data = bswap32(data);
break;
case 8:
data = bswap64(data);
break;
default:
program_interrupt(env, PGM_OPERAND, 4);
return 0;
}
/* len = 1,2,4 so we do not need to test */
zpci_endian_swap(&data, len);
pci_host_config_write_common(pbdev->pdev, offset,
pci_config_size(pbdev->pdev),
data, len);
} else {
break;
default:
DPRINTF("pcistg invalid space\n");
setcc(cpu, ZPCI_PCI_LS_ERR);
s390_set_status_code(env, r2, ZPCI_PCI_ST_INVAL_AS);
@ -531,7 +571,7 @@ int pcistg_service_call(S390CPU *cpu, uint8_t r1, uint8_t r2)
return 0;
}
int rpcit_service_call(S390CPU *cpu, uint8_t r1, uint8_t r2)
int rpcit_service_call(S390CPU *cpu, uint8_t r1, uint8_t r2, uintptr_t ra)
{
CPUS390XState *env = &cpu->env;
uint32_t fh;
@ -545,12 +585,12 @@ int rpcit_service_call(S390CPU *cpu, uint8_t r1, uint8_t r2)
cpu_synchronize_state(CPU(cpu));
if (env->psw.mask & PSW_MASK_PSTATE) {
program_interrupt(env, PGM_PRIVILEGED, 4);
s390_program_interrupt(env, PGM_PRIVILEGED, 4, ra);
goto out;
}
if (r2 & 0x1) {
program_interrupt(env, PGM_SPECIFICATION, 4);
s390_program_interrupt(env, PGM_SPECIFICATION, 4, ra);
goto out;
}
@ -624,12 +664,13 @@ out:
}
int pcistb_service_call(S390CPU *cpu, uint8_t r1, uint8_t r3, uint64_t gaddr,
uint8_t ar)
uint8_t ar, uintptr_t ra)
{
CPUS390XState *env = &cpu->env;
S390PCIBusDevice *pbdev;
MemoryRegion *mr;
MemTxResult result;
uint64_t offset;
int i;
uint32_t fh;
uint8_t pcias;
@ -637,29 +678,17 @@ int pcistb_service_call(S390CPU *cpu, uint8_t r1, uint8_t r3, uint64_t gaddr,
uint8_t buffer[128];
if (env->psw.mask & PSW_MASK_PSTATE) {
program_interrupt(env, PGM_PRIVILEGED, 6);
s390_program_interrupt(env, PGM_PRIVILEGED, 6, ra);
return 0;
}
fh = env->regs[r1] >> 32;
pcias = (env->regs[r1] >> 16) & 0xf;
len = env->regs[r1] & 0xff;
offset = env->regs[r3];
if (pcias > 5) {
DPRINTF("pcistb invalid space\n");
setcc(cpu, ZPCI_PCI_LS_ERR);
s390_set_status_code(env, r1, ZPCI_PCI_ST_INVAL_AS);
return 0;
}
switch (len) {
case 16:
case 32:
case 64:
case 128:
break;
default:
program_interrupt(env, PGM_SPECIFICATION, 6);
if (!(fh & FH_MASK_ENABLE)) {
setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
return 0;
}
@ -671,12 +700,7 @@ int pcistb_service_call(S390CPU *cpu, uint8_t r1, uint8_t r3, uint64_t gaddr,
}
switch (pbdev->state) {
case ZPCI_FS_RESERVED:
case ZPCI_FS_STANDBY:
case ZPCI_FS_DISABLED:
case ZPCI_FS_PERMANENT_ERROR:
setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
return 0;
case ZPCI_FS_ERROR:
setcc(cpu, ZPCI_PCI_LS_ERR);
s390_set_status_code(env, r1, ZPCI_PCI_ST_BLOCKED);
@ -685,28 +709,62 @@ int pcistb_service_call(S390CPU *cpu, uint8_t r1, uint8_t r3, uint64_t gaddr,
break;
}
if (pcias > ZPCI_IO_BAR_MAX) {
DPRINTF("pcistb invalid space\n");
setcc(cpu, ZPCI_PCI_LS_ERR);
s390_set_status_code(env, r1, ZPCI_PCI_ST_INVAL_AS);
return 0;
}
/* Verify the address, offset and length */
/* offset must be a multiple of 8 */
if (offset % 8) {
goto specification_error;
}
/* Length must be greater than 8, a multiple of 8 */
/* and not greater than maxstbl */
if ((len <= 8) || (len % 8) || (len > pbdev->maxstbl)) {
goto specification_error;
}
/* Do not cross a 4K-byte boundary */
if (((offset & 0xfff) + len) > 0x1000) {
goto specification_error;
}
/* Guest address must be double word aligned */
if (gaddr & 0x07UL) {
goto specification_error;
}
mr = pbdev->pdev->io_regions[pcias].memory;
if (!memory_region_access_valid(mr, env->regs[r3], len, true)) {
program_interrupt(env, PGM_OPERAND, 6);
mr = s390_get_subregion(mr, offset, len);
offset -= mr->addr;
if (!memory_region_access_valid(mr, offset, len, true)) {
s390_program_interrupt(env, PGM_OPERAND, 6, ra);
return 0;
}
if (s390_cpu_virt_mem_read(cpu, gaddr, ar, buffer, len)) {
s390_cpu_virt_mem_handle_exc(cpu, ra);
return 0;
}
for (i = 0; i < len / 8; i++) {
result = memory_region_dispatch_write(mr, env->regs[r3] + i * 8,
ldq_p(buffer + i * 8), 8,
MEMTXATTRS_UNSPECIFIED);
result = memory_region_dispatch_write(mr, offset + i * 8,
ldq_p(buffer + i * 8), 8,
MEMTXATTRS_UNSPECIFIED);
if (result != MEMTX_OK) {
program_interrupt(env, PGM_OPERAND, 6);
s390_program_interrupt(env, PGM_OPERAND, 6, ra);
return 0;
}
}
setcc(cpu, ZPCI_PCI_LS_OK);
return 0;
specification_error:
s390_program_interrupt(env, PGM_SPECIFICATION, 6, ra);
return 0;
}
static int reg_irqs(CPUS390XState *env, S390PCIBusDevice *pbdev, ZpciFib fib)
@ -767,7 +825,8 @@ int pci_dereg_irqs(S390PCIBusDevice *pbdev)
return 0;
}
static int reg_ioat(CPUS390XState *env, S390PCIIOMMU *iommu, ZpciFib fib)
static int reg_ioat(CPUS390XState *env, S390PCIIOMMU *iommu, ZpciFib fib,
uintptr_t ra)
{
uint64_t pba = ldq_p(&fib.pba);
uint64_t pal = ldq_p(&fib.pal);
@ -776,14 +835,14 @@ static int reg_ioat(CPUS390XState *env, S390PCIIOMMU *iommu, ZpciFib fib)
uint8_t t = (g_iota >> 11) & 0x1;
if (pba > pal || pba < ZPCI_SDMA_ADDR || pal > ZPCI_EDMA_ADDR) {
program_interrupt(env, PGM_OPERAND, 6);
s390_program_interrupt(env, PGM_OPERAND, 6, ra);
return -EINVAL;
}
/* currently we only support designation type 1 with translation */
if (!(dt == ZPCI_IOTA_RTTO && t)) {
error_report("unsupported ioat dt %d t %d", dt, t);
program_interrupt(env, PGM_OPERAND, 6);
s390_program_interrupt(env, PGM_OPERAND, 6, ra);
return -EINVAL;
}
@ -804,7 +863,8 @@ void pci_dereg_ioat(S390PCIIOMMU *iommu)
iommu->g_iota = 0;
}
int mpcifc_service_call(S390CPU *cpu, uint8_t r1, uint64_t fiba, uint8_t ar)
int mpcifc_service_call(S390CPU *cpu, uint8_t r1, uint64_t fiba, uint8_t ar,
uintptr_t ra)
{
CPUS390XState *env = &cpu->env;
uint8_t oc, dmaas;
@ -814,7 +874,7 @@ int mpcifc_service_call(S390CPU *cpu, uint8_t r1, uint64_t fiba, uint8_t ar)
uint64_t cc = ZPCI_PCI_LS_OK;
if (env->psw.mask & PSW_MASK_PSTATE) {
program_interrupt(env, PGM_PRIVILEGED, 6);
s390_program_interrupt(env, PGM_PRIVILEGED, 6, ra);
return 0;
}
@ -823,7 +883,7 @@ int mpcifc_service_call(S390CPU *cpu, uint8_t r1, uint64_t fiba, uint8_t ar)
fh = env->regs[r1] >> 32;
if (fiba & 0x7) {
program_interrupt(env, PGM_SPECIFICATION, 6);
s390_program_interrupt(env, PGM_SPECIFICATION, 6, ra);
return 0;
}
@ -846,11 +906,12 @@ int mpcifc_service_call(S390CPU *cpu, uint8_t r1, uint64_t fiba, uint8_t ar)
}
if (s390_cpu_virt_mem_read(cpu, fiba, ar, (uint8_t *)&fib, sizeof(fib))) {
s390_cpu_virt_mem_handle_exc(cpu, ra);
return 0;
}
if (fib.fmt != 0) {
program_interrupt(env, PGM_OPERAND, 6);
s390_program_interrupt(env, PGM_OPERAND, 6, ra);
return 0;
}
@ -879,7 +940,7 @@ int mpcifc_service_call(S390CPU *cpu, uint8_t r1, uint64_t fiba, uint8_t ar)
} else if (pbdev->iommu->enabled) {
cc = ZPCI_PCI_LS_ERR;
s390_set_status_code(env, r1, ZPCI_MOD_ST_SEQUENCE);
} else if (reg_ioat(env, pbdev->iommu, fib)) {
} else if (reg_ioat(env, pbdev->iommu, fib, ra)) {
cc = ZPCI_PCI_LS_ERR;
s390_set_status_code(env, r1, ZPCI_MOD_ST_INSUF_RES);
}
@ -904,7 +965,7 @@ int mpcifc_service_call(S390CPU *cpu, uint8_t r1, uint64_t fiba, uint8_t ar)
s390_set_status_code(env, r1, ZPCI_MOD_ST_SEQUENCE);
} else {
pci_dereg_ioat(pbdev->iommu);
if (reg_ioat(env, pbdev->iommu, fib)) {
if (reg_ioat(env, pbdev->iommu, fib, ra)) {
cc = ZPCI_PCI_LS_ERR;
s390_set_status_code(env, r1, ZPCI_MOD_ST_INSUF_RES);
}
@ -935,7 +996,7 @@ int mpcifc_service_call(S390CPU *cpu, uint8_t r1, uint64_t fiba, uint8_t ar)
pbdev->fmb_addr = ldq_p(&fib.fmb_addr);
break;
default:
program_interrupt(&cpu->env, PGM_OPERAND, 6);
s390_program_interrupt(&cpu->env, PGM_OPERAND, 6, ra);
cc = ZPCI_PCI_LS_ERR;
}
@ -943,7 +1004,8 @@ int mpcifc_service_call(S390CPU *cpu, uint8_t r1, uint64_t fiba, uint8_t ar)
return 0;
}
int stpcifc_service_call(S390CPU *cpu, uint8_t r1, uint64_t fiba, uint8_t ar)
int stpcifc_service_call(S390CPU *cpu, uint8_t r1, uint64_t fiba, uint8_t ar,
uintptr_t ra)
{
CPUS390XState *env = &cpu->env;
uint8_t dmaas;
@ -954,7 +1016,7 @@ int stpcifc_service_call(S390CPU *cpu, uint8_t r1, uint64_t fiba, uint8_t ar)
uint64_t cc = ZPCI_PCI_LS_OK;
if (env->psw.mask & PSW_MASK_PSTATE) {
program_interrupt(env, PGM_PRIVILEGED, 6);
s390_program_interrupt(env, PGM_PRIVILEGED, 6, ra);
return 0;
}
@ -968,7 +1030,7 @@ int stpcifc_service_call(S390CPU *cpu, uint8_t r1, uint64_t fiba, uint8_t ar)
}
if (fiba & 0x7) {
program_interrupt(env, PGM_SPECIFICATION, 6);
s390_program_interrupt(env, PGM_SPECIFICATION, 6, ra);
return 0;
}
@ -1026,6 +1088,7 @@ int stpcifc_service_call(S390CPU *cpu, uint8_t r1, uint64_t fiba, uint8_t ar)
out:
if (s390_cpu_virt_mem_write(cpu, fiba, ar, (uint8_t *)&fib, sizeof(fib))) {
s390_cpu_virt_mem_handle_exc(cpu, ra);
return 0;
}
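
A worked example of the tightened pcilg/pcistg length checks above (an illustration, not text from the patch): for an I/O BAR access (pcias 0 to 5) the length must be non-zero and must not cross a doubleword, so at offset 0x16 a 4-byte access is rejected because 8 - (0x16 & 0x7) = 2, while a 2-byte access at the same offset is allowed. For the configuration-space pseudo BAR (pcias 15) the window is a word, and a length of 3 is rejected explicitly in addition to the word-boundary check, so only 1-, 2- and 4-byte accesses can reach pci_host_config_read/write_common.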


@ -162,7 +162,7 @@ typedef struct ClpRspQueryPciGrp {
#define CLP_RSP_QPCIG_MASK_FRAME 0x2
#define CLP_RSP_QPCIG_MASK_REFRESH 0x1
uint8_t fr;
uint16_t reserved2;
uint16_t maxstbl;
uint16_t mui;
uint64_t reserved3;
uint64_t dasm; /* dma address space mask */
@ -293,13 +293,19 @@ typedef struct ZpciFib {
int pci_dereg_irqs(S390PCIBusDevice *pbdev);
void pci_dereg_ioat(S390PCIIOMMU *iommu);
int clp_service_call(S390CPU *cpu, uint8_t r2);
int pcilg_service_call(S390CPU *cpu, uint8_t r1, uint8_t r2);
int pcistg_service_call(S390CPU *cpu, uint8_t r1, uint8_t r2);
int rpcit_service_call(S390CPU *cpu, uint8_t r1, uint8_t r2);
int clp_service_call(S390CPU *cpu, uint8_t r2, uintptr_t ra);
int pcilg_service_call(S390CPU *cpu, uint8_t r1, uint8_t r2, uintptr_t ra);
int pcistg_service_call(S390CPU *cpu, uint8_t r1, uint8_t r2, uintptr_t ra);
int rpcit_service_call(S390CPU *cpu, uint8_t r1, uint8_t r2, uintptr_t ra);
int pcistb_service_call(S390CPU *cpu, uint8_t r1, uint8_t r3, uint64_t gaddr,
uint8_t ar);
int mpcifc_service_call(S390CPU *cpu, uint8_t r1, uint64_t fiba, uint8_t ar);
int stpcifc_service_call(S390CPU *cpu, uint8_t r1, uint64_t fiba, uint8_t ar);
uint8_t ar, uintptr_t ra);
int mpcifc_service_call(S390CPU *cpu, uint8_t r1, uint64_t fiba, uint8_t ar,
uintptr_t ra);
int stpcifc_service_call(S390CPU *cpu, uint8_t r1, uint64_t fiba, uint8_t ar,
uintptr_t ra);
#define ZPCI_IO_BAR_MIN 0
#define ZPCI_IO_BAR_MAX 5
#define ZPCI_CONFIG_BAR 15
#endif


@ -152,14 +152,38 @@ static void virtio_ccw_register_hcalls(void)
virtio_ccw_hcall_early_printk);
}
/*
* KVM does only support memory slots up to KVM_MEM_MAX_NR_PAGES pages
* as the dirty bitmap must be managed by bitops that take an int as
* position indicator. If we have a guest beyond that we will split off
* new subregions. The split must happen on a segment boundary (1MB).
*/
#define KVM_MEM_MAX_NR_PAGES ((1ULL << 31) - 1)
#define SEG_MSK (~0xfffffULL)
#define KVM_SLOT_MAX_BYTES ((KVM_MEM_MAX_NR_PAGES * TARGET_PAGE_SIZE) & SEG_MSK)
static void s390_memory_init(ram_addr_t mem_size)
{
MemoryRegion *sysmem = get_system_memory();
MemoryRegion *ram = g_new(MemoryRegion, 1);
ram_addr_t chunk, offset = 0;
unsigned int number = 0;
gchar *name;
/* allocate RAM for core */
memory_region_allocate_system_memory(ram, NULL, "s390.ram", mem_size);
memory_region_add_subregion(sysmem, 0, ram);
name = g_strdup_printf("s390.ram");
while (mem_size) {
MemoryRegion *ram = g_new(MemoryRegion, 1);
uint64_t size = mem_size;
/* KVM does not allow memslots >= 8 TB */
chunk = MIN(size, KVM_SLOT_MAX_BYTES);
memory_region_allocate_system_memory(ram, NULL, name, chunk);
memory_region_add_subregion(sysmem, offset, ram);
mem_size -= chunk;
offset += chunk;
g_free(name);
name = g_strdup_printf("s390.ram.%u", ++number);
}
g_free(name);
/* Initialize storage key device */
s390_skeys_init();
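
As a quick sanity check of the new constants above (an illustration, not text from the patch): KVM_MEM_MAX_NR_PAGES is 2^31 - 1 pages, which at a 4 KiB TARGET_PAGE_SIZE comes to 8 TiB minus 4 KiB; masking with SEG_MSK rounds that down to a 1 MiB segment boundary, so KVM_SLOT_MAX_BYTES is 8 TiB minus 1 MiB, the "7.999TB" limit mentioned in the commit subject. A guest configured with, say, 16 TiB of RAM therefore ends up with three regions: "s390.ram" and "s390.ram.1" of 8 TiB minus 1 MiB each, plus a 2 MiB "s390.ram.2" remainder.
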
@ -302,13 +326,17 @@ static void ccw_init(MachineState *machine)
/*
* Non mcss-e enabled guests only see the devices from the default
* css, which is determined by the value of the squash_mcss property.
* Note: we must not squash non virtual devices to css 0xFE.
*/
if (css_bus->squash_mcss) {
ret = css_create_css_image(0, true);
} else {
ret = css_create_css_image(VIRTUAL_CSSID, true);
}
if (qemu_opt_get(qemu_get_machine_opts(), "s390-squash-mcss")) {
warn_report("The machine property 's390-squash-mcss' is deprecated"
" (obsoleted by lifting the cssid restrictions).");
}
assert(ret == 0);
if (css_migration_enabled()) {
css_register_vmstate();
@ -583,7 +611,7 @@ static inline void s390_machine_initfn(Object *obj)
object_property_add_bool(obj, "s390-squash-mcss",
machine_get_squash_mcss,
machine_set_squash_mcss, NULL);
object_property_set_description(obj, "s390-squash-mcss",
object_property_set_description(obj, "s390-squash-mcss", "(deprecated) "
"enable/disable squashing subchannels into the default css",
NULL);
object_property_set_bool(obj, false, "s390-squash-mcss", NULL);
@ -639,6 +667,9 @@ bool css_migration_enabled(void)
} \
type_init(ccw_machine_register_##suffix)
#define CCW_COMPAT_2_11 \
HW_COMPAT_2_11
#define CCW_COMPAT_2_10 \
HW_COMPAT_2_10
@ -716,14 +747,30 @@ bool css_migration_enabled(void)
.value = "0",\
},
static void ccw_machine_2_12_instance_options(MachineState *machine)
{
}
static void ccw_machine_2_12_class_options(MachineClass *mc)
{
}
DEFINE_CCW_MACHINE(2_12, "2.12", true);
static void ccw_machine_2_11_instance_options(MachineState *machine)
{
static const S390FeatInit qemu_cpu_feat = { S390_FEAT_LIST_QEMU_V2_11 };
ccw_machine_2_12_instance_options(machine);
/* before 2.12 we emulated the very first z900 */
s390_set_qemu_cpu_model(0x2064, 7, 1, qemu_cpu_feat);
}
static void ccw_machine_2_11_class_options(MachineClass *mc)
{
ccw_machine_2_12_class_options(mc);
SET_MACHINE_COMPAT(mc, CCW_COMPAT_2_11);
}
DEFINE_CCW_MACHINE(2_11, "2.11", true);
DEFINE_CCW_MACHINE(2_11, "2.11", false);
static void ccw_machine_2_10_instance_options(MachineState *machine)
{


@ -701,7 +701,7 @@ static void virtio_ccw_device_realize(VirtioCcwDevice *dev, Error **errp)
SubchDev *sch;
Error *err = NULL;
sch = css_create_sch(ccw_dev->devno, true, cbus->squash_mcss, errp);
sch = css_create_sch(ccw_dev->devno, cbus->squash_mcss, errp);
if (!sch) {
return;
}


@ -1,7 +1,8 @@
#ifndef HW_COMPAT_H
#define HW_COMPAT_H
#define HW_COMPAT_2_11
#define HW_COMPAT_2_11 \
/* empty */
#define HW_COMPAT_2_10 \
{\


@ -248,7 +248,6 @@ int css_do_tsch_get_irb(SubchDev *sch, IRB *irb, int *irb_len);
void css_do_tsch_update_subch(SubchDev *sch);
int css_do_stcrw(CRW *crw);
void css_undo_stcrw(CRW *crw);
int css_do_tpi(IOIntCode *int_code, int lowcore);
int css_collect_chp_desc(int m, uint8_t cssid, uint8_t f_chpid, uint8_t l_chpid,
int rfmt, void *buf);
void css_do_schm(uint8_t mbk, int update, int dct, uint64_t mbo);
@ -272,12 +271,9 @@ extern const PropertyInfo css_devid_ro_propinfo;
* default css image for it.
* If @p bus_id is valid, and @p squash_mcss is false, verify that it is
* not already in use, and find a free devno for it.
* If @p bus_id is not valid, and if either @p squash_mcss or @p is_virtual
* is true, find a free subchannel id and device number across all
* subchannel sets from the default css image.
* If @p bus_id is not valid, and if both @p squash_mcss and @p is_virtual
* are false, find a non-full css image and find a free subchannel id and
* device number across all subchannel sets from it.
* If @p bus_id is not valid find a free subchannel id and device number
* across all subchannel sets and all css images starting from the default
* css image.
*
* If either of the former actions succeed, allocate a subchannel structure,
* initialise it with the bus id, subchannel id and device number, register
@ -286,8 +282,7 @@ extern const PropertyInfo css_devid_ro_propinfo;
* The caller becomes owner of the returned subchannel structure and
* is responsible for unregistering and freeing it.
*/
SubchDev *css_create_sch(CssDevId bus_id, bool is_virtual, bool squash_mcss,
Error **errp);
SubchDev *css_create_sch(CssDevId bus_id, bool squash_mcss, Error **errp);
/** Turn on css migration */
void css_register_vmstate(void);



@ -3,7 +3,7 @@
* into the pc-bios directory of qemu.
*
* Copyright (c) 2013 Alexander Graf <agraf@suse.de>
* Copyright 2013 IBM Corp.
* Copyright IBM Corp. 2013, 2017
*
* This work is licensed under the terms of the GNU GPL, version 2 or (at
* your option) any later version. See the COPYING file in the top-level
@ -13,8 +13,32 @@
.globl _start
_start:
larl %r15, stack + 0x8000 /* Set up stack */
j main /* And call C */
larl %r15, stack + 0x8000 /* Set up stack */
/* clear bss */
larl %r2, __bss_start
larl %r3, _end
slgr %r3, %r2 /* get sizeof bss */
ltgr %r3,%r3 /* bss empty? */
jz done
aghi %r3,-1
srlg %r4,%r3,8 /* how many 256 byte chunks? */
ltgr %r4,%r4
lgr %r1,%r2
jz remainder
loop:
xc 0(256,%r1),0(%r1)
la %r1,256(%r1)
brctg %r4,loop
remainder:
larl %r2,memsetxc
ex %r3,0(%r2)
done:
j main /* And call C */
memsetxc:
xc 0(1,%r1),0(%r1)
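
In C terms the new start-up code is roughly the following sketch (illustrative only; __bss_start and _end are the linker-provided symbols referenced above, and the real code does this in assembly because it runs before any C code):

    /* rough C equivalent of the bss-clearing loop above (sketch, not the real code) */
    #include <string.h>

    extern char __bss_start[];   /* start of .bss, provided by the linker script */
    extern char _end[];          /* end of .bss, provided by the linker script */

    static void clear_bss(void)
    {
        memset(__bss_start, 0, (size_t)(_end - __bss_start));
    }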
/*
* void disabled_wait(void)


@ -2501,6 +2501,14 @@ enabled via the ``-machine usb=on'' argument.
The ``-nodefconfig`` argument is a synonym for ``-no-user-config``.
@subsection -machine s390-squash-mcss=on|off (since 2.12.0)
The ``s390-squash-mcss=on`` property has been obsoleted by allowing the
cssid to be chosen freely. Instead of squashing subchannels into the
default channel subsystem image for guests that do not support multiple
channel subsystems, all devices can be put into the default channel
subsystem image.
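
For illustration (an assumed example, not taken from the documentation patch): where ``-machine s390-squash-mcss=on`` was previously needed so that a guest without MCSS-E support could see a passthrough subchannel, such a device can now simply be given an address in the default channel subsystem image via its devno property, for instance devno=fe.0.1234 on a vfio-ccw device.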
@section qemu-img command line arguments
@subsection convert -s (since 2.0.0)


@ -43,7 +43,7 @@ DEF("machine", HAS_ARG, QEMU_OPTION_machine, \
" suppress-vmdesc=on|off disables self-describing migration (default=off)\n"
" nvdimm=on|off controls NVDIMM support (default=off)\n"
" enforce-config-section=on|off enforce configuration section migration (default=off)\n"
" s390-squash-mcss=on|off controls support for squashing into default css (default=off)\n",
" s390-squash-mcss=on|off (deprecated) controls support for squashing into default css (default=off)\n",
QEMU_ARCH_ALL)
STEXI
@item -machine [type=]@var{name}[,prop=@var{value}[,...]]
@ -98,6 +98,12 @@ Enables or disables NVDIMM support. The default is off.
@item s390-squash-mcss=on|off
Enables or disables squashing subchannels into the default css.
The default is off.
NOTE: This property is deprecated and will be removed in future releases.
The ``s390-squash-mcss=on`` property has been obsoleted by allowing the
cssid to be chosen freely. Instead of squashing subchannels into the
default channel subsystem image for guests that do not support multiple
channel subsystems, all devices can be put into the default channel
subsystem image.
@item enforce-config-section=on|off
If @option{enforce-config-section} is set to @var{on}, force migration
code to send configuration section even if the machine-type sets the


@ -564,7 +564,7 @@ void HELPER(sacf)(CPUS390XState *env, uint64_t a1)
break;
default:
HELPER_LOG("unknown sacf mode: %" PRIx64 "\n", a1);
program_interrupt(env, PGM_SPECIFICATION, 2);
s390_program_interrupt(env, PGM_SPECIFICATION, 2, GETPC());
break;
}
}


@ -351,6 +351,9 @@ extern const struct VMStateDescription vmstate_s390_cpu;
#define CR0_CPU_TIMER_SC 0x0000000000000400ULL
#define CR0_SERVICE_SC 0x0000000000000200ULL
/* Control register 14 bits */
#define CR14_CHANNEL_REPORT_SC 0x0000000010000000ULL
/* MMU */
#define MMU_PRIMARY_IDX 0
#define MMU_SECONDARY_IDX 1
@ -674,6 +677,26 @@ struct sysib_322 {
#define MCIC_VB_CT 0x0000000000020000ULL
#define MCIC_VB_CC 0x0000000000010000ULL
static inline uint64_t s390_build_validity_mcic(void)
{
uint64_t mcic;
/*
* Indicate all validity bits (no damage) only. Other bits have to be
* added by the caller. (storage errors, subclasses and subclass modifiers)
*/
mcic = MCIC_VB_WP | MCIC_VB_MS | MCIC_VB_PM | MCIC_VB_IA | MCIC_VB_FP |
MCIC_VB_GR | MCIC_VB_CR | MCIC_VB_ST | MCIC_VB_AR | MCIC_VB_PR |
MCIC_VB_FC | MCIC_VB_CT | MCIC_VB_CC;
if (s390_has_feat(S390_FEAT_VECTOR)) {
mcic |= MCIC_VB_VR;
}
if (s390_has_feat(S390_FEAT_GUARDED_STORAGE)) {
mcic |= MCIC_VB_GS;
}
return mcic;
}
/* cpu.c */
int s390_get_clock(uint8_t *tod_high, uint64_t *tod_low);
@ -699,6 +722,9 @@ static inline unsigned int s390_cpu_set_state(uint8_t cpu_state, S390CPU *cpu)
/* cpu_models.c */
void s390_cpu_list(FILE *f, fprintf_function cpu_fprintf);
#define cpu_list s390_cpu_list
void s390_set_qemu_cpu_model(uint16_t type, uint8_t gen, uint8_t ec_ga,
const S390FeatInit feat_init);
/* helper.c */
#define cpu_init(cpu_model) cpu_generic_init(TYPE_S390_CPU, cpu_model)
@ -719,7 +745,9 @@ void s390_io_interrupt(uint16_t subchannel_id, uint16_t subchannel_nr,
uint32_t io_int_parm, uint32_t io_int_word);
/* automatically detect the instruction length */
#define ILEN_AUTO 0xff
void program_interrupt(CPUS390XState *env, uint32_t code, int ilen);
#define RA_IGNORED 0
void s390_program_interrupt(CPUS390XState *env, uint32_t code, int ilen,
uintptr_t ra);
/* service interrupts are floating therefore we must not pass an cpustate */
void s390_sclp_extint(uint32_t parm);
@ -733,6 +761,7 @@ int s390_cpu_virt_mem_rw(S390CPU *cpu, vaddr laddr, uint8_t ar, void *hostbuf,
s390_cpu_virt_mem_rw(cpu, laddr, ar, dest, len, true)
#define s390_cpu_virt_mem_check_write(cpu, laddr, ar, len) \
s390_cpu_virt_mem_rw(cpu, laddr, ar, NULL, len, true)
void s390_cpu_virt_mem_handle_exc(S390CPU *cpu, uintptr_t ra);
/* sigp.c */
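
Much of this series mechanically switches helpers from program_interrupt() to the s390_program_interrupt() declared above, passing the host return address down so the CPU state can be unwound from it rather than each caller invoking cpu_restore_state() by hand (see the crypto and divide helpers further down). A minimal sketch of the pattern, using a hypothetical helper rather than code from the series:

    /* hypothetical TCG helper illustrating the ra-threading pattern (not from the patch);
     * relies on the declarations added above (ILEN_AUTO, RA_IGNORED, s390_program_interrupt) */
    void HELPER(example_priv_op)(CPUS390XState *env, uint64_t r1)
    {
        const uintptr_t ra = GETPC();   /* return address into the generated code */

        if (env->psw.mask & PSW_MASK_PSTATE) {
            /* unwinds the CPU state via ra, then injects the program interrupt */
            s390_program_interrupt(env, PGM_PRIVILEGED, ILEN_AUTO, ra);
            return;
        }
        /* ... actual instruction emulation would go here ... */
    }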


@ -15,7 +15,6 @@
#include "internal.h"
#include "kvm_s390x.h"
#include "sysemu/kvm.h"
#include "gen-features.h"
#include "qapi/error.h"
#include "qapi/visitor.h"
#include "qemu/error-report.h"
@ -81,6 +80,12 @@ static S390CPUDef s390_cpu_defs[] = {
CPUDEF_INIT(0x3906, 14, 1, 47, 0x08000000U, "z14", "IBM z14 GA1"),
};
#define QEMU_MAX_CPU_TYPE 0x2827
#define QEMU_MAX_CPU_GEN 12
#define QEMU_MAX_CPU_EC_GA 2
static const S390FeatInit qemu_max_cpu_feat_init = { S390_FEAT_LIST_QEMU_MAX };
static S390FeatBitmap qemu_max_cpu_feat;
/* features part of a base model but not relevant for finding a base model */
S390FeatBitmap ignored_base_feat;
@ -812,48 +817,6 @@ static void check_compatibility(const S390CPUModel *max_model,
"available in the configuration: ");
}
/**
* The base TCG CPU model "qemu" is based on the z900. However, we already
* can also emulate some additional features of later CPU generations, so
* we add these additional feature bits here.
*/
static void add_qemu_cpu_model_features(S390FeatBitmap fbm)
{
static const int feats[] = {
S390_FEAT_DAT_ENH,
S390_FEAT_IDTE_SEGMENT,
S390_FEAT_STFLE,
S390_FEAT_SENSE_RUNNING_STATUS,
S390_FEAT_EXTENDED_IMMEDIATE,
S390_FEAT_EXTENDED_TRANSLATION_2,
S390_FEAT_MSA,
S390_FEAT_EXTENDED_TRANSLATION_3,
S390_FEAT_LONG_DISPLACEMENT,
S390_FEAT_LONG_DISPLACEMENT_FAST,
S390_FEAT_ETF2_ENH,
S390_FEAT_STORE_CLOCK_FAST,
S390_FEAT_MOVE_WITH_OPTIONAL_SPEC,
S390_FEAT_ETF3_ENH,
S390_FEAT_COMPARE_AND_SWAP_AND_STORE,
S390_FEAT_COMPARE_AND_SWAP_AND_STORE_2,
S390_FEAT_GENERAL_INSTRUCTIONS_EXT,
S390_FEAT_EXECUTE_EXT,
S390_FEAT_FLOATING_POINT_SUPPPORT_ENH,
S390_FEAT_STFLE_45,
S390_FEAT_STFLE_49,
S390_FEAT_LOCAL_TLB_CLEARING,
S390_FEAT_STFLE_53,
S390_FEAT_MSA_EXT_5,
S390_FEAT_MSA_EXT_3,
S390_FEAT_MSA_EXT_4,
};
int i;
for (i = 0; i < ARRAY_SIZE(feats); i++) {
set_bit(feats[i], fbm);
}
}
static S390CPUModel *get_max_cpu_model(Error **errp)
{
static S390CPUModel max_model;
@ -866,12 +829,10 @@ static S390CPUModel *get_max_cpu_model(Error **errp)
if (kvm_enabled()) {
kvm_s390_get_host_cpu_model(&max_model, errp);
} else {
/* TCG emulates a z900 (with some optional additional features) */
max_model.def = &s390_cpu_defs[0];
bitmap_copy(max_model.features, max_model.def->default_feat,
S390_FEAT_MAX);
add_qemu_cpu_model_features(max_model.features);
}
max_model.def = s390_find_cpu_def(QEMU_MAX_CPU_TYPE, QEMU_MAX_CPU_GEN,
QEMU_MAX_CPU_EC_GA, NULL);
bitmap_copy(max_model.features, qemu_max_cpu_feat, S390_FEAT_MAX);
}
if (!*errp) {
cached = true;
return &max_model;
@ -1127,18 +1088,42 @@ static void s390_host_cpu_model_initfn(Object *obj)
}
#endif
static S390CPUDef s390_qemu_cpu_def;
static S390CPUModel s390_qemu_cpu_model;
/* Set the qemu CPU model (on machine initialization). Must not be called
* once CPUs have been created.
*/
void s390_set_qemu_cpu_model(uint16_t type, uint8_t gen, uint8_t ec_ga,
const S390FeatInit feat_init)
{
const S390CPUDef *def = s390_find_cpu_def(type, gen, ec_ga, NULL);
g_assert(def);
g_assert(QTAILQ_EMPTY(&cpus));
/* TCG emulates some features that can usually not be enabled with
* the emulated machine generation. Make sure they can be enabled
* when using the QEMU model by adding them to full_feat. We have
* to copy the definition to do that.
*/
memcpy(&s390_qemu_cpu_def, def, sizeof(s390_qemu_cpu_def));
bitmap_or(s390_qemu_cpu_def.full_feat, s390_qemu_cpu_def.full_feat,
qemu_max_cpu_feat, S390_FEAT_MAX);
/* build the CPU model */
s390_qemu_cpu_model.def = &s390_qemu_cpu_def;
bitmap_zero(s390_qemu_cpu_model.features, S390_FEAT_MAX);
s390_init_feat_bitmap(feat_init, s390_qemu_cpu_model.features);
}
static void s390_qemu_cpu_model_initfn(Object *obj)
{
static S390CPUDef s390_qemu_cpu_defs;
S390CPU *cpu = S390_CPU(obj);
cpu->model = g_malloc0(sizeof(*cpu->model));
/* TCG emulates a z900 (with some optional additional features) */
memcpy(&s390_qemu_cpu_defs, &s390_cpu_defs[0], sizeof(s390_qemu_cpu_defs));
add_qemu_cpu_model_features(s390_qemu_cpu_defs.full_feat);
cpu->model->def = &s390_qemu_cpu_defs;
bitmap_copy(cpu->model->features, cpu->model->def->default_feat,
S390_FEAT_MAX);
/* copy the CPU model so we can modify it */
memcpy(cpu->model, &s390_qemu_cpu_model, sizeof(*cpu->model));
}
static void s390_cpu_model_finalize(Object *obj)
@ -1279,11 +1264,13 @@ static void init_ignored_base_feat(void)
static void register_types(void)
{
static const S390FeatInit qemu_latest_init = { S390_FEAT_LIST_QEMU_LATEST };
int i;
init_ignored_base_feat();
/* init all bitmaps from generated data initially */
s390_init_feat_bitmap(qemu_max_cpu_feat_init, qemu_max_cpu_feat);
for (i = 0; i < ARRAY_SIZE(s390_cpu_defs); i++) {
s390_init_feat_bitmap(s390_cpu_defs[i].base_init,
s390_cpu_defs[i].base_feat);
@ -1293,6 +1280,10 @@ static void register_types(void)
s390_cpu_defs[i].full_feat);
}
/* initialize the qemu model with latest definition */
s390_set_qemu_cpu_model(QEMU_MAX_CPU_TYPE, QEMU_MAX_CPU_GEN,
QEMU_MAX_CPU_EC_GA, qemu_latest_init);
for (i = 0; i < ARRAY_SIZE(s390_cpu_defs); i++) {
char *base_name = s390_base_cpu_type_name(s390_cpu_defs[i].name);
TypeInfo ti_base = {


@ -14,6 +14,7 @@
#define TARGET_S390X_CPU_MODELS_H
#include "cpu_features.h"
#include "gen-features.h"
#include "qom/cpu.h"
/* static CPU definition */


@ -23,7 +23,6 @@ uint32_t HELPER(msa)(CPUS390XState *env, uint32_t r1, uint32_t r2, uint32_t r3,
const uintptr_t ra = GETPC();
const uint8_t mod = env->regs[0] & 0x80ULL;
const uint8_t fc = env->regs[0] & 0x7fULL;
CPUState *cs = CPU(s390_env_get_cpu(env));
uint8_t subfunc[16] = { 0 };
uint64_t param_addr;
int i;
@ -35,8 +34,7 @@ uint32_t HELPER(msa)(CPUS390XState *env, uint32_t r1, uint32_t r2, uint32_t r3,
case S390_FEAT_TYPE_PCKMO:
case S390_FEAT_TYPE_PCC:
if (mod) {
cpu_restore_state(cs, ra);
program_interrupt(env, PGM_SPECIFICATION, 4);
s390_program_interrupt(env, PGM_SPECIFICATION, 4, ra);
return 0;
}
break;
@ -44,8 +42,7 @@ uint32_t HELPER(msa)(CPUS390XState *env, uint32_t r1, uint32_t r2, uint32_t r3,
s390_get_feat_block(type, subfunc);
if (!test_be_bit(fc, subfunc)) {
cpu_restore_state(cs, ra);
program_interrupt(env, PGM_SPECIFICATION, 4);
s390_program_interrupt(env, PGM_SPECIFICATION, 4, ra);
return 0;
}


@ -99,19 +99,19 @@ int handle_diag_288(CPUS390XState *env, uint64_t r1, uint64_t r3)
#define DIAG_308_RC_NO_CONF 0x0102
#define DIAG_308_RC_INVALID 0x0402
void handle_diag_308(CPUS390XState *env, uint64_t r1, uint64_t r3)
void handle_diag_308(CPUS390XState *env, uint64_t r1, uint64_t r3, uintptr_t ra)
{
uint64_t addr = env->regs[r1];
uint64_t subcode = env->regs[r3];
IplParameterBlock *iplb;
if (env->psw.mask & PSW_MASK_PSTATE) {
program_interrupt(env, PGM_PRIVILEGED, ILEN_AUTO);
s390_program_interrupt(env, PGM_PRIVILEGED, ILEN_AUTO, ra);
return;
}
if ((subcode & ~0x0ffffULL) || (subcode > 6)) {
program_interrupt(env, PGM_SPECIFICATION, ILEN_AUTO);
s390_program_interrupt(env, PGM_SPECIFICATION, ILEN_AUTO, ra);
return;
}
@ -136,12 +136,12 @@ void handle_diag_308(CPUS390XState *env, uint64_t r1, uint64_t r3)
break;
case 5:
if ((r1 & 1) || (addr & 0x0fffULL)) {
program_interrupt(env, PGM_SPECIFICATION, ILEN_AUTO);
s390_program_interrupt(env, PGM_SPECIFICATION, ILEN_AUTO, ra);
return;
}
if (!address_space_access_valid(&address_space_memory, addr,
sizeof(IplParameterBlock), false)) {
program_interrupt(env, PGM_ADDRESSING, ILEN_AUTO);
s390_program_interrupt(env, PGM_ADDRESSING, ILEN_AUTO, ra);
return;
}
iplb = g_new0(IplParameterBlock, 1);
@ -165,12 +165,12 @@ out:
return;
case 6:
if ((r1 & 1) || (addr & 0x0fffULL)) {
program_interrupt(env, PGM_SPECIFICATION, ILEN_AUTO);
s390_program_interrupt(env, PGM_SPECIFICATION, ILEN_AUTO, ra);
return;
}
if (!address_space_access_valid(&address_space_memory, addr,
sizeof(IplParameterBlock), true)) {
program_interrupt(env, PGM_ADDRESSING, ILEN_AUTO);
s390_program_interrupt(env, PGM_ADDRESSING, ILEN_AUTO, ra);
return;
}
iplb = s390_ipl_get_iplb();


@ -395,6 +395,9 @@ static void do_mchk_interrupt(CPUS390XState *env)
lowcore = cpu_map_lowcore(env);
/* we are always in z/Architecture mode */
lowcore->ar_access_id = 1;
for (i = 0; i < 16; i++) {
lowcore->floating_pt_save_area[i] = cpu_to_be64(get_freg(env, i)->ll);
lowcore->gpregs_save_area[i] = cpu_to_be64(env->regs[i]);
@ -404,13 +407,10 @@ static void do_mchk_interrupt(CPUS390XState *env)
lowcore->prefixreg_save_area = cpu_to_be32(env->psa);
lowcore->fpt_creg_save_area = cpu_to_be32(env->fpc);
lowcore->tod_progreg_save_area = cpu_to_be32(env->todpr);
lowcore->cpu_timer_save_area[0] = cpu_to_be32(env->cputm >> 32);
lowcore->cpu_timer_save_area[1] = cpu_to_be32((uint32_t)env->cputm);
lowcore->clock_comp_save_area[0] = cpu_to_be32(env->ckc >> 32);
lowcore->clock_comp_save_area[1] = cpu_to_be32((uint32_t)env->ckc);
lowcore->cpu_timer_save_area = cpu_to_be64(env->cputm);
lowcore->clock_comp_save_area = cpu_to_be64(env->ckc >> 8);
lowcore->mcck_interruption_code[0] = cpu_to_be32(0x00400f1d);
lowcore->mcck_interruption_code[1] = cpu_to_be32(0x40330000);
lowcore->mcic = cpu_to_be64(s390_build_validity_mcic() | MCIC_SC_CP);
lowcore->mcck_old_psw.mask = cpu_to_be64(get_psw_mask(env));
lowcore->mcck_old_psw.addr = cpu_to_be64(env->psw.addr);
mask = be64_to_cpu(lowcore->mcck_new_psw.mask);
@ -554,10 +554,7 @@ void s390x_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
S390CPU *cpu = S390_CPU(cs);
CPUS390XState *env = &cpu->env;
if (retaddr) {
cpu_restore_state(cs, retaddr);
}
program_interrupt(env, PGM_SPECIFICATION, ILEN_AUTO);
s390_program_interrupt(env, PGM_SPECIFICATION, ILEN_AUTO, retaddr);
}
#endif /* CONFIG_USER_ONLY */


@ -44,7 +44,7 @@ static void ieee_exception(CPUS390XState *env, uint32_t dxc, uintptr_t retaddr)
/* Install the DXC code. */
env->fpc = (env->fpc & ~0xff00) | (dxc << 8);
/* Trap. */
runtime_exception(env, PGM_DATA, retaddr);
s390_program_interrupt(env, PGM_DATA, ILEN_AUTO, retaddr);
}
/* Should be called after any operation that may raise IEEE exceptions. */


@ -536,6 +536,52 @@ static uint16_t default_GEN14_GA1[] = {
S390_FEAT_GROUP_MSA_EXT_8,
};
/* QEMU (CPU model) features */
static uint16_t qemu_V2_11[] = {
S390_FEAT_GROUP_PLO,
S390_FEAT_ESAN3,
S390_FEAT_ZARCH,
};
static uint16_t qemu_LATEST[] = {
S390_FEAT_DAT_ENH,
S390_FEAT_IDTE_SEGMENT,
S390_FEAT_STFLE,
S390_FEAT_SENSE_RUNNING_STATUS,
S390_FEAT_EXTENDED_TRANSLATION_2,
S390_FEAT_MSA,
S390_FEAT_LONG_DISPLACEMENT,
S390_FEAT_LONG_DISPLACEMENT_FAST,
S390_FEAT_EXTENDED_IMMEDIATE,
S390_FEAT_EXTENDED_TRANSLATION_3,
S390_FEAT_ETF2_ENH,
S390_FEAT_STORE_CLOCK_FAST,
S390_FEAT_MOVE_WITH_OPTIONAL_SPEC,
S390_FEAT_ETF3_ENH,
S390_FEAT_EXTRACT_CPU_TIME,
S390_FEAT_COMPARE_AND_SWAP_AND_STORE,
S390_FEAT_COMPARE_AND_SWAP_AND_STORE_2,
S390_FEAT_GENERAL_INSTRUCTIONS_EXT,
S390_FEAT_EXECUTE_EXT,
S390_FEAT_SET_PROGRAM_PARAMETERS,
S390_FEAT_FLOATING_POINT_SUPPPORT_ENH,
S390_FEAT_STFLE_45,
S390_FEAT_STFLE_49,
S390_FEAT_LOCAL_TLB_CLEARING,
S390_FEAT_INTERLOCKED_ACCESS_2,
S390_FEAT_MSA_EXT_4,
S390_FEAT_MSA_EXT_3,
};
/* add all new definitions before this point */
static uint16_t qemu_MAX[] = {
/* z13+ features */
S390_FEAT_STFLE_53,
/* generates a dependency warning, leave it out for now */
S390_FEAT_MSA_EXT_5,
};
/****** END FEATURE DEFS ******/
#define _YEARS "2016"
@ -627,6 +673,24 @@ static FeatGroupDefSpec FeatGroupDef[] = {
FEAT_GROUP_INITIALIZER(MSA_EXT_8),
};
#define QEMU_FEAT_INITIALIZER(_name) \
{ \
.name = "S390_FEAT_LIST_QEMU_" #_name, \
.bits = \
{ .data = qemu_##_name, \
.len = ARRAY_SIZE(qemu_##_name) }, \
}
/*******************************
* QEMU (CPU model) features
*******************************/
static FeatGroupDefSpec QemuFeatDef[] = {
QEMU_FEAT_INITIALIZER(V2_11),
QEMU_FEAT_INITIALIZER(LATEST),
QEMU_FEAT_INITIALIZER(MAX),
};
static void set_bits(uint64_t list[], BitSpec bits)
{
uint32_t i;
@ -684,6 +748,29 @@ static void print_feature_defs(void)
}
}
static void print_qemu_feature_defs(void)
{
uint64_t feat[S390_FEAT_MAX / 64 + 1] = {};
int i, j;
printf("\n/* QEMU (CPU model) feature list data */\n");
/* for now we assume that we only add new features */
for (i = 0; i < ARRAY_SIZE(QemuFeatDef); i++) {
set_bits(feat, QemuFeatDef[i].bits);
printf("#define %s\t", QemuFeatDef[i].name);
for (j = 0; j < ARRAY_SIZE(feat); j++) {
printf("0x%016"PRIx64"ULL", feat[j]);
if (j < ARRAY_SIZE(feat) - 1) {
printf(",");
} else {
printf("\n");
}
}
}
}
static void print_feature_group_defs(void)
{
int i, j;
@ -721,6 +808,7 @@ int main(int argc, char *argv[])
"#ifndef %s\n#define %s\n", __FILE__, _YEARS, _NAME_H, _NAME_H);
print_feature_defs();
print_feature_group_defs();
print_qemu_feature_defs();
printf("\n#endif\n");
return 0;
}


@ -31,24 +31,6 @@
#include "sysemu/sysemu.h"
#endif
//#define DEBUG_S390
//#define DEBUG_S390_STDOUT
#ifdef DEBUG_S390
#ifdef DEBUG_S390_STDOUT
#define DPRINTF(fmt, ...) \
do { fprintf(stderr, fmt, ## __VA_ARGS__); \
if (qemu_log_separate()) qemu_log(fmt, ##__VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
do { qemu_log(fmt, ## __VA_ARGS__); } while (0)
#endif
#else
#define DPRINTF(fmt, ...) \
do { } while (0)
#endif
#ifndef CONFIG_USER_ONLY
void s390x_tod_timer(void *opaque)
{


@ -119,6 +119,7 @@ DEF_HELPER_4(cu24, i32, env, i32, i32, i32)
DEF_HELPER_4(cu41, i32, env, i32, i32, i32)
DEF_HELPER_4(cu42, i32, env, i32, i32, i32)
DEF_HELPER_5(msa, i32, env, i32, i32, i32, i32)
DEF_HELPER_FLAGS_1(stpt, TCG_CALL_NO_RWG, i64, env)
#ifndef CONFIG_USER_ONLY
DEF_HELPER_3(servc, i32, env, i64, i64)
@ -127,9 +128,9 @@ DEF_HELPER_3(load_psw, noreturn, env, i64, i64)
DEF_HELPER_FLAGS_2(spx, TCG_CALL_NO_RWG, void, env, i64)
DEF_HELPER_FLAGS_1(stck, TCG_CALL_NO_RWG_SE, i64, env)
DEF_HELPER_FLAGS_2(sckc, TCG_CALL_NO_RWG, void, env, i64)
DEF_HELPER_FLAGS_2(sckpf, TCG_CALL_NO_RWG, void, env, i64)
DEF_HELPER_FLAGS_1(stckc, TCG_CALL_NO_RWG, i64, env)
DEF_HELPER_FLAGS_2(spt, TCG_CALL_NO_RWG, void, env, i64)
DEF_HELPER_FLAGS_1(stpt, TCG_CALL_NO_RWG, i64, env)
DEF_HELPER_4(stsi, i32, env, i64, i64, i64)
DEF_HELPER_FLAGS_4(lctl, TCG_CALL_NO_WG, void, env, i32, i64, i32)
DEF_HELPER_FLAGS_4(lctlg, TCG_CALL_NO_WG, void, env, i32, i64, i32)
@ -164,7 +165,10 @@ DEF_HELPER_2(hsch, void, env, i64)
DEF_HELPER_3(msch, void, env, i64, i64)
DEF_HELPER_2(rchp, void, env, i64)
DEF_HELPER_2(rsch, void, env, i64)
DEF_HELPER_2(sal, void, env, i64)
DEF_HELPER_4(schm, void, env, i64, i64, i64)
DEF_HELPER_3(ssch, void, env, i64, i64)
DEF_HELPER_2(stcrw, void, env, i64)
DEF_HELPER_3(stsch, void, env, i64, i64)
DEF_HELPER_3(tsch, void, env, i64, i64)
DEF_HELPER_2(chsc, void, env, i64)


@ -39,10 +39,10 @@
C(0xb9d8, AHHLR, RRF_a, HW, r2_sr32, r3, new, r1_32h, add, adds32)
/* ADD IMMEDIATE */
C(0xc209, AFI, RIL_a, EI, r1, i2, new, r1_32, add, adds32)
C(0xeb6a, ASI, SIY, GIE, m1_32s, i2, new, m1_32, add, adds32)
D(0xeb6a, ASI, SIY, GIE, la1, i2, new, 0, asi, adds32, MO_TESL)
C(0xecd8, AHIK, RIE_d, DO, r3, i2, new, r1_32, add, adds32)
C(0xc208, AGFI, RIL_a, EI, r1, i2, r1, 0, add, adds64)
C(0xeb7a, AGSI, SIY, GIE, m1_64, i2, new, m1_64, add, adds64)
D(0xeb7a, AGSI, SIY, GIE, la1, i2, new, 0, asi, adds64, MO_TEQ)
C(0xecd9, AGHIK, RIE_d, DO, r3, i2, r1, 0, add, adds64)
/* ADD IMMEDIATE HIGH */
C(0xcc08, AIH, RIL_a, HW, r1_sr32, i2, new, r1_32h, add, adds32)
@ -70,9 +70,9 @@
C(0xc20b, ALFI, RIL_a, EI, r1, i2_32u, new, r1_32, add, addu32)
C(0xc20a, ALGFI, RIL_a, EI, r1, i2_32u, r1, 0, add, addu64)
/* ADD LOGICAL WITH SIGNED IMMEDIATE */
C(0xeb6e, ALSI, SIY, GIE, m1_32u, i2, new, m1_32, add, addu32)
D(0xeb6e, ALSI, SIY, GIE, la1, i2, new, 0, asi, addu32, MO_TEUL)
C(0xecda, ALHSIK, RIE_d, DO, r3, i2, new, r1_32, add, addu32)
C(0xeb7e, ALGSI, SIY, GIE, m1_64, i2, new, m1_64, add, addu64)
D(0xeb7e, ALGSI, SIY, GIE, la1, i2, new, 0, asi, addu64, MO_TEQ)
C(0xecdb, ALGHSIK, RIE_d, DO, r3, i2, r1, 0, add, addu64)
/* ADD LOGICAL WITH SIGNED IMMEDIATE HIGH */
C(0xcc0a, ALSIH, RIL_a, HW, r1_sr32, i2, new, r1_32h, add, addu32)
@ -99,8 +99,8 @@
D(0xa505, NIHL, RI_a, Z, r1_o, i2_16u, r1, 0, andi, 0, 0x1020)
D(0xa506, NILH, RI_a, Z, r1_o, i2_16u, r1, 0, andi, 0, 0x1010)
D(0xa507, NILL, RI_a, Z, r1_o, i2_16u, r1, 0, andi, 0, 0x1000)
C(0x9400, NI, SI, Z, m1_8u, i2_8u, new, m1_8, and, nz64)
C(0xeb54, NIY, SIY, LD, m1_8u, i2_8u, new, m1_8, and, nz64)
D(0x9400, NI, SI, Z, la1, i2_8u, new, 0, ni, nz64, MO_UB)
D(0xeb54, NIY, SIY, LD, la1, i2_8u, new, 0, ni, nz64, MO_UB)
/* BRANCH AND SAVE */
C(0x0d00, BASR, RR_a, Z, 0, r2_nz, r1, 0, bas, 0)
@ -357,8 +357,8 @@
/* EXCLUSIVE OR IMMEDIATE */
D(0xc006, XIHF, RIL_a, EI, r1_o, i2_32u, r1, 0, xori, 0, 0x2020)
D(0xc007, XILF, RIL_a, EI, r1_o, i2_32u, r1, 0, xori, 0, 0x2000)
C(0x9700, XI, SI, Z, m1_8u, i2_8u, new, m1_8, xor, nz64)
C(0xeb57, XIY, SIY, LD, m1_8u, i2_8u, new, m1_8, xor, nz64)
D(0x9700, XI, SI, Z, la1, i2_8u, new, 0, xi, nz64, MO_UB)
D(0xeb57, XIY, SIY, LD, la1, i2_8u, new, 0, xi, nz64, MO_UB)
/* EXECUTE */
C(0x4400, EX, RX_a, Z, 0, a2, 0, 0, ex, 0)
@ -369,6 +369,8 @@
C(0xb24f, EAR, RRE, Z, 0, 0, new, r1_32, ear, 0)
/* EXTRACT CPU ATTRIBUTE */
C(0xeb4c, ECAG, RSY_a, GIE, 0, a2, r1, 0, ecag, 0)
/* EXTRACT CPU TIME */
C(0xc801, ECTG, SSF, ECT, 0, 0, 0, 0, ectg, 0)
/* EXTRACT FPC */
C(0xb38c, EFPC, RRE, Z, 0, 0, new, r1_32, efpc, 0)
/* EXTRACT PSW */
@ -698,8 +700,8 @@
D(0xa509, OIHL, RI_a, Z, r1_o, i2_16u, r1, 0, ori, 0, 0x1020)
D(0xa50a, OILH, RI_a, Z, r1_o, i2_16u, r1, 0, ori, 0, 0x1010)
D(0xa50b, OILL, RI_a, Z, r1_o, i2_16u, r1, 0, ori, 0, 0x1000)
C(0x9600, OI, SI, Z, m1_8u, i2_8u, new, m1_8, or, nz64)
C(0xeb56, OIY, SIY, LD, m1_8u, i2_8u, new, m1_8, or, nz64)
D(0x9600, OI, SI, Z, la1, i2_8u, new, 0, oi, nz64, MO_UB)
D(0xeb56, OIY, SIY, LD, la1, i2_8u, new, 0, oi, nz64, MO_UB)
/* PACK */
/* Really format SS_b, but we pack both lengths into one argument
@ -999,6 +1001,8 @@
C(0xb204, SCK, S, Z, 0, 0, 0, 0, 0, 0)
/* SET CLOCK COMPARATOR */
C(0xb206, SCKC, S, Z, 0, m2_64, 0, 0, sckc, 0)
/* SET CLOCK PROGRAMMABLE FIELD */
C(0x0107, SCKPF, E, Z, 0, 0, 0, 0, sckpf, 0)
/* SET CPU TIMER */
C(0xb208, SPT, S, Z, 0, m2_64, 0, 0, spt, 0)
/* SET PREFIX */
@ -1052,7 +1056,12 @@
C(0xb232, MSCH, S, Z, 0, insn, 0, 0, msch, 0)
C(0xb23b, RCHP, S, Z, 0, 0, 0, 0, rchp, 0)
C(0xb238, RSCH, S, Z, 0, 0, 0, 0, rsch, 0)
C(0xb237, SAL, S, Z, 0, 0, 0, 0, sal, 0)
C(0xb23c, SCHM, S, Z, 0, insn, 0, 0, schm, 0)
C(0xb274, SIGA, S, Z, 0, 0, 0, 0, siga, 0)
C(0xb23a, STCPS, S, Z, 0, 0, 0, 0, stcps, 0)
C(0xb233, SSCH, S, Z, 0, insn, 0, 0, ssch, 0)
C(0xb239, STCRW, S, Z, 0, insn, 0, 0, stcrw, 0)
C(0xb234, STSCH, S, Z, 0, insn, 0, 0, stsch, 0)
C(0xb235, TSCH, S, Z, 0, insn, 0, 0, tsch, 0)
/* ??? Not listed in PoO ninth edition, but there's a linux driver that


@ -39,7 +39,7 @@ int64_t HELPER(divs32)(CPUS390XState *env, int64_t a, int64_t b64)
int64_t q;
if (b == 0) {
runtime_exception(env, PGM_FIXPT_DIVIDE, GETPC());
s390_program_interrupt(env, PGM_FIXPT_DIVIDE, ILEN_AUTO, GETPC());
}
ret = q = a / b;
@ -47,7 +47,7 @@ int64_t HELPER(divs32)(CPUS390XState *env, int64_t a, int64_t b64)
/* Catch non-representable quotient. */
if (ret != q) {
runtime_exception(env, PGM_FIXPT_DIVIDE, GETPC());
s390_program_interrupt(env, PGM_FIXPT_DIVIDE, ILEN_AUTO, GETPC());
}
return ret;
@ -60,7 +60,7 @@ uint64_t HELPER(divu32)(CPUS390XState *env, uint64_t a, uint64_t b64)
uint64_t q;
if (b == 0) {
runtime_exception(env, PGM_FIXPT_DIVIDE, GETPC());
s390_program_interrupt(env, PGM_FIXPT_DIVIDE, ILEN_AUTO, GETPC());
}
ret = q = a / b;
@ -68,7 +68,7 @@ uint64_t HELPER(divu32)(CPUS390XState *env, uint64_t a, uint64_t b64)
/* Catch non-representable quotient. */
if (ret != q) {
runtime_exception(env, PGM_FIXPT_DIVIDE, GETPC());
s390_program_interrupt(env, PGM_FIXPT_DIVIDE, ILEN_AUTO, GETPC());
}
return ret;
@ -79,7 +79,7 @@ int64_t HELPER(divs64)(CPUS390XState *env, int64_t a, int64_t b)
{
/* Catch divide by zero, and non-representable quotient (MIN / -1). */
if (b == 0 || (b == -1 && a == (1ll << 63))) {
runtime_exception(env, PGM_FIXPT_DIVIDE, GETPC());
s390_program_interrupt(env, PGM_FIXPT_DIVIDE, ILEN_AUTO, GETPC());
}
env->retxl = a % b;
return a / b;
@ -92,7 +92,7 @@ uint64_t HELPER(divu64)(CPUS390XState *env, uint64_t ah, uint64_t al,
uint64_t ret;
/* Signal divide by zero. */
if (b == 0) {
runtime_exception(env, PGM_FIXPT_DIVIDE, GETPC());
s390_program_interrupt(env, PGM_FIXPT_DIVIDE, ILEN_AUTO, GETPC());
}
if (ah == 0) {
/* 64 -> 64/64 case */
@ -106,7 +106,7 @@ uint64_t HELPER(divu64)(CPUS390XState *env, uint64_t ah, uint64_t al,
env->retxl = a % b;
ret = q;
if (ret != q) {
runtime_exception(env, PGM_FIXPT_DIVIDE, GETPC());
s390_program_interrupt(env, PGM_FIXPT_DIVIDE, ILEN_AUTO, GETPC());
}
#else
S390CPU *cpu = s390_env_get_cpu(env);

View file

@ -43,7 +43,7 @@ typedef struct LowCore {
uint8_t pad3[0xc8 - 0xc4]; /* 0x0c4 */
uint32_t stfl_fac_list; /* 0x0c8 */
uint8_t pad4[0xe8 - 0xcc]; /* 0x0cc */
uint32_t mcck_interruption_code[2]; /* 0x0e8 */
uint64_t mcic; /* 0x0e8 */
uint8_t pad5[0xf4 - 0xf0]; /* 0x0f0 */
uint32_t external_damage_code; /* 0x0f4 */
uint64_t failing_storage_address; /* 0x0f8 */
@ -118,8 +118,8 @@ typedef struct LowCore {
uint32_t fpt_creg_save_area; /* 0x131c */
uint8_t pad16[0x1324 - 0x1320]; /* 0x1320 */
uint32_t tod_progreg_save_area; /* 0x1324 */
uint32_t cpu_timer_save_area[2]; /* 0x1328 */
uint32_t clock_comp_save_area[2]; /* 0x1330 */
uint64_t cpu_timer_save_area; /* 0x1328 */
uint64_t clock_comp_save_area; /* 0x1330 */
uint8_t pad17[0x1340 - 0x1338]; /* 0x1338 */
uint32_t access_regs_save_area[16]; /* 0x1340 */
uint64_t cregs_save_area[16]; /* 0x1380 */
@ -379,21 +379,23 @@ void cpu_inject_stop(S390CPU *cpu);
/* ioinst.c */
void ioinst_handle_xsch(S390CPU *cpu, uint64_t reg1);
void ioinst_handle_csch(S390CPU *cpu, uint64_t reg1);
void ioinst_handle_hsch(S390CPU *cpu, uint64_t reg1);
void ioinst_handle_msch(S390CPU *cpu, uint64_t reg1, uint32_t ipb);
void ioinst_handle_ssch(S390CPU *cpu, uint64_t reg1, uint32_t ipb);
void ioinst_handle_stcrw(S390CPU *cpu, uint32_t ipb);
void ioinst_handle_stsch(S390CPU *cpu, uint64_t reg1, uint32_t ipb);
int ioinst_handle_tsch(S390CPU *cpu, uint64_t reg1, uint32_t ipb);
void ioinst_handle_chsc(S390CPU *cpu, uint32_t ipb);
int ioinst_handle_tpi(S390CPU *cpu, uint32_t ipb);
void ioinst_handle_xsch(S390CPU *cpu, uint64_t reg1, uintptr_t ra);
void ioinst_handle_csch(S390CPU *cpu, uint64_t reg1, uintptr_t ra);
void ioinst_handle_hsch(S390CPU *cpu, uint64_t reg1, uintptr_t ra);
void ioinst_handle_msch(S390CPU *cpu, uint64_t reg1, uint32_t ipb,
uintptr_t ra);
void ioinst_handle_ssch(S390CPU *cpu, uint64_t reg1, uint32_t ipb,
uintptr_t ra);
void ioinst_handle_stcrw(S390CPU *cpu, uint32_t ipb, uintptr_t ra);
void ioinst_handle_stsch(S390CPU *cpu, uint64_t reg1, uint32_t ipb,
uintptr_t ra);
int ioinst_handle_tsch(S390CPU *cpu, uint64_t reg1, uint32_t ipb, uintptr_t ra);
void ioinst_handle_chsc(S390CPU *cpu, uint32_t ipb, uintptr_t ra);
void ioinst_handle_schm(S390CPU *cpu, uint64_t reg1, uint64_t reg2,
uint32_t ipb);
void ioinst_handle_rsch(S390CPU *cpu, uint64_t reg1);
void ioinst_handle_rchp(S390CPU *cpu, uint64_t reg1);
void ioinst_handle_sal(S390CPU *cpu, uint64_t reg1);
uint32_t ipb, uintptr_t ra);
void ioinst_handle_rsch(S390CPU *cpu, uint64_t reg1, uintptr_t ra);
void ioinst_handle_rchp(S390CPU *cpu, uint64_t reg1, uintptr_t ra);
void ioinst_handle_sal(S390CPU *cpu, uint64_t reg1, uintptr_t ra);
/* mem_helper.c */
@ -408,10 +410,9 @@ int mmu_translate_real(CPUS390XState *env, target_ulong raddr, int rw,
/* misc_helper.c */
void QEMU_NORETURN runtime_exception(CPUS390XState *env, int excp,
uintptr_t retaddr);
int handle_diag_288(CPUS390XState *env, uint64_t r1, uint64_t r3);
void handle_diag_308(CPUS390XState *env, uint64_t r1, uint64_t r3);
void handle_diag_308(CPUS390XState *env, uint64_t r1, uint64_t r3,
uintptr_t ra);
/* translate.c */

View file

@ -27,17 +27,18 @@ void trigger_pgm_exception(CPUS390XState *env, uint32_t code, uint32_t ilen)
}
static void tcg_s390_program_interrupt(CPUS390XState *env, uint32_t code,
int ilen)
int ilen, uintptr_t ra)
{
#ifdef CONFIG_TCG
trigger_pgm_exception(env, code, ilen);
cpu_loop_exit(CPU(s390_env_get_cpu(env)));
cpu_loop_exit_restore(CPU(s390_env_get_cpu(env)), ra);
#else
g_assert_not_reached();
#endif
}
void program_interrupt(CPUS390XState *env, uint32_t code, int ilen)
void s390_program_interrupt(CPUS390XState *env, uint32_t code, int ilen,
uintptr_t ra)
{
S390CPU *cpu = s390_env_get_cpu(env);
@ -47,7 +48,7 @@ void program_interrupt(CPUS390XState *env, uint32_t code, int ilen)
if (kvm_enabled()) {
kvm_s390_program_interrupt(cpu, code);
} else if (tcg_enabled()) {
tcg_s390_program_interrupt(env, code, ilen);
tcg_s390_program_interrupt(env, code, ilen, ra);
} else {
g_assert_not_reached();
}

View file

@ -38,13 +38,13 @@ int ioinst_disassemble_sch_ident(uint32_t value, int *m, int *cssid, int *ssid,
return 0;
}
void ioinst_handle_xsch(S390CPU *cpu, uint64_t reg1)
void ioinst_handle_xsch(S390CPU *cpu, uint64_t reg1, uintptr_t ra)
{
int cssid, ssid, schid, m;
SubchDev *sch;
if (ioinst_disassemble_sch_ident(reg1, &m, &cssid, &ssid, &schid)) {
program_interrupt(&cpu->env, PGM_OPERAND, 4);
s390_program_interrupt(&cpu->env, PGM_OPERAND, 4, ra);
return;
}
trace_ioinst_sch_id("xsch", cssid, ssid, schid);
@ -56,13 +56,13 @@ void ioinst_handle_xsch(S390CPU *cpu, uint64_t reg1)
setcc(cpu, css_do_xsch(sch));
}
void ioinst_handle_csch(S390CPU *cpu, uint64_t reg1)
void ioinst_handle_csch(S390CPU *cpu, uint64_t reg1, uintptr_t ra)
{
int cssid, ssid, schid, m;
SubchDev *sch;
if (ioinst_disassemble_sch_ident(reg1, &m, &cssid, &ssid, &schid)) {
program_interrupt(&cpu->env, PGM_OPERAND, 4);
s390_program_interrupt(&cpu->env, PGM_OPERAND, 4, ra);
return;
}
trace_ioinst_sch_id("csch", cssid, ssid, schid);
@ -74,13 +74,13 @@ void ioinst_handle_csch(S390CPU *cpu, uint64_t reg1)
setcc(cpu, css_do_csch(sch));
}
void ioinst_handle_hsch(S390CPU *cpu, uint64_t reg1)
void ioinst_handle_hsch(S390CPU *cpu, uint64_t reg1, uintptr_t ra)
{
int cssid, ssid, schid, m;
SubchDev *sch;
if (ioinst_disassemble_sch_ident(reg1, &m, &cssid, &ssid, &schid)) {
program_interrupt(&cpu->env, PGM_OPERAND, 4);
s390_program_interrupt(&cpu->env, PGM_OPERAND, 4, ra);
return;
}
trace_ioinst_sch_id("hsch", cssid, ssid, schid);
@ -105,7 +105,7 @@ static int ioinst_schib_valid(SCHIB *schib)
return 1;
}
void ioinst_handle_msch(S390CPU *cpu, uint64_t reg1, uint32_t ipb)
void ioinst_handle_msch(S390CPU *cpu, uint64_t reg1, uint32_t ipb, uintptr_t ra)
{
int cssid, ssid, schid, m;
SubchDev *sch;
@ -116,15 +116,16 @@ void ioinst_handle_msch(S390CPU *cpu, uint64_t reg1, uint32_t ipb)
addr = decode_basedisp_s(env, ipb, &ar);
if (addr & 3) {
program_interrupt(env, PGM_SPECIFICATION, 4);
s390_program_interrupt(env, PGM_SPECIFICATION, 4, ra);
return;
}
if (s390_cpu_virt_mem_read(cpu, addr, ar, &schib, sizeof(schib))) {
s390_cpu_virt_mem_handle_exc(cpu, ra);
return;
}
if (ioinst_disassemble_sch_ident(reg1, &m, &cssid, &ssid, &schid) ||
!ioinst_schib_valid(&schib)) {
program_interrupt(env, PGM_OPERAND, 4);
s390_program_interrupt(env, PGM_OPERAND, 4, ra);
return;
}
trace_ioinst_sch_id("msch", cssid, ssid, schid);
@ -161,7 +162,7 @@ static int ioinst_orb_valid(ORB *orb)
return 1;
}
void ioinst_handle_ssch(S390CPU *cpu, uint64_t reg1, uint32_t ipb)
void ioinst_handle_ssch(S390CPU *cpu, uint64_t reg1, uint32_t ipb, uintptr_t ra)
{
int cssid, ssid, schid, m;
SubchDev *sch;
@ -172,16 +173,17 @@ void ioinst_handle_ssch(S390CPU *cpu, uint64_t reg1, uint32_t ipb)
addr = decode_basedisp_s(env, ipb, &ar);
if (addr & 3) {
program_interrupt(env, PGM_SPECIFICATION, 4);
s390_program_interrupt(env, PGM_SPECIFICATION, 4, ra);
return;
}
if (s390_cpu_virt_mem_read(cpu, addr, ar, &orig_orb, sizeof(orb))) {
s390_cpu_virt_mem_handle_exc(cpu, ra);
return;
}
copy_orb_from_guest(&orb, &orig_orb);
if (ioinst_disassemble_sch_ident(reg1, &m, &cssid, &ssid, &schid) ||
!ioinst_orb_valid(&orb)) {
program_interrupt(env, PGM_OPERAND, 4);
s390_program_interrupt(env, PGM_OPERAND, 4, ra);
return;
}
trace_ioinst_sch_id("ssch", cssid, ssid, schid);
@ -193,7 +195,7 @@ void ioinst_handle_ssch(S390CPU *cpu, uint64_t reg1, uint32_t ipb)
setcc(cpu, css_do_ssch(sch, &orb));
}
void ioinst_handle_stcrw(S390CPU *cpu, uint32_t ipb)
void ioinst_handle_stcrw(S390CPU *cpu, uint32_t ipb, uintptr_t ra)
{
CRW crw;
uint64_t addr;
@ -203,7 +205,7 @@ void ioinst_handle_stcrw(S390CPU *cpu, uint32_t ipb)
addr = decode_basedisp_s(env, ipb, &ar);
if (addr & 3) {
program_interrupt(env, PGM_SPECIFICATION, 4);
s390_program_interrupt(env, PGM_SPECIFICATION, 4, ra);
return;
}
@ -212,13 +214,17 @@ void ioinst_handle_stcrw(S390CPU *cpu, uint32_t ipb)
if (s390_cpu_virt_mem_write(cpu, addr, ar, &crw, sizeof(crw)) == 0) {
setcc(cpu, cc);
} else if (cc == 0) {
/* Write failed: requeue CRW since STCRW is a suppressing instruction */
css_undo_stcrw(&crw);
} else {
if (cc == 0) {
/* Write failed: requeue CRW since STCRW is suppressing */
css_undo_stcrw(&crw);
}
s390_cpu_virt_mem_handle_exc(cpu, ra);
}
}
void ioinst_handle_stsch(S390CPU *cpu, uint64_t reg1, uint32_t ipb)
void ioinst_handle_stsch(S390CPU *cpu, uint64_t reg1, uint32_t ipb,
uintptr_t ra)
{
int cssid, ssid, schid, m;
SubchDev *sch;
@ -230,7 +236,7 @@ void ioinst_handle_stsch(S390CPU *cpu, uint64_t reg1, uint32_t ipb)
addr = decode_basedisp_s(env, ipb, &ar);
if (addr & 3) {
program_interrupt(env, PGM_SPECIFICATION, 4);
s390_program_interrupt(env, PGM_SPECIFICATION, 4, ra);
return;
}
@ -241,7 +247,9 @@ void ioinst_handle_stsch(S390CPU *cpu, uint64_t reg1, uint32_t ipb)
* access exception if it is not) first.
*/
if (!s390_cpu_virt_mem_check_write(cpu, addr, ar, sizeof(schib))) {
program_interrupt(env, PGM_OPERAND, 4);
s390_program_interrupt(env, PGM_OPERAND, 4, ra);
} else {
s390_cpu_virt_mem_handle_exc(cpu, ra);
}
return;
}
@ -267,18 +275,20 @@ void ioinst_handle_stsch(S390CPU *cpu, uint64_t reg1, uint32_t ipb)
if (cc != 3) {
if (s390_cpu_virt_mem_write(cpu, addr, ar, &schib,
sizeof(schib)) != 0) {
s390_cpu_virt_mem_handle_exc(cpu, ra);
return;
}
} else {
/* Access exceptions have a higher priority than cc3 */
if (s390_cpu_virt_mem_check_write(cpu, addr, ar, sizeof(schib)) != 0) {
s390_cpu_virt_mem_handle_exc(cpu, ra);
return;
}
}
setcc(cpu, cc);
}
int ioinst_handle_tsch(S390CPU *cpu, uint64_t reg1, uint32_t ipb)
int ioinst_handle_tsch(S390CPU *cpu, uint64_t reg1, uint32_t ipb, uintptr_t ra)
{
CPUS390XState *env = &cpu->env;
int cssid, ssid, schid, m;
@ -289,13 +299,13 @@ int ioinst_handle_tsch(S390CPU *cpu, uint64_t reg1, uint32_t ipb)
uint8_t ar;
if (ioinst_disassemble_sch_ident(reg1, &m, &cssid, &ssid, &schid)) {
program_interrupt(env, PGM_OPERAND, 4);
s390_program_interrupt(env, PGM_OPERAND, 4, ra);
return -EIO;
}
trace_ioinst_sch_id("tsch", cssid, ssid, schid);
addr = decode_basedisp_s(env, ipb, &ar);
if (addr & 3) {
program_interrupt(env, PGM_SPECIFICATION, 4);
s390_program_interrupt(env, PGM_SPECIFICATION, 4, ra);
return -EIO;
}
@ -308,6 +318,7 @@ int ioinst_handle_tsch(S390CPU *cpu, uint64_t reg1, uint32_t ipb)
/* 0 - status pending, 1 - not status pending, 3 - not operational */
if (cc != 3) {
if (s390_cpu_virt_mem_write(cpu, addr, ar, &irb, irb_len) != 0) {
s390_cpu_virt_mem_handle_exc(cpu, ra);
return -EFAULT;
}
css_do_tsch_update_subch(sch);
@ -315,6 +326,7 @@ int ioinst_handle_tsch(S390CPU *cpu, uint64_t reg1, uint32_t ipb)
irb_len = sizeof(irb) - sizeof(irb.emw);
/* Access exceptions have a higher priority than cc3 */
if (s390_cpu_virt_mem_check_write(cpu, addr, ar, irb_len) != 0) {
s390_cpu_virt_mem_handle_exc(cpu, ra);
return -EFAULT;
}
}
@ -585,7 +597,7 @@ static void ioinst_handle_chsc_unimplemented(ChscResp *res)
res->param = 0;
}
void ioinst_handle_chsc(S390CPU *cpu, uint32_t ipb)
void ioinst_handle_chsc(S390CPU *cpu, uint32_t ipb, uintptr_t ra)
{
ChscReq *req;
ChscResp *res;
@ -601,7 +613,7 @@ void ioinst_handle_chsc(S390CPU *cpu, uint32_t ipb)
addr = env->regs[reg];
/* Page boundary? */
if (addr & 0xfff) {
program_interrupt(env, PGM_SPECIFICATION, 4);
s390_program_interrupt(env, PGM_SPECIFICATION, 4, ra);
return;
}
/*
@ -610,13 +622,14 @@ void ioinst_handle_chsc(S390CPU *cpu, uint32_t ipb)
* care of req->len here first.
*/
if (s390_cpu_virt_mem_read(cpu, addr, reg, buf, sizeof(ChscReq))) {
s390_cpu_virt_mem_handle_exc(cpu, ra);
return;
}
req = (ChscReq *)buf;
len = be16_to_cpu(req->len);
/* Length field valid? */
if ((len < 16) || (len > 4088) || (len & 7)) {
program_interrupt(env, PGM_OPERAND, 4);
s390_program_interrupt(env, PGM_OPERAND, 4, ra);
return;
}
memset((char *)req + len, 0, TARGET_PAGE_SIZE - len);
@ -644,42 +657,18 @@ void ioinst_handle_chsc(S390CPU *cpu, uint32_t ipb)
if (!s390_cpu_virt_mem_write(cpu, addr + len, reg, res,
be16_to_cpu(res->len))) {
setcc(cpu, 0); /* Command execution complete */
} else {
s390_cpu_virt_mem_handle_exc(cpu, ra);
}
}
int ioinst_handle_tpi(S390CPU *cpu, uint32_t ipb)
{
CPUS390XState *env = &cpu->env;
uint64_t addr;
int lowcore;
IOIntCode int_code;
hwaddr len;
int ret;
uint8_t ar;
trace_ioinst("tpi");
addr = decode_basedisp_s(env, ipb, &ar);
if (addr & 3) {
program_interrupt(env, PGM_SPECIFICATION, 4);
return -EIO;
}
lowcore = addr ? 0 : 1;
len = lowcore ? 8 /* two words */ : 12 /* three words */;
ret = css_do_tpi(&int_code, lowcore);
if (ret == 1) {
s390_cpu_virt_mem_write(cpu, lowcore ? 184 : addr, ar, &int_code, len);
}
return ret;
}
#define SCHM_REG1_RES(_reg) (_reg & 0x000000000ffffffc)
#define SCHM_REG1_MBK(_reg) ((_reg & 0x00000000f0000000) >> 28)
#define SCHM_REG1_UPD(_reg) ((_reg & 0x0000000000000002) >> 1)
#define SCHM_REG1_DCT(_reg) (_reg & 0x0000000000000001)
void ioinst_handle_schm(S390CPU *cpu, uint64_t reg1, uint64_t reg2,
uint32_t ipb)
uint32_t ipb, uintptr_t ra)
{
uint8_t mbk;
int update;
@ -689,7 +678,7 @@ void ioinst_handle_schm(S390CPU *cpu, uint64_t reg1, uint64_t reg2,
trace_ioinst("schm");
if (SCHM_REG1_RES(reg1)) {
program_interrupt(env, PGM_OPERAND, 4);
s390_program_interrupt(env, PGM_OPERAND, 4, ra);
return;
}
@ -698,20 +687,20 @@ void ioinst_handle_schm(S390CPU *cpu, uint64_t reg1, uint64_t reg2,
dct = SCHM_REG1_DCT(reg1);
if (update && (reg2 & 0x000000000000001f)) {
program_interrupt(env, PGM_OPERAND, 4);
s390_program_interrupt(env, PGM_OPERAND, 4, ra);
return;
}
css_do_schm(mbk, update, dct, update ? reg2 : 0);
}
void ioinst_handle_rsch(S390CPU *cpu, uint64_t reg1)
void ioinst_handle_rsch(S390CPU *cpu, uint64_t reg1, uintptr_t ra)
{
int cssid, ssid, schid, m;
SubchDev *sch;
if (ioinst_disassemble_sch_ident(reg1, &m, &cssid, &ssid, &schid)) {
program_interrupt(&cpu->env, PGM_OPERAND, 4);
s390_program_interrupt(&cpu->env, PGM_OPERAND, 4, ra);
return;
}
trace_ioinst_sch_id("rsch", cssid, ssid, schid);
@ -726,7 +715,7 @@ void ioinst_handle_rsch(S390CPU *cpu, uint64_t reg1)
#define RCHP_REG1_RES(_reg) (_reg & 0x00000000ff00ff00)
#define RCHP_REG1_CSSID(_reg) ((_reg & 0x0000000000ff0000) >> 16)
#define RCHP_REG1_CHPID(_reg) (_reg & 0x00000000000000ff)
void ioinst_handle_rchp(S390CPU *cpu, uint64_t reg1)
void ioinst_handle_rchp(S390CPU *cpu, uint64_t reg1, uintptr_t ra)
{
int cc;
uint8_t cssid;
@ -735,7 +724,7 @@ void ioinst_handle_rchp(S390CPU *cpu, uint64_t reg1)
CPUS390XState *env = &cpu->env;
if (RCHP_REG1_RES(reg1)) {
program_interrupt(env, PGM_OPERAND, 4);
s390_program_interrupt(env, PGM_OPERAND, 4, ra);
return;
}
@ -758,17 +747,17 @@ void ioinst_handle_rchp(S390CPU *cpu, uint64_t reg1)
break;
default:
/* Invalid channel subsystem. */
program_interrupt(env, PGM_OPERAND, 4);
s390_program_interrupt(env, PGM_OPERAND, 4, ra);
return;
}
setcc(cpu, cc);
}
#define SAL_REG1_INVALID(_reg) (_reg & 0x0000000080000000)
void ioinst_handle_sal(S390CPU *cpu, uint64_t reg1)
void ioinst_handle_sal(S390CPU *cpu, uint64_t reg1, uintptr_t ra)
{
/* We do not provide address limit checking, so let's suppress it. */
if (SAL_REG1_INVALID(reg1) || reg1 & 0x000000000000ffff) {
program_interrupt(&cpu->env, PGM_OPERAND, 4);
s390_program_interrupt(&cpu->env, PGM_OPERAND, 4, ra);
}
}

View file

@ -1124,32 +1124,32 @@ static int handle_b2(S390CPU *cpu, struct kvm_run *run, uint8_t ipa1)
switch (ipa1) {
case PRIV_B2_XSCH:
ioinst_handle_xsch(cpu, env->regs[1]);
ioinst_handle_xsch(cpu, env->regs[1], RA_IGNORED);
break;
case PRIV_B2_CSCH:
ioinst_handle_csch(cpu, env->regs[1]);
ioinst_handle_csch(cpu, env->regs[1], RA_IGNORED);
break;
case PRIV_B2_HSCH:
ioinst_handle_hsch(cpu, env->regs[1]);
ioinst_handle_hsch(cpu, env->regs[1], RA_IGNORED);
break;
case PRIV_B2_MSCH:
ioinst_handle_msch(cpu, env->regs[1], run->s390_sieic.ipb);
ioinst_handle_msch(cpu, env->regs[1], run->s390_sieic.ipb, RA_IGNORED);
break;
case PRIV_B2_SSCH:
ioinst_handle_ssch(cpu, env->regs[1], run->s390_sieic.ipb);
ioinst_handle_ssch(cpu, env->regs[1], run->s390_sieic.ipb, RA_IGNORED);
break;
case PRIV_B2_STCRW:
ioinst_handle_stcrw(cpu, run->s390_sieic.ipb);
ioinst_handle_stcrw(cpu, run->s390_sieic.ipb, RA_IGNORED);
break;
case PRIV_B2_STSCH:
ioinst_handle_stsch(cpu, env->regs[1], run->s390_sieic.ipb);
ioinst_handle_stsch(cpu, env->regs[1], run->s390_sieic.ipb, RA_IGNORED);
break;
case PRIV_B2_TSCH:
/* We should only get tsch via KVM_EXIT_S390_TSCH. */
fprintf(stderr, "Spurious tsch intercept\n");
break;
case PRIV_B2_CHSC:
ioinst_handle_chsc(cpu, run->s390_sieic.ipb);
ioinst_handle_chsc(cpu, run->s390_sieic.ipb, RA_IGNORED);
break;
case PRIV_B2_TPI:
/* This should have been handled by kvm already. */
@ -1157,19 +1157,19 @@ static int handle_b2(S390CPU *cpu, struct kvm_run *run, uint8_t ipa1)
break;
case PRIV_B2_SCHM:
ioinst_handle_schm(cpu, env->regs[1], env->regs[2],
run->s390_sieic.ipb);
run->s390_sieic.ipb, RA_IGNORED);
break;
case PRIV_B2_RSCH:
ioinst_handle_rsch(cpu, env->regs[1]);
ioinst_handle_rsch(cpu, env->regs[1], RA_IGNORED);
break;
case PRIV_B2_RCHP:
ioinst_handle_rchp(cpu, env->regs[1]);
ioinst_handle_rchp(cpu, env->regs[1], RA_IGNORED);
break;
case PRIV_B2_STCPS:
/* We do not provide this instruction, it is suppressed. */
break;
case PRIV_B2_SAL:
ioinst_handle_sal(cpu, env->regs[1]);
ioinst_handle_sal(cpu, env->regs[1], RA_IGNORED);
break;
case PRIV_B2_SIGA:
/* Not provided, set CC = 3 for subchannel not operational */
@ -1230,7 +1230,7 @@ static int kvm_clp_service_call(S390CPU *cpu, struct kvm_run *run)
uint8_t r2 = (run->s390_sieic.ipb & 0x000f0000) >> 16;
if (s390_has_feat(S390_FEAT_ZPCI)) {
return clp_service_call(cpu, r2);
return clp_service_call(cpu, r2, RA_IGNORED);
} else {
return -1;
}
@ -1242,7 +1242,7 @@ static int kvm_pcilg_service_call(S390CPU *cpu, struct kvm_run *run)
uint8_t r2 = (run->s390_sieic.ipb & 0x000f0000) >> 16;
if (s390_has_feat(S390_FEAT_ZPCI)) {
return pcilg_service_call(cpu, r1, r2);
return pcilg_service_call(cpu, r1, r2, RA_IGNORED);
} else {
return -1;
}
@ -1254,7 +1254,7 @@ static int kvm_pcistg_service_call(S390CPU *cpu, struct kvm_run *run)
uint8_t r2 = (run->s390_sieic.ipb & 0x000f0000) >> 16;
if (s390_has_feat(S390_FEAT_ZPCI)) {
return pcistg_service_call(cpu, r1, r2);
return pcistg_service_call(cpu, r1, r2, RA_IGNORED);
} else {
return -1;
}
@ -1270,7 +1270,7 @@ static int kvm_stpcifc_service_call(S390CPU *cpu, struct kvm_run *run)
cpu_synchronize_state(CPU(cpu));
fiba = get_base_disp_rxy(cpu, run, &ar);
return stpcifc_service_call(cpu, r1, fiba, ar);
return stpcifc_service_call(cpu, r1, fiba, ar, RA_IGNORED);
} else {
return -1;
}
@ -1302,7 +1302,7 @@ static int kvm_rpcit_service_call(S390CPU *cpu, struct kvm_run *run)
uint8_t r2 = (run->s390_sieic.ipb & 0x000f0000) >> 16;
if (s390_has_feat(S390_FEAT_ZPCI)) {
return rpcit_service_call(cpu, r1, r2);
return rpcit_service_call(cpu, r1, r2, RA_IGNORED);
} else {
return -1;
}
@ -1319,7 +1319,7 @@ static int kvm_pcistb_service_call(S390CPU *cpu, struct kvm_run *run)
cpu_synchronize_state(CPU(cpu));
gaddr = get_base_disp_rsy(cpu, run, &ar);
return pcistb_service_call(cpu, r1, r3, gaddr, ar);
return pcistb_service_call(cpu, r1, r3, gaddr, ar, RA_IGNORED);
} else {
return -1;
}
@ -1335,7 +1335,7 @@ static int kvm_mpcifc_service_call(S390CPU *cpu, struct kvm_run *run)
cpu_synchronize_state(CPU(cpu));
fiba = get_base_disp_rxy(cpu, run, &ar);
return mpcifc_service_call(cpu, r1, fiba, ar);
return mpcifc_service_call(cpu, r1, fiba, ar, RA_IGNORED);
} else {
return -1;
}
@ -1451,7 +1451,7 @@ static void kvm_handle_diag_308(S390CPU *cpu, struct kvm_run *run)
cpu_synchronize_state(CPU(cpu));
r1 = (run->s390_sieic.ipa & 0x00f0) >> 4;
r3 = run->s390_sieic.ipa & 0x000f;
handle_diag_308(&cpu->env, r1, r3);
handle_diag_308(&cpu->env, r1, r3, RA_IGNORED);
}
static int handle_sw_breakpoint(S390CPU *cpu, struct kvm_run *run)
@ -1673,7 +1673,8 @@ static int handle_tsch(S390CPU *cpu)
cpu_synchronize_state(cs);
ret = ioinst_handle_tsch(cpu, cpu->env.regs[1], run->s390_tsch.ipb);
ret = ioinst_handle_tsch(cpu, cpu->env.regs[1], run->s390_tsch.ipb,
RA_IGNORED);
if (ret < 0) {
/*
* Failure.
@ -1851,33 +1852,12 @@ void kvm_s390_io_interrupt(uint16_t subchannel_id,
kvm_s390_floating_interrupt(&irq);
}
static uint64_t build_channel_report_mcic(void)
{
uint64_t mcic;
/* subclass: indicate channel report pending */
mcic = MCIC_SC_CP |
/* subclass modifiers: none */
/* storage errors: none */
/* validity bits: no damage */
MCIC_VB_WP | MCIC_VB_MS | MCIC_VB_PM | MCIC_VB_IA | MCIC_VB_FP |
MCIC_VB_GR | MCIC_VB_CR | MCIC_VB_ST | MCIC_VB_AR | MCIC_VB_PR |
MCIC_VB_FC | MCIC_VB_CT | MCIC_VB_CC;
if (s390_has_feat(S390_FEAT_VECTOR)) {
mcic |= MCIC_VB_VR;
}
if (s390_has_feat(S390_FEAT_GUARDED_STORAGE)) {
mcic |= MCIC_VB_GS;
}
return mcic;
}
void kvm_s390_crw_mchk(void)
{
struct kvm_s390_irq irq = {
.type = KVM_S390_MCHK,
.u.mchk.cr14 = 1 << 28,
.u.mchk.mcic = build_channel_report_mcic(),
.u.mchk.cr14 = CR14_CHANNEL_REPORT_SC,
.u.mchk.mcic = s390_build_validity_mcic() | MCIC_SC_CP,
};
kvm_s390_floating_interrupt(&irq);
}
@ -1979,7 +1959,10 @@ int kvm_s390_set_cpu_state(S390CPU *cpu, uint8_t cpu_state)
void kvm_s390_vcpu_interrupt_pre_save(S390CPU *cpu)
{
struct kvm_s390_irq_state irq_state;
struct kvm_s390_irq_state irq_state = {
.buf = (uint64_t) cpu->irqstate,
.len = VCPU_IRQ_BUF_SIZE,
};
CPUState *cs = CPU(cpu);
int32_t bytes;
@ -1987,9 +1970,6 @@ void kvm_s390_vcpu_interrupt_pre_save(S390CPU *cpu)
return;
}
irq_state.buf = (uint64_t) cpu->irqstate;
irq_state.len = VCPU_IRQ_BUF_SIZE;
bytes = kvm_vcpu_ioctl(cs, KVM_S390_GET_IRQ_STATE, &irq_state);
if (bytes < 0) {
cpu->irqstate_saved_size = 0;
@ -2003,7 +1983,10 @@ void kvm_s390_vcpu_interrupt_pre_save(S390CPU *cpu)
int kvm_s390_vcpu_interrupt_post_load(S390CPU *cpu)
{
CPUState *cs = CPU(cpu);
struct kvm_s390_irq_state irq_state;
struct kvm_s390_irq_state irq_state = {
.buf = (uint64_t) cpu->irqstate,
.len = cpu->irqstate_saved_size,
};
int r;
if (cpu->irqstate_saved_size == 0) {
@ -2014,9 +1997,6 @@ int kvm_s390_vcpu_interrupt_post_load(S390CPU *cpu)
return -ENOSYS;
}
irq_state.buf = (uint64_t) cpu->irqstate;
irq_state.len = cpu->irqstate_saved_size;
r = kvm_vcpu_ioctl(cs, KVM_S390_SET_IRQ_STATE, &irq_state);
if (r) {
error_report("Setting interrupt state failed %d", r);

View file

@ -85,9 +85,7 @@ static inline void check_alignment(CPUS390XState *env, uint64_t v,
int wordsize, uintptr_t ra)
{
if (v % wordsize) {
CPUState *cs = CPU(s390_env_get_cpu(env));
cpu_restore_state(cs, ra);
program_interrupt(env, PGM_SPECIFICATION, 6);
s390_program_interrupt(env, PGM_SPECIFICATION, 6, ra);
}
}
@ -545,8 +543,7 @@ void HELPER(srst)(CPUS390XState *env, uint32_t r1, uint32_t r2)
/* Bits 32-55 must contain all 0. */
if (env->regs[0] & 0xffffff00u) {
cpu_restore_state(ENV_GET_CPU(env), ra);
program_interrupt(env, PGM_SPECIFICATION, 6);
s390_program_interrupt(env, PGM_SPECIFICATION, 6, ra);
}
str = get_address(env, r2);
@ -583,8 +580,7 @@ void HELPER(srstu)(CPUS390XState *env, uint32_t r1, uint32_t r2)
/* Bits 32-47 of R0 must be zero. */
if (env->regs[0] & 0xffff0000u) {
cpu_restore_state(ENV_GET_CPU(env), ra);
program_interrupt(env, PGM_SPECIFICATION, 6);
s390_program_interrupt(env, PGM_SPECIFICATION, 6, ra);
}
str = get_address(env, r2);
@ -1600,8 +1596,7 @@ static uint32_t do_csst(CPUS390XState *env, uint32_t r3, uint64_t a1,
return cc;
spec_exception:
cpu_restore_state(ENV_GET_CPU(env), ra);
program_interrupt(env, PGM_SPECIFICATION, 6);
s390_program_interrupt(env, PGM_SPECIFICATION, 6, ra);
g_assert_not_reached();
}
@ -1865,8 +1860,7 @@ void HELPER(idte)(CPUS390XState *env, uint64_t r1, uint64_t r2, uint32_t m4)
uint16_t entries, i, index = 0;
if (r2 & 0xff000) {
cpu_restore_state(cs, ra);
program_interrupt(env, PGM_SPECIFICATION, 4);
s390_program_interrupt(env, PGM_SPECIFICATION, 4, ra);
}
if (!(r2 & 0x800)) {
@ -2014,8 +2008,7 @@ uint64_t HELPER(lra)(CPUS390XState *env, uint64_t addr)
/* XXX incomplete - has more corner cases */
if (!(env->psw.mask & PSW_MASK_64) && (addr >> 32)) {
cpu_restore_state(cs, GETPC());
program_interrupt(env, PGM_SPECIAL_OP, 2);
s390_program_interrupt(env, PGM_SPECIAL_OP, 2, GETPC());
}
old_exc = cs->exception_index;
@ -2185,7 +2178,6 @@ uint32_t HELPER(mvcos)(CPUS390XState *env, uint64_t dest, uint64_t src,
const uint8_t psw_as = (env->psw.mask & PSW_MASK_ASC) >> PSW_SHIFT_ASC;
const uint64_t r0 = env->regs[0];
const uintptr_t ra = GETPC();
CPUState *cs = CPU(s390_env_get_cpu(env));
uint8_t dest_key, dest_as, dest_k, dest_a;
uint8_t src_key, src_as, src_k, src_a;
uint64_t val;
@ -2195,8 +2187,7 @@ uint32_t HELPER(mvcos)(CPUS390XState *env, uint64_t dest, uint64_t src,
__func__, dest, src, len);
if (!(env->psw.mask & PSW_MASK_DAT)) {
cpu_restore_state(cs, ra);
program_interrupt(env, PGM_SPECIAL_OP, 6);
s390_program_interrupt(env, PGM_SPECIAL_OP, 6, ra);
}
/* OAC (operand access control) for the first operand -> dest */
@ -2227,17 +2218,14 @@ uint32_t HELPER(mvcos)(CPUS390XState *env, uint64_t dest, uint64_t src,
}
if (dest_a && dest_as == AS_HOME && (env->psw.mask & PSW_MASK_PSTATE)) {
cpu_restore_state(cs, ra);
program_interrupt(env, PGM_SPECIAL_OP, 6);
s390_program_interrupt(env, PGM_SPECIAL_OP, 6, ra);
}
if (!(env->cregs[0] & CR0_SECONDARY) &&
(dest_as == AS_SECONDARY || src_as == AS_SECONDARY)) {
cpu_restore_state(cs, ra);
program_interrupt(env, PGM_SPECIAL_OP, 6);
s390_program_interrupt(env, PGM_SPECIAL_OP, 6, ra);
}
if (!psw_key_valid(env, dest_key) || !psw_key_valid(env, src_key)) {
cpu_restore_state(cs, ra);
program_interrupt(env, PGM_PRIVILEGED, 6);
s390_program_interrupt(env, PGM_PRIVILEGED, 6, ra);
}
len = wrap_length(env, len);
@ -2251,8 +2239,7 @@ uint32_t HELPER(mvcos)(CPUS390XState *env, uint64_t dest, uint64_t src,
(env->psw.mask & PSW_MASK_PSTATE)) {
qemu_log_mask(LOG_UNIMP, "%s: AR-mode and PSTATE support missing\n",
__func__);
cpu_restore_state(cs, ra);
program_interrupt(env, PGM_ADDRESSING, 6);
s390_program_interrupt(env, PGM_ADDRESSING, 6, ra);
}
/* FIXME: a) LAP

View file

@ -45,22 +45,6 @@
#define HELPER_LOG(x...)
#endif
/* Raise an exception dynamically from a helper function. */
void QEMU_NORETURN runtime_exception(CPUS390XState *env, int excp,
uintptr_t retaddr)
{
CPUState *cs = CPU(s390_env_get_cpu(env));
cs->exception_index = EXCP_PGM;
env->int_pgm_code = excp;
env->int_pgm_ilen = ILEN_AUTO;
/* Use the (ultimate) callers address to find the insn that trapped. */
cpu_restore_state(cs, retaddr);
cpu_loop_exit(cs);
}
/* Raise an exception statically from a TB. */
void HELPER(exception)(CPUS390XState *env, uint32_t excp)
{
@ -71,6 +55,21 @@ void HELPER(exception)(CPUS390XState *env, uint32_t excp)
cpu_loop_exit(cs);
}
/* Store CPU Timer (also used for EXTRACT CPU TIME) */
uint64_t HELPER(stpt)(CPUS390XState *env)
{
#if defined(CONFIG_USER_ONLY)
/*
* Fake a descending CPU timer. We could get negative values here,
* but we don't care, as it is up to the OS to decide when to process
* that interrupt and reset the timer to > 0.
*/
return UINT64_MAX - (uint64_t)cpu_get_host_ticks();
#else
return time2tod(env->cputm - qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL));
#endif
}
#ifndef CONFIG_USER_ONLY
/* SCLP service call */
@ -78,11 +77,10 @@ uint32_t HELPER(servc)(CPUS390XState *env, uint64_t r1, uint64_t r2)
{
qemu_mutex_lock_iothread();
int r = sclp_service_call(env, r1, r2);
if (r < 0) {
program_interrupt(env, -r, 4);
r = 0;
}
qemu_mutex_unlock_iothread();
if (r < 0) {
s390_program_interrupt(env, -r, 4, GETPC());
}
return r;
}
@ -104,7 +102,7 @@ void HELPER(diag)(CPUS390XState *env, uint32_t r1, uint32_t r3, uint32_t num)
case 0x308:
/* ipl */
qemu_mutex_lock_iothread();
handle_diag_308(env, r1, r3);
handle_diag_308(env, r1, r3, GETPC());
qemu_mutex_unlock_iothread();
r = 0;
break;
@ -118,7 +116,7 @@ void HELPER(diag)(CPUS390XState *env, uint32_t r1, uint32_t r3, uint32_t num)
}
if (r) {
program_interrupt(env, PGM_SPECIFICATION, ILEN_AUTO);
s390_program_interrupt(env, PGM_SPECIFICATION, ILEN_AUTO, GETPC());
}
}
@ -163,6 +161,17 @@ void HELPER(sckc)(CPUS390XState *env, uint64_t time)
timer_mod(env->tod_timer, env->tod_basetime + time);
}
/* Set Tod Programmable Field */
void HELPER(sckpf)(CPUS390XState *env, uint64_t r0)
{
uint32_t val = r0;
if (val & 0xffff0000) {
s390_program_interrupt(env, PGM_SPECIFICATION, 2, GETPC());
}
env->todpr = val;
}
/* Store Clock Comparator */
uint64_t HELPER(stckc)(CPUS390XState *env)
{
@ -184,12 +193,6 @@ void HELPER(spt)(CPUS390XState *env, uint64_t time)
timer_mod(env->cpu_timer, env->cputm);
}
/* Store CPU Timer */
uint64_t HELPER(stpt)(CPUS390XState *env)
{
return time2tod(env->cputm - qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL));
}
/* Store System Information */
uint32_t HELPER(stsi)(CPUS390XState *env, uint64_t a0,
uint64_t r0, uint64_t r1)
@ -201,7 +204,7 @@ uint32_t HELPER(stsi)(CPUS390XState *env, uint64_t a0,
if ((r0 & STSI_LEVEL_MASK) <= STSI_LEVEL_3 &&
((r0 & STSI_R0_RESERVED_MASK) || (r1 & STSI_R1_RESERVED_MASK))) {
/* valid function code, invalid reserved bits */
program_interrupt(env, PGM_SPECIFICATION, 4);
s390_program_interrupt(env, PGM_SPECIFICATION, 4, GETPC());
}
sel1 = r0 & STSI_R0_SEL1_MASK;
@ -339,7 +342,7 @@ void HELPER(xsch)(CPUS390XState *env, uint64_t r1)
{
S390CPU *cpu = s390_env_get_cpu(env);
qemu_mutex_lock_iothread();
ioinst_handle_xsch(cpu, r1);
ioinst_handle_xsch(cpu, r1, GETPC());
qemu_mutex_unlock_iothread();
}
@ -347,7 +350,7 @@ void HELPER(csch)(CPUS390XState *env, uint64_t r1)
{
S390CPU *cpu = s390_env_get_cpu(env);
qemu_mutex_lock_iothread();
ioinst_handle_csch(cpu, r1);
ioinst_handle_csch(cpu, r1, GETPC());
qemu_mutex_unlock_iothread();
}
@ -355,7 +358,7 @@ void HELPER(hsch)(CPUS390XState *env, uint64_t r1)
{
S390CPU *cpu = s390_env_get_cpu(env);
qemu_mutex_lock_iothread();
ioinst_handle_hsch(cpu, r1);
ioinst_handle_hsch(cpu, r1, GETPC());
qemu_mutex_unlock_iothread();
}
@ -363,7 +366,7 @@ void HELPER(msch)(CPUS390XState *env, uint64_t r1, uint64_t inst)
{
S390CPU *cpu = s390_env_get_cpu(env);
qemu_mutex_lock_iothread();
ioinst_handle_msch(cpu, r1, inst >> 16);
ioinst_handle_msch(cpu, r1, inst >> 16, GETPC());
qemu_mutex_unlock_iothread();
}
@ -371,7 +374,7 @@ void HELPER(rchp)(CPUS390XState *env, uint64_t r1)
{
S390CPU *cpu = s390_env_get_cpu(env);
qemu_mutex_lock_iothread();
ioinst_handle_rchp(cpu, r1);
ioinst_handle_rchp(cpu, r1, GETPC());
qemu_mutex_unlock_iothread();
}
@ -379,7 +382,25 @@ void HELPER(rsch)(CPUS390XState *env, uint64_t r1)
{
S390CPU *cpu = s390_env_get_cpu(env);
qemu_mutex_lock_iothread();
ioinst_handle_rsch(cpu, r1);
ioinst_handle_rsch(cpu, r1, GETPC());
qemu_mutex_unlock_iothread();
}
void HELPER(sal)(CPUS390XState *env, uint64_t r1)
{
S390CPU *cpu = s390_env_get_cpu(env);
qemu_mutex_lock_iothread();
ioinst_handle_sal(cpu, r1, GETPC());
qemu_mutex_unlock_iothread();
}
void HELPER(schm)(CPUS390XState *env, uint64_t r1, uint64_t r2, uint64_t inst)
{
S390CPU *cpu = s390_env_get_cpu(env);
qemu_mutex_lock_iothread();
ioinst_handle_schm(cpu, r1, r2, inst >> 16, GETPC());
qemu_mutex_unlock_iothread();
}
@ -387,7 +408,16 @@ void HELPER(ssch)(CPUS390XState *env, uint64_t r1, uint64_t inst)
{
S390CPU *cpu = s390_env_get_cpu(env);
qemu_mutex_lock_iothread();
ioinst_handle_ssch(cpu, r1, inst >> 16);
ioinst_handle_ssch(cpu, r1, inst >> 16, GETPC());
qemu_mutex_unlock_iothread();
}
void HELPER(stcrw)(CPUS390XState *env, uint64_t inst)
{
S390CPU *cpu = s390_env_get_cpu(env);
qemu_mutex_lock_iothread();
ioinst_handle_stcrw(cpu, inst >> 16, GETPC());
qemu_mutex_unlock_iothread();
}
@ -395,7 +425,7 @@ void HELPER(stsch)(CPUS390XState *env, uint64_t r1, uint64_t inst)
{
S390CPU *cpu = s390_env_get_cpu(env);
qemu_mutex_lock_iothread();
ioinst_handle_stsch(cpu, r1, inst >> 16);
ioinst_handle_stsch(cpu, r1, inst >> 16, GETPC());
qemu_mutex_unlock_iothread();
}
@ -403,7 +433,7 @@ void HELPER(tsch)(CPUS390XState *env, uint64_t r1, uint64_t inst)
{
S390CPU *cpu = s390_env_get_cpu(env);
qemu_mutex_lock_iothread();
ioinst_handle_tsch(cpu, r1, inst >> 16);
ioinst_handle_tsch(cpu, r1, inst >> 16, GETPC());
qemu_mutex_unlock_iothread();
}
@ -411,7 +441,7 @@ void HELPER(chsc)(CPUS390XState *env, uint64_t inst)
{
S390CPU *cpu = s390_env_get_cpu(env);
qemu_mutex_lock_iothread();
ioinst_handle_chsc(cpu, inst >> 16);
ioinst_handle_chsc(cpu, inst >> 16, GETPC());
qemu_mutex_unlock_iothread();
}
#endif
@ -429,7 +459,7 @@ void HELPER(per_check_exception)(CPUS390XState *env)
* of EXECUTE, while per_address contains the target of EXECUTE.
*/
ilen = get_ilen(cpu_ldub_code(env, env->per_address));
program_interrupt(env, PGM_PER, ilen);
s390_program_interrupt(env, PGM_PER, ilen, GETPC());
}
}
@ -519,8 +549,7 @@ uint32_t HELPER(stfle)(CPUS390XState *env, uint64_t addr)
int i;
if (addr & 0x7) {
cpu_restore_state(ENV_GET_CPU(env), ra);
program_interrupt(env, PGM_SPECIFICATION, 4);
s390_program_interrupt(env, PGM_SPECIFICATION, 4, ra);
}
prepare_stfl();

View file

@ -22,6 +22,7 @@
#include "internal.h"
#include "kvm_s390x.h"
#include "sysemu/kvm.h"
#include "exec/exec-all.h"
#include "trace.h"
#include "hw/s390x/storage-keys.h"
@ -63,7 +64,9 @@ static void trigger_access_exception(CPUS390XState *env, uint32_t type,
kvm_s390_access_exception(cpu, type, tec);
} else {
CPUState *cs = CPU(cpu);
stq_phys(cs->as, env->psa + offsetof(LowCore, trans_exc_code), tec);
if (type != PGM_ADDRESSING) {
stq_phys(cs->as, env->psa + offsetof(LowCore, trans_exc_code), tec);
}
trigger_pgm_exception(env, type, ilen);
}
}
@ -442,7 +445,8 @@ int mmu_translate(CPUS390XState *env, target_ulong vaddr, int rw, uint64_t asc,
/**
* translate_pages: Translate a set of consecutive logical page addresses
* to absolute addresses
* to absolute addresses. This function is used for TCG and old KVM without
* the MEMOP interface.
*/
static int translate_pages(S390CPU *cpu, vaddr addr, int nr_pages,
target_ulong *pages, bool is_write)
@ -458,7 +462,7 @@ static int translate_pages(S390CPU *cpu, vaddr addr, int nr_pages,
}
if (!address_space_access_valid(&address_space_memory, pages[i],
TARGET_PAGE_SIZE, is_write)) {
program_interrupt(env, PGM_ADDRESSING, ILEN_AUTO);
trigger_access_exception(env, PGM_ADDRESSING, ILEN_AUTO, 0);
return -EFAULT;
}
addr += TARGET_PAGE_SIZE;
@ -478,6 +482,9 @@ static int translate_pages(S390CPU *cpu, vaddr addr, int nr_pages,
*
* Copy from/to guest memory using logical addresses. Note that we inject a
* program interrupt in case there is an error while accessing the memory.
*
* This function will always return (also for TCG); make sure to call
* s390_cpu_virt_mem_handle_exc() to properly exit the CPU loop.
*/
int s390_cpu_virt_mem_rw(S390CPU *cpu, vaddr laddr, uint8_t ar, void *hostbuf,
int len, bool is_write)
@ -514,6 +521,16 @@ int s390_cpu_virt_mem_rw(S390CPU *cpu, vaddr laddr, uint8_t ar, void *hostbuf,
return ret;
}
void s390_cpu_virt_mem_handle_exc(S390CPU *cpu, uintptr_t ra)
{
/* KVM will handle the interrupt automatically, TCG has to exit the TB */
#ifdef CONFIG_TCG
if (tcg_enabled()) {
cpu_loop_exit_restore(CPU(cpu), ra);
}
#endif
}
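A hedged sketch of the calling pattern this comment and helper imply, mirroring the ioinst.c hunks earlier in this diff; cpu, addr, ar, ra and schib are assumed to be in scope exactly as in those callers and are not defined here:
/* Sketch only: s390_cpu_virt_mem_read() reports failure through its return
 * value rather than by longjmp'ing, so a TCG caller has to leave the CPU
 * loop itself once it is done cleaning up.
 */
if (s390_cpu_virt_mem_read(cpu, addr, ar, &schib, sizeof(schib))) {
    s390_cpu_virt_mem_handle_exc(cpu, ra); /* no-op under KVM, exits the TB under TCG */
    return;
}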
/**
* Translate a real address into a physical (absolute) address.
* @param raddr the real address

View file

@ -240,12 +240,6 @@ static void update_cc_op(DisasContext *s)
}
}
static void potential_page_fault(DisasContext *s)
{
update_psw_addr(s);
update_cc_op(s);
}
static inline uint64_t ld_code2(CPUS390XState *env, uint64_t pc)
{
return (uint64_t)cpu_lduw_code(env, pc);
@ -1370,6 +1364,27 @@ static ExitStatus op_addc(DisasContext *s, DisasOps *o)
return NO_EXIT;
}
static ExitStatus op_asi(DisasContext *s, DisasOps *o)
{
o->in1 = tcg_temp_new_i64();
if (!s390_has_feat(S390_FEAT_STFLE_45)) {
tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
} else {
/* Perform the atomic addition in memory. */
tcg_gen_atomic_fetch_add_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
s->insn->data);
}
/* Recompute also for atomic case: needed for setting CC. */
tcg_gen_add_i64(o->out, o->in1, o->in2);
if (!s390_has_feat(S390_FEAT_STFLE_45)) {
tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
}
return NO_EXIT;
}
static ExitStatus op_aeb(DisasContext *s, DisasOps *o)
{
gen_helper_aeb(o->out, cpu_env, o->in1, o->in2);
@ -1412,6 +1427,27 @@ static ExitStatus op_andi(DisasContext *s, DisasOps *o)
return NO_EXIT;
}
static ExitStatus op_ni(DisasContext *s, DisasOps *o)
{
o->in1 = tcg_temp_new_i64();
if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
} else {
/* Perform the atomic operation in memory. */
tcg_gen_atomic_fetch_and_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
s->insn->data);
}
/* Recompute also for atomic case: needed for setting CC. */
tcg_gen_and_i64(o->out, o->in1, o->in2);
if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
}
return NO_EXIT;
}
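As a stand-alone illustration of the guest-visible semantics op_ni generates, here is a minimal C11 sketch; ni_byte and the interlocked flag are invented for this example and stand in for the S390_FEAT_INTERLOCKED_ACCESS_2 check, and they are not QEMU code:
#include <stdatomic.h>
#include <stdint.h>

/* NI ANDs an immediate into one memory byte: with the interlocked-access
 * facility 2 the read-modify-write is a single atomic step, without it the
 * load, AND and store are separate; either way the result drives the CC.
 */
static uint8_t ni_byte(_Atomic uint8_t *mem, uint8_t imm, int interlocked)
{
    uint8_t old;

    if (interlocked) {
        old = atomic_fetch_and(mem, imm);   /* one atomic step */
    } else {
        old = atomic_load(mem);             /* plain load ...      */
        atomic_store(mem, old & imm);       /* ... and plain store */
    }
    return old & imm;                       /* value the CC is computed from */
}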
static ExitStatus op_bas(DisasContext *s, DisasOps *o)
{
tcg_gen_movi_i64(o->out, pc_to_link_info(s, s->next_pc));
@ -2124,9 +2160,6 @@ static ExitStatus op_diag(DisasContext *s, DisasOps *o)
TCGv_i32 func_code = tcg_const_i32(get_field(s->fields, i2));
check_privileged(s);
update_psw_addr(s);
gen_op_calc_cc(s);
gen_helper_diag(cpu_env, r1, r3, func_code);
tcg_temp_free_i32(func_code);
@ -2942,7 +2975,8 @@ static ExitStatus op_lpd(DisasContext *s, DisasOps *o)
/* In a parallel context, stop the world and single step. */
if (tb_cflags(s->tb) & CF_PARALLEL) {
potential_page_fault(s);
update_psw_addr(s);
update_cc_op(s);
gen_exception(EXCP_ATOMIC);
return EXIT_NORETURN;
}
@ -3365,6 +3399,27 @@ static ExitStatus op_ori(DisasContext *s, DisasOps *o)
return NO_EXIT;
}
static ExitStatus op_oi(DisasContext *s, DisasOps *o)
{
o->in1 = tcg_temp_new_i64();
if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
} else {
/* Perform the atomic operation in memory. */
tcg_gen_atomic_fetch_or_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
s->insn->data);
}
/* Recompute also for atomic case: needed for setting CC. */
tcg_gen_or_i64(o->out, o->in1, o->in2);
if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
}
return NO_EXIT;
}
static ExitStatus op_pack(DisasContext *s, DisasOps *o)
{
TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
@ -3704,7 +3759,6 @@ static ExitStatus op_sqxb(DisasContext *s, DisasOps *o)
static ExitStatus op_servc(DisasContext *s, DisasOps *o)
{
check_privileged(s);
potential_page_fault(s);
gen_helper_servc(cc_op, cpu_env, o->in2, o->in1);
set_cc_static(s);
return NO_EXIT;
@ -3863,6 +3917,36 @@ static ExitStatus op_spm(DisasContext *s, DisasOps *o)
return NO_EXIT;
}
static ExitStatus op_ectg(DisasContext *s, DisasOps *o)
{
int b1 = get_field(s->fields, b1);
int d1 = get_field(s->fields, d1);
int b2 = get_field(s->fields, b2);
int d2 = get_field(s->fields, d2);
int r3 = get_field(s->fields, r3);
TCGv_i64 tmp = tcg_temp_new_i64();
/* fetch all operands first */
o->in1 = tcg_temp_new_i64();
tcg_gen_addi_i64(o->in1, regs[b1], d1);
o->in2 = tcg_temp_new_i64();
tcg_gen_addi_i64(o->in2, regs[b2], d2);
o->addr1 = get_address(s, 0, r3, 0);
/* load the third operand into r3 before modifying anything */
tcg_gen_qemu_ld64(regs[r3], o->addr1, get_mem_index(s));
/* subtract CPU timer from first operand and store in GR0 */
gen_helper_stpt(tmp, cpu_env);
tcg_gen_sub_i64(regs[0], o->in1, tmp);
/* store second operand in GR1 */
tcg_gen_mov_i64(regs[1], o->in2);
tcg_temp_free_i64(tmp);
return NO_EXIT;
}
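A hedged recap of the register state the ECTG sequence above leaves behind, read off the inline comments rather than the architecture manual:
/* r3  <- 64-bit value fetched from the third-operand address
 * GR0 <- first operand minus the current CPU timer (gen_helper_stpt)
 * GR1 <- second operand, copied unchanged
 */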
#ifndef CONFIG_USER_ONLY
static ExitStatus op_spka(DisasContext *s, DisasOps *o)
{
@ -3906,7 +3990,10 @@ static ExitStatus op_stcke(DisasContext *s, DisasOps *o)
{
TCGv_i64 c1 = tcg_temp_new_i64();
TCGv_i64 c2 = tcg_temp_new_i64();
TCGv_i64 todpr = tcg_temp_new_i64();
gen_helper_stck(c1, cpu_env);
/* 16-bit value stored in a uint32_t (only valid bits set) */
tcg_gen_ld32u_i64(todpr, cpu_env, offsetof(CPUS390XState, todpr));
/* Shift the 64-bit value into its place as a zero-extended
104-bit value. Note that "bit positions 64-103 are always
non-zero so that they compare differently to STCK"; we set
@ -3914,11 +4001,13 @@ static ExitStatus op_stcke(DisasContext *s, DisasOps *o)
tcg_gen_shli_i64(c2, c1, 56);
tcg_gen_shri_i64(c1, c1, 8);
tcg_gen_ori_i64(c2, c2, 0x10000);
tcg_gen_or_i64(c2, c2, todpr);
tcg_gen_qemu_st64(c1, o->in2, get_mem_index(s));
tcg_gen_addi_i64(o->in2, o->in2, 8);
tcg_gen_qemu_st64(c2, o->in2, get_mem_index(s));
tcg_temp_free_i64(c1);
tcg_temp_free_i64(c2);
tcg_temp_free_i64(todpr);
/* ??? We don't implement clock states. */
gen_op_movi_cc(s, 0);
return NO_EXIT;
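For orientation, a hedged sketch of the 16-byte value the two 64-bit stores above produce, derived purely from the code shown here (big-endian guest memory; the offsets are my reading, not quoted from the PoP):
/* byte  0      : 0x00  (zero-extended epoch index)
 * bytes 1..8   : the 64-bit TOD clock delivered by gen_helper_stck()
 * bytes 9..12  : 0x00
 * byte  13     : 0x01  (the 0x10000 "always non-zero" marker bit)
 * bytes 14..15 : the TOD programmable field (env->todpr)
 */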
@ -3931,6 +4020,13 @@ static ExitStatus op_sckc(DisasContext *s, DisasOps *o)
return NO_EXIT;
}
static ExitStatus op_sckpf(DisasContext *s, DisasOps *o)
{
check_privileged(s);
gen_helper_sckpf(cpu_env, regs[0]);
return NO_EXIT;
}
static ExitStatus op_stckc(DisasContext *s, DisasOps *o)
{
check_privileged(s);
@ -3992,7 +4088,6 @@ static ExitStatus op_stpt(DisasContext *s, DisasOps *o)
static ExitStatus op_stsi(DisasContext *s, DisasOps *o)
{
check_privileged(s);
potential_page_fault(s);
gen_helper_stsi(cc_op, cpu_env, o->in2, regs[0], regs[1]);
set_cc_static(s);
return NO_EXIT;
@ -4008,7 +4103,6 @@ static ExitStatus op_spx(DisasContext *s, DisasOps *o)
static ExitStatus op_xsch(DisasContext *s, DisasOps *o)
{
check_privileged(s);
potential_page_fault(s);
gen_helper_xsch(cpu_env, regs[1]);
set_cc_static(s);
return NO_EXIT;
@ -4017,7 +4111,6 @@ static ExitStatus op_xsch(DisasContext *s, DisasOps *o)
static ExitStatus op_csch(DisasContext *s, DisasOps *o)
{
check_privileged(s);
potential_page_fault(s);
gen_helper_csch(cpu_env, regs[1]);
set_cc_static(s);
return NO_EXIT;
@ -4026,7 +4119,6 @@ static ExitStatus op_csch(DisasContext *s, DisasOps *o)
static ExitStatus op_hsch(DisasContext *s, DisasOps *o)
{
check_privileged(s);
potential_page_fault(s);
gen_helper_hsch(cpu_env, regs[1]);
set_cc_static(s);
return NO_EXIT;
@ -4035,7 +4127,6 @@ static ExitStatus op_hsch(DisasContext *s, DisasOps *o)
static ExitStatus op_msch(DisasContext *s, DisasOps *o)
{
check_privileged(s);
potential_page_fault(s);
gen_helper_msch(cpu_env, regs[1], o->in2);
set_cc_static(s);
return NO_EXIT;
@ -4044,7 +4135,6 @@ static ExitStatus op_msch(DisasContext *s, DisasOps *o)
static ExitStatus op_rchp(DisasContext *s, DisasOps *o)
{
check_privileged(s);
potential_page_fault(s);
gen_helper_rchp(cpu_env, regs[1]);
set_cc_static(s);
return NO_EXIT;
@ -4053,16 +4143,43 @@ static ExitStatus op_rchp(DisasContext *s, DisasOps *o)
static ExitStatus op_rsch(DisasContext *s, DisasOps *o)
{
check_privileged(s);
potential_page_fault(s);
gen_helper_rsch(cpu_env, regs[1]);
set_cc_static(s);
return NO_EXIT;
}
static ExitStatus op_sal(DisasContext *s, DisasOps *o)
{
check_privileged(s);
gen_helper_sal(cpu_env, regs[1]);
return NO_EXIT;
}
static ExitStatus op_schm(DisasContext *s, DisasOps *o)
{
check_privileged(s);
gen_helper_schm(cpu_env, regs[1], regs[2], o->in2);
return NO_EXIT;
}
static ExitStatus op_siga(DisasContext *s, DisasOps *o)
{
check_privileged(s);
/* From KVM code: Not provided, set CC = 3 for subchannel not operational */
gen_op_movi_cc(s, 3);
return NO_EXIT;
}
static ExitStatus op_stcps(DisasContext *s, DisasOps *o)
{
check_privileged(s);
/* The instruction is suppressed if not provided. */
return NO_EXIT;
}
static ExitStatus op_ssch(DisasContext *s, DisasOps *o)
{
check_privileged(s);
potential_page_fault(s);
gen_helper_ssch(cpu_env, regs[1], o->in2);
set_cc_static(s);
return NO_EXIT;
@ -4071,16 +4188,22 @@ static ExitStatus op_ssch(DisasContext *s, DisasOps *o)
static ExitStatus op_stsch(DisasContext *s, DisasOps *o)
{
check_privileged(s);
potential_page_fault(s);
gen_helper_stsch(cpu_env, regs[1], o->in2);
set_cc_static(s);
return NO_EXIT;
}
static ExitStatus op_stcrw(DisasContext *s, DisasOps *o)
{
check_privileged(s);
gen_helper_stcrw(cpu_env, o->in2);
set_cc_static(s);
return NO_EXIT;
}
static ExitStatus op_tsch(DisasContext *s, DisasOps *o)
{
check_privileged(s);
potential_page_fault(s);
gen_helper_tsch(cpu_env, regs[1], o->in2);
set_cc_static(s);
return NO_EXIT;
@ -4089,7 +4212,6 @@ static ExitStatus op_tsch(DisasContext *s, DisasOps *o)
static ExitStatus op_chsc(DisasContext *s, DisasOps *o)
{
check_privileged(s);
potential_page_fault(s);
gen_helper_chsc(cpu_env, o->in2);
set_cc_static(s);
return NO_EXIT;
@ -4622,6 +4744,27 @@ static ExitStatus op_xori(DisasContext *s, DisasOps *o)
return NO_EXIT;
}
static ExitStatus op_xi(DisasContext *s, DisasOps *o)
{
o->in1 = tcg_temp_new_i64();
if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
} else {
/* Perform the atomic operation in memory. */
tcg_gen_atomic_fetch_xor_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
s->insn->data);
}
/* Recompute also for atomic case: needed for setting CC. */
tcg_gen_xor_i64(o->out, o->in1, o->in2);
if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
}
return NO_EXIT;
}
static ExitStatus op_zero(DisasContext *s, DisasOps *o)
{
o->out = tcg_const_i64(0);
@ -5566,6 +5709,7 @@ enum DisasInsnEnum {
#define FAC_MSA3 S390_FEAT_MSA_EXT_3 /* msa-extension-3 facility */
#define FAC_MSA4 S390_FEAT_MSA_EXT_4 /* msa-extension-4 facility */
#define FAC_MSA5 S390_FEAT_MSA_EXT_5 /* msa-extension-5 facility */
#define FAC_ECT S390_FEAT_EXTRACT_CPU_TIME
static const DisasInsn insn_info[] = {
#include "insn-data.def"
@ -5851,9 +5995,6 @@ static ExitStatus translate_one(CPUS390XState *env, DisasContext *s)
tcg_gen_movi_i64(psw_addr, s->next_pc);
}
/* Save off cc. */
update_cc_op(s);
/* Call the helper to check for a possible PER exception. */
gen_helper_per_check_exception(cpu_env);
}