s390x patches for 2.3.

Highlight is support for PCI devices on s390x. Otherwise, performance
 improvements (register sync) and small cleanups.
 -----BEGIN PGP SIGNATURE-----
 Version: GnuPG v1.4.11 (GNU/Linux)
 
 iQIcBAABAgAGBQJUs5irAAoJEN7Pa5PG8C+vP2EP/RMQVXd3uLAwwEW03F/bYFH9
 3rOjxoO7+uLZn4N0LPS5APlokX+F5UEyR9KZJlM7NlSpusuivxkd4f5CRfe1/OWK
 IFXenJ0XxvnLfCI36UbD9X5IsxGFtDPHtta9IN/isBoIs53IUvYDN9YM4UQxmjVU
 NODgaJrxmqi38yN4W6b/me3/HMEz13IfUl7LcCY5bV6ehqAdh7a35uf7hytkgDjy
 8mz2uBs/IKRBEh92HjZjlOXIuMiwvsKHMVjUGfAMmypMuipO/D1/NiWynFm+PnQU
 QZxRINvCN47J8A7A4umnxWzMI2YqnrBpPIQ9Bl/Butc/R8Ls+kYZ7LpU1w6h+t2t
 6qDsx/3Y4ELDSIKlmz0p8MllDM2/4+lA4xkVF1XHPTWHASSpYaWmJxrkLK273WJt
 eVytBFtb3XQ3r5lXpQg6G1pxetplJy2mELhVneq116ZvzT9YvLhouetuHxt5RZ6f
 P2vyzuVExrkHXitad8xz4qViPF+u75LA93FGMav8zrxqH6GV2pHCVLY34/daSBEG
 LXrPG1p8OfF4sPqYyaSGEz3A6uUJTnHo6srEaJZRYxQxAlclGLYLdl1CXsOrjP0g
 q83+Z6kS/XBO0E55z/f25vSBak4CXeRvikZWKBuoGQ8F0ikHgTvXg1o6sYiOzPnN
 XBMK4o2pL4ZoiIv/q8pF
 =WEEj
 -----END PGP SIGNATURE-----

Merge remote-tracking branch 'remotes/cohuck/tags/s390x-20150112-v3' into staging

s390x patches for 2.3.

Highlight is support for PCI devices on s390x. Otherwise, performance
improvements (register sync) and small cleanups.

# gpg: Signature made Mon 12 Jan 2015 09:49:31 GMT using RSA key ID C6F02FAF
# gpg: Good signature from "Cornelia Huck <huckc@linux.vnet.ibm.com>"
# gpg:                 aka "Cornelia Huck <cornelia.huck@de.ibm.com>"

* remotes/cohuck/tags/s390x-20150112-v3:
  kvm: extend kvm_irqchip_add_msi_route to work on s390
  s390: implement pci instructions
  s390: Add PCI bus support
  s390x/kvm: avoid syscalls by syncing registers with kvm_run
  s390x/kvm: sync register support helper function
  s390x/css: Clean up unnecessary CONFIG_USER_ONLY wrappers
  s390x/ccw: fix oddity in machine class init

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Commit 5435f1d77e by Peter Maydell, 2015-01-12 10:09:41 +00:00
22 changed files with 2305 additions and 115 deletions
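The register sync improvement mentioned above works by exchanging frequently accessed registers through the kvm_run structure, which is shared with the kernel, instead of issuing one KVM_SET_ONE_REG/KVM_GET_ONE_REG ioctl per register. A minimal sketch of the pattern (the capability flag and field names mirror the target-s390x/kvm.c hunks at the end of this diff; error handling omitted):

static int can_sync_regs(CPUState *cs, int regs)
{
    return cap_sync_regs && (cs->kvm_run->kvm_valid_regs & regs) == regs;
}

/* put path: write through the shared page and mark it dirty, no syscall */
if (can_sync_regs(cs, KVM_SYNC_ARCH0)) {
    cs->kvm_run->s.regs.cputm = env->cputm;
    cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_ARCH0;
} else {
    /* fallback: one ioctl per register */
    kvm_set_one_reg(cs, KVM_REG_S390_CPU_TIMER, &env->cputm);
}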

MAINTAINERS

@@ -538,6 +538,7 @@ S390 Virtio
M: Alexander Graf <agraf@suse.de>
S: Maintained
F: hw/s390x/s390-*.c
X: hw/s390x/*pci*.[hc]
S390 Virtio-ccw
M: Cornelia Huck <cornelia.huck@de.ibm.com>
@@ -548,6 +549,7 @@ F: hw/s390x/s390-virtio-ccw.c
F: hw/s390x/css.[hc]
F: hw/s390x/sclp*.[hc]
F: hw/s390x/ipl*.[hc]
F: hw/s390x/*pci*.[hc]
F: include/hw/s390x/
F: pc-bios/s390-ccw/
T: git git://github.com/cohuck/qemu virtio-ccw-upstr

default-configs/s390x-softmmu.mak

@@ -1,3 +1,4 @@
include pci.mak
CONFIG_VIRTIO=y
CONFIG_SCLPCONSOLE=y
CONFIG_S390_FLIC=y

hw/s390x/Makefile.objs

@@ -8,3 +8,4 @@ obj-y += ipl.o
obj-y += css.o
obj-y += s390-virtio-ccw.o
obj-y += virtio-ccw.o
obj-y += s390-pci-bus.o s390-pci-inst.o

hw/s390x/css.c

@@ -1299,6 +1299,11 @@ void css_generate_chp_crws(uint8_t cssid, uint8_t chpid)
/* TODO */
}
void css_generate_css_crws(uint8_t cssid)
{
css_queue_crw(CRW_RSC_CSS, 0, 0, cssid);
}
int css_enable_mcsse(void)
{
trace_css_enable_facility("mcsse");

hw/s390x/css.h

@@ -101,6 +101,7 @@ void css_queue_crw(uint8_t rsc, uint8_t erc, int chain, uint16_t rsid);
void css_generate_sch_crws(uint8_t cssid, uint8_t ssid, uint16_t schid,
int hotplugged, int add);
void css_generate_chp_crws(uint8_t cssid, uint8_t chpid);
void css_generate_css_crws(uint8_t cssid);
void css_adapter_interrupt(uint8_t isc);
#define CSS_IO_ADAPTER_VIRTIO 1

hw/s390x/s390-pci-bus.c (new file)

@@ -0,0 +1,591 @@
/*
* s390 PCI BUS
*
* Copyright 2014 IBM Corp.
* Author(s): Frank Blaschka <frank.blaschka@de.ibm.com>
* Hong Bo Li <lihbbj@cn.ibm.com>
* Yi Min Zhao <zyimin@cn.ibm.com>
*
* This work is licensed under the terms of the GNU GPL, version 2 or (at
* your option) any later version. See the COPYING file in the top-level
* directory.
*/
#include "s390-pci-bus.h"
#include <hw/pci/pci_bus.h>
#include <hw/pci/msi.h>
#include <qemu/error-report.h>
/* #define DEBUG_S390PCI_BUS */
#ifdef DEBUG_S390PCI_BUS
#define DPRINTF(fmt, ...) \
do { fprintf(stderr, "S390pci-bus: " fmt, ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
do { } while (0)
#endif
int chsc_sei_nt2_get_event(void *res)
{
ChscSeiNt2Res *nt2_res = (ChscSeiNt2Res *)res;
PciCcdfAvail *accdf;
PciCcdfErr *eccdf;
int rc = 1;
SeiContainer *sei_cont;
S390pciState *s = S390_PCI_HOST_BRIDGE(
object_resolve_path(TYPE_S390_PCI_HOST_BRIDGE, NULL));
if (!s) {
return rc;
}
sei_cont = QTAILQ_FIRST(&s->pending_sei);
if (sei_cont) {
QTAILQ_REMOVE(&s->pending_sei, sei_cont, link);
nt2_res->nt = 2;
nt2_res->cc = sei_cont->cc;
switch (sei_cont->cc) {
case 1: /* error event */
eccdf = (PciCcdfErr *)nt2_res->ccdf;
eccdf->fid = cpu_to_be32(sei_cont->fid);
eccdf->fh = cpu_to_be32(sei_cont->fh);
eccdf->e = cpu_to_be32(sei_cont->e);
eccdf->faddr = cpu_to_be64(sei_cont->faddr);
eccdf->pec = cpu_to_be16(sei_cont->pec);
break;
case 2: /* availability event */
accdf = (PciCcdfAvail *)nt2_res->ccdf;
accdf->fid = cpu_to_be32(sei_cont->fid);
accdf->fh = cpu_to_be32(sei_cont->fh);
accdf->pec = cpu_to_be16(sei_cont->pec);
break;
default:
abort();
}
g_free(sei_cont);
rc = 0;
}
return rc;
}
int chsc_sei_nt2_have_event(void)
{
S390pciState *s = S390_PCI_HOST_BRIDGE(
object_resolve_path(TYPE_S390_PCI_HOST_BRIDGE, NULL));
if (!s) {
return 0;
}
return !QTAILQ_EMPTY(&s->pending_sei);
}
S390PCIBusDevice *s390_pci_find_dev_by_fid(uint32_t fid)
{
S390PCIBusDevice *pbdev;
int i;
S390pciState *s = S390_PCI_HOST_BRIDGE(
object_resolve_path(TYPE_S390_PCI_HOST_BRIDGE, NULL));
if (!s) {
return NULL;
}
for (i = 0; i < PCI_SLOT_MAX; i++) {
pbdev = &s->pbdev[i];
if ((pbdev->fh != 0) && (pbdev->fid == fid)) {
return pbdev;
}
}
return NULL;
}
void s390_pci_sclp_configure(int configure, SCCB *sccb)
{
PciCfgSccb *psccb = (PciCfgSccb *)sccb;
S390PCIBusDevice *pbdev = s390_pci_find_dev_by_fid(be32_to_cpu(psccb->aid));
uint16_t rc;
if (pbdev) {
if ((configure == 1 && pbdev->configured == true) ||
(configure == 0 && pbdev->configured == false)) {
rc = SCLP_RC_NO_ACTION_REQUIRED;
} else {
pbdev->configured = !pbdev->configured;
rc = SCLP_RC_NORMAL_COMPLETION;
}
} else {
DPRINTF("sclp config %d no dev found\n", configure);
rc = SCLP_RC_ADAPTER_ID_NOT_RECOGNIZED;
}
psccb->header.response_code = cpu_to_be16(rc);
return;
}
static uint32_t s390_pci_get_pfid(PCIDevice *pdev)
{
return PCI_SLOT(pdev->devfn);
}
static uint32_t s390_pci_get_pfh(PCIDevice *pdev)
{
return PCI_SLOT(pdev->devfn) | FH_VIRT;
}
S390PCIBusDevice *s390_pci_find_dev_by_idx(uint32_t idx)
{
S390PCIBusDevice *pbdev;
int i;
int j = 0;
S390pciState *s = S390_PCI_HOST_BRIDGE(
object_resolve_path(TYPE_S390_PCI_HOST_BRIDGE, NULL));
if (!s) {
return NULL;
}
for (i = 0; i < PCI_SLOT_MAX; i++) {
pbdev = &s->pbdev[i];
if (pbdev->fh == 0) {
continue;
}
if (j == idx) {
return pbdev;
}
j++;
}
return NULL;
}
S390PCIBusDevice *s390_pci_find_dev_by_fh(uint32_t fh)
{
S390PCIBusDevice *pbdev;
int i;
S390pciState *s = S390_PCI_HOST_BRIDGE(
object_resolve_path(TYPE_S390_PCI_HOST_BRIDGE, NULL));
if (!s) {
return NULL;
}
for (i = 0; i < PCI_SLOT_MAX; i++) {
pbdev = &s->pbdev[i];
if (pbdev->fh == fh) {
return pbdev;
}
}
return NULL;
}
static void s390_pci_generate_event(uint8_t cc, uint16_t pec, uint32_t fh,
uint32_t fid, uint64_t faddr, uint32_t e)
{
SeiContainer *sei_cont = g_malloc0(sizeof(SeiContainer));
S390pciState *s = S390_PCI_HOST_BRIDGE(
object_resolve_path(TYPE_S390_PCI_HOST_BRIDGE, NULL));
if (!s) {
return;
}
sei_cont->fh = fh;
sei_cont->fid = fid;
sei_cont->cc = cc;
sei_cont->pec = pec;
sei_cont->faddr = faddr;
sei_cont->e = e;
QTAILQ_INSERT_TAIL(&s->pending_sei, sei_cont, link);
css_generate_css_crws(0);
}
static void s390_pci_generate_plug_event(uint16_t pec, uint32_t fh,
uint32_t fid)
{
s390_pci_generate_event(2, pec, fh, fid, 0, 0);
}
static void s390_pci_generate_error_event(uint16_t pec, uint32_t fh,
uint32_t fid, uint64_t faddr,
uint32_t e)
{
s390_pci_generate_event(1, pec, fh, fid, faddr, e);
}
static void s390_pci_set_irq(void *opaque, int irq, int level)
{
/* nothing to do */
}
static int s390_pci_map_irq(PCIDevice *pci_dev, int irq_num)
{
/* nothing to do */
return 0;
}
static uint64_t s390_pci_get_table_origin(uint64_t iota)
{
return iota & ~ZPCI_IOTA_RTTO_FLAG;
}
static unsigned int calc_rtx(dma_addr_t ptr)
{
return ((unsigned long) ptr >> ZPCI_RT_SHIFT) & ZPCI_INDEX_MASK;
}
static unsigned int calc_sx(dma_addr_t ptr)
{
return ((unsigned long) ptr >> ZPCI_ST_SHIFT) & ZPCI_INDEX_MASK;
}
static unsigned int calc_px(dma_addr_t ptr)
{
return ((unsigned long) ptr >> PAGE_SHIFT) & ZPCI_PT_MASK;
}
static uint64_t get_rt_sto(uint64_t entry)
{
return ((entry & ZPCI_TABLE_TYPE_MASK) == ZPCI_TABLE_TYPE_RTX)
? (entry & ZPCI_RTE_ADDR_MASK)
: 0;
}
static uint64_t get_st_pto(uint64_t entry)
{
return ((entry & ZPCI_TABLE_TYPE_MASK) == ZPCI_TABLE_TYPE_SX)
? (entry & ZPCI_STE_ADDR_MASK)
: 0;
}
static uint64_t s390_guest_io_table_walk(uint64_t guest_iota,
uint64_t guest_dma_address)
{
uint64_t sto_a, pto_a, px_a;
uint64_t sto, pto, pte;
uint32_t rtx, sx, px;
rtx = calc_rtx(guest_dma_address);
sx = calc_sx(guest_dma_address);
px = calc_px(guest_dma_address);
sto_a = guest_iota + rtx * sizeof(uint64_t);
sto = ldq_phys(&address_space_memory, sto_a);
sto = get_rt_sto(sto);
if (!sto) {
pte = 0;
goto out;
}
pto_a = sto + sx * sizeof(uint64_t);
pto = ldq_phys(&address_space_memory, pto_a);
pto = get_st_pto(pto);
if (!pto) {
pte = 0;
goto out;
}
px_a = pto + px * sizeof(uint64_t);
pte = ldq_phys(&address_space_memory, px_a);
out:
return pte;
}
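/*
 * Worked example (address chosen for illustration only): with 4K pages,
 * guest_dma_address 0x12345000 yields
 *   rtx = (0x12345000 >> 31) & 0x7ff = 0x000  (region-third index)
 *   sx  = (0x12345000 >> 20) & 0x7ff = 0x123  (segment index)
 *   px  = (0x12345000 >> 12) & 0xff  = 0x45   (page index)
 * so the walk above reads entry 0 of the region-third table, entry 0x123
 * of the segment table it points to, and finally PTE 0x45.
 */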
static IOMMUTLBEntry s390_translate_iommu(MemoryRegion *iommu, hwaddr addr,
bool is_write)
{
uint64_t pte;
uint32_t flags;
S390PCIBusDevice *pbdev = container_of(iommu, S390PCIBusDevice, mr);
S390pciState *s = S390_PCI_HOST_BRIDGE(pci_device_root_bus(pbdev->pdev)
->qbus.parent);
IOMMUTLBEntry ret = {
.target_as = &address_space_memory,
.iova = 0,
.translated_addr = 0,
.addr_mask = ~(hwaddr)0,
.perm = IOMMU_NONE,
};
DPRINTF("iommu trans addr 0x%" PRIx64 "\n", addr);
/* s390 does not have an APIC mapped to main storage so we use
* a separate AddressSpace only for msix notifications
*/
if (addr == ZPCI_MSI_ADDR) {
ret.target_as = &s->msix_notify_as;
ret.iova = addr;
ret.translated_addr = addr;
ret.addr_mask = 0xfff;
ret.perm = IOMMU_RW;
return ret;
}
if (!pbdev->g_iota) {
pbdev->error_state = true;
pbdev->lgstg_blocked = true;
s390_pci_generate_error_event(ERR_EVENT_INVALAS, pbdev->fh, pbdev->fid,
addr, 0);
return ret;
}
if (addr < pbdev->pba || addr > pbdev->pal) {
pbdev->error_state = true;
pbdev->lgstg_blocked = true;
s390_pci_generate_error_event(ERR_EVENT_OORANGE, pbdev->fh, pbdev->fid,
addr, 0);
return ret;
}
pte = s390_guest_io_table_walk(s390_pci_get_table_origin(pbdev->g_iota),
addr);
if (!pte) {
pbdev->error_state = true;
pbdev->lgstg_blocked = true;
s390_pci_generate_error_event(ERR_EVENT_SERR, pbdev->fh, pbdev->fid,
addr, ERR_EVENT_Q_BIT);
return ret;
}
flags = pte & ZPCI_PTE_FLAG_MASK;
ret.iova = addr;
ret.translated_addr = pte & ZPCI_PTE_ADDR_MASK;
ret.addr_mask = 0xfff;
if (flags & ZPCI_PTE_INVALID) {
ret.perm = IOMMU_NONE;
} else {
ret.perm = IOMMU_RW;
}
return ret;
}
static const MemoryRegionIOMMUOps s390_iommu_ops = {
.translate = s390_translate_iommu,
};
static AddressSpace *s390_pci_dma_iommu(PCIBus *bus, void *opaque, int devfn)
{
S390pciState *s = opaque;
return &s->pbdev[PCI_SLOT(devfn)].as;
}
static uint8_t set_ind_atomic(uint64_t ind_loc, uint8_t to_be_set)
{
uint8_t ind_old, ind_new;
hwaddr len = 1;
uint8_t *ind_addr;
ind_addr = cpu_physical_memory_map(ind_loc, &len, 1);
if (!ind_addr) {
s390_pci_generate_error_event(ERR_EVENT_AIRERR, 0, 0, 0, 0);
return -1;
}
do {
ind_old = *ind_addr;
ind_new = ind_old | to_be_set;
} while (atomic_cmpxchg(ind_addr, ind_old, ind_new) != ind_old);
cpu_physical_memory_unmap(ind_addr, len, 1, len);
return ind_old;
}
static void s390_msi_ctrl_write(void *opaque, hwaddr addr, uint64_t data,
unsigned int size)
{
S390PCIBusDevice *pbdev;
uint32_t io_int_word;
uint32_t fid = data >> ZPCI_MSI_VEC_BITS;
uint32_t vec = data & ZPCI_MSI_VEC_MASK;
uint64_t ind_bit;
uint32_t sum_bit;
uint32_t e = 0;
DPRINTF("write_msix data 0x%" PRIx64 " fid %d vec 0x%x\n", data, fid, vec);
pbdev = s390_pci_find_dev_by_fid(fid);
if (!pbdev) {
e |= (vec << ERR_EVENT_MVN_OFFSET);
s390_pci_generate_error_event(ERR_EVENT_NOMSI, 0, fid, addr, e);
return;
}
ind_bit = pbdev->routes.adapter.ind_offset;
sum_bit = pbdev->routes.adapter.summary_offset;
set_ind_atomic(pbdev->routes.adapter.ind_addr + (ind_bit + vec) / 8,
0x80 >> ((ind_bit + vec) % 8));
if (!set_ind_atomic(pbdev->routes.adapter.summary_addr + sum_bit / 8,
0x80 >> (sum_bit % 8))) {
io_int_word = (pbdev->isc << 27) | IO_INT_WORD_AI;
s390_io_interrupt(0, 0, 0, io_int_word);
}
return;
}
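/*
 * The fid/vec split above relies on update_msix_table_msg_data() in
 * s390-pci-inst.c, which folds the function id into the MSI-X message
 * data: bits 31..11 hold the fid and bits 10..0 the vector. For example
 * (value assumed), data 0x0001802a decodes to fid 0x30, vector 0x2a.
 */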
static uint64_t s390_msi_ctrl_read(void *opaque, hwaddr addr, unsigned size)
{
return 0xffffffff;
}
static const MemoryRegionOps s390_msi_ctrl_ops = {
.write = s390_msi_ctrl_write,
.read = s390_msi_ctrl_read,
.endianness = DEVICE_LITTLE_ENDIAN,
};
static void s390_pcihost_init_as(S390pciState *s)
{
int i;
for (i = 0; i < PCI_SLOT_MAX; i++) {
memory_region_init_iommu(&s->pbdev[i].mr, OBJECT(s),
&s390_iommu_ops, "iommu-s390", UINT64_MAX);
address_space_init(&s->pbdev[i].as, &s->pbdev[i].mr, "iommu-pci");
}
memory_region_init_io(&s->msix_notify_mr, OBJECT(s),
&s390_msi_ctrl_ops, s, "msix-s390", UINT64_MAX);
address_space_init(&s->msix_notify_as, &s->msix_notify_mr, "msix-pci");
}
static int s390_pcihost_init(SysBusDevice *dev)
{
PCIBus *b;
BusState *bus;
PCIHostState *phb = PCI_HOST_BRIDGE(dev);
S390pciState *s = S390_PCI_HOST_BRIDGE(dev);
DPRINTF("host_init\n");
b = pci_register_bus(DEVICE(dev), NULL,
s390_pci_set_irq, s390_pci_map_irq, NULL,
get_system_memory(), get_system_io(), 0, 64,
TYPE_PCI_BUS);
s390_pcihost_init_as(s);
pci_setup_iommu(b, s390_pci_dma_iommu, s);
bus = BUS(b);
qbus_set_hotplug_handler(bus, DEVICE(dev), NULL);
phb->bus = b;
QTAILQ_INIT(&s->pending_sei);
return 0;
}
static int s390_pcihost_setup_msix(S390PCIBusDevice *pbdev)
{
uint8_t pos;
uint16_t ctrl;
uint32_t table, pba;
pos = pci_find_capability(pbdev->pdev, PCI_CAP_ID_MSIX);
if (!pos) {
pbdev->msix.available = false;
return 0;
}
ctrl = pci_host_config_read_common(pbdev->pdev, pos + PCI_CAP_FLAGS,
pci_config_size(pbdev->pdev), sizeof(ctrl));
table = pci_host_config_read_common(pbdev->pdev, pos + PCI_MSIX_TABLE,
pci_config_size(pbdev->pdev), sizeof(table));
pba = pci_host_config_read_common(pbdev->pdev, pos + PCI_MSIX_PBA,
pci_config_size(pbdev->pdev), sizeof(pba));
pbdev->msix.table_bar = table & PCI_MSIX_FLAGS_BIRMASK;
pbdev->msix.table_offset = table & ~PCI_MSIX_FLAGS_BIRMASK;
pbdev->msix.pba_bar = pba & PCI_MSIX_FLAGS_BIRMASK;
pbdev->msix.pba_offset = pba & ~PCI_MSIX_FLAGS_BIRMASK;
pbdev->msix.entries = (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1;
pbdev->msix.available = true;
return 0;
}
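/*
 * Example decode (register values assumed): a table register reading
 * 0x00001003 gives table_bar = 0x1003 & PCI_MSIX_FLAGS_BIRMASK = 3 and
 * table_offset = 0x1000; a ctrl value with (ctrl & PCI_MSIX_FLAGS_QSIZE)
 * == 31 means 32 MSI-X table entries.
 */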
static void s390_pcihost_hot_plug(HotplugHandler *hotplug_dev,
DeviceState *dev, Error **errp)
{
PCIDevice *pci_dev = PCI_DEVICE(dev);
S390PCIBusDevice *pbdev;
S390pciState *s = S390_PCI_HOST_BRIDGE(pci_device_root_bus(pci_dev)
->qbus.parent);
pbdev = &s->pbdev[PCI_SLOT(pci_dev->devfn)];
pbdev->fid = s390_pci_get_pfid(pci_dev);
pbdev->pdev = pci_dev;
pbdev->configured = true;
pbdev->fh = s390_pci_get_pfh(pci_dev);
s390_pcihost_setup_msix(pbdev);
if (dev->hotplugged) {
s390_pci_generate_plug_event(HP_EVENT_RESERVED_TO_STANDBY,
pbdev->fh, pbdev->fid);
s390_pci_generate_plug_event(HP_EVENT_TO_CONFIGURED,
pbdev->fh, pbdev->fid);
}
return;
}
static void s390_pcihost_hot_unplug(HotplugHandler *hotplug_dev,
DeviceState *dev, Error **errp)
{
PCIDevice *pci_dev = PCI_DEVICE(dev);
S390pciState *s = S390_PCI_HOST_BRIDGE(pci_device_root_bus(pci_dev)
->qbus.parent);
S390PCIBusDevice *pbdev = &s->pbdev[PCI_SLOT(pci_dev->devfn)];
if (pbdev->configured) {
pbdev->configured = false;
s390_pci_generate_plug_event(HP_EVENT_CONFIGURED_TO_STBRES,
pbdev->fh, pbdev->fid);
}
s390_pci_generate_plug_event(HP_EVENT_STANDBY_TO_RESERVED,
pbdev->fh, pbdev->fid);
pbdev->fh = 0;
pbdev->fid = 0;
pbdev->pdev = NULL;
object_unparent(OBJECT(pci_dev));
}
static void s390_pcihost_class_init(ObjectClass *klass, void *data)
{
SysBusDeviceClass *k = SYS_BUS_DEVICE_CLASS(klass);
DeviceClass *dc = DEVICE_CLASS(klass);
HotplugHandlerClass *hc = HOTPLUG_HANDLER_CLASS(klass);
dc->cannot_instantiate_with_device_add_yet = true;
k->init = s390_pcihost_init;
hc->plug = s390_pcihost_hot_plug;
hc->unplug = s390_pcihost_hot_unplug;
msi_supported = true;
}
static const TypeInfo s390_pcihost_info = {
.name = TYPE_S390_PCI_HOST_BRIDGE,
.parent = TYPE_PCI_HOST_BRIDGE,
.instance_size = sizeof(S390pciState),
.class_init = s390_pcihost_class_init,
.interfaces = (InterfaceInfo[]) {
{ TYPE_HOTPLUG_HANDLER },
{ }
}
};
static void s390_pci_register_types(void)
{
type_register_static(&s390_pcihost_info);
}
type_init(s390_pci_register_types)

hw/s390x/s390-pci-bus.h (new file)

@@ -0,0 +1,251 @@
/*
* s390 PCI BUS definitions
*
* Copyright 2014 IBM Corp.
* Author(s): Frank Blaschka <frank.blaschka@de.ibm.com>
* Hong Bo Li <lihbbj@cn.ibm.com>
* Yi Min Zhao <zyimin@cn.ibm.com>
*
* This work is licensed under the terms of the GNU GPL, version 2 or (at
* your option) any later version. See the COPYING file in the top-level
* directory.
*/
#ifndef HW_S390_PCI_BUS_H
#define HW_S390_PCI_BUS_H
#include <hw/pci/pci.h>
#include <hw/pci/pci_host.h>
#include "hw/s390x/sclp.h"
#include "hw/s390x/s390_flic.h"
#include "hw/s390x/css.h"
#define TYPE_S390_PCI_HOST_BRIDGE "s390-pcihost"
#define FH_VIRT 0x00ff0000
#define ENABLE_BIT_OFFSET 31
#define S390_PCIPT_ADAPTER 2
#define S390_PCI_HOST_BRIDGE(obj) \
OBJECT_CHECK(S390pciState, (obj), TYPE_S390_PCI_HOST_BRIDGE)
#define HP_EVENT_TO_CONFIGURED 0x0301
#define HP_EVENT_RESERVED_TO_STANDBY 0x0302
#define HP_EVENT_CONFIGURED_TO_STBRES 0x0304
#define HP_EVENT_STANDBY_TO_RESERVED 0x0308
#define ERR_EVENT_INVALAS 0x1
#define ERR_EVENT_OORANGE 0x2
#define ERR_EVENT_INVALTF 0x3
#define ERR_EVENT_TPROTE 0x4
#define ERR_EVENT_APROTE 0x5
#define ERR_EVENT_KEYE 0x6
#define ERR_EVENT_INVALTE 0x7
#define ERR_EVENT_INVALTL 0x8
#define ERR_EVENT_TT 0x9
#define ERR_EVENT_INVALMS 0xa
#define ERR_EVENT_SERR 0xb
#define ERR_EVENT_NOMSI 0x10
#define ERR_EVENT_INVALBV 0x11
#define ERR_EVENT_AIBV 0x12
#define ERR_EVENT_AIRERR 0x13
#define ERR_EVENT_FMBA 0x2a
#define ERR_EVENT_FMBUP 0x2b
#define ERR_EVENT_FMBPRO 0x2c
#define ERR_EVENT_CCONF 0x30
#define ERR_EVENT_SERVAC 0x3a
#define ERR_EVENT_PERMERR 0x3b
#define ERR_EVENT_Q_BIT 0x2
#define ERR_EVENT_MVN_OFFSET 16
#define ZPCI_MSI_VEC_BITS 11
#define ZPCI_MSI_VEC_MASK 0x7ff
#define ZPCI_MSI_ADDR 0xfe00000000000000ULL
#define ZPCI_SDMA_ADDR 0x100000000ULL
#define ZPCI_EDMA_ADDR 0x1ffffffffffffffULL
#define PAGE_SHIFT 12
#define PAGE_MASK (~(PAGE_SIZE-1))
#define PAGE_DEFAULT_ACC 0
#define PAGE_DEFAULT_KEY (PAGE_DEFAULT_ACC << 4)
/* I/O Translation Anchor (IOTA) */
enum ZpciIoatDtype {
ZPCI_IOTA_STO = 0,
ZPCI_IOTA_RTTO = 1,
ZPCI_IOTA_RSTO = 2,
ZPCI_IOTA_RFTO = 3,
ZPCI_IOTA_PFAA = 4,
ZPCI_IOTA_IOPFAA = 5,
ZPCI_IOTA_IOPTO = 7
};
#define ZPCI_IOTA_IOT_ENABLED 0x800ULL
#define ZPCI_IOTA_DT_ST (ZPCI_IOTA_STO << 2)
#define ZPCI_IOTA_DT_RT (ZPCI_IOTA_RTTO << 2)
#define ZPCI_IOTA_DT_RS (ZPCI_IOTA_RSTO << 2)
#define ZPCI_IOTA_DT_RF (ZPCI_IOTA_RFTO << 2)
#define ZPCI_IOTA_DT_PF (ZPCI_IOTA_PFAA << 2)
#define ZPCI_IOTA_FS_4K 0
#define ZPCI_IOTA_FS_1M 1
#define ZPCI_IOTA_FS_2G 2
#define ZPCI_KEY (PAGE_DEFAULT_KEY << 5)
#define ZPCI_IOTA_STO_FLAG (ZPCI_IOTA_IOT_ENABLED | ZPCI_KEY | ZPCI_IOTA_DT_ST)
#define ZPCI_IOTA_RTTO_FLAG (ZPCI_IOTA_IOT_ENABLED | ZPCI_KEY | ZPCI_IOTA_DT_RT)
#define ZPCI_IOTA_RSTO_FLAG (ZPCI_IOTA_IOT_ENABLED | ZPCI_KEY | ZPCI_IOTA_DT_RS)
#define ZPCI_IOTA_RFTO_FLAG (ZPCI_IOTA_IOT_ENABLED | ZPCI_KEY | ZPCI_IOTA_DT_RF)
#define ZPCI_IOTA_RFAA_FLAG (ZPCI_IOTA_IOT_ENABLED | ZPCI_KEY |\
ZPCI_IOTA_DT_PF | ZPCI_IOTA_FS_2G)
/* I/O Region and segment tables */
#define ZPCI_INDEX_MASK 0x7ffULL
#define ZPCI_TABLE_TYPE_MASK 0xc
#define ZPCI_TABLE_TYPE_RFX 0xc
#define ZPCI_TABLE_TYPE_RSX 0x8
#define ZPCI_TABLE_TYPE_RTX 0x4
#define ZPCI_TABLE_TYPE_SX 0x0
#define ZPCI_TABLE_LEN_RFX 0x3
#define ZPCI_TABLE_LEN_RSX 0x3
#define ZPCI_TABLE_LEN_RTX 0x3
#define ZPCI_TABLE_OFFSET_MASK 0xc0
#define ZPCI_TABLE_SIZE 0x4000
#define ZPCI_TABLE_ALIGN ZPCI_TABLE_SIZE
#define ZPCI_TABLE_ENTRY_SIZE (sizeof(unsigned long))
#define ZPCI_TABLE_ENTRIES (ZPCI_TABLE_SIZE / ZPCI_TABLE_ENTRY_SIZE)
#define ZPCI_TABLE_BITS 11
#define ZPCI_PT_BITS 8
#define ZPCI_ST_SHIFT (ZPCI_PT_BITS + PAGE_SHIFT)
#define ZPCI_RT_SHIFT (ZPCI_ST_SHIFT + ZPCI_TABLE_BITS)
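/*
 * Derived values: with PAGE_SHIFT 12, ZPCI_PT_BITS 8 and ZPCI_TABLE_BITS
 * 11, ZPCI_ST_SHIFT is 20 and ZPCI_RT_SHIFT is 31; one page-table entry
 * maps 4K, one segment-table entry 1M and one region-third entry 2G.
 */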
#define ZPCI_RTE_FLAG_MASK 0x3fffULL
#define ZPCI_RTE_ADDR_MASK (~ZPCI_RTE_FLAG_MASK)
#define ZPCI_STE_FLAG_MASK 0x7ffULL
#define ZPCI_STE_ADDR_MASK (~ZPCI_STE_FLAG_MASK)
/* I/O Page tables */
#define ZPCI_PTE_VALID_MASK 0x400
#define ZPCI_PTE_INVALID 0x400
#define ZPCI_PTE_VALID 0x000
#define ZPCI_PT_SIZE 0x800
#define ZPCI_PT_ALIGN ZPCI_PT_SIZE
#define ZPCI_PT_ENTRIES (ZPCI_PT_SIZE / ZPCI_TABLE_ENTRY_SIZE)
#define ZPCI_PT_MASK (ZPCI_PT_ENTRIES - 1)
#define ZPCI_PTE_FLAG_MASK 0xfffULL
#define ZPCI_PTE_ADDR_MASK (~ZPCI_PTE_FLAG_MASK)
/* Shared bits */
#define ZPCI_TABLE_VALID 0x00
#define ZPCI_TABLE_INVALID 0x20
#define ZPCI_TABLE_PROTECTED 0x200
#define ZPCI_TABLE_UNPROTECTED 0x000
#define ZPCI_TABLE_VALID_MASK 0x20
#define ZPCI_TABLE_PROT_MASK 0x200
typedef struct SeiContainer {
QTAILQ_ENTRY(SeiContainer) link;
uint32_t fid;
uint32_t fh;
uint8_t cc;
uint16_t pec;
uint64_t faddr;
uint32_t e;
} SeiContainer;
typedef struct PciCcdfErr {
uint32_t reserved1;
uint32_t fh;
uint32_t fid;
uint32_t e;
uint64_t faddr;
uint32_t reserved3;
uint16_t reserved4;
uint16_t pec;
} QEMU_PACKED PciCcdfErr;
typedef struct PciCcdfAvail {
uint32_t reserved1;
uint32_t fh;
uint32_t fid;
uint32_t reserved2;
uint32_t reserved3;
uint32_t reserved4;
uint32_t reserved5;
uint16_t reserved6;
uint16_t pec;
} QEMU_PACKED PciCcdfAvail;
typedef struct ChscSeiNt2Res {
uint16_t length;
uint16_t code;
uint16_t reserved1;
uint8_t reserved2;
uint8_t nt;
uint8_t flags;
uint8_t reserved3;
uint8_t reserved4;
uint8_t cc;
uint32_t reserved5[13];
uint8_t ccdf[4016];
} QEMU_PACKED ChscSeiNt2Res;
typedef struct PciCfgSccb {
SCCBHeader header;
uint8_t atype;
uint8_t reserved1;
uint16_t reserved2;
uint32_t aid;
} QEMU_PACKED PciCfgSccb;
typedef struct S390MsixInfo {
bool available;
uint8_t table_bar;
uint8_t pba_bar;
uint16_t entries;
uint32_t table_offset;
uint32_t pba_offset;
} S390MsixInfo;
typedef struct S390PCIBusDevice {
PCIDevice *pdev;
bool configured;
bool error_state;
bool lgstg_blocked;
uint32_t fh;
uint32_t fid;
uint64_t g_iota;
uint64_t pba;
uint64_t pal;
uint64_t fmb_addr;
uint8_t isc;
uint16_t noi;
uint8_t sum;
S390MsixInfo msix;
AdapterRoutes routes;
AddressSpace as;
MemoryRegion mr;
} S390PCIBusDevice;
typedef struct S390pciState {
PCIHostState parent_obj;
S390PCIBusDevice pbdev[PCI_SLOT_MAX];
AddressSpace msix_notify_as;
MemoryRegion msix_notify_mr;
QTAILQ_HEAD(, SeiContainer) pending_sei;
} S390pciState;
int chsc_sei_nt2_get_event(void *res);
int chsc_sei_nt2_have_event(void);
void s390_pci_sclp_configure(int configure, SCCB *sccb);
S390PCIBusDevice *s390_pci_find_dev_by_idx(uint32_t idx);
S390PCIBusDevice *s390_pci_find_dev_by_fh(uint32_t fh);
S390PCIBusDevice *s390_pci_find_dev_by_fid(uint32_t fid);
#endif

hw/s390x/s390-pci-inst.c (new file)

@@ -0,0 +1,811 @@
/*
* s390 PCI instructions
*
* Copyright 2014 IBM Corp.
* Author(s): Frank Blaschka <frank.blaschka@de.ibm.com>
* Hong Bo Li <lihbbj@cn.ibm.com>
* Yi Min Zhao <zyimin@cn.ibm.com>
*
* This work is licensed under the terms of the GNU GPL, version 2 or (at
* your option) any later version. See the COPYING file in the top-level
* directory.
*/
#include "s390-pci-inst.h"
#include "s390-pci-bus.h"
#include <exec/memory-internal.h>
#include <qemu/error-report.h>
/* #define DEBUG_S390PCI_INST */
#ifdef DEBUG_S390PCI_INST
#define DPRINTF(fmt, ...) \
do { fprintf(stderr, "s390pci-inst: " fmt, ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
do { } while (0)
#endif
static void s390_set_status_code(CPUS390XState *env,
uint8_t r, uint64_t status_code)
{
env->regs[r] &= ~0xff000000ULL;
env->regs[r] |= (status_code & 0xff) << 24;
}
static int list_pci(ClpReqRspListPci *rrb, uint8_t *cc)
{
S390PCIBusDevice *pbdev;
uint32_t res_code, initial_l2, g_l2, finish;
int rc, idx;
uint64_t resume_token;
rc = 0;
if (lduw_p(&rrb->request.hdr.len) != 32) {
res_code = CLP_RC_LEN;
rc = -EINVAL;
goto out;
}
if ((ldl_p(&rrb->request.fmt) & CLP_MASK_FMT) != 0) {
res_code = CLP_RC_FMT;
rc = -EINVAL;
goto out;
}
if ((ldl_p(&rrb->request.fmt) & ~CLP_MASK_FMT) != 0 ||
ldq_p(&rrb->request.reserved1) != 0 ||
ldq_p(&rrb->request.reserved2) != 0) {
res_code = CLP_RC_RESNOT0;
rc = -EINVAL;
goto out;
}
resume_token = ldq_p(&rrb->request.resume_token);
if (resume_token) {
pbdev = s390_pci_find_dev_by_idx(resume_token);
if (!pbdev) {
res_code = CLP_RC_LISTPCI_BADRT;
rc = -EINVAL;
goto out;
}
}
if (lduw_p(&rrb->response.hdr.len) < 48) {
res_code = CLP_RC_8K;
rc = -EINVAL;
goto out;
}
initial_l2 = lduw_p(&rrb->response.hdr.len);
if ((initial_l2 - LIST_PCI_HDR_LEN) % sizeof(ClpFhListEntry)
!= 0) {
res_code = CLP_RC_LEN;
rc = -EINVAL;
*cc = 3;
goto out;
}
stl_p(&rrb->response.fmt, 0);
stq_p(&rrb->response.reserved1, 0);
stq_p(&rrb->response.reserved2, 0);
stl_p(&rrb->response.mdd, FH_VIRT);
stw_p(&rrb->response.max_fn, PCI_MAX_FUNCTIONS);
rrb->response.entry_size = sizeof(ClpFhListEntry);
finish = 0;
idx = resume_token;
g_l2 = LIST_PCI_HDR_LEN;
do {
pbdev = s390_pci_find_dev_by_idx(idx);
if (!pbdev) {
finish = 1;
break;
}
stw_p(&rrb->response.fh_list[idx - resume_token].device_id,
pci_get_word(pbdev->pdev->config + PCI_DEVICE_ID));
stw_p(&rrb->response.fh_list[idx - resume_token].vendor_id,
pci_get_word(pbdev->pdev->config + PCI_VENDOR_ID));
stl_p(&rrb->response.fh_list[idx - resume_token].config, 0x80000000);
stl_p(&rrb->response.fh_list[idx - resume_token].fid, pbdev->fid);
stl_p(&rrb->response.fh_list[idx - resume_token].fh, pbdev->fh);
g_l2 += sizeof(ClpFhListEntry);
/* Add endian check for DPRINTF? */
DPRINTF("g_l2 %d vendor id 0x%x device id 0x%x fid 0x%x fh 0x%x\n",
g_l2,
lduw_p(&rrb->response.fh_list[idx - resume_token].vendor_id),
lduw_p(&rrb->response.fh_list[idx - resume_token].device_id),
ldl_p(&rrb->response.fh_list[idx - resume_token].fid),
ldl_p(&rrb->response.fh_list[idx - resume_token].fh));
idx++;
} while (g_l2 < initial_l2);
if (finish == 1) {
resume_token = 0;
} else {
resume_token = idx;
}
stq_p(&rrb->response.resume_token, resume_token);
stw_p(&rrb->response.hdr.len, g_l2);
stw_p(&rrb->response.hdr.rsp, CLP_RC_OK);
out:
if (rc) {
DPRINTF("list pci failed rc 0x%x\n", rc);
stw_p(&rrb->response.hdr.rsp, res_code);
}
return rc;
}
int clp_service_call(S390CPU *cpu, uint8_t r2)
{
ClpReqHdr *reqh;
ClpRspHdr *resh;
S390PCIBusDevice *pbdev;
uint32_t req_len;
uint32_t res_len;
uint8_t buffer[4096 * 2];
uint8_t cc = 0;
CPUS390XState *env = &cpu->env;
int i;
cpu_synchronize_state(CPU(cpu));
if (env->psw.mask & PSW_MASK_PSTATE) {
program_interrupt(env, PGM_PRIVILEGED, 4);
return 0;
}
cpu_physical_memory_read(env->regs[r2], buffer, sizeof(*reqh));
reqh = (ClpReqHdr *)buffer;
req_len = lduw_p(&reqh->len);
if (req_len < 16 || req_len > 8184 || (req_len % 8 != 0)) {
program_interrupt(env, PGM_OPERAND, 4);
return 0;
}
cpu_physical_memory_read(env->regs[r2], buffer, req_len + sizeof(*resh));
resh = (ClpRspHdr *)(buffer + req_len);
res_len = lduw_p(&resh->len);
if (res_len < 8 || res_len > 8176 || (res_len % 8 != 0)) {
program_interrupt(env, PGM_OPERAND, 4);
return 0;
}
if ((req_len + res_len) > 8192) {
program_interrupt(env, PGM_OPERAND, 4);
return 0;
}
cpu_physical_memory_read(env->regs[r2], buffer, req_len + res_len);
if (req_len != 32) {
stw_p(&resh->rsp, CLP_RC_LEN);
goto out;
}
switch (lduw_p(&reqh->cmd)) {
case CLP_LIST_PCI: {
ClpReqRspListPci *rrb = (ClpReqRspListPci *)buffer;
list_pci(rrb, &cc);
break;
}
case CLP_SET_PCI_FN: {
ClpReqSetPci *reqsetpci = (ClpReqSetPci *)reqh;
ClpRspSetPci *ressetpci = (ClpRspSetPci *)resh;
pbdev = s390_pci_find_dev_by_fh(ldl_p(&reqsetpci->fh));
if (!pbdev) {
stw_p(&ressetpci->hdr.rsp, CLP_RC_SETPCIFN_FH);
goto out;
}
switch (reqsetpci->oc) {
case CLP_SET_ENABLE_PCI_FN:
pbdev->fh = pbdev->fh | 1 << ENABLE_BIT_OFFSET;
stl_p(&ressetpci->fh, pbdev->fh);
stw_p(&ressetpci->hdr.rsp, CLP_RC_OK);
break;
case CLP_SET_DISABLE_PCI_FN:
pbdev->fh = pbdev->fh & ~(1 << ENABLE_BIT_OFFSET);
pbdev->error_state = false;
pbdev->lgstg_blocked = false;
stl_p(&ressetpci->fh, pbdev->fh);
stw_p(&ressetpci->hdr.rsp, CLP_RC_OK);
break;
default:
DPRINTF("unknown set pci command\n");
stw_p(&ressetpci->hdr.rsp, CLP_RC_SETPCIFN_FHOP);
break;
}
break;
}
case CLP_QUERY_PCI_FN: {
ClpReqQueryPci *reqquery = (ClpReqQueryPci *)reqh;
ClpRspQueryPci *resquery = (ClpRspQueryPci *)resh;
pbdev = s390_pci_find_dev_by_fh(ldl_p(&reqquery->fh));
if (!pbdev) {
DPRINTF("query pci no pci dev\n");
stw_p(&resquery->hdr.rsp, CLP_RC_SETPCIFN_FH);
goto out;
}
for (i = 0; i < PCI_BAR_COUNT; i++) {
uint32_t data = pci_get_long(pbdev->pdev->config +
PCI_BASE_ADDRESS_0 + (i * 4));
stl_p(&resquery->bar[i], data);
resquery->bar_size[i] = pbdev->pdev->io_regions[i].size ?
ctz64(pbdev->pdev->io_regions[i].size) : 0;
DPRINTF("bar %d addr 0x%x size 0x%" PRIx64 "barsize 0x%x\n", i,
ldl_p(&resquery->bar[i]),
pbdev->pdev->io_regions[i].size,
resquery->bar_size[i]);
}
stq_p(&resquery->sdma, ZPCI_SDMA_ADDR);
stq_p(&resquery->edma, ZPCI_EDMA_ADDR);
stw_p(&resquery->pchid, 0);
stw_p(&resquery->ug, 1);
stl_p(&resquery->uid, pbdev->fid);
stw_p(&resquery->hdr.rsp, CLP_RC_OK);
break;
}
case CLP_QUERY_PCI_FNGRP: {
ClpRspQueryPciGrp *resgrp = (ClpRspQueryPciGrp *)resh;
resgrp->fr = 1;
stq_p(&resgrp->dasm, 0);
stq_p(&resgrp->msia, ZPCI_MSI_ADDR);
stw_p(&resgrp->mui, 0);
stw_p(&resgrp->i, 128);
resgrp->version = 0;
stw_p(&resgrp->hdr.rsp, CLP_RC_OK);
break;
}
default:
DPRINTF("unknown clp command\n");
stw_p(&resh->rsp, CLP_RC_CMD);
break;
}
out:
cpu_physical_memory_write(env->regs[r2], buffer, req_len + res_len);
setcc(cpu, cc);
return 0;
}
int pcilg_service_call(S390CPU *cpu, uint8_t r1, uint8_t r2)
{
CPUS390XState *env = &cpu->env;
S390PCIBusDevice *pbdev;
uint64_t offset;
uint64_t data;
uint8_t len;
uint32_t fh;
uint8_t pcias;
cpu_synchronize_state(CPU(cpu));
if (env->psw.mask & PSW_MASK_PSTATE) {
program_interrupt(env, PGM_PRIVILEGED, 4);
return 0;
}
if (r2 & 0x1) {
program_interrupt(env, PGM_SPECIFICATION, 4);
return 0;
}
fh = env->regs[r2] >> 32;
pcias = (env->regs[r2] >> 16) & 0xf;
len = env->regs[r2] & 0xf;
offset = env->regs[r2 + 1];
pbdev = s390_pci_find_dev_by_fh(fh);
if (!pbdev) {
DPRINTF("pcilg no pci dev\n");
setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
return 0;
}
if (pbdev->lgstg_blocked) {
setcc(cpu, ZPCI_PCI_LS_ERR);
s390_set_status_code(env, r2, ZPCI_PCI_ST_BLOCKED);
return 0;
}
if (pcias < 6) {
if ((8 - (offset & 0x7)) < len) {
program_interrupt(env, PGM_OPERAND, 4);
return 0;
}
MemoryRegion *mr = pbdev->pdev->io_regions[pcias].memory;
io_mem_read(mr, offset, &data, len);
} else if (pcias == 15) {
if ((4 - (offset & 0x3)) < len) {
program_interrupt(env, PGM_OPERAND, 4);
return 0;
}
data = pci_host_config_read_common(
pbdev->pdev, offset, pci_config_size(pbdev->pdev), len);
switch (len) {
case 1:
break;
case 2:
data = bswap16(data);
break;
case 4:
data = bswap32(data);
break;
case 8:
data = bswap64(data);
break;
default:
program_interrupt(env, PGM_OPERAND, 4);
return 0;
}
} else {
DPRINTF("invalid space\n");
setcc(cpu, ZPCI_PCI_LS_ERR);
s390_set_status_code(env, r2, ZPCI_PCI_ST_INVAL_AS);
return 0;
}
env->regs[r1] = data;
setcc(cpu, ZPCI_PCI_LS_OK);
return 0;
}
static void update_msix_table_msg_data(S390PCIBusDevice *pbdev, uint64_t offset,
uint64_t *data, uint8_t len)
{
uint32_t val;
uint8_t *msg_data;
if (offset % PCI_MSIX_ENTRY_SIZE != 8) {
return;
}
if (len != 4) {
DPRINTF("access msix table msg data but len is %d\n", len);
return;
}
msg_data = (uint8_t *)data - offset % PCI_MSIX_ENTRY_SIZE +
PCI_MSIX_ENTRY_VECTOR_CTRL;
val = pci_get_long(msg_data) | (pbdev->fid << ZPCI_MSI_VEC_BITS);
pci_set_long(msg_data, val);
DPRINTF("update msix msg_data to 0x%" PRIx64 "\n", *data);
}
static int trap_msix(S390PCIBusDevice *pbdev, uint64_t offset, uint8_t pcias)
{
if (pbdev->msix.available && pbdev->msix.table_bar == pcias &&
offset >= pbdev->msix.table_offset &&
offset <= pbdev->msix.table_offset +
(pbdev->msix.entries - 1) * PCI_MSIX_ENTRY_SIZE) {
return 1;
} else {
return 0;
}
}
int pcistg_service_call(S390CPU *cpu, uint8_t r1, uint8_t r2)
{
CPUS390XState *env = &cpu->env;
uint64_t offset, data;
S390PCIBusDevice *pbdev;
uint8_t len;
uint32_t fh;
uint8_t pcias;
cpu_synchronize_state(CPU(cpu));
if (env->psw.mask & PSW_MASK_PSTATE) {
program_interrupt(env, PGM_PRIVILEGED, 4);
return 0;
}
if (r2 & 0x1) {
program_interrupt(env, PGM_SPECIFICATION, 4);
return 0;
}
fh = env->regs[r2] >> 32;
pcias = (env->regs[r2] >> 16) & 0xf;
len = env->regs[r2] & 0xf;
offset = env->regs[r2 + 1];
pbdev = s390_pci_find_dev_by_fh(fh);
if (!pbdev) {
DPRINTF("pcistg no pci dev\n");
setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
return 0;
}
if (pbdev->lgstg_blocked) {
setcc(cpu, ZPCI_PCI_LS_ERR);
s390_set_status_code(env, r2, ZPCI_PCI_ST_BLOCKED);
return 0;
}
data = env->regs[r1];
if (pcias < 6) {
if ((8 - (offset & 0x7)) < len) {
program_interrupt(env, PGM_OPERAND, 4);
return 0;
}
MemoryRegion *mr;
if (trap_msix(pbdev, offset, pcias)) {
offset = offset - pbdev->msix.table_offset;
mr = &pbdev->pdev->msix_table_mmio;
update_msix_table_msg_data(pbdev, offset, &data, len);
} else {
mr = pbdev->pdev->io_regions[pcias].memory;
}
io_mem_write(mr, offset, data, len);
} else if (pcias == 15) {
if ((4 - (offset & 0x3)) < len) {
program_interrupt(env, PGM_OPERAND, 4);
return 0;
}
switch (len) {
case 1:
break;
case 2:
data = bswap16(data);
break;
case 4:
data = bswap32(data);
break;
case 8:
data = bswap64(data);
break;
default:
program_interrupt(env, PGM_OPERAND, 4);
return 0;
}
pci_host_config_write_common(pbdev->pdev, offset,
pci_config_size(pbdev->pdev),
data, len);
} else {
DPRINTF("pcistg invalid space\n");
setcc(cpu, ZPCI_PCI_LS_ERR);
s390_set_status_code(env, r2, ZPCI_PCI_ST_INVAL_AS);
return 0;
}
setcc(cpu, ZPCI_PCI_LS_OK);
return 0;
}
int rpcit_service_call(S390CPU *cpu, uint8_t r1, uint8_t r2)
{
CPUS390XState *env = &cpu->env;
uint32_t fh;
S390PCIBusDevice *pbdev;
ram_addr_t size;
IOMMUTLBEntry entry;
MemoryRegion *mr;
cpu_synchronize_state(CPU(cpu));
if (env->psw.mask & PSW_MASK_PSTATE) {
program_interrupt(env, PGM_PRIVILEGED, 4);
goto out;
}
if (r2 & 0x1) {
program_interrupt(env, PGM_SPECIFICATION, 4);
goto out;
}
fh = env->regs[r1] >> 32;
size = env->regs[r2 + 1];
pbdev = s390_pci_find_dev_by_fh(fh);
if (!pbdev) {
DPRINTF("rpcit no pci dev\n");
setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
goto out;
}
mr = pci_device_iommu_address_space(pbdev->pdev)->root;
entry = mr->iommu_ops->translate(mr, env->regs[r2], 0);
if (!entry.translated_addr) {
setcc(cpu, ZPCI_PCI_LS_ERR);
goto out;
}
entry.addr_mask = size - 1;
memory_region_notify_iommu(mr, entry);
setcc(cpu, ZPCI_PCI_LS_OK);
out:
return 0;
}
int pcistb_service_call(S390CPU *cpu, uint8_t r1, uint8_t r3, uint64_t gaddr)
{
CPUS390XState *env = &cpu->env;
S390PCIBusDevice *pbdev;
MemoryRegion *mr;
int i;
uint64_t val;
uint32_t fh;
uint8_t pcias;
uint8_t len;
if (env->psw.mask & PSW_MASK_PSTATE) {
program_interrupt(env, PGM_PRIVILEGED, 6);
return 0;
}
fh = env->regs[r1] >> 32;
pcias = (env->regs[r1] >> 16) & 0xf;
len = env->regs[r1] & 0xff;
if (pcias > 5) {
DPRINTF("pcistb invalid space\n");
setcc(cpu, ZPCI_PCI_LS_ERR);
s390_set_status_code(env, r1, ZPCI_PCI_ST_INVAL_AS);
return 0;
}
switch (len) {
case 16:
case 32:
case 64:
case 128:
break;
default:
program_interrupt(env, PGM_SPECIFICATION, 6);
return 0;
}
pbdev = s390_pci_find_dev_by_fh(fh);
if (!pbdev) {
DPRINTF("pcistb no pci dev fh 0x%x\n", fh);
setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
return 0;
}
if (pbdev->lgstg_blocked) {
setcc(cpu, ZPCI_PCI_LS_ERR);
s390_set_status_code(env, r1, ZPCI_PCI_ST_BLOCKED);
return 0;
}
mr = pbdev->pdev->io_regions[pcias].memory;
if (!memory_region_access_valid(mr, env->regs[r3], len, true)) {
program_interrupt(env, PGM_ADDRESSING, 6);
return 0;
}
for (i = 0; i < len / 8; i++) {
val = ldq_phys(&address_space_memory, gaddr + i * 8);
io_mem_write(mr, env->regs[r3] + i * 8, val, 8);
}
setcc(cpu, ZPCI_PCI_LS_OK);
return 0;
}
static int reg_irqs(CPUS390XState *env, S390PCIBusDevice *pbdev, ZpciFib fib)
{
int ret;
S390FLICState *fs = s390_get_flic();
S390FLICStateClass *fsc = S390_FLIC_COMMON_GET_CLASS(fs);
ret = css_register_io_adapter(S390_PCIPT_ADAPTER,
FIB_DATA_ISC(ldl_p(&fib.data)), true, false,
&pbdev->routes.adapter.adapter_id);
assert(ret == 0);
fsc->io_adapter_map(fs, pbdev->routes.adapter.adapter_id,
ldq_p(&fib.aisb), true);
fsc->io_adapter_map(fs, pbdev->routes.adapter.adapter_id,
ldq_p(&fib.aibv), true);
pbdev->routes.adapter.summary_addr = ldq_p(&fib.aisb);
pbdev->routes.adapter.summary_offset = FIB_DATA_AISBO(ldl_p(&fib.data));
pbdev->routes.adapter.ind_addr = ldq_p(&fib.aibv);
pbdev->routes.adapter.ind_offset = FIB_DATA_AIBVO(ldl_p(&fib.data));
pbdev->isc = FIB_DATA_ISC(ldl_p(&fib.data));
pbdev->noi = FIB_DATA_NOI(ldl_p(&fib.data));
pbdev->sum = FIB_DATA_SUM(ldl_p(&fib.data));
DPRINTF("reg_irqs adapter id %d\n", pbdev->routes.adapter.adapter_id);
return 0;
}
static int dereg_irqs(S390PCIBusDevice *pbdev)
{
S390FLICState *fs = s390_get_flic();
S390FLICStateClass *fsc = S390_FLIC_COMMON_GET_CLASS(fs);
fsc->io_adapter_map(fs, pbdev->routes.adapter.adapter_id,
pbdev->routes.adapter.ind_addr, false);
pbdev->routes.adapter.summary_addr = 0;
pbdev->routes.adapter.summary_offset = 0;
pbdev->routes.adapter.ind_addr = 0;
pbdev->routes.adapter.ind_offset = 0;
pbdev->isc = 0;
pbdev->noi = 0;
pbdev->sum = 0;
DPRINTF("dereg_irqs adapter id %d\n", pbdev->routes.adapter.adapter_id);
return 0;
}
static int reg_ioat(CPUS390XState *env, S390PCIBusDevice *pbdev, ZpciFib fib)
{
uint64_t pba = ldq_p(&fib.pba);
uint64_t pal = ldq_p(&fib.pal);
uint64_t g_iota = ldq_p(&fib.iota);
uint8_t dt = (g_iota >> 2) & 0x7;
uint8_t t = (g_iota >> 11) & 0x1;
if (pba > pal || pba < ZPCI_SDMA_ADDR || pal > ZPCI_EDMA_ADDR) {
program_interrupt(env, PGM_OPERAND, 6);
return -EINVAL;
}
/* currently we only support designation type 1 with translation */
if (!(dt == ZPCI_IOTA_RTTO && t)) {
error_report("unsupported ioat dt %d t %d", dt, t);
program_interrupt(env, PGM_OPERAND, 6);
return -EINVAL;
}
pbdev->pba = pba;
pbdev->pal = pal;
pbdev->g_iota = g_iota;
return 0;
}
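/*
 * Decode illustration (iota value assumed): g_iota 0x7fff0804 gives
 * dt = (0x804 >> 2) & 0x7 = 1 = ZPCI_IOTA_RTTO and
 * t  = (0x804 >> 11) & 1 = 1, i.e. a region-third table origin with
 * translation enabled, the only combination accepted above.
 */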
static void dereg_ioat(S390PCIBusDevice *pbdev)
{
pbdev->pba = 0;
pbdev->pal = 0;
pbdev->g_iota = 0;
}
int mpcifc_service_call(S390CPU *cpu, uint8_t r1, uint64_t fiba)
{
CPUS390XState *env = &cpu->env;
uint8_t oc;
uint32_t fh;
ZpciFib fib;
S390PCIBusDevice *pbdev;
uint64_t cc = ZPCI_PCI_LS_OK;
if (env->psw.mask & PSW_MASK_PSTATE) {
program_interrupt(env, PGM_PRIVILEGED, 6);
return 0;
}
oc = env->regs[r1] & 0xff;
fh = env->regs[r1] >> 32;
if (fiba & 0x7) {
program_interrupt(env, PGM_SPECIFICATION, 6);
return 0;
}
pbdev = s390_pci_find_dev_by_fh(fh);
if (!pbdev) {
DPRINTF("mpcifc no pci dev fh 0x%x\n", fh);
setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
return 0;
}
cpu_physical_memory_read(fiba, (uint8_t *)&fib, sizeof(fib));
switch (oc) {
case ZPCI_MOD_FC_REG_INT:
if (reg_irqs(env, pbdev, fib)) {
cc = ZPCI_PCI_LS_ERR;
}
break;
case ZPCI_MOD_FC_DEREG_INT:
dereg_irqs(pbdev);
break;
case ZPCI_MOD_FC_REG_IOAT:
if (reg_ioat(env, pbdev, fib)) {
cc = ZPCI_PCI_LS_ERR;
}
break;
case ZPCI_MOD_FC_DEREG_IOAT:
dereg_ioat(pbdev);
break;
case ZPCI_MOD_FC_REREG_IOAT:
dereg_ioat(pbdev);
if (reg_ioat(env, pbdev, fib)) {
cc = ZPCI_PCI_LS_ERR;
}
break;
case ZPCI_MOD_FC_RESET_ERROR:
pbdev->error_state = false;
pbdev->lgstg_blocked = false;
break;
case ZPCI_MOD_FC_RESET_BLOCK:
pbdev->lgstg_blocked = false;
break;
case ZPCI_MOD_FC_SET_MEASURE:
pbdev->fmb_addr = ldq_p(&fib.fmb_addr);
break;
default:
program_interrupt(&cpu->env, PGM_OPERAND, 6);
cc = ZPCI_PCI_LS_ERR;
}
setcc(cpu, cc);
return 0;
}
int stpcifc_service_call(S390CPU *cpu, uint8_t r1, uint64_t fiba)
{
CPUS390XState *env = &cpu->env;
uint32_t fh;
ZpciFib fib;
S390PCIBusDevice *pbdev;
uint32_t data;
uint64_t cc = ZPCI_PCI_LS_OK;
if (env->psw.mask & PSW_MASK_PSTATE) {
program_interrupt(env, PGM_PRIVILEGED, 6);
return 0;
}
fh = env->regs[r1] >> 32;
if (fiba & 0x7) {
program_interrupt(env, PGM_SPECIFICATION, 6);
return 0;
}
pbdev = s390_pci_find_dev_by_fh(fh);
if (!pbdev) {
setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
return 0;
}
memset(&fib, 0, sizeof(fib));
stq_p(&fib.pba, pbdev->pba);
stq_p(&fib.pal, pbdev->pal);
stq_p(&fib.iota, pbdev->g_iota);
stq_p(&fib.aibv, pbdev->routes.adapter.ind_addr);
stq_p(&fib.aisb, pbdev->routes.adapter.summary_addr);
stq_p(&fib.fmb_addr, pbdev->fmb_addr);
data = (pbdev->isc << 28) | (pbdev->noi << 16) |
(pbdev->routes.adapter.ind_offset << 8) | (pbdev->sum << 7) |
pbdev->routes.adapter.summary_offset;
stw_p(&fib.data, data);
if (pbdev->fh >> ENABLE_BIT_OFFSET) {
fib.fc |= 0x80;
}
if (pbdev->error_state) {
fib.fc |= 0x40;
}
if (pbdev->lgstg_blocked) {
fib.fc |= 0x20;
}
if (pbdev->g_iota) {
fib.fc |= 0x10;
}
cpu_physical_memory_write(fiba, (uint8_t *)&fib, sizeof(fib));
setcc(cpu, cc);
return 0;
}

hw/s390x/s390-pci-inst.h (new file)

@@ -0,0 +1,288 @@
/*
* s390 PCI instruction definitions
*
* Copyright 2014 IBM Corp.
* Author(s): Frank Blaschka <frank.blaschka@de.ibm.com>
* Hong Bo Li <lihbbj@cn.ibm.com>
* Yi Min Zhao <zyimin@cn.ibm.com>
*
* This work is licensed under the terms of the GNU GPL, version 2 or (at
* your option) any later version. See the COPYING file in the top-level
* directory.
*/
#ifndef HW_S390_PCI_INST_H
#define HW_S390_PCI_INST_H
#include <sysemu/dma.h>
/* CLP common request & response block size */
#define CLP_BLK_SIZE 4096
#define PCI_BAR_COUNT 6
#define PCI_MAX_FUNCTIONS 4096
typedef struct ClpReqHdr {
uint16_t len;
uint16_t cmd;
} QEMU_PACKED ClpReqHdr;
typedef struct ClpRspHdr {
uint16_t len;
uint16_t rsp;
} QEMU_PACKED ClpRspHdr;
/* CLP Response Codes */
#define CLP_RC_OK 0x0010 /* Command request successfully */
#define CLP_RC_CMD 0x0020 /* Command code not recognized */
#define CLP_RC_PERM 0x0030 /* Command not authorized */
#define CLP_RC_FMT 0x0040 /* Invalid command request format */
#define CLP_RC_LEN 0x0050 /* Invalid command request length */
#define CLP_RC_8K 0x0060 /* Command requires 8K LPCB */
#define CLP_RC_RESNOT0 0x0070 /* Reserved field not zero */
#define CLP_RC_NODATA 0x0080 /* No data available */
#define CLP_RC_FC_UNKNOWN 0x0100 /* Function code not recognized */
/*
* Call Logical Processor - Command Codes
*/
#define CLP_LIST_PCI 0x0002
#define CLP_QUERY_PCI_FN 0x0003
#define CLP_QUERY_PCI_FNGRP 0x0004
#define CLP_SET_PCI_FN 0x0005
/* PCI function handle list entry */
typedef struct ClpFhListEntry {
uint16_t device_id;
uint16_t vendor_id;
#define CLP_FHLIST_MASK_CONFIG 0x80000000
uint32_t config;
uint32_t fid;
uint32_t fh;
} QEMU_PACKED ClpFhListEntry;
#define CLP_RC_SETPCIFN_FH 0x0101 /* Invalid PCI fn handle */
#define CLP_RC_SETPCIFN_FHOP 0x0102 /* Fn handle not valid for op */
#define CLP_RC_SETPCIFN_DMAAS 0x0103 /* Invalid DMA addr space */
#define CLP_RC_SETPCIFN_RES 0x0104 /* Insufficient resources */
#define CLP_RC_SETPCIFN_ALRDY 0x0105 /* Fn already in requested state */
#define CLP_RC_SETPCIFN_ERR 0x0106 /* Fn in permanent error state */
#define CLP_RC_SETPCIFN_RECPND 0x0107 /* Error recovery pending */
#define CLP_RC_SETPCIFN_BUSY 0x0108 /* Fn busy */
#define CLP_RC_LISTPCI_BADRT 0x010a /* Resume token not recognized */
#define CLP_RC_QUERYPCIFG_PFGID 0x010b /* Unrecognized PFGID */
/* request or response block header length */
#define LIST_PCI_HDR_LEN 32
/* Number of function handles fitting in response block */
#define CLP_FH_LIST_NR_ENTRIES \
((CLP_BLK_SIZE - 2 * LIST_PCI_HDR_LEN) \
/ sizeof(ClpFhListEntry))
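/* With CLP_BLK_SIZE 4096, LIST_PCI_HDR_LEN 32 and a 16-byte packed
 * ClpFhListEntry, this evaluates to (4096 - 2 * 32) / 16 = 252 entries. */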
#define CLP_SET_ENABLE_PCI_FN 0 /* Yes, 0 enables it */
#define CLP_SET_DISABLE_PCI_FN 1 /* Yes, 1 disables it */
#define CLP_UTIL_STR_LEN 64
#define CLP_MASK_FMT 0xf0000000
/* List PCI functions request */
typedef struct ClpReqListPci {
ClpReqHdr hdr;
uint32_t fmt;
uint64_t reserved1;
uint64_t resume_token;
uint64_t reserved2;
} QEMU_PACKED ClpReqListPci;
/* List PCI functions response */
typedef struct ClpRspListPci {
ClpRspHdr hdr;
uint32_t fmt;
uint64_t reserved1;
uint64_t resume_token;
uint32_t mdd;
uint16_t max_fn;
uint8_t reserved2;
uint8_t entry_size;
ClpFhListEntry fh_list[CLP_FH_LIST_NR_ENTRIES];
} QEMU_PACKED ClpRspListPci;
/* Query PCI function request */
typedef struct ClpReqQueryPci {
ClpReqHdr hdr;
uint32_t fmt;
uint64_t reserved1;
uint32_t fh; /* function handle */
uint32_t reserved2;
uint64_t reserved3;
} QEMU_PACKED ClpReqQueryPci;
/* Query PCI function response */
typedef struct ClpRspQueryPci {
ClpRspHdr hdr;
uint32_t fmt;
uint64_t reserved1;
uint16_t vfn; /* virtual fn number */
#define CLP_RSP_QPCI_MASK_UTIL 0x100
#define CLP_RSP_QPCI_MASK_PFGID 0xff
uint16_t ug;
uint32_t fid; /* pci function id */
uint8_t bar_size[PCI_BAR_COUNT];
uint16_t pchid;
uint32_t bar[PCI_BAR_COUNT];
uint64_t reserved2;
uint64_t sdma; /* start dma as */
uint64_t edma; /* end dma as */
uint32_t reserved3[11];
uint32_t uid;
uint8_t util_str[CLP_UTIL_STR_LEN]; /* utility string */
} QEMU_PACKED ClpRspQueryPci;
/* Query PCI function group request */
typedef struct ClpReqQueryPciGrp {
ClpReqHdr hdr;
uint32_t fmt;
uint64_t reserved1;
#define CLP_REQ_QPCIG_MASK_PFGID 0xff
uint32_t g;
uint32_t reserved2;
uint64_t reserved3;
} QEMU_PACKED ClpReqQueryPciGrp;
/* Query PCI function group response */
typedef struct ClpRspQueryPciGrp {
ClpRspHdr hdr;
uint32_t fmt;
uint64_t reserved1;
#define CLP_RSP_QPCIG_MASK_NOI 0xfff
uint16_t i;
uint8_t version;
#define CLP_RSP_QPCIG_MASK_FRAME 0x2
#define CLP_RSP_QPCIG_MASK_REFRESH 0x1
uint8_t fr;
uint16_t reserved2;
uint16_t mui;
uint64_t reserved3;
uint64_t dasm; /* dma address space mask */
uint64_t msia; /* MSI address */
uint64_t reserved4;
uint64_t reserved5;
} QEMU_PACKED ClpRspQueryPciGrp;
/* Set PCI function request */
typedef struct ClpReqSetPci {
ClpReqHdr hdr;
uint32_t fmt;
uint64_t reserved1;
uint32_t fh; /* function handle */
uint16_t reserved2;
uint8_t oc; /* operation controls */
uint8_t ndas; /* number of dma spaces */
uint64_t reserved3;
} QEMU_PACKED ClpReqSetPci;
/* Set PCI function response */
typedef struct ClpRspSetPci {
ClpRspHdr hdr;
uint32_t fmt;
uint64_t reserved1;
uint32_t fh; /* function handle */
uint32_t reserved3;
uint64_t reserved4;
} QEMU_PACKED ClpRspSetPci;
typedef struct ClpReqRspListPci {
ClpReqListPci request;
ClpRspListPci response;
} QEMU_PACKED ClpReqRspListPci;
typedef struct ClpReqRspSetPci {
ClpReqSetPci request;
ClpRspSetPci response;
} QEMU_PACKED ClpReqRspSetPci;
typedef struct ClpReqRspQueryPci {
ClpReqQueryPci request;
ClpRspQueryPci response;
} QEMU_PACKED ClpReqRspQueryPci;
typedef struct ClpReqRspQueryPciGrp {
ClpReqQueryPciGrp request;
ClpRspQueryPciGrp response;
} QEMU_PACKED ClpReqRspQueryPciGrp;
/* Load/Store status codes */
#define ZPCI_PCI_ST_FUNC_NOT_ENABLED 4
#define ZPCI_PCI_ST_FUNC_IN_ERR 8
#define ZPCI_PCI_ST_BLOCKED 12
#define ZPCI_PCI_ST_INSUF_RES 16
#define ZPCI_PCI_ST_INVAL_AS 20
#define ZPCI_PCI_ST_FUNC_ALREADY_ENABLED 24
#define ZPCI_PCI_ST_DMA_AS_NOT_ENABLED 28
#define ZPCI_PCI_ST_2ND_OP_IN_INV_AS 36
#define ZPCI_PCI_ST_FUNC_NOT_AVAIL 40
#define ZPCI_PCI_ST_ALREADY_IN_RQ_STATE 44
/* Load/Store return codes */
#define ZPCI_PCI_LS_OK 0
#define ZPCI_PCI_LS_ERR 1
#define ZPCI_PCI_LS_BUSY 2
#define ZPCI_PCI_LS_INVAL_HANDLE 3
/* Modify PCI Function Controls */
#define ZPCI_MOD_FC_REG_INT 2
#define ZPCI_MOD_FC_DEREG_INT 3
#define ZPCI_MOD_FC_REG_IOAT 4
#define ZPCI_MOD_FC_DEREG_IOAT 5
#define ZPCI_MOD_FC_REREG_IOAT 6
#define ZPCI_MOD_FC_RESET_ERROR 7
#define ZPCI_MOD_FC_RESET_BLOCK 9
#define ZPCI_MOD_FC_SET_MEASURE 10
/* FIB function controls */
#define ZPCI_FIB_FC_ENABLED 0x80
#define ZPCI_FIB_FC_ERROR 0x40
#define ZPCI_FIB_FC_LS_BLOCKED 0x20
#define ZPCI_FIB_FC_DMAAS_REG 0x10
/* Function Information Block */
typedef struct ZpciFib {
uint8_t fmt; /* format */
uint8_t reserved1[7];
uint8_t fc; /* function controls */
uint8_t reserved2;
uint16_t reserved3;
uint32_t reserved4;
uint64_t pba; /* PCI base address */
uint64_t pal; /* PCI address limit */
uint64_t iota; /* I/O Translation Anchor */
#define FIB_DATA_ISC(x) (((x) >> 28) & 0x7)
#define FIB_DATA_NOI(x) (((x) >> 16) & 0xfff)
#define FIB_DATA_AIBVO(x) (((x) >> 8) & 0x3f)
#define FIB_DATA_SUM(x) (((x) >> 7) & 0x1)
#define FIB_DATA_AISBO(x) ((x) & 0x3f)
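/* Example decode (value assumed): data 0x30200304 unpacks to ISC 3,
 * NOI 0x020 (32 interrupts), AIBVO 0x03, SUM 0, AISBO 0x04. */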
uint32_t data;
uint32_t reserved5;
uint64_t aibv; /* Adapter int bit vector address */
uint64_t aisb; /* Adapter int summary bit address */
uint64_t fmb_addr; /* Function measurement address and key */
uint32_t reserved6;
uint32_t gd;
} QEMU_PACKED ZpciFib;
int clp_service_call(S390CPU *cpu, uint8_t r2);
int pcilg_service_call(S390CPU *cpu, uint8_t r1, uint8_t r2);
int pcistg_service_call(S390CPU *cpu, uint8_t r1, uint8_t r2);
int rpcit_service_call(S390CPU *cpu, uint8_t r1, uint8_t r2);
int pcistb_service_call(S390CPU *cpu, uint8_t r1, uint8_t r3, uint64_t gaddr);
int mpcifc_service_call(S390CPU *cpu, uint8_t r1, uint64_t fiba);
int stpcifc_service_call(S390CPU *cpu, uint8_t r1, uint64_t fiba);
#endif

hw/s390x/s390-virtio-ccw.c

@@ -18,6 +18,7 @@
#include "css.h"
#include "virtio-ccw.h"
#include "qemu/config-file.h"
#include "s390-pci-bus.h"
#define TYPE_S390_CCW_MACHINE "s390-ccw-machine"
@@ -91,6 +92,7 @@ static void ccw_init(MachineState *machine)
uint8_t *storage_keys;
int ret;
VirtualCssBus *css_bus;
DeviceState *dev;
QemuOpts *opts = qemu_opts_find(qemu_find_opts("memory"), NULL);
ram_addr_t pad_size = 0;
ram_addr_t maxmem = qemu_opt_get_size(opts, "maxmem", my_ram_size);
@@ -127,6 +129,11 @@ static void ccw_init(MachineState *machine)
machine->initrd_filename, "s390-ccw.img");
s390_flic_init();
dev = qdev_create(NULL, TYPE_S390_PCI_HOST_BRIDGE);
object_property_add_child(qdev_get_machine(), TYPE_S390_PCI_HOST_BRIDGE,
OBJECT(dev), NULL);
qdev_init_nofail(dev);
/* register hypercalls */
virtio_ccw_register_hcalls();
@@ -181,7 +188,7 @@ static void ccw_machine_class_init(ObjectClass *oc, void *data)
mc->no_serial = 1;
mc->no_parallel = 1;
mc->no_sdcard = 1;
mc->use_sclp = 1,
mc->use_sclp = 1;
mc->max_cpus = 255;
nc->nmi_monitor_handler = s390_nmi;
}

hw/s390x/sclp.c

@@ -20,6 +20,7 @@
#include "qemu/config-file.h"
#include "hw/s390x/sclp.h"
#include "hw/s390x/event-facility.h"
#include "hw/s390x/s390-pci-bus.h"
static inline SCLPEventFacility *get_event_facility(void)
{
@@ -62,7 +63,8 @@ static void read_SCP_info(SCCB *sccb)
read_info->entries[i].type = 0;
}
read_info->facilities = cpu_to_be64(SCLP_HAS_CPU_INFO);
read_info->facilities = cpu_to_be64(SCLP_HAS_CPU_INFO |
SCLP_HAS_PCI_RECONFIG);
/*
* The storage increment size is a multiple of 1M and is a power of 2.
@@ -350,6 +352,12 @@ static void sclp_execute(SCCB *sccb, uint32_t code)
case SCLP_UNASSIGN_STORAGE:
unassign_storage(sccb);
break;
case SCLP_CMDW_CONFIGURE_PCI:
s390_pci_sclp_configure(1, sccb);
break;
case SCLP_CMDW_DECONFIGURE_PCI:
s390_pci_sclp_configure(0, sccb);
break;
default:
efc->command_handler(ef, sccb, code);
break;

include/hw/s390x/sclp.h

@@ -43,14 +43,22 @@
#define SCLP_CMDW_CONFIGURE_CPU 0x00110001
#define SCLP_CMDW_DECONFIGURE_CPU 0x00100001
/* SCLP PCI codes */
#define SCLP_HAS_PCI_RECONFIG 0x0000000040000000ULL
#define SCLP_CMDW_CONFIGURE_PCI 0x001a0001
#define SCLP_CMDW_DECONFIGURE_PCI 0x001b0001
#define SCLP_RECONFIG_PCI_ATPYE 2
/* SCLP response codes */
#define SCLP_RC_NORMAL_READ_COMPLETION 0x0010
#define SCLP_RC_NORMAL_COMPLETION 0x0020
#define SCLP_RC_SCCB_BOUNDARY_VIOLATION 0x0100
#define SCLP_RC_NO_ACTION_REQUIRED 0x0120
#define SCLP_RC_INVALID_SCLP_COMMAND 0x01f0
#define SCLP_RC_CONTAINED_EQUIPMENT_CHECK 0x0340
#define SCLP_RC_INSUFFICIENT_SCCB_LENGTH 0x0300
#define SCLP_RC_STANDBY_READ_COMPLETION 0x0410
#define SCLP_RC_ADAPTER_ID_NOT_RECOGNIZED 0x09f0
#define SCLP_RC_INVALID_FUNCTION 0x40f0
#define SCLP_RC_NO_EVENT_BUFFERS_STORED 0x60f0
#define SCLP_RC_INVALID_SELECTION_MASK 0x70f0

include/sysemu/kvm.h

@@ -158,6 +158,7 @@ extern bool kvm_readonly_mem_allowed;
struct kvm_run;
struct kvm_lapic_state;
struct kvm_irq_routing_entry;
typedef struct KVMCapabilityInfo {
const char *name;
@@ -270,6 +271,9 @@ int kvm_arch_on_sigbus(int code, void *addr);
void kvm_arch_init_irq_routing(KVMState *s);
int kvm_arch_fixup_msi_route(struct kvm_irq_routing_entry *route,
uint64_t address, uint32_t data);
int kvm_set_irq(KVMState *s, int irq, int level);
int kvm_irqchip_send_msi(KVMState *s, MSIMessage msg);

kvm-all.c

@@ -1225,6 +1225,10 @@ int kvm_irqchip_add_msi_route(KVMState *s, MSIMessage msg)
kroute.u.msi.address_lo = (uint32_t)msg.address;
kroute.u.msi.address_hi = msg.address >> 32;
kroute.u.msi.data = le32_to_cpu(msg.data);
if (kvm_arch_fixup_msi_route(&kroute, msg.address, msg.data)) {
kvm_irqchip_release_virq(s, virq);
return -EINVAL;
}
kvm_add_routing_entry(s, &kroute);
kvm_irqchip_commit_routes(s);
@@ -1250,6 +1254,9 @@ int kvm_irqchip_update_msi_route(KVMState *s, int virq, MSIMessage msg)
kroute.u.msi.address_lo = (uint32_t)msg.address;
kroute.u.msi.address_hi = msg.address >> 32;
kroute.u.msi.data = le32_to_cpu(msg.data);
if (kvm_arch_fixup_msi_route(&kroute, msg.address, msg.data)) {
return -EINVAL;
}
return kvm_update_routing_entry(s, &kroute);
}

target-arm/kvm.c

@@ -548,3 +548,9 @@ int kvm_arch_irqchip_create(KVMState *s)
return 0;
}
int kvm_arch_fixup_msi_route(struct kvm_irq_routing_entry *route,
uint64_t address, uint32_t data)
{
return 0;
}

target-i386/kvm.c

@@ -2733,3 +2733,9 @@ int kvm_device_msix_deassign(KVMState *s, uint32_t dev_id)
return kvm_deassign_irq_internal(s, dev_id, KVM_DEV_IRQ_GUEST_MSIX |
KVM_DEV_IRQ_HOST_MSIX);
}
int kvm_arch_fixup_msi_route(struct kvm_irq_routing_entry *route,
uint64_t address, uint32_t data)
{
return 0;
}

target-mips/kvm.c

@@ -688,3 +688,9 @@ int kvm_arch_get_registers(CPUState *cs)
return ret;
}
int kvm_arch_fixup_msi_route(struct kvm_irq_routing_entry *route,
uint64_t address, uint32_t data)
{
return 0;
}

target-ppc/kvm.c

@@ -2402,3 +2402,9 @@ out_close:
error_out:
return;
}
int kvm_arch_fixup_msi_route(struct kvm_irq_routing_entry *route,
uint64_t address, uint32_t data)
{
return 0;
}

target-s390x/cpu.h

@@ -457,87 +457,6 @@ int css_enable_mss(void);
int css_do_rsch(SubchDev *sch);
int css_do_rchp(uint8_t cssid, uint8_t chpid);
bool css_present(uint8_t cssid);
#else
static inline SubchDev *css_find_subch(uint8_t m, uint8_t cssid, uint8_t ssid,
uint16_t schid)
{
return NULL;
}
static inline bool css_subch_visible(SubchDev *sch)
{
return false;
}
static inline void css_conditional_io_interrupt(SubchDev *sch)
{
}
static inline int css_do_stsch(SubchDev *sch, SCHIB *schib)
{
return -ENODEV;
}
static inline bool css_schid_final(uint8_t cssid, uint8_t ssid, uint16_t schid)
{
return true;
}
static inline int css_do_msch(SubchDev *sch, SCHIB *schib)
{
return -ENODEV;
}
static inline int css_do_xsch(SubchDev *sch)
{
return -ENODEV;
}
static inline int css_do_csch(SubchDev *sch)
{
return -ENODEV;
}
static inline int css_do_hsch(SubchDev *sch)
{
return -ENODEV;
}
static inline int css_do_ssch(SubchDev *sch, ORB *orb)
{
return -ENODEV;
}
static inline int css_do_tsch(SubchDev *sch, IRB *irb)
{
return -ENODEV;
}
static inline int css_do_stcrw(CRW *crw)
{
return 1;
}
static inline int css_do_tpi(IOIntCode *int_code, int lowcore)
{
return 0;
}
static inline int css_collect_chp_desc(int m, uint8_t cssid, uint8_t f_chpid,
int rfmt, uint8_t l_chpid, void *buf)
{
return 0;
}
static inline void css_do_schm(uint8_t mbk, int update, int dct, uint64_t mbo)
{
}
static inline int css_enable_mss(void)
{
return -EINVAL;
}
static inline int css_enable_mcsse(void)
{
return -EINVAL;
}
static inline int css_do_rsch(SubchDev *sch)
{
return -ENODEV;
}
static inline int css_do_rchp(uint8_t cssid, uint8_t chpid)
{
return -ENODEV;
}
static inline bool css_present(uint8_t cssid)
{
return false;
}
#endif
#define cpu_init(model) (&cpu_s390x_init(model)->env)

target-s390x/ioinst.c

@@ -14,6 +14,7 @@
#include "cpu.h"
#include "ioinst.h"
#include "trace.h"
#include "hw/s390x/s390-pci-bus.h"
int ioinst_disassemble_sch_ident(uint32_t value, int *m, int *cssid, int *ssid,
int *schid)
@@ -398,6 +399,7 @@ typedef struct ChscResp {
#define CHSC_SCPD 0x0002
#define CHSC_SCSC 0x0010
#define CHSC_SDA 0x0031
#define CHSC_SEI 0x000e
#define CHSC_SCPD_0_M 0x20000000
#define CHSC_SCPD_0_C 0x10000000
@@ -566,6 +568,53 @@ out:
res->param = 0;
}
static int chsc_sei_nt0_get_event(void *res)
{
/* no events yet */
return 1;
}
static int chsc_sei_nt0_have_event(void)
{
/* no events yet */
return 0;
}
#define CHSC_SEI_NT0 (1ULL << 63)
#define CHSC_SEI_NT2 (1ULL << 61)
static void ioinst_handle_chsc_sei(ChscReq *req, ChscResp *res)
{
uint64_t selection_mask = ldq_p(&req->param1);
uint8_t *res_flags = (uint8_t *)res->data;
int have_event = 0;
int have_more = 0;
/* regarding architecture nt0 can not be masked */
have_event = !chsc_sei_nt0_get_event(res);
have_more = chsc_sei_nt0_have_event();
if (selection_mask & CHSC_SEI_NT2) {
if (!have_event) {
have_event = !chsc_sei_nt2_get_event(res);
}
if (!have_more) {
have_more = chsc_sei_nt2_have_event();
}
}
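/*
 * Response code 0x0001 means event information was stored, 0x0004 that
 * no event is pending; flag bit 0x80 tells the guest that further
 * events are queued.
 */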
if (have_event) {
res->code = cpu_to_be16(0x0001);
if (have_more) {
(*res_flags) |= 0x80;
} else {
(*res_flags) &= ~0x80;
}
} else {
res->code = cpu_to_be16(0x0004);
}
}
static void ioinst_handle_chsc_unimplemented(ChscResp *res)
{
res->len = cpu_to_be16(CHSC_MIN_RESP_LEN);
@@ -617,6 +666,9 @@ void ioinst_handle_chsc(S390CPU *cpu, uint32_t ipb)
case CHSC_SDA:
ioinst_handle_chsc_sda(req, res);
break;
case CHSC_SEI:
ioinst_handle_chsc_sei(req, res);
break;
default:
ioinst_handle_chsc_unimplemented(res);
break;

View file

@@ -204,6 +204,7 @@ typedef struct CRW {
#define CRW_RSC_SUBCH 0x3
#define CRW_RSC_CHP 0x4
#define CRW_RSC_CSS 0xb
/* I/O interruption code */
typedef struct IOIntCode {

View file

@@ -40,6 +40,8 @@
#include "exec/gdbstub.h"
#include "trace.h"
#include "qapi-event.h"
#include "hw/s390x/s390-pci-inst.h"
#include "hw/s390x/s390-pci-bus.h"
/* #define DEBUG_KVM */
@@ -56,6 +58,7 @@
#define IPA0_B2 0xb200
#define IPA0_B9 0xb900
#define IPA0_EB 0xeb00
#define IPA0_E3 0xe300
#define PRIV_B2_SCLP_CALL 0x20
#define PRIV_B2_CSCH 0x30
@@ -76,8 +79,17 @@
#define PRIV_B2_XSCH 0x76
#define PRIV_EB_SQBS 0x8a
#define PRIV_EB_PCISTB 0xd0 /* PCI store block */
#define PRIV_EB_SIC 0xd1 /* set interruption controls */
#define PRIV_B9_EQBS 0x9c
#define PRIV_B9_CLP 0xa0 /* call logical processor */
#define PRIV_B9_PCISTG 0xd0 /* store PCI */
#define PRIV_B9_PCILG 0xd2 /* load PCI */
#define PRIV_B9_RPCIT 0xd3 /* refresh PCI translations */
#define PRIV_E3_MPCIFC 0xd0 /* modify PCI function controls */
#define PRIV_E3_STPCIFC 0xd4 /* store PCI function controls */
#define DIAG_IPL 0x308
#define DIAG_KVM_HYPERCALL 0x500
@@ -202,6 +214,11 @@ void kvm_s390_reset_vcpu(S390CPU *cpu)
}
}
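/* check whether all register sets in @regs can be synced via the kvm_run area */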
static int can_sync_regs(CPUState *cs, int regs)
{
return cap_sync_regs && (cs->kvm_run->kvm_valid_regs & regs) == regs;
}
int kvm_arch_put_registers(CPUState *cs, int level)
{
S390CPU *cpu = S390_CPU(cs);
@@ -216,7 +233,7 @@ int kvm_arch_put_registers(CPUState *cs, int level)
cs->kvm_run->psw_addr = env->psw.addr;
cs->kvm_run->psw_mask = env->psw.mask;
if (cap_sync_regs && cs->kvm_run->kvm_valid_regs & KVM_SYNC_GPRS) {
if (can_sync_regs(cs, KVM_SYNC_GPRS)) {
for (i = 0; i < 16; i++) {
cs->kvm_run->s.regs.gprs[i] = env->regs[i];
cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_GPRS;
@@ -247,18 +264,33 @@ int kvm_arch_put_registers(CPUState *cs, int level)
return 0;
}
/*
* These ONE_REGS are not protected by a capability. As they are only
* necessary for migration, we just trace a possible error, but don't
* return with an error return code.
*/
kvm_set_one_reg(cs, KVM_REG_S390_CPU_TIMER, &env->cputm);
kvm_set_one_reg(cs, KVM_REG_S390_CLOCK_COMP, &env->ckc);
kvm_set_one_reg(cs, KVM_REG_S390_TODPR, &env->todpr);
kvm_set_one_reg(cs, KVM_REG_S390_GBEA, &env->gbea);
kvm_set_one_reg(cs, KVM_REG_S390_PP, &env->pp);
if (can_sync_regs(cs, KVM_SYNC_ARCH0)) {
cs->kvm_run->s.regs.cputm = env->cputm;
cs->kvm_run->s.regs.ckc = env->ckc;
cs->kvm_run->s.regs.todpr = env->todpr;
cs->kvm_run->s.regs.gbea = env->gbea;
cs->kvm_run->s.regs.pp = env->pp;
cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_ARCH0;
} else {
/*
* These ONE_REGS are not protected by a capability. As they are only
* necessary for migration, we just trace a possible error, but don't
* return with an error return code.
*/
kvm_set_one_reg(cs, KVM_REG_S390_CPU_TIMER, &env->cputm);
kvm_set_one_reg(cs, KVM_REG_S390_CLOCK_COMP, &env->ckc);
kvm_set_one_reg(cs, KVM_REG_S390_TODPR, &env->todpr);
kvm_set_one_reg(cs, KVM_REG_S390_GBEA, &env->gbea);
kvm_set_one_reg(cs, KVM_REG_S390_PP, &env->pp);
}
if (cap_async_pf) {
/* pfault parameters */
if (can_sync_regs(cs, KVM_SYNC_PFAULT)) {
cs->kvm_run->s.regs.pft = env->pfault_token;
cs->kvm_run->s.regs.pfs = env->pfault_select;
cs->kvm_run->s.regs.pfc = env->pfault_compare;
cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_PFAULT;
} else if (cap_async_pf) {
r = kvm_set_one_reg(cs, KVM_REG_S390_PFTOKEN, &env->pfault_token);
if (r < 0) {
return r;
@@ -273,9 +305,8 @@ int kvm_arch_put_registers(CPUState *cs, int level)
}
}
if (cap_sync_regs &&
cs->kvm_run->kvm_valid_regs & KVM_SYNC_ACRS &&
cs->kvm_run->kvm_valid_regs & KVM_SYNC_CRS) {
/* access registers and control registers */
if (can_sync_regs(cs, KVM_SYNC_ACRS | KVM_SYNC_CRS)) {
for (i = 0; i < 16; i++) {
cs->kvm_run->s.regs.acrs[i] = env->aregs[i];
cs->kvm_run->s.regs.crs[i] = env->cregs[i];
@@ -294,7 +325,7 @@ int kvm_arch_put_registers(CPUState *cs, int level)
}
/* Finally the prefix */
if (cap_sync_regs && cs->kvm_run->kvm_valid_regs & KVM_SYNC_PREFIX) {
if (can_sync_regs(cs, KVM_SYNC_PREFIX)) {
cs->kvm_run->s.regs.prefix = env->psa;
cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_PREFIX;
} else {
@@ -317,7 +348,7 @@ int kvm_arch_get_registers(CPUState *cs)
env->psw.mask = cs->kvm_run->psw_mask;
/* the GPRS */
if (cap_sync_regs && cs->kvm_run->kvm_valid_regs & KVM_SYNC_GPRS) {
if (can_sync_regs(cs, KVM_SYNC_GPRS)) {
for (i = 0; i < 16; i++) {
env->regs[i] = cs->kvm_run->s.regs.gprs[i];
}
@@ -332,9 +363,7 @@ int kvm_arch_get_registers(CPUState *cs)
}
/* The ACRS and CRS */
if (cap_sync_regs &&
cs->kvm_run->kvm_valid_regs & KVM_SYNC_ACRS &&
cs->kvm_run->kvm_valid_regs & KVM_SYNC_CRS) {
if (can_sync_regs(cs, KVM_SYNC_ACRS | KVM_SYNC_CRS)) {
for (i = 0; i < 16; i++) {
env->aregs[i] = cs->kvm_run->s.regs.acrs[i];
env->cregs[i] = cs->kvm_run->s.regs.crs[i];
@@ -361,22 +390,35 @@ int kvm_arch_get_registers(CPUState *cs)
env->fpc = fpu.fpc;
/* The prefix */
if (cap_sync_regs && cs->kvm_run->kvm_valid_regs & KVM_SYNC_PREFIX) {
if (can_sync_regs(cs, KVM_SYNC_PREFIX)) {
env->psa = cs->kvm_run->s.regs.prefix;
}
/*
* These ONE_REGS are not protected by a capability. As they are only
* necessary for migration, we just trace a possible error, but don't
* return with an error return code.
*/
kvm_get_one_reg(cs, KVM_REG_S390_CPU_TIMER, &env->cputm);
kvm_get_one_reg(cs, KVM_REG_S390_CLOCK_COMP, &env->ckc);
kvm_get_one_reg(cs, KVM_REG_S390_TODPR, &env->todpr);
kvm_get_one_reg(cs, KVM_REG_S390_GBEA, &env->gbea);
kvm_get_one_reg(cs, KVM_REG_S390_PP, &env->pp);
if (can_sync_regs(cs, KVM_SYNC_ARCH0)) {
env->cputm = cs->kvm_run->s.regs.cputm;
env->ckc = cs->kvm_run->s.regs.ckc;
env->todpr = cs->kvm_run->s.regs.todpr;
env->gbea = cs->kvm_run->s.regs.gbea;
env->pp = cs->kvm_run->s.regs.pp;
} else {
/*
* These ONE_REGS are not protected by a capability. As they are only
* necessary for migration, we just trace a possible error, but don't
* return with an error return code.
*/
kvm_get_one_reg(cs, KVM_REG_S390_CPU_TIMER, &env->cputm);
kvm_get_one_reg(cs, KVM_REG_S390_CLOCK_COMP, &env->ckc);
kvm_get_one_reg(cs, KVM_REG_S390_TODPR, &env->todpr);
kvm_get_one_reg(cs, KVM_REG_S390_GBEA, &env->gbea);
kvm_get_one_reg(cs, KVM_REG_S390_PP, &env->pp);
}
if (cap_async_pf) {
/* pfault parameters */
if (can_sync_regs(cs, KVM_SYNC_PFAULT)) {
env->pfault_token = cs->kvm_run->s.regs.pft;
env->pfault_select = cs->kvm_run->s.regs.pfs;
env->pfault_compare = cs->kvm_run->s.regs.pfc;
} else if (cap_async_pf) {
r = kvm_get_one_reg(cs, KVM_REG_S390_PFTOKEN, &env->pfault_token);
if (r < 0) {
return r;
@@ -809,11 +851,124 @@ static int handle_b2(S390CPU *cpu, struct kvm_run *run, uint8_t ipa1)
return rc;
}
static uint64_t get_base_disp_rxy(S390CPU *cpu, struct kvm_run *run)
{
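/*
 * RXY instruction format: the index register number is encoded in the
 * IPA; the base register number and the two halves of the signed
 * 20-bit displacement are encoded in the IPB.
 */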
CPUS390XState *env = &cpu->env;
uint32_t x2 = (run->s390_sieic.ipa & 0x000f);
uint32_t base2 = run->s390_sieic.ipb >> 28;
uint32_t disp2 = ((run->s390_sieic.ipb & 0x0fff0000) >> 16) +
((run->s390_sieic.ipb & 0xff00) << 4);
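/* sign-extend the 20-bit displacement */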
if (disp2 & 0x80000) {
disp2 += 0xfff00000;
}
return (base2 ? env->regs[base2] : 0) +
(x2 ? env->regs[x2] : 0) + (long)(int)disp2;
}
static uint64_t get_base_disp_rsy(S390CPU *cpu, struct kvm_run *run)
{
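/* RSY instruction format: like RXY, but without an index register */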
CPUS390XState *env = &cpu->env;
uint32_t base2 = run->s390_sieic.ipb >> 28;
uint32_t disp2 = ((run->s390_sieic.ipb & 0x0fff0000) >> 16) +
((run->s390_sieic.ipb & 0xff00) << 4);
if (disp2 & 0x80000) {
disp2 += 0xfff00000;
}
return (base2 ? env->regs[base2] : 0) + (long)(int)disp2;
}
static int kvm_clp_service_call(S390CPU *cpu, struct kvm_run *run)
{
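/* r2 designates the register holding the address of the CLP request block */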
uint8_t r2 = (run->s390_sieic.ipb & 0x000f0000) >> 16;
return clp_service_call(cpu, r2);
}
static int kvm_pcilg_service_call(S390CPU *cpu, struct kvm_run *run)
{
uint8_t r1 = (run->s390_sieic.ipb & 0x00f00000) >> 20;
uint8_t r2 = (run->s390_sieic.ipb & 0x000f0000) >> 16;
return pcilg_service_call(cpu, r1, r2);
}
static int kvm_pcistg_service_call(S390CPU *cpu, struct kvm_run *run)
{
uint8_t r1 = (run->s390_sieic.ipb & 0x00f00000) >> 20;
uint8_t r2 = (run->s390_sieic.ipb & 0x000f0000) >> 16;
return pcistg_service_call(cpu, r1, r2);
}
static int kvm_stpcifc_service_call(S390CPU *cpu, struct kvm_run *run)
{
uint8_t r1 = (run->s390_sieic.ipa & 0x00f0) >> 4;
uint64_t fiba;
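/* sync registers so that the base/index values used for address
computation are valid */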
cpu_synchronize_state(CPU(cpu));
fiba = get_base_disp_rxy(cpu, run);
return stpcifc_service_call(cpu, r1, fiba);
}
static int kvm_sic_service_call(S390CPU *cpu, struct kvm_run *run)
{
/* SIC (set interruption controls): nothing to do, treat as a NOOP */
return 0;
}
static int kvm_rpcit_service_call(S390CPU *cpu, struct kvm_run *run)
{
uint8_t r1 = (run->s390_sieic.ipb & 0x00f00000) >> 20;
uint8_t r2 = (run->s390_sieic.ipb & 0x000f0000) >> 16;
return rpcit_service_call(cpu, r1, r2);
}
static int kvm_pcistb_service_call(S390CPU *cpu, struct kvm_run *run)
{
uint8_t r1 = (run->s390_sieic.ipa & 0x00f0) >> 4;
uint8_t r3 = run->s390_sieic.ipa & 0x000f;
uint64_t gaddr;
cpu_synchronize_state(CPU(cpu));
gaddr = get_base_disp_rsy(cpu, run);
return pcistb_service_call(cpu, r1, r3, gaddr);
}
static int kvm_mpcifc_service_call(S390CPU *cpu, struct kvm_run *run)
{
uint8_t r1 = (run->s390_sieic.ipa & 0x00f0) >> 4;
uint64_t fiba;
cpu_synchronize_state(CPU(cpu));
fiba = get_base_disp_rxy(cpu, run);
return mpcifc_service_call(cpu, r1, fiba);
}
static int handle_b9(S390CPU *cpu, struct kvm_run *run, uint8_t ipa1)
{
int r = 0;
switch (ipa1) {
case PRIV_B9_CLP:
r = kvm_clp_service_call(cpu, run);
break;
case PRIV_B9_PCISTG:
r = kvm_pcistg_service_call(cpu, run);
break;
case PRIV_B9_PCILG:
r = kvm_pcilg_service_call(cpu, run);
break;
case PRIV_B9_RPCIT:
r = kvm_rpcit_service_call(cpu, run);
break;
case PRIV_B9_EQBS:
/* just inject exception */
r = -1;
@@ -832,6 +987,12 @@ static int handle_eb(S390CPU *cpu, struct kvm_run *run, uint8_t ipbl)
int r = 0;
switch (ipbl) {
case PRIV_EB_PCISTB:
r = kvm_pcistb_service_call(cpu, run);
break;
case PRIV_EB_SIC:
r = kvm_sic_service_call(cpu, run);
break;
case PRIV_EB_SQBS:
/* just inject exception */
r = -1;
@@ -845,6 +1006,26 @@ static int handle_eb(S390CPU *cpu, struct kvm_run *run, uint8_t ipbl)
return r;
}
static int handle_e3(S390CPU *cpu, struct kvm_run *run, uint8_t ipbl)
{
int r = 0;
switch (ipbl) {
case PRIV_E3_MPCIFC:
r = kvm_mpcifc_service_call(cpu, run);
break;
case PRIV_E3_STPCIFC:
r = kvm_stpcifc_service_call(cpu, run);
break;
default:
r = -1;
DPRINTF("KVM: unhandled PRIV: 0xe3%x\n", ipbl);
break;
}
return r;
}
static int handle_hypercall(S390CPU *cpu, struct kvm_run *run)
{
CPUS390XState *env = &cpu->env;
@@ -1041,6 +1222,9 @@ static int handle_instruction(S390CPU *cpu, struct kvm_run *run)
case IPA0_EB:
r = handle_eb(cpu, run, run->s390_sieic.ipb & 0xff);
break;
case IPA0_E3:
r = handle_e3(cpu, run, run->s390_sieic.ipb & 0xff);
break;
case IPA0_DIAG:
r = handle_diag(cpu, run, run->s390_sieic.ipb);
break;
@@ -1361,3 +1545,28 @@ int kvm_s390_set_cpu_state(S390CPU *cpu, uint8_t cpu_state)
return ret;
}
int kvm_arch_fixup_msi_route(struct kvm_irq_routing_entry *route,
uint64_t address, uint32_t data)
{
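/*
 * The MSI message data encodes the zPCI function id in its upper bits
 * and the interrupt vector in the low ZPCI_MSI_VEC_BITS; map it onto
 * an s390 adapter interrupt route for that function.
 */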
S390PCIBusDevice *pbdev;
uint32_t fid = data >> ZPCI_MSI_VEC_BITS;
uint32_t vec = data & ZPCI_MSI_VEC_MASK;
pbdev = s390_pci_find_dev_by_fid(fid);
if (!pbdev) {
DPRINTF("add_msi_route no dev\n");
return -ENODEV;
}
pbdev->routes.adapter.ind_offset = vec;
route->type = KVM_IRQ_ROUTING_S390_ADAPTER;
route->flags = 0;
route->u.adapter.summary_addr = pbdev->routes.adapter.summary_addr;
route->u.adapter.ind_addr = pbdev->routes.adapter.ind_addr;
route->u.adapter.summary_offset = pbdev->routes.adapter.summary_offset;
route->u.adapter.ind_offset = pbdev->routes.adapter.ind_offset;
route->u.adapter.adapter_id = pbdev->routes.adapter.adapter_id;
return 0;
}