pc,pci,virtio: features, fixes

virtio-iommu support for x86/ACPI.
 Fixes, cleanups all over the place.
 
 Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
 -----BEGIN PGP SIGNATURE-----
 
 iQFDBAABCAAtFiEEXQn9CHHI+FuUyooNKB8NuNKNVGkFAmGAefYPHG1zdEByZWRo
 YXQuY29tAAoJECgfDbjSjVRpiCUH/2pIs3FmOGIasEqn4BnqXr4dHMReUO5Ghg0v
 cXle4ZUrbg7Qpnxh07CwMuUpJV3Qv+xtVK7hzbD13nnxrkTZuKzBRV1AthkA1Hly
 zIKOxnEgV497LaXoaSOtqAx48fuznk5XOHju91usgu4mehJ0qe2gcwb4H8uWGkQi
 hrsR7a9woP0M4H/jvb3+aQRCJKMscj8ReabM1ulOugNpPdNI/jIKtBvZBtTxAqtQ
 CH9/DJLfVmzDRYdeBpnF06A+tXm4uU1Q5BmpmF9qaymk/PzthN54gdnDd6zH405Z
 Tmjp9UA2xfEYDmKzuTCBdPmoUe6OI7mU9o0WbB5MGYx5RRRBETw=
 =R7DD
 -----END PGP SIGNATURE-----

Merge remote-tracking branch 'remotes/mst/tags/for_upstream' into staging

pc,pci,virtio: features, fixes

virtio-iommu support for x86/ACPI.
Fixes, cleanups all over the place.

Signed-off-by: Michael S. Tsirkin <mst@redhat.com>

# gpg: Signature made Mon 01 Nov 2021 07:36:22 PM EDT
# gpg:                using RSA key 5D09FD0871C8F85B94CA8A0D281F0DB8D28D5469
# gpg:                issuer "mst@redhat.com"
# gpg: Good signature from "Michael S. Tsirkin <mst@kernel.org>" [full]
# gpg:                 aka "Michael S. Tsirkin <mst@redhat.com>" [full]

* remotes/mst/tags/for_upstream:
  hw/i386: fix vmmouse registration
  pci: Export pci_for_each_device_under_bus*()
  pci: Define pci_bus_dev_fn/pci_bus_fn/pci_bus_ret_fn
  hw/i386/pc: Allow instantiating a virtio-iommu device
  hw/i386/pc: Move IOMMU singleton into PCMachineState
  hw/i386/pc: Remove x86_iommu_get_type()
  hw/acpi: Add VIOT table
  vhost-vdpa: Set discarding of RAM broken when initializing the backend
  qtest: fix 'expression is always false' build failure in qtest_has_accel()

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>

commit 7fa736595e (staging)
Richard Henderson, 2021-11-02 19:24:17 -04:00
24 changed files with 239 additions and 109 deletions

@@ -51,6 +51,10 @@ config ACPI_VMGENID
default y
depends on PC
config ACPI_VIOT
bool
depends on ACPI
config ACPI_HW_REDUCED
bool
select ACPI

@@ -20,6 +20,7 @@ acpi_ss.add(when: 'CONFIG_ACPI_APEI', if_true: files('ghes.c'), if_false: files(
acpi_ss.add(when: 'CONFIG_ACPI_PIIX4', if_true: files('piix4.c'))
acpi_ss.add(when: 'CONFIG_ACPI_PCIHP', if_true: files('pcihp.c'))
acpi_ss.add(when: 'CONFIG_ACPI_PCIHP', if_false: files('acpi-pci-hotplug-stub.c'))
acpi_ss.add(when: 'CONFIG_ACPI_VIOT', if_true: files('viot.c'))
acpi_ss.add(when: 'CONFIG_ACPI_X86_ICH', if_true: files('ich9.c', 'tco.c'))
acpi_ss.add(when: 'CONFIG_IPMI', if_true: files('ipmi.c'), if_false: files('ipmi-stub.c'))
acpi_ss.add(when: 'CONFIG_PC', if_false: files('acpi-x86-stub.c'))

hw/acpi/viot.c (new file, mode 100644, 114 lines added)
@@ -0,0 +1,114 @@
/*
* ACPI Virtual I/O Translation table implementation
*
* SPDX-License-Identifier: GPL-2.0-or-later
*/
#include "qemu/osdep.h"
#include "hw/acpi/acpi.h"
#include "hw/acpi/aml-build.h"
#include "hw/acpi/viot.h"
#include "hw/pci/pci.h"
#include "hw/pci/pci_host.h"
struct viot_pci_ranges {
GArray *blob;
size_t count;
uint16_t output_node;
};
/* Build PCI range for a given PCI host bridge */
static int build_pci_range_node(Object *obj, void *opaque)
{
struct viot_pci_ranges *pci_ranges = opaque;
GArray *blob = pci_ranges->blob;
if (object_dynamic_cast(obj, TYPE_PCI_HOST_BRIDGE)) {
PCIBus *bus = PCI_HOST_BRIDGE(obj)->bus;
if (bus && !pci_bus_bypass_iommu(bus)) {
int min_bus, max_bus;
pci_bus_range(bus, &min_bus, &max_bus);
/* Type */
build_append_int_noprefix(blob, 1 /* PCI range */, 1);
/* Reserved */
build_append_int_noprefix(blob, 0, 1);
/* Length */
build_append_int_noprefix(blob, 24, 2);
/* Endpoint start */
build_append_int_noprefix(blob, PCI_BUILD_BDF(min_bus, 0), 4);
/* PCI Segment start */
build_append_int_noprefix(blob, 0, 2);
/* PCI Segment end */
build_append_int_noprefix(blob, 0, 2);
/* PCI BDF start */
build_append_int_noprefix(blob, PCI_BUILD_BDF(min_bus, 0), 2);
/* PCI BDF end */
build_append_int_noprefix(blob, PCI_BUILD_BDF(max_bus, 0xff), 2);
/* Output node */
build_append_int_noprefix(blob, pci_ranges->output_node, 2);
/* Reserved */
build_append_int_noprefix(blob, 0, 6);
pci_ranges->count++;
}
}
return 0;
}
/*
* Generate a VIOT table with one PCI-based virtio-iommu that manages PCI
* endpoints.
*
* Defined in the ACPI Specification (Version TBD)
*/
void build_viot(MachineState *ms, GArray *table_data, BIOSLinker *linker,
uint16_t virtio_iommu_bdf, const char *oem_id,
const char *oem_table_id)
{
/* The virtio-iommu node follows the 48-bytes header */
int viommu_off = 48;
AcpiTable table = { .sig = "VIOT", .rev = 0,
.oem_id = oem_id, .oem_table_id = oem_table_id };
struct viot_pci_ranges pci_ranges = {
.output_node = viommu_off,
.blob = g_array_new(false, true /* clear */, 1),
};
/* Build the list of PCI ranges that this viommu manages */
object_child_foreach_recursive(OBJECT(ms), build_pci_range_node,
&pci_ranges);
/* ACPI table header */
acpi_table_begin(&table, table_data);
/* Node count */
build_append_int_noprefix(table_data, pci_ranges.count + 1, 2);
/* Node offset */
build_append_int_noprefix(table_data, viommu_off, 2);
/* Reserved */
build_append_int_noprefix(table_data, 0, 8);
/* Virtio-iommu node */
/* Type */
build_append_int_noprefix(table_data, 3 /* virtio-pci IOMMU */, 1);
/* Reserved */
build_append_int_noprefix(table_data, 0, 1);
/* Length */
build_append_int_noprefix(table_data, 16, 2);
/* PCI Segment */
build_append_int_noprefix(table_data, 0, 2);
/* PCI BDF number */
build_append_int_noprefix(table_data, virtio_iommu_bdf, 2);
/* Reserved */
build_append_int_noprefix(table_data, 0, 8);
/* PCI ranges found above */
g_array_append_vals(table_data, pci_ranges.blob->data,
pci_ranges.blob->len);
g_array_free(pci_ranges.blob, true);
acpi_table_end(linker, &table);
}
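
For orientation, a rough sketch of the table layout the builder above emits (derived from the append calls; the standard ACPI table header is 36 bytes, which is why the first node starts at offset 48):

/*
 * VIOT layout produced by build_viot() (sketch, not normative):
 *
 *   0 .. 35   ACPI table header ("VIOT", revision 0)
 *  36 .. 37   Node count  = 1 + number of PCI range nodes
 *  38 .. 39   Node offset = 48
 *  40 .. 47   Reserved
 *  48 .. 63   virtio-pci IOMMU node: type 3, length 16, PCI segment, BDF
 *  64 ..      one 24-byte PCI range node (type 1) per host bridge that does
 *             not bypass the IOMMU
 */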

hw/acpi/viot.h (new file, mode 100644, 13 lines added)
@@ -0,0 +1,13 @@
/*
* ACPI Virtual I/O Translation Table implementation
*
* SPDX-License-Identifier: GPL-2.0-or-later
*/
#ifndef VIOT_H
#define VIOT_H
void build_viot(MachineState *ms, GArray *table_data, BIOSLinker *linker,
uint16_t virtio_iommu_bdf, const char *oem_id,
const char *oem_table_id);
#endif /* VIOT_H */

@@ -59,6 +59,7 @@ config PC_ACPI
select ACPI_X86
select ACPI_CPU_HOTPLUG
select ACPI_MEMORY_HOTPLUG
select ACPI_VIOT
select SMBUS_EEPROM
select PFLASH_CFI01
depends on ACPI_SMBUS

@@ -68,9 +68,11 @@
#include "qom/qom-qobject.h"
#include "hw/i386/amd_iommu.h"
#include "hw/i386/intel_iommu.h"
#include "hw/virtio/virtio-iommu.h"
#include "hw/acpi/ipmi.h"
#include "hw/acpi/hmat.h"
#include "hw/acpi/viot.h"
/* These are used to size the ACPI tables for -M pc-i440fx-1.7 and
* -M pc-i440fx-2.0. Even if the actual amount of AML generated grows
@@ -2132,8 +2134,7 @@ dmar_host_bridges(Object *obj, void *opaque)
PCIBus *bus = PCI_HOST_BRIDGE(obj)->bus;
if (bus && !pci_bus_bypass_iommu(bus)) {
pci_for_each_device(bus, pci_bus_num(bus), insert_scope,
scope_blob);
pci_for_each_device_under_bus(bus, insert_scope, scope_blob);
}
}
@@ -2339,7 +2340,7 @@ ivrs_host_bridges(Object *obj, void *opaque)
PCIBus *bus = PCI_HOST_BRIDGE(obj)->bus;
if (bus && !pci_bus_bypass_iommu(bus)) {
pci_for_each_device(bus, pci_bus_num(bus), insert_ivhd, ivhd_blob);
pci_for_each_device_under_bus(bus, insert_ivhd, ivhd_blob);
}
}
@@ -2488,6 +2489,7 @@ void acpi_build(AcpiBuildTables *tables, MachineState *machine)
PCMachineState *pcms = PC_MACHINE(machine);
PCMachineClass *pcmc = PC_MACHINE_GET_CLASS(pcms);
X86MachineState *x86ms = X86_MACHINE(machine);
DeviceState *iommu = pcms->iommu;
GArray *table_offsets;
unsigned facs, dsdt, rsdt, fadt;
AcpiPmInfo pm;
@@ -2604,17 +2606,20 @@ void acpi_build(AcpiBuildTables *tables, MachineState *machine)
build_mcfg(tables_blob, tables->linker, &mcfg, x86ms->oem_id,
x86ms->oem_table_id);
}
if (x86_iommu_get_default()) {
IommuType IOMMUType = x86_iommu_get_type();
if (IOMMUType == TYPE_AMD) {
acpi_add_table(table_offsets, tables_blob);
build_amd_iommu(tables_blob, tables->linker, x86ms->oem_id,
x86ms->oem_table_id);
} else if (IOMMUType == TYPE_INTEL) {
acpi_add_table(table_offsets, tables_blob);
build_dmar_q35(tables_blob, tables->linker, x86ms->oem_id,
x86ms->oem_table_id);
}
if (object_dynamic_cast(OBJECT(iommu), TYPE_AMD_IOMMU_DEVICE)) {
acpi_add_table(table_offsets, tables_blob);
build_amd_iommu(tables_blob, tables->linker, x86ms->oem_id,
x86ms->oem_table_id);
} else if (object_dynamic_cast(OBJECT(iommu), TYPE_INTEL_IOMMU_DEVICE)) {
acpi_add_table(table_offsets, tables_blob);
build_dmar_q35(tables_blob, tables->linker, x86ms->oem_id,
x86ms->oem_table_id);
} else if (object_dynamic_cast(OBJECT(iommu), TYPE_VIRTIO_IOMMU_PCI)) {
PCIDevice *pdev = PCI_DEVICE(iommu);
acpi_add_table(table_offsets, tables_blob);
build_viot(machine, tables_blob, tables->linker, pci_get_bdf(pdev),
x86ms->oem_id, x86ms->oem_table_id);
}
if (machine->nvdimms_state->is_enabled) {
nvdimm_build_acpi(table_offsets, tables_blob, tables->linker,

@@ -1538,7 +1538,6 @@ static void amdvi_sysbus_realize(DeviceState *dev, Error **errp)
{
int ret = 0;
AMDVIState *s = AMD_IOMMU_DEVICE(dev);
X86IOMMUState *x86_iommu = X86_IOMMU_DEVICE(dev);
MachineState *ms = MACHINE(qdev_get_machine());
PCMachineState *pcms = PC_MACHINE(ms);
X86MachineState *x86ms = X86_MACHINE(ms);
@@ -1548,7 +1547,6 @@ static void amdvi_sysbus_realize(DeviceState *dev, Error **errp)
amdvi_uint64_equal, g_free, g_free);
/* This device should take care of IOMMU PCI properties */
x86_iommu->type = TYPE_AMD;
if (!qdev_realize(DEVICE(&s->pci), &bus->qbus, errp)) {
return;
}

@@ -3806,9 +3806,6 @@ static void vtd_realize(DeviceState *dev, Error **errp)
X86MachineState *x86ms = X86_MACHINE(ms);
PCIBus *bus = pcms->bus;
IntelIOMMUState *s = INTEL_IOMMU_DEVICE(dev);
X86IOMMUState *x86_iommu = X86_IOMMU_DEVICE(dev);
x86_iommu->type = TYPE_INTEL;
if (!vtd_decide_config(s, errp)) {
return;

@@ -83,6 +83,7 @@
#include "hw/i386/intel_iommu.h"
#include "hw/net/ne2000-isa.h"
#include "standard-headers/asm-x86/bootparam.h"
#include "hw/virtio/virtio-iommu.h"
#include "hw/virtio/virtio-pmem-pci.h"
#include "hw/virtio/virtio-mem-pci.h"
#include "hw/mem/memory-device.h"
@@ -1330,6 +1331,27 @@ static void pc_machine_device_pre_plug_cb(HotplugHandler *hotplug_dev,
} else if (object_dynamic_cast(OBJECT(dev), TYPE_VIRTIO_PMEM_PCI) ||
object_dynamic_cast(OBJECT(dev), TYPE_VIRTIO_MEM_PCI)) {
pc_virtio_md_pci_pre_plug(hotplug_dev, dev, errp);
} else if (object_dynamic_cast(OBJECT(dev), TYPE_VIRTIO_IOMMU_PCI)) {
/* Declare the APIC range as the reserved MSI region */
char *resv_prop_str = g_strdup_printf("0xfee00000:0xfeefffff:%d",
VIRTIO_IOMMU_RESV_MEM_T_MSI);
object_property_set_uint(OBJECT(dev), "len-reserved-regions", 1, errp);
object_property_set_str(OBJECT(dev), "reserved-regions[0]",
resv_prop_str, errp);
g_free(resv_prop_str);
}
if (object_dynamic_cast(OBJECT(dev), TYPE_X86_IOMMU_DEVICE) ||
object_dynamic_cast(OBJECT(dev), TYPE_VIRTIO_IOMMU_PCI)) {
PCMachineState *pcms = PC_MACHINE(hotplug_dev);
if (pcms->iommu) {
error_setg(errp, "QEMU does not support multiple vIOMMUs "
"for x86 yet.");
return;
}
pcms->iommu = dev;
}
}
@@ -1384,7 +1406,9 @@ static HotplugHandler *pc_get_hotplug_handler(MachineState *machine,
if (object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM) ||
object_dynamic_cast(OBJECT(dev), TYPE_CPU) ||
object_dynamic_cast(OBJECT(dev), TYPE_VIRTIO_PMEM_PCI) ||
object_dynamic_cast(OBJECT(dev), TYPE_VIRTIO_MEM_PCI)) {
object_dynamic_cast(OBJECT(dev), TYPE_VIRTIO_MEM_PCI) ||
object_dynamic_cast(OBJECT(dev), TYPE_VIRTIO_IOMMU_PCI) ||
object_dynamic_cast(OBJECT(dev), TYPE_X86_IOMMU_DEVICE)) {
return HOTPLUG_HANDLER(machine);
}

@@ -158,6 +158,7 @@ static void vmmouse_read_id(VMMouseState *s)
s->queue[s->nb_queue++] = VMMOUSE_VERSION;
s->status = 0;
vmmouse_update_handler(s, s->absolute);
}
static void vmmouse_request_relative(VMMouseState *s)

@@ -36,8 +36,3 @@ bool x86_iommu_ir_supported(X86IOMMUState *s)
{
return false;
}
IommuType x86_iommu_get_type(void)
{
abort();
}

@@ -77,30 +77,17 @@ void x86_iommu_irq_to_msi_message(X86IOMMUIrq *irq, MSIMessage *msg_out)
msg_out->data = msg.msi_data;
}
/* Default X86 IOMMU device */
static X86IOMMUState *x86_iommu_default = NULL;
static void x86_iommu_set_default(X86IOMMUState *x86_iommu)
{
assert(x86_iommu);
if (x86_iommu_default) {
error_report("QEMU does not support multiple vIOMMUs "
"for x86 yet.");
exit(1);
}
x86_iommu_default = x86_iommu;
}
X86IOMMUState *x86_iommu_get_default(void)
{
return x86_iommu_default;
}
MachineState *ms = MACHINE(qdev_get_machine());
PCMachineState *pcms =
PC_MACHINE(object_dynamic_cast(OBJECT(ms), TYPE_PC_MACHINE));
IommuType x86_iommu_get_type(void)
{
return x86_iommu_default->type;
if (pcms &&
object_dynamic_cast(OBJECT(pcms->iommu), TYPE_X86_IOMMU_DEVICE)) {
return X86_IOMMU_DEVICE(pcms->iommu);
}
return NULL;
}
static void x86_iommu_realize(DeviceState *dev, Error **errp)
@@ -136,8 +123,6 @@ static void x86_iommu_realize(DeviceState *dev, Error **errp)
if (x86_class->realize) {
x86_class->realize(dev, errp);
}
x86_iommu_set_default(X86_IOMMU_DEVICE(dev));
}
static Property x86_iommu_properties[] = {
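
Because the hunk above interleaves removed and added lines without diff markers, here is the resulting x86_iommu_get_default() as reconstructed from the added lines (indentation approximate): the default vIOMMU is now looked up via PCMachineState::iommu instead of a file-scope singleton, and x86_iommu_set_default()/x86_iommu_get_type() are gone.

X86IOMMUState *x86_iommu_get_default(void)
{
    MachineState *ms = MACHINE(qdev_get_machine());
    PCMachineState *pcms =
        PC_MACHINE(object_dynamic_cast(OBJECT(ms), TYPE_PC_MACHINE));

    if (pcms &&
        object_dynamic_cast(OBJECT(pcms->iommu), TYPE_X86_IOMMU_DEVICE)) {
        return X86_IOMMU_DEVICE(pcms->iommu);
    }
    return NULL;
}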

@@ -1654,11 +1654,9 @@ static const pci_class_desc pci_class_descriptions[] =
{ 0, NULL}
};
static void pci_for_each_device_under_bus_reverse(PCIBus *bus,
void (*fn)(PCIBus *b,
PCIDevice *d,
void *opaque),
void *opaque)
void pci_for_each_device_under_bus_reverse(PCIBus *bus,
pci_bus_dev_fn fn,
void *opaque)
{
PCIDevice *d;
int devfn;
@@ -1672,8 +1670,7 @@ static void pci_for_each_device_under_bus_reverse(PCIBus *bus,
}
void pci_for_each_device_reverse(PCIBus *bus, int bus_num,
void (*fn)(PCIBus *b, PCIDevice *d, void *opaque),
void *opaque)
pci_bus_dev_fn fn, void *opaque)
{
bus = pci_find_bus_nr(bus, bus_num);
@@ -1682,10 +1679,8 @@ void pci_for_each_device_reverse(PCIBus *bus, int bus_num,
}
}
static void pci_for_each_device_under_bus(PCIBus *bus,
void (*fn)(PCIBus *b, PCIDevice *d,
void *opaque),
void *opaque)
void pci_for_each_device_under_bus(PCIBus *bus,
pci_bus_dev_fn fn, void *opaque)
{
PCIDevice *d;
int devfn;
@@ -1699,8 +1694,7 @@ static void pci_for_each_device_under_bus(PCIBus *bus,
}
void pci_for_each_device(PCIBus *bus, int bus_num,
void (*fn)(PCIBus *b, PCIDevice *d, void *opaque),
void *opaque)
pci_bus_dev_fn fn, void *opaque)
{
bus = pci_find_bus_nr(bus, bus_num);
@@ -2078,10 +2072,8 @@ static PCIBus *pci_find_bus_nr(PCIBus *bus, int bus_num)
return NULL;
}
void pci_for_each_bus_depth_first(PCIBus *bus,
void *(*begin)(PCIBus *bus, void *parent_state),
void (*end)(PCIBus *bus, void *state),
void *parent_state)
void pci_for_each_bus_depth_first(PCIBus *bus, pci_bus_ret_fn begin,
pci_bus_fn end, void *parent_state)
{
PCIBus *sec;
void *state;

@@ -694,9 +694,7 @@ void pcie_cap_slot_write_config(PCIDevice *dev,
(!(old_slt_ctl & PCI_EXP_SLTCTL_PCC) ||
(old_slt_ctl & PCI_EXP_SLTCTL_PIC_OFF) != PCI_EXP_SLTCTL_PIC_OFF)) {
PCIBus *sec_bus = pci_bridge_get_sec_bus(PCI_BRIDGE(dev));
pci_for_each_device(sec_bus, pci_bus_num(sec_bus),
pcie_unplug_device, NULL);
pci_for_each_device_under_bus(sec_bus, pcie_unplug_device, NULL);
pci_word_test_and_clear_mask(exp_cap + PCI_EXP_SLTSTA,
PCI_EXP_SLTSTA_PDS);
if (dev->cap_present & QEMU_PCIE_LNKSTA_DLLLA ||

@@ -1317,8 +1317,7 @@ static int spapr_dt_pci_bus(SpaprPhbState *sphb, PCIBus *bus,
RESOURCE_CELLS_SIZE));
assert(bus);
pci_for_each_device_reverse(bus, pci_bus_num(bus),
spapr_dt_pci_device_cb, &cbinfo);
pci_for_each_device_under_bus_reverse(bus, spapr_dt_pci_device_cb, &cbinfo);
if (cbinfo.err) {
return cbinfo.err;
}
@@ -2306,8 +2305,8 @@ static void spapr_phb_pci_enumerate_bridge(PCIBus *bus, PCIDevice *pdev,
return;
}
pci_for_each_device(sec_bus, pci_bus_num(sec_bus),
spapr_phb_pci_enumerate_bridge, bus_no);
pci_for_each_device_under_bus(sec_bus, spapr_phb_pci_enumerate_bridge,
bus_no);
pci_default_write_config(pdev, PCI_SUBORDINATE_BUS, *bus_no, 1);
}
@@ -2316,9 +2315,8 @@ static void spapr_phb_pci_enumerate(SpaprPhbState *phb)
PCIBus *bus = PCI_HOST_BRIDGE(phb)->bus;
unsigned int bus_no = 0;
pci_for_each_device(bus, pci_bus_num(bus),
spapr_phb_pci_enumerate_bridge,
&bus_no);
pci_for_each_device_under_bus(bus, spapr_phb_pci_enumerate_bridge,
&bus_no);
}

@@ -164,8 +164,7 @@ static void spapr_phb_pci_collect_nvgpu(PCIBus *bus, PCIDevice *pdev,
return;
}
pci_for_each_device(sec_bus, pci_bus_num(sec_bus),
spapr_phb_pci_collect_nvgpu, opaque);
pci_for_each_device_under_bus(sec_bus, spapr_phb_pci_collect_nvgpu, opaque);
}
void spapr_phb_nvgpu_setup(SpaprPhbState *sphb, Error **errp)
@@ -183,8 +182,8 @@ void spapr_phb_nvgpu_setup(SpaprPhbState *sphb, Error **errp)
sphb->nvgpus->nv2_atsd_current = sphb->nv2_atsd_win_addr;
bus = PCI_HOST_BRIDGE(sphb)->bus;
pci_for_each_device(bus, pci_bus_num(bus),
spapr_phb_pci_collect_nvgpu, sphb->nvgpus);
pci_for_each_device_under_bus(bus, spapr_phb_pci_collect_nvgpu,
sphb->nvgpus);
if (sphb->nvgpus->err) {
error_propagate(errp, sphb->nvgpus->err);

@@ -164,8 +164,8 @@ static void spapr_phb_vfio_eeh_clear_dev_msix(PCIBus *bus,
static void spapr_phb_vfio_eeh_clear_bus_msix(PCIBus *bus, void *opaque)
{
pci_for_each_device(bus, pci_bus_num(bus),
spapr_phb_vfio_eeh_clear_dev_msix, NULL);
pci_for_each_device_under_bus(bus, spapr_phb_vfio_eeh_clear_dev_msix,
NULL);
}
static void spapr_phb_vfio_eeh_pre_reset(SpaprPhbState *sphb)

@@ -1163,8 +1163,7 @@ static void s390_pci_enumerate_bridge(PCIBus *bus, PCIDevice *pdev,
}
/* Assign numbers to all child bridges. The last is the highest number. */
pci_for_each_device(sec_bus, pci_bus_num(sec_bus),
s390_pci_enumerate_bridge, s);
pci_for_each_device_under_bus(sec_bus, s390_pci_enumerate_bridge, s);
pci_default_write_config(pdev, PCI_SUBORDINATE_BUS, s->bus_no, 1);
}
@@ -1193,7 +1192,7 @@ static void s390_pcihost_reset(DeviceState *dev)
* on every system reset, we also have to reassign numbers.
*/
s->bus_no = 0;
pci_for_each_device(bus, pci_bus_num(bus), s390_pci_enumerate_bridge, s);
pci_for_each_device_under_bus(bus, s390_pci_enumerate_bridge, s);
}
static void s390_pcihost_class_init(ObjectClass *klass, void *data)

@@ -331,6 +331,17 @@ static int vhost_vdpa_init(struct vhost_dev *dev, void *opaque, Error **errp)
struct vhost_vdpa *v;
assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_VDPA);
trace_vhost_vdpa_init(dev, opaque);
int ret;
/*
* Similar to VFIO, we end up pinning all guest memory and have to
* disable discarding of RAM.
*/
ret = ram_block_discard_disable(true);
if (ret) {
error_report("Cannot set discarding of RAM broken");
return ret;
}
v = opaque;
v->dev = dev;
@@ -442,6 +453,8 @@ static int vhost_vdpa_cleanup(struct vhost_dev *dev)
memory_listener_unregister(&v->listener);
dev->opaque = NULL;
ram_block_discard_disable(false);
return 0;
}
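
The vhost-vdpa change above follows the VFIO precedent: a backend that ends up pinning all guest memory must keep RAM discards disabled for its whole lifetime and re-enable them on cleanup. A minimal sketch of that pairing, using a hypothetical my_backend_* pair rather than the real vhost-vdpa entry points:

#include "qemu/osdep.h"
#include "qemu/error-report.h"
#include "exec/memory.h"    /* ram_block_discard_disable() */

/* Hypothetical backend init: refuse to start if discards cannot be blocked. */
static int my_backend_init(void)
{
    int ret = ram_block_discard_disable(true);

    if (ret) {
        error_report("Cannot disable discarding of RAM: %s", strerror(-ret));
        return ret;
    }
    /* ... pin guest memory, register listeners, etc. ... */
    return 0;
}

/* Hypothetical backend teardown: re-allow discards once nothing is pinned. */
static void my_backend_cleanup(void)
{
    /* ... unregister listeners, unpin memory ... */
    ram_block_discard_disable(false);
}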

@@ -615,8 +615,8 @@ static void xen_pt_region_update(XenPCIPassthroughState *s,
}
args.type = d->io_regions[bar].type;
pci_for_each_device(pci_get_bus(d), pci_dev_bus_num(d),
xen_pt_check_bar_overlap, &args);
pci_for_each_device_under_bus(pci_get_bus(d),
xen_pt_check_bar_overlap, &args);
if (args.rc) {
XEN_PT_WARN(d, "Region: %d (addr: 0x%"FMT_PCIBUS
", len: 0x%"FMT_PCIBUS") is overlapped.\n",

@@ -35,6 +35,7 @@ typedef struct PCMachineState {
I2CBus *smbus;
PFlashCFI01 *flash[2];
ISADevice *pcspk;
DeviceState *iommu;
/* Configuration options: */
uint64_t max_ram_below_4g;

@@ -33,12 +33,6 @@ OBJECT_DECLARE_TYPE(X86IOMMUState, X86IOMMUClass, X86_IOMMU_DEVICE)
typedef struct X86IOMMUIrq X86IOMMUIrq;
typedef struct X86IOMMU_MSIMessage X86IOMMU_MSIMessage;
typedef enum IommuType {
TYPE_INTEL,
TYPE_AMD,
TYPE_NONE
} IommuType;
struct X86IOMMUClass {
SysBusDeviceClass parent;
/* Intel/AMD specific realize() hook */
@@ -71,7 +65,6 @@ struct X86IOMMUState {
OnOffAuto intr_supported; /* Whether vIOMMU supports IR */
bool dt_supported; /* Whether vIOMMU supports DT */
bool pt_supported; /* Whether vIOMMU supports pass-through */
IommuType type; /* IOMMU type - AMD/Intel */
QLIST_HEAD(, IEC_Notifier) iec_notifiers; /* IEC notify list */
};
@@ -140,11 +133,6 @@ struct X86IOMMU_MSIMessage {
*/
X86IOMMUState *x86_iommu_get_default(void);
/*
* x86_iommu_get_type - get IOMMU type
*/
IommuType x86_iommu_get_type(void);
/**
* x86_iommu_iec_register_notifier - register IEC (Interrupt Entry
* Cache) notifiers

@@ -401,6 +401,10 @@ typedef PCIINTxRoute (*pci_route_irq_fn)(void *opaque, int pin);
OBJECT_DECLARE_TYPE(PCIBus, PCIBusClass, PCI_BUS)
#define TYPE_PCIE_BUS "PCIE"
typedef void (*pci_bus_dev_fn)(PCIBus *b, PCIDevice *d, void *opaque);
typedef void (*pci_bus_fn)(PCIBus *b, void *opaque);
typedef void *(*pci_bus_ret_fn)(PCIBus *b, void *opaque);
bool pci_bus_is_express(PCIBus *bus);
void pci_root_bus_init(PCIBus *bus, size_t bus_size, DeviceState *parent,
@@ -458,23 +462,23 @@ static inline int pci_dev_bus_num(const PCIDevice *dev)
int pci_bus_numa_node(PCIBus *bus);
void pci_for_each_device(PCIBus *bus, int bus_num,
void (*fn)(PCIBus *bus, PCIDevice *d, void *opaque),
pci_bus_dev_fn fn,
void *opaque);
void pci_for_each_device_reverse(PCIBus *bus, int bus_num,
void (*fn)(PCIBus *bus, PCIDevice *d,
void *opaque),
pci_bus_dev_fn fn,
void *opaque);
void pci_for_each_bus_depth_first(PCIBus *bus,
void *(*begin)(PCIBus *bus, void *parent_state),
void (*end)(PCIBus *bus, void *state),
void *parent_state);
void pci_for_each_device_under_bus(PCIBus *bus,
pci_bus_dev_fn fn, void *opaque);
void pci_for_each_device_under_bus_reverse(PCIBus *bus,
pci_bus_dev_fn fn,
void *opaque);
void pci_for_each_bus_depth_first(PCIBus *bus, pci_bus_ret_fn begin,
pci_bus_fn end, void *parent_state);
PCIDevice *pci_get_function_0(PCIDevice *pci_dev);
/* Use this wrapper when specific scan order is not required. */
static inline
void pci_for_each_bus(PCIBus *bus,
void (*fn)(PCIBus *bus, void *opaque),
void *opaque)
void pci_for_each_bus(PCIBus *bus, pci_bus_fn fn, void *opaque)
{
pci_for_each_bus_depth_first(bus, NULL, fn, opaque);
}
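
As a usage note for the newly exported helpers: callers that already hold a PCIBus pointer can iterate its devices directly with a pci_bus_dev_fn callback, without the pci_bus_num() round-trip that pci_for_each_device() needs. A minimal sketch (the counting callback is hypothetical; only the typedef and the iterator come from the header above):

#include "qemu/osdep.h"
#include "hw/pci/pci.h"

/* Hypothetical pci_bus_dev_fn callback: count the devices on a bus. */
static void count_device(PCIBus *bus, PCIDevice *dev, void *opaque)
{
    unsigned *count = opaque;

    (*count)++;
}

static unsigned count_devices_under_bus(PCIBus *bus)
{
    unsigned count = 0;

    /* Walk every device directly under this bus, in devfn order. */
    pci_for_each_device_under_bus(bus, count_device, &count);
    return count;
}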

@@ -75,7 +75,7 @@ else
kvm_targets = []
endif
kvm_targets_c = ''
kvm_targets_c = '""'
if not get_option('kvm').disabled() and targetos == 'linux'
kvm_targets_c = '"' + '" ,"'.join(kvm_targets) + '"'
endif