Merge remote-tracking branch 'remotes/mst/tags/for_upstream' into staging

virtio, vhost, pc: fixes, features

beginnings of iotlb support for vhost
acpi hotplug rework
vhost net tx flush on link down
passing mtu to guests
hotplug for virtio crypto
fixes and cleanups all over the place

Signed-off-by: Michael S. Tsirkin <mst@redhat.com>

# gpg: Signature made Tue 10 Jan 2017 05:37:48 GMT
# gpg:                using RSA key 0x281F0DB8D28D5469
# gpg: Good signature from "Michael S. Tsirkin <mst@kernel.org>"
# gpg:                 aka "Michael S. Tsirkin <mst@redhat.com>"
# Primary key fingerprint: 0270 606B 6F3C DF3D 0B17  0970 C350 3912 AFBE 8E67
#      Subkey fingerprint: 5D09 FD08 71C8 F85B 94CA  8A0D 281F 0DB8 D28D 5469

* remotes/mst/tags/for_upstream: (41 commits)
  acpi-test: update expected files
  memhp: move DIMM devices into dedicated scope with related common methods
  memhp: don't generate memory hotplug AML if it's not enabled/supported
  memhp: move memory hotplug only defines to memory_hotplug.c
  memhp: move GPE handler_E03 into build_memory_hotplug_aml()
  memhp: merge build_memory_devices() into build_memory_hotplug_aml()
  memhp: consolidate scattered MHPD device declaration
  memhp: move build_memory_devices() into memory_hotplug.c
  memhp: move build_memory_hotplug_aml() into memory_hotplug.c
  tests: pc: add memory hotplug acpi tables tests
  virtio-net: Add MTU feature support
  vhost-net: Notify the backend about the host MTU
  vhost-user: Add MTU protocol feature and op
  net: virtio-net discards TX data after link down
  virtio: Introduce virtqueue_drop_all procedure
  net: vhost stop updates virtio queue state
  net: Add virtio queue interface to update used index from vring state
  balloon: Don't balloon roms
  virtio: fix vq->inuse recalc after migr
  pcie_aer: support configurable AER capa version
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
commit 41a0e54756 by Peter Maydell, 2017-01-10 10:46:21 +00:00
66 changed files with 1167 additions and 560 deletions


@ -94,6 +94,8 @@ static void cryptodev_builtin_init(
backend->conf.max_size = LONG_MAX - sizeof(CryptoDevBackendSymOpInfo);
backend->conf.max_cipher_key_len = CRYPTODEV_BUITLIN_MAX_CIPHER_KEY_LEN;
backend->conf.max_auth_key_len = CRYPTODEV_BUITLIN_MAX_AUTH_KEY_LEN;
cryptodev_backend_set_ready(backend, true);
}
static int
@ -366,6 +368,8 @@ static void cryptodev_builtin_cleanup(
backend->conf.peers.ccs[i] = NULL;
}
}
cryptodev_backend_set_ready(backend, false);
}
static void


@ -73,8 +73,6 @@ void cryptodev_backend_cleanup(
if (bc->cleanup) {
bc->cleanup(backend, errp);
}
backend->ready = false;
}
int64_t cryptodev_backend_sym_create_session(
@ -189,14 +187,39 @@ cryptodev_backend_complete(UserCreatable *uc, Error **errp)
goto out;
}
}
backend->ready = true;
return;
out:
backend->ready = false;
error_propagate(errp, local_err);
}
void cryptodev_backend_set_used(CryptoDevBackend *backend, bool used)
{
backend->is_used = used;
}
bool cryptodev_backend_is_used(CryptoDevBackend *backend)
{
return backend->is_used;
}
void cryptodev_backend_set_ready(CryptoDevBackend *backend, bool ready)
{
backend->ready = ready;
}
bool cryptodev_backend_is_ready(CryptoDevBackend *backend)
{
return backend->ready;
}
static bool
cryptodev_backend_can_be_deleted(UserCreatable *uc, Error **errp)
{
return !cryptodev_backend_is_used(CRYPTODEV_BACKEND(uc));
}
static void cryptodev_backend_instance_init(Object *obj)
{
object_property_add(obj, "queues", "int",
@ -209,7 +232,9 @@ static void cryptodev_backend_instance_init(Object *obj)
static void cryptodev_backend_finalize(Object *obj)
{
CryptoDevBackend *backend = CRYPTODEV_BACKEND(obj);
cryptodev_backend_cleanup(backend, NULL);
}
static void
@ -218,6 +243,7 @@ cryptodev_backend_class_init(ObjectClass *oc, void *data)
UserCreatableClass *ucc = USER_CREATABLE_CLASS(oc);
ucc->complete = cryptodev_backend_complete;
ucc->can_be_deleted = cryptodev_backend_can_be_deleted;
QTAILQ_INIT(&crypto_clients);
}


@ -110,18 +110,18 @@ Plug only PCI Express devices into PCI Express Ports.
-device ioh3420,id=root_port1,chassis=x,slot=y[,bus=pcie.0][,addr=z] \
-device <dev>,bus=root_port1
2.2.2 Using multi-function PCI Express Root Ports:
-device ioh3420,id=root_port1,multifunction=on,chassis=x,slot=y[,bus=pcie.0][,addr=z.0] \
-device ioh3420,id=root_port2,chassis=x1,slot=y1[,bus=pcie.0][,addr=z.1] \
-device ioh3420,id=root_port3,chassis=x2,slot=y2[,bus=pcie.0][,addr=z.2] \
2.2.2 Plugging a PCI Express device into a Switch:
-device ioh3420,id=root_port1,multifunction=on,chassis=x,addr=z.0[,slot=y][,bus=pcie.0] \
-device ioh3420,id=root_port2,chassis=x1,addr=z.1[,slot=y1][,bus=pcie.0] \
-device ioh3420,id=root_port3,chassis=x2,addr=z.2[,slot=y2][,bus=pcie.0] \
2.2.3 Plugging a PCI Express device into a Switch:
-device ioh3420,id=root_port1,chassis=x,slot=y[,bus=pcie.0][,addr=z] \
-device x3130-upstream,id=upstream_port1,bus=root_port1[,addr=x] \
-device xio3130-downstream,id=downstream_port1,bus=upstream_port1,chassis=x1,slot=y1[,addr=z1]] \
-device <dev>,bus=downstream_port1
Notes:
- (slot, chassis) pair is mandatory and must be
unique for each PCI Express Root Port.
- (slot, chassis) pair is mandatory and must be unique for each
PCI Express Root Port. slot defaults to 0 when not specified.
- 'addr' parameter can be 0 for all the examples above.


@ -259,6 +259,7 @@ Protocol features
#define VHOST_USER_PROTOCOL_F_LOG_SHMFD 1
#define VHOST_USER_PROTOCOL_F_RARP 2
#define VHOST_USER_PROTOCOL_F_REPLY_ACK 3
#define VHOST_USER_PROTOCOL_F_NET_MTU       4
Message types
-------------
@ -470,6 +471,21 @@ Message types
The first 6 bytes of the payload contain the mac address of the guest to
allow the vhost user backend to construct and broadcast the fake RARP.
* VHOST_USER_NET_SET_MTU
Id: 20
Equivalent ioctl: N/A
Master payload: u64
Set host MTU value exposed to the guest.
This request should be sent only when VIRTIO_NET_F_MTU feature has been
successfully negotiated, VHOST_USER_F_PROTOCOL_FEATURES is present in
VHOST_USER_GET_FEATURES and protocol feature bit
VHOST_USER_PROTOCOL_F_NET_MTU is present in
VHOST_USER_GET_PROTOCOL_FEATURES.
If VHOST_USER_PROTOCOL_F_REPLY_ACK is negotiated, slave must respond
with zero in case the specified MTU is valid, or non-zero otherwise.
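As a concrete illustration, a master could frame this request as follows (a minimal C sketch based only on the message description above; the helper name and framing details are illustrative, not part of the spec):

/* Hypothetical master-side helper: frame and send VHOST_USER_NET_SET_MTU
 * (request id 20) with its u64 payload over the vhost-user socket. */
#include <stdint.h>
#include <string.h>
#include <unistd.h>

struct vhost_user_hdr {
    uint32_t request;   /* 20 == VHOST_USER_NET_SET_MTU */
    uint32_t flags;     /* low bits carry the protocol version */
    uint32_t size;      /* payload size in bytes */
};

static int send_net_set_mtu(int sock, uint16_t mtu)
{
    struct {
        struct vhost_user_hdr hdr;
        uint64_t payload;
    } __attribute__((packed)) msg;

    memset(&msg, 0, sizeof(msg));
    msg.hdr.request = 20;              /* VHOST_USER_NET_SET_MTU */
    msg.hdr.flags = 0x1;               /* protocol version 1 */
    msg.hdr.size = sizeof(uint64_t);
    msg.payload = mtu;                 /* host MTU exposed to the guest */

    return write(sock, &msg, sizeof(msg)) == sizeof(msg) ? 0 : -1;
}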
VHOST_USER_PROTOCOL_F_REPLY_ACK:
-------------------------------
The original vhost-user specification only demands replies for certain

exec.c

@ -448,6 +448,39 @@ address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *x
return section;
}
/* Called from RCU critical section */
IOMMUTLBEntry address_space_get_iotlb_entry(AddressSpace *as, hwaddr addr,
bool is_write)
{
IOMMUTLBEntry iotlb = {0};
MemoryRegionSection *section;
MemoryRegion *mr;
for (;;) {
AddressSpaceDispatch *d = atomic_rcu_read(&as->dispatch);
section = address_space_lookup_region(d, addr, false);
addr = addr - section->offset_within_address_space
+ section->offset_within_region;
mr = section->mr;
if (!mr->iommu_ops) {
break;
}
iotlb = mr->iommu_ops->translate(mr, addr, is_write);
if (!(iotlb.perm & (1 << is_write))) {
iotlb.target_as = NULL;
break;
}
addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
| (addr & iotlb.addr_mask));
as = iotlb.target_as;
}
return iotlb;
}
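An illustrative caller (not added by this patch) could use this to resolve a guest IOVA once, ahead of the data path:

/* Sketch: translate an IOVA for a write access through whatever IOMMUs
 * sit in front of it. Runs under rcu_read_lock() as required above;
 * a NULL target_as means no valid mapping was found. */
static hwaddr translate_for_write(AddressSpace *as, hwaddr iova)
{
    IOMMUTLBEntry entry;
    hwaddr xlat = (hwaddr)-1;

    rcu_read_lock();
    entry = address_space_get_iotlb_entry(as, iova, true);
    if (entry.target_as && (entry.perm & IOMMU_WO)) {
        /* recombine the page address with the offset bits kept in iova */
        xlat = (entry.translated_addr & ~entry.addr_mask)
               | (iova & entry.addr_mask);
    }
    rcu_read_unlock();
    return xlat;
}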
/* Called from RCU critical section */
MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
hwaddr *xlat, hwaddr *plen,


@ -1,7 +1,7 @@
common-obj-$(CONFIG_ACPI_X86) += core.o piix4.o pcihp.o
common-obj-$(CONFIG_ACPI_X86_ICH) += ich9.o tco.o
common-obj-$(CONFIG_ACPI_CPU_HOTPLUG) += cpu_hotplug.o
common-obj-$(CONFIG_ACPI_MEMORY_HOTPLUG) += memory_hotplug.o memory_hotplug_acpi_table.o
common-obj-$(CONFIG_ACPI_MEMORY_HOTPLUG) += memory_hotplug.o
common-obj-$(CONFIG_ACPI_CPU_HOTPLUG) += cpu.o
common-obj-$(CONFIG_ACPI_NVDIMM) += nvdimm.o
common-obj-$(CONFIG_ACPI) += acpi_interface.o


@ -306,7 +306,8 @@ void ich9_pm_init(PCIDevice *lpc_pci, ICH9LPCPMRegs *pm,
if (pm->acpi_memory_hotplug.is_enabled) {
acpi_memory_hotplug_init(pci_address_space_io(lpc_pci), OBJECT(lpc_pci),
&pm->acpi_memory_hotplug);
&pm->acpi_memory_hotplug,
ACPI_MEMORY_HOTPLUG_BASE);
}
}


@ -7,6 +7,34 @@
#include "trace.h"
#include "qapi-event.h"
#define MEMORY_SLOTS_NUMBER "MDNR"
#define MEMORY_HOTPLUG_IO_REGION "HPMR"
#define MEMORY_SLOT_ADDR_LOW "MRBL"
#define MEMORY_SLOT_ADDR_HIGH "MRBH"
#define MEMORY_SLOT_SIZE_LOW "MRLL"
#define MEMORY_SLOT_SIZE_HIGH "MRLH"
#define MEMORY_SLOT_PROXIMITY "MPX"
#define MEMORY_SLOT_ENABLED "MES"
#define MEMORY_SLOT_INSERT_EVENT "MINS"
#define MEMORY_SLOT_REMOVE_EVENT "MRMV"
#define MEMORY_SLOT_EJECT "MEJ"
#define MEMORY_SLOT_SLECTOR "MSEL"
#define MEMORY_SLOT_OST_EVENT "MOEV"
#define MEMORY_SLOT_OST_STATUS "MOSC"
#define MEMORY_SLOT_LOCK "MLCK"
#define MEMORY_SLOT_STATUS_METHOD "MRST"
#define MEMORY_SLOT_CRS_METHOD "MCRS"
#define MEMORY_SLOT_OST_METHOD "MOST"
#define MEMORY_SLOT_PROXIMITY_METHOD "MPXM"
#define MEMORY_SLOT_EJECT_METHOD "MEJ0"
#define MEMORY_SLOT_NOTIFY_METHOD "MTFY"
#define MEMORY_SLOT_SCAN_METHOD "MSCN"
#define MEMORY_HOTPLUG_DEVICE "MHPD"
#define MEMORY_HOTPLUG_IO_LEN 24
#define MEMORY_DEVICES_CONTAINER "\\_SB.MHPC"
static uint16_t memhp_io_base;
static ACPIOSTInfo *acpi_memory_device_status(int slot, MemStatus *mdev)
{
ACPIOSTInfo *info = g_new0(ACPIOSTInfo, 1);
@ -178,7 +206,7 @@ static const MemoryRegionOps acpi_memory_hotplug_ops = {
};
void acpi_memory_hotplug_init(MemoryRegion *as, Object *owner,
MemHotplugState *state)
MemHotplugState *state, uint16_t io_base)
{
MachineState *machine = MACHINE(qdev_get_machine());
@ -187,10 +215,12 @@ void acpi_memory_hotplug_init(MemoryRegion *as, Object *owner,
return;
}
assert(!memhp_io_base);
memhp_io_base = io_base;
state->devs = g_malloc0(sizeof(*state->devs) * state->dev_count);
memory_region_init_io(&state->io, owner, &acpi_memory_hotplug_ops, state,
"acpi-mem-hotplug", ACPI_MEMORY_HOTPLUG_IO_LEN);
memory_region_add_subregion(as, ACPI_MEMORY_HOTPLUG_BASE, &state->io);
"acpi-mem-hotplug", MEMORY_HOTPLUG_IO_LEN);
memory_region_add_subregion(as, memhp_io_base, &state->io);
}
/**
@ -306,3 +336,387 @@ const VMStateDescription vmstate_memory_hotplug = {
VMSTATE_END_OF_LIST()
}
};
void build_memory_hotplug_aml(Aml *table, uint32_t nr_mem,
const char *res_root,
const char *event_handler_method)
{
int i;
Aml *ifctx;
Aml *method;
Aml *dev_container;
Aml *mem_ctrl_dev;
char *mhp_res_path;
if (!memhp_io_base) {
return;
}
mhp_res_path = g_strdup_printf("%s." MEMORY_HOTPLUG_DEVICE, res_root);
mem_ctrl_dev = aml_device("%s", mhp_res_path);
{
Aml *crs;
aml_append(mem_ctrl_dev, aml_name_decl("_HID", aml_string("PNP0A06")));
aml_append(mem_ctrl_dev,
aml_name_decl("_UID", aml_string("Memory hotplug resources")));
crs = aml_resource_template();
aml_append(crs,
aml_io(AML_DECODE16, memhp_io_base, memhp_io_base, 0,
MEMORY_HOTPLUG_IO_LEN)
);
aml_append(mem_ctrl_dev, aml_name_decl("_CRS", crs));
aml_append(mem_ctrl_dev, aml_operation_region(
MEMORY_HOTPLUG_IO_REGION, AML_SYSTEM_IO,
aml_int(memhp_io_base), MEMORY_HOTPLUG_IO_LEN)
);
}
aml_append(table, mem_ctrl_dev);
dev_container = aml_device(MEMORY_DEVICES_CONTAINER);
{
Aml *field;
Aml *one = aml_int(1);
Aml *zero = aml_int(0);
Aml *ret_val = aml_local(0);
Aml *slot_arg0 = aml_arg(0);
Aml *slots_nr = aml_name(MEMORY_SLOTS_NUMBER);
Aml *ctrl_lock = aml_name(MEMORY_SLOT_LOCK);
Aml *slot_selector = aml_name(MEMORY_SLOT_SLECTOR);
char *mmio_path = g_strdup_printf("%s." MEMORY_HOTPLUG_IO_REGION,
mhp_res_path);
aml_append(dev_container, aml_name_decl("_HID", aml_string("PNP0A06")));
aml_append(dev_container,
aml_name_decl("_UID", aml_string("DIMM devices")));
assert(nr_mem <= ACPI_MAX_RAM_SLOTS);
aml_append(dev_container,
aml_name_decl(MEMORY_SLOTS_NUMBER, aml_int(nr_mem))
);
field = aml_field(mmio_path, AML_DWORD_ACC,
AML_NOLOCK, AML_PRESERVE);
aml_append(field, /* read only */
aml_named_field(MEMORY_SLOT_ADDR_LOW, 32));
aml_append(field, /* read only */
aml_named_field(MEMORY_SLOT_ADDR_HIGH, 32));
aml_append(field, /* read only */
aml_named_field(MEMORY_SLOT_SIZE_LOW, 32));
aml_append(field, /* read only */
aml_named_field(MEMORY_SLOT_SIZE_HIGH, 32));
aml_append(field, /* read only */
aml_named_field(MEMORY_SLOT_PROXIMITY, 32));
aml_append(dev_container, field);
field = aml_field(mmio_path, AML_BYTE_ACC,
AML_NOLOCK, AML_WRITE_AS_ZEROS);
aml_append(field, aml_reserved_field(160 /* bits, Offset(20) */));
aml_append(field, /* 1 if enabled, read only */
aml_named_field(MEMORY_SLOT_ENABLED, 1));
aml_append(field,
/* (read) 1 if it has an insert event. (write) 1 to clear event */
aml_named_field(MEMORY_SLOT_INSERT_EVENT, 1));
aml_append(field,
/* (read) 1 if it has a remove event. (write) 1 to clear event */
aml_named_field(MEMORY_SLOT_REMOVE_EVENT, 1));
aml_append(field,
/* initiates device eject, write only */
aml_named_field(MEMORY_SLOT_EJECT, 1));
aml_append(dev_container, field);
field = aml_field(mmio_path, AML_DWORD_ACC,
AML_NOLOCK, AML_PRESERVE);
aml_append(field, /* DIMM selector, write only */
aml_named_field(MEMORY_SLOT_SLECTOR, 32));
aml_append(field, /* _OST event code, write only */
aml_named_field(MEMORY_SLOT_OST_EVENT, 32));
aml_append(field, /* _OST status code, write only */
aml_named_field(MEMORY_SLOT_OST_STATUS, 32));
aml_append(dev_container, field);
g_free(mmio_path);
method = aml_method("_STA", 0, AML_NOTSERIALIZED);
ifctx = aml_if(aml_equal(slots_nr, zero));
{
aml_append(ifctx, aml_return(zero));
}
aml_append(method, ifctx);
/* present, functioning, decoding, not shown in UI */
aml_append(method, aml_return(aml_int(0xB)));
aml_append(dev_container, method);
aml_append(dev_container, aml_mutex(MEMORY_SLOT_LOCK, 0));
method = aml_method(MEMORY_SLOT_SCAN_METHOD, 0, AML_NOTSERIALIZED);
{
Aml *else_ctx;
Aml *while_ctx;
Aml *idx = aml_local(0);
Aml *eject_req = aml_int(3);
Aml *dev_chk = aml_int(1);
ifctx = aml_if(aml_equal(slots_nr, zero));
{
aml_append(ifctx, aml_return(zero));
}
aml_append(method, ifctx);
aml_append(method, aml_store(zero, idx));
aml_append(method, aml_acquire(ctrl_lock, 0xFFFF));
/* build AML that:
* loops over all slots and Notifies DIMMs with
* Device Check or Eject Request notifications if
* slot has corresponding status bit set and clears
* slot status.
*/
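/* In C terms, the loop built below behaves roughly like this
 * (illustrative pseudo-C only, not generated code):
 *
 *   for (idx = 0; idx < slots_nr; idx++) {
 *       slot_selector = idx;
 *       if (insert_event) {
 *           notify(idx, dev_chk);       // Device Check (1)
 *           insert_event = 1;           // write 1 to clear
 *       } else if (remove_event) {
 *           notify(idx, eject_req);     // Eject Request (3)
 *           remove_event = 1;           // write 1 to clear
 *       }
 *   }
 */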
while_ctx = aml_while(aml_lless(idx, slots_nr));
{
Aml *ins_evt = aml_name(MEMORY_SLOT_INSERT_EVENT);
Aml *rm_evt = aml_name(MEMORY_SLOT_REMOVE_EVENT);
aml_append(while_ctx, aml_store(idx, slot_selector));
ifctx = aml_if(aml_equal(ins_evt, one));
{
aml_append(ifctx,
aml_call2(MEMORY_SLOT_NOTIFY_METHOD,
idx, dev_chk));
aml_append(ifctx, aml_store(one, ins_evt));
}
aml_append(while_ctx, ifctx);
else_ctx = aml_else();
ifctx = aml_if(aml_equal(rm_evt, one));
{
aml_append(ifctx,
aml_call2(MEMORY_SLOT_NOTIFY_METHOD,
idx, eject_req));
aml_append(ifctx, aml_store(one, rm_evt));
}
aml_append(else_ctx, ifctx);
aml_append(while_ctx, else_ctx);
aml_append(while_ctx, aml_add(idx, one, idx));
}
aml_append(method, while_ctx);
aml_append(method, aml_release(ctrl_lock));
aml_append(method, aml_return(one));
}
aml_append(dev_container, method);
method = aml_method(MEMORY_SLOT_STATUS_METHOD, 1, AML_NOTSERIALIZED);
{
Aml *slot_enabled = aml_name(MEMORY_SLOT_ENABLED);
aml_append(method, aml_store(zero, ret_val));
aml_append(method, aml_acquire(ctrl_lock, 0xFFFF));
aml_append(method,
aml_store(aml_to_integer(slot_arg0), slot_selector));
ifctx = aml_if(aml_equal(slot_enabled, one));
{
aml_append(ifctx, aml_store(aml_int(0xF), ret_val));
}
aml_append(method, ifctx);
aml_append(method, aml_release(ctrl_lock));
aml_append(method, aml_return(ret_val));
}
aml_append(dev_container, method);
method = aml_method(MEMORY_SLOT_CRS_METHOD, 1, AML_SERIALIZED);
{
Aml *mr64 = aml_name("MR64");
Aml *mr32 = aml_name("MR32");
Aml *crs_tmpl = aml_resource_template();
Aml *minl = aml_name("MINL");
Aml *minh = aml_name("MINH");
Aml *maxl = aml_name("MAXL");
Aml *maxh = aml_name("MAXH");
Aml *lenl = aml_name("LENL");
Aml *lenh = aml_name("LENH");
aml_append(method, aml_acquire(ctrl_lock, 0xFFFF));
aml_append(method, aml_store(aml_to_integer(slot_arg0),
slot_selector));
aml_append(crs_tmpl,
aml_qword_memory(AML_POS_DECODE, AML_MIN_FIXED, AML_MAX_FIXED,
AML_CACHEABLE, AML_READ_WRITE,
0, 0x0, 0xFFFFFFFFFFFFFFFEULL, 0,
0xFFFFFFFFFFFFFFFFULL));
aml_append(method, aml_name_decl("MR64", crs_tmpl));
aml_append(method,
aml_create_dword_field(mr64, aml_int(14), "MINL"));
aml_append(method,
aml_create_dword_field(mr64, aml_int(18), "MINH"));
aml_append(method,
aml_create_dword_field(mr64, aml_int(38), "LENL"));
aml_append(method,
aml_create_dword_field(mr64, aml_int(42), "LENH"));
aml_append(method,
aml_create_dword_field(mr64, aml_int(22), "MAXL"));
aml_append(method,
aml_create_dword_field(mr64, aml_int(26), "MAXH"));
aml_append(method,
aml_store(aml_name(MEMORY_SLOT_ADDR_HIGH), minh));
aml_append(method,
aml_store(aml_name(MEMORY_SLOT_ADDR_LOW), minl));
aml_append(method,
aml_store(aml_name(MEMORY_SLOT_SIZE_HIGH), lenh));
aml_append(method,
aml_store(aml_name(MEMORY_SLOT_SIZE_LOW), lenl));
/* 64-bit math: MAX = MIN + LEN - 1 */
aml_append(method, aml_add(minl, lenl, maxl));
aml_append(method, aml_add(minh, lenh, maxh));
ifctx = aml_if(aml_lless(maxl, minl));
{
aml_append(ifctx, aml_add(maxh, one, maxh));
}
aml_append(method, ifctx);
ifctx = aml_if(aml_lless(maxl, one));
{
aml_append(ifctx, aml_subtract(maxh, one, maxh));
}
aml_append(method, ifctx);
aml_append(method, aml_subtract(maxl, one, maxl));
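/* The AML sequence above implements, on 32-bit halves, the 64-bit
 * computation MAX = MIN + LEN - 1; in illustrative C:
 *
 *   maxl = minl + lenl;              // low-word add
 *   maxh = minh + lenh;              // high-word add
 *   if (maxl < minl) { maxh += 1; }  // carry out of the low word
 *   if (maxl < 1)    { maxh -= 1; }  // borrow for the final "- 1"
 *   maxl -= 1;
 */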
/* return 32-bit _CRS if addr/size is in low mem */
/* TODO: remove it since all hotplugged DIMMs are in high mem */
ifctx = aml_if(aml_equal(maxh, zero));
{
crs_tmpl = aml_resource_template();
aml_append(crs_tmpl,
aml_dword_memory(AML_POS_DECODE, AML_MIN_FIXED,
AML_MAX_FIXED, AML_CACHEABLE,
AML_READ_WRITE,
0, 0x0, 0xFFFFFFFE, 0,
0xFFFFFFFF));
aml_append(ifctx, aml_name_decl("MR32", crs_tmpl));
aml_append(ifctx,
aml_create_dword_field(mr32, aml_int(10), "MIN"));
aml_append(ifctx,
aml_create_dword_field(mr32, aml_int(14), "MAX"));
aml_append(ifctx,
aml_create_dword_field(mr32, aml_int(22), "LEN"));
aml_append(ifctx, aml_store(minl, aml_name("MIN")));
aml_append(ifctx, aml_store(maxl, aml_name("MAX")));
aml_append(ifctx, aml_store(lenl, aml_name("LEN")));
aml_append(ifctx, aml_release(ctrl_lock));
aml_append(ifctx, aml_return(mr32));
}
aml_append(method, ifctx);
aml_append(method, aml_release(ctrl_lock));
aml_append(method, aml_return(mr64));
}
aml_append(dev_container, method);
method = aml_method(MEMORY_SLOT_PROXIMITY_METHOD, 1,
AML_NOTSERIALIZED);
{
Aml *proximity = aml_name(MEMORY_SLOT_PROXIMITY);
aml_append(method, aml_acquire(ctrl_lock, 0xFFFF));
aml_append(method, aml_store(aml_to_integer(slot_arg0),
slot_selector));
aml_append(method, aml_store(proximity, ret_val));
aml_append(method, aml_release(ctrl_lock));
aml_append(method, aml_return(ret_val));
}
aml_append(dev_container, method);
method = aml_method(MEMORY_SLOT_OST_METHOD, 4, AML_NOTSERIALIZED);
{
Aml *ost_evt = aml_name(MEMORY_SLOT_OST_EVENT);
Aml *ost_status = aml_name(MEMORY_SLOT_OST_STATUS);
aml_append(method, aml_acquire(ctrl_lock, 0xFFFF));
aml_append(method, aml_store(aml_to_integer(slot_arg0),
slot_selector));
aml_append(method, aml_store(aml_arg(1), ost_evt));
aml_append(method, aml_store(aml_arg(2), ost_status));
aml_append(method, aml_release(ctrl_lock));
}
aml_append(dev_container, method);
method = aml_method(MEMORY_SLOT_EJECT_METHOD, 2, AML_NOTSERIALIZED);
{
Aml *eject = aml_name(MEMORY_SLOT_EJECT);
aml_append(method, aml_acquire(ctrl_lock, 0xFFFF));
aml_append(method, aml_store(aml_to_integer(slot_arg0),
slot_selector));
aml_append(method, aml_store(one, eject));
aml_append(method, aml_release(ctrl_lock));
}
aml_append(dev_container, method);
/* build memory devices */
for (i = 0; i < nr_mem; i++) {
Aml *dev;
const char *s;
dev = aml_device("MP%02X", i);
aml_append(dev, aml_name_decl("_UID", aml_string("0x%02X", i)));
aml_append(dev, aml_name_decl("_HID", aml_eisaid("PNP0C80")));
method = aml_method("_CRS", 0, AML_NOTSERIALIZED);
s = MEMORY_SLOT_CRS_METHOD;
aml_append(method, aml_return(aml_call1(s, aml_name("_UID"))));
aml_append(dev, method);
method = aml_method("_STA", 0, AML_NOTSERIALIZED);
s = MEMORY_SLOT_STATUS_METHOD;
aml_append(method, aml_return(aml_call1(s, aml_name("_UID"))));
aml_append(dev, method);
method = aml_method("_PXM", 0, AML_NOTSERIALIZED);
s = MEMORY_SLOT_PROXIMITY_METHOD;
aml_append(method, aml_return(aml_call1(s, aml_name("_UID"))));
aml_append(dev, method);
method = aml_method("_OST", 3, AML_NOTSERIALIZED);
s = MEMORY_SLOT_OST_METHOD;
aml_append(method, aml_return(aml_call4(
s, aml_name("_UID"), aml_arg(0), aml_arg(1), aml_arg(2)
)));
aml_append(dev, method);
method = aml_method("_EJ0", 1, AML_NOTSERIALIZED);
s = MEMORY_SLOT_EJECT_METHOD;
aml_append(method, aml_return(aml_call2(
s, aml_name("_UID"), aml_arg(0))));
aml_append(dev, method);
aml_append(dev_container, dev);
}
/* build Method(MEMORY_SLOT_NOTIFY_METHOD, 2) {
* If (LEqual(Arg0, 0x00)) {Notify(MP00, Arg1)} ... }
*/
method = aml_method(MEMORY_SLOT_NOTIFY_METHOD, 2, AML_NOTSERIALIZED);
for (i = 0; i < nr_mem; i++) {
ifctx = aml_if(aml_equal(aml_arg(0), aml_int(i)));
aml_append(ifctx,
aml_notify(aml_name("MP%.02X", i), aml_arg(1))
);
aml_append(method, ifctx);
}
aml_append(dev_container, method);
}
aml_append(table, dev_container);
method = aml_method(event_handler_method, 0, AML_NOTSERIALIZED);
aml_append(method,
aml_call0(MEMORY_DEVICES_CONTAINER "." MEMORY_SLOT_SCAN_METHOD));
aml_append(table, method);
g_free(mhp_res_path);
}


@ -1,262 +0,0 @@
/*
* Memory hotplug AML code of DSDT ACPI table
*
* Copyright (C) 2015 Red Hat Inc
*
* Author: Igor Mammedov <imammedo@redhat.com>
*
* This work is licensed under the terms of the GNU GPL, version 2 or later.
* See the COPYING file in the top-level directory.
*/
#include "qemu/osdep.h"
#include "hw/acpi/memory_hotplug.h"
#include "include/hw/acpi/pc-hotplug.h"
#include "hw/boards.h"
void build_memory_hotplug_aml(Aml *ctx, uint32_t nr_mem,
uint16_t io_base, uint16_t io_len)
{
Aml *ifctx;
Aml *method;
Aml *pci_scope;
Aml *mem_ctrl_dev;
/* scope for memory hotplug controller device node */
pci_scope = aml_scope("_SB.PCI0");
mem_ctrl_dev = aml_device(MEMORY_HOTPLUG_DEVICE);
{
Aml *one = aml_int(1);
Aml *zero = aml_int(0);
Aml *ret_val = aml_local(0);
Aml *slot_arg0 = aml_arg(0);
Aml *slots_nr = aml_name(MEMORY_SLOTS_NUMBER);
Aml *ctrl_lock = aml_name(MEMORY_SLOT_LOCK);
Aml *slot_selector = aml_name(MEMORY_SLOT_SLECTOR);
aml_append(mem_ctrl_dev, aml_name_decl("_HID", aml_string("PNP0A06")));
aml_append(mem_ctrl_dev,
aml_name_decl("_UID", aml_string("Memory hotplug resources")));
method = aml_method("_STA", 0, AML_NOTSERIALIZED);
ifctx = aml_if(aml_equal(slots_nr, zero));
{
aml_append(ifctx, aml_return(zero));
}
aml_append(method, ifctx);
/* present, functioning, decoding, not shown in UI */
aml_append(method, aml_return(aml_int(0xB)));
aml_append(mem_ctrl_dev, method);
aml_append(mem_ctrl_dev, aml_mutex(MEMORY_SLOT_LOCK, 0));
method = aml_method(MEMORY_SLOT_SCAN_METHOD, 0, AML_NOTSERIALIZED);
{
Aml *else_ctx;
Aml *while_ctx;
Aml *idx = aml_local(0);
Aml *eject_req = aml_int(3);
Aml *dev_chk = aml_int(1);
ifctx = aml_if(aml_equal(slots_nr, zero));
{
aml_append(ifctx, aml_return(zero));
}
aml_append(method, ifctx);
aml_append(method, aml_store(zero, idx));
aml_append(method, aml_acquire(ctrl_lock, 0xFFFF));
/* build AML that:
* loops over all slots and Notifies DIMMs with
* Device Check or Eject Request notifications if
* slot has corresponding status bit set and clears
* slot status.
*/
while_ctx = aml_while(aml_lless(idx, slots_nr));
{
Aml *ins_evt = aml_name(MEMORY_SLOT_INSERT_EVENT);
Aml *rm_evt = aml_name(MEMORY_SLOT_REMOVE_EVENT);
aml_append(while_ctx, aml_store(idx, slot_selector));
ifctx = aml_if(aml_equal(ins_evt, one));
{
aml_append(ifctx,
aml_call2(MEMORY_SLOT_NOTIFY_METHOD,
idx, dev_chk));
aml_append(ifctx, aml_store(one, ins_evt));
}
aml_append(while_ctx, ifctx);
else_ctx = aml_else();
ifctx = aml_if(aml_equal(rm_evt, one));
{
aml_append(ifctx,
aml_call2(MEMORY_SLOT_NOTIFY_METHOD,
idx, eject_req));
aml_append(ifctx, aml_store(one, rm_evt));
}
aml_append(else_ctx, ifctx);
aml_append(while_ctx, else_ctx);
aml_append(while_ctx, aml_add(idx, one, idx));
}
aml_append(method, while_ctx);
aml_append(method, aml_release(ctrl_lock));
aml_append(method, aml_return(one));
}
aml_append(mem_ctrl_dev, method);
method = aml_method(MEMORY_SLOT_STATUS_METHOD, 1, AML_NOTSERIALIZED);
{
Aml *slot_enabled = aml_name(MEMORY_SLOT_ENABLED);
aml_append(method, aml_store(zero, ret_val));
aml_append(method, aml_acquire(ctrl_lock, 0xFFFF));
aml_append(method,
aml_store(aml_to_integer(slot_arg0), slot_selector));
ifctx = aml_if(aml_equal(slot_enabled, one));
{
aml_append(ifctx, aml_store(aml_int(0xF), ret_val));
}
aml_append(method, ifctx);
aml_append(method, aml_release(ctrl_lock));
aml_append(method, aml_return(ret_val));
}
aml_append(mem_ctrl_dev, method);
method = aml_method(MEMORY_SLOT_CRS_METHOD, 1, AML_SERIALIZED);
{
Aml *mr64 = aml_name("MR64");
Aml *mr32 = aml_name("MR32");
Aml *crs_tmpl = aml_resource_template();
Aml *minl = aml_name("MINL");
Aml *minh = aml_name("MINH");
Aml *maxl = aml_name("MAXL");
Aml *maxh = aml_name("MAXH");
Aml *lenl = aml_name("LENL");
Aml *lenh = aml_name("LENH");
aml_append(method, aml_acquire(ctrl_lock, 0xFFFF));
aml_append(method, aml_store(aml_to_integer(slot_arg0),
slot_selector));
aml_append(crs_tmpl,
aml_qword_memory(AML_POS_DECODE, AML_MIN_FIXED, AML_MAX_FIXED,
AML_CACHEABLE, AML_READ_WRITE,
0, 0x0, 0xFFFFFFFFFFFFFFFEULL, 0,
0xFFFFFFFFFFFFFFFFULL));
aml_append(method, aml_name_decl("MR64", crs_tmpl));
aml_append(method,
aml_create_dword_field(mr64, aml_int(14), "MINL"));
aml_append(method,
aml_create_dword_field(mr64, aml_int(18), "MINH"));
aml_append(method,
aml_create_dword_field(mr64, aml_int(38), "LENL"));
aml_append(method,
aml_create_dword_field(mr64, aml_int(42), "LENH"));
aml_append(method,
aml_create_dword_field(mr64, aml_int(22), "MAXL"));
aml_append(method,
aml_create_dword_field(mr64, aml_int(26), "MAXH"));
aml_append(method,
aml_store(aml_name(MEMORY_SLOT_ADDR_HIGH), minh));
aml_append(method,
aml_store(aml_name(MEMORY_SLOT_ADDR_LOW), minl));
aml_append(method,
aml_store(aml_name(MEMORY_SLOT_SIZE_HIGH), lenh));
aml_append(method,
aml_store(aml_name(MEMORY_SLOT_SIZE_LOW), lenl));
/* 64-bit math: MAX = MIN + LEN - 1 */
aml_append(method, aml_add(minl, lenl, maxl));
aml_append(method, aml_add(minh, lenh, maxh));
ifctx = aml_if(aml_lless(maxl, minl));
{
aml_append(ifctx, aml_add(maxh, one, maxh));
}
aml_append(method, ifctx);
ifctx = aml_if(aml_lless(maxl, one));
{
aml_append(ifctx, aml_subtract(maxh, one, maxh));
}
aml_append(method, ifctx);
aml_append(method, aml_subtract(maxl, one, maxl));
/* return 32-bit _CRS if addr/size is in low mem */
/* TODO: remove it since all hotplugged DIMMs are in high mem */
ifctx = aml_if(aml_equal(maxh, zero));
{
crs_tmpl = aml_resource_template();
aml_append(crs_tmpl,
aml_dword_memory(AML_POS_DECODE, AML_MIN_FIXED,
AML_MAX_FIXED, AML_CACHEABLE,
AML_READ_WRITE,
0, 0x0, 0xFFFFFFFE, 0,
0xFFFFFFFF));
aml_append(ifctx, aml_name_decl("MR32", crs_tmpl));
aml_append(ifctx,
aml_create_dword_field(mr32, aml_int(10), "MIN"));
aml_append(ifctx,
aml_create_dword_field(mr32, aml_int(14), "MAX"));
aml_append(ifctx,
aml_create_dword_field(mr32, aml_int(22), "LEN"));
aml_append(ifctx, aml_store(minl, aml_name("MIN")));
aml_append(ifctx, aml_store(maxl, aml_name("MAX")));
aml_append(ifctx, aml_store(lenl, aml_name("LEN")));
aml_append(ifctx, aml_release(ctrl_lock));
aml_append(ifctx, aml_return(mr32));
}
aml_append(method, ifctx);
aml_append(method, aml_release(ctrl_lock));
aml_append(method, aml_return(mr64));
}
aml_append(mem_ctrl_dev, method);
method = aml_method(MEMORY_SLOT_PROXIMITY_METHOD, 1,
AML_NOTSERIALIZED);
{
Aml *proximity = aml_name(MEMORY_SLOT_PROXIMITY);
aml_append(method, aml_acquire(ctrl_lock, 0xFFFF));
aml_append(method, aml_store(aml_to_integer(slot_arg0),
slot_selector));
aml_append(method, aml_store(proximity, ret_val));
aml_append(method, aml_release(ctrl_lock));
aml_append(method, aml_return(ret_val));
}
aml_append(mem_ctrl_dev, method);
method = aml_method(MEMORY_SLOT_OST_METHOD, 4, AML_NOTSERIALIZED);
{
Aml *ost_evt = aml_name(MEMORY_SLOT_OST_EVENT);
Aml *ost_status = aml_name(MEMORY_SLOT_OST_STATUS);
aml_append(method, aml_acquire(ctrl_lock, 0xFFFF));
aml_append(method, aml_store(aml_to_integer(slot_arg0),
slot_selector));
aml_append(method, aml_store(aml_arg(1), ost_evt));
aml_append(method, aml_store(aml_arg(2), ost_status));
aml_append(method, aml_release(ctrl_lock));
}
aml_append(mem_ctrl_dev, method);
method = aml_method(MEMORY_SLOT_EJECT_METHOD, 2, AML_NOTSERIALIZED);
{
Aml *eject = aml_name(MEMORY_SLOT_EJECT);
aml_append(method, aml_acquire(ctrl_lock, 0xFFFF));
aml_append(method, aml_store(aml_to_integer(slot_arg0),
slot_selector));
aml_append(method, aml_store(one, eject));
aml_append(method, aml_release(ctrl_lock));
}
aml_append(mem_ctrl_dev, method);
}
aml_append(pci_scope, mem_ctrl_dev);
aml_append(ctx, pci_scope);
}


@ -644,7 +644,8 @@ static void piix4_acpi_system_hot_add_init(MemoryRegion *parent,
PIIX4_CPU_HOTPLUG_IO_BASE);
if (s->acpi_memory_hotplug.is_enabled) {
acpi_memory_hotplug_init(parent, OBJECT(s), &s->acpi_memory_hotplug);
acpi_memory_hotplug_init(parent, OBJECT(s), &s->acpi_memory_hotplug,
ACPI_MEMORY_HOTPLUG_BASE);
}
}


@ -863,7 +863,7 @@ static int virtio_blk_load_device(VirtIODevice *vdev, QEMUFile *f,
}
}
req = qemu_get_virtqueue_element(f, sizeof(VirtIOBlockReq));
req = qemu_get_virtqueue_element(vdev, f, sizeof(VirtIOBlockReq));
virtio_blk_init_request(s, virtio_get_queue(vdev, vq_idx), req);
req->next = s->rq;
s->rq = req;


@ -732,6 +732,7 @@ static void virtio_serial_post_load_timer_cb(void *opaque)
static int fetch_active_ports_list(QEMUFile *f,
VirtIOSerial *s, uint32_t nr_active_ports)
{
VirtIODevice *vdev = VIRTIO_DEVICE(s);
uint32_t i;
s->post_load = g_malloc0(sizeof(*s->post_load));
@ -765,7 +766,7 @@ static int fetch_active_ports_list(QEMUFile *f,
qemu_get_be64s(f, &port->iov_offset);
port->elem =
qemu_get_virtqueue_element(f, sizeof(VirtQueueElement));
qemu_get_virtqueue_element(vdev, f, sizeof(VirtQueueElement));
/*
* Port was throttled on source machine. Let's


@ -101,8 +101,6 @@ typedef struct AcpiPmInfo {
uint32_t gpe0_blk_len;
uint32_t io_base;
uint16_t cpu_hp_io_base;
uint16_t mem_hp_io_base;
uint16_t mem_hp_io_len;
uint16_t pcihp_io_base;
uint16_t pcihp_io_len;
} AcpiPmInfo;
@ -148,9 +146,6 @@ static void acpi_get_pm_info(AcpiPmInfo *pm)
}
assert(obj);
pm->mem_hp_io_base = ACPI_MEMORY_HOTPLUG_BASE;
pm->mem_hp_io_len = ACPI_MEMORY_HOTPLUG_IO_LEN;
/* Fill in optional s3/s4 related properties */
o = object_property_get_qobject(obj, ACPI_PM_PROP_S3_DISABLED, NULL);
if (o) {
@ -1038,130 +1033,6 @@ static Aml *build_crs(PCIHostState *host, CrsRangeSet *range_set)
return crs;
}
static void build_memory_devices(Aml *sb_scope, int nr_mem,
uint16_t io_base, uint16_t io_len)
{
int i;
Aml *scope;
Aml *crs;
Aml *field;
Aml *dev;
Aml *method;
Aml *ifctx;
/* build memory devices */
assert(nr_mem <= ACPI_MAX_RAM_SLOTS);
scope = aml_scope("\\_SB.PCI0." MEMORY_HOTPLUG_DEVICE);
aml_append(scope,
aml_name_decl(MEMORY_SLOTS_NUMBER, aml_int(nr_mem))
);
crs = aml_resource_template();
aml_append(crs,
aml_io(AML_DECODE16, io_base, io_base, 0, io_len)
);
aml_append(scope, aml_name_decl("_CRS", crs));
aml_append(scope, aml_operation_region(
MEMORY_HOTPLUG_IO_REGION, AML_SYSTEM_IO,
aml_int(io_base), io_len)
);
field = aml_field(MEMORY_HOTPLUG_IO_REGION, AML_DWORD_ACC,
AML_NOLOCK, AML_PRESERVE);
aml_append(field, /* read only */
aml_named_field(MEMORY_SLOT_ADDR_LOW, 32));
aml_append(field, /* read only */
aml_named_field(MEMORY_SLOT_ADDR_HIGH, 32));
aml_append(field, /* read only */
aml_named_field(MEMORY_SLOT_SIZE_LOW, 32));
aml_append(field, /* read only */
aml_named_field(MEMORY_SLOT_SIZE_HIGH, 32));
aml_append(field, /* read only */
aml_named_field(MEMORY_SLOT_PROXIMITY, 32));
aml_append(scope, field);
field = aml_field(MEMORY_HOTPLUG_IO_REGION, AML_BYTE_ACC,
AML_NOLOCK, AML_WRITE_AS_ZEROS);
aml_append(field, aml_reserved_field(160 /* bits, Offset(20) */));
aml_append(field, /* 1 if enabled, read only */
aml_named_field(MEMORY_SLOT_ENABLED, 1));
aml_append(field,
/* (read) 1 if it has an insert event. (write) 1 to clear event */
aml_named_field(MEMORY_SLOT_INSERT_EVENT, 1));
aml_append(field,
/* (read) 1 if it has a remove event. (write) 1 to clear event */
aml_named_field(MEMORY_SLOT_REMOVE_EVENT, 1));
aml_append(field,
/* initiates device eject, write only */
aml_named_field(MEMORY_SLOT_EJECT, 1));
aml_append(scope, field);
field = aml_field(MEMORY_HOTPLUG_IO_REGION, AML_DWORD_ACC,
AML_NOLOCK, AML_PRESERVE);
aml_append(field, /* DIMM selector, write only */
aml_named_field(MEMORY_SLOT_SLECTOR, 32));
aml_append(field, /* _OST event code, write only */
aml_named_field(MEMORY_SLOT_OST_EVENT, 32));
aml_append(field, /* _OST status code, write only */
aml_named_field(MEMORY_SLOT_OST_STATUS, 32));
aml_append(scope, field);
aml_append(sb_scope, scope);
for (i = 0; i < nr_mem; i++) {
#define BASEPATH "\\_SB.PCI0." MEMORY_HOTPLUG_DEVICE "."
const char *s;
dev = aml_device("MP%02X", i);
aml_append(dev, aml_name_decl("_UID", aml_string("0x%02X", i)));
aml_append(dev, aml_name_decl("_HID", aml_eisaid("PNP0C80")));
method = aml_method("_CRS", 0, AML_NOTSERIALIZED);
s = BASEPATH MEMORY_SLOT_CRS_METHOD;
aml_append(method, aml_return(aml_call1(s, aml_name("_UID"))));
aml_append(dev, method);
method = aml_method("_STA", 0, AML_NOTSERIALIZED);
s = BASEPATH MEMORY_SLOT_STATUS_METHOD;
aml_append(method, aml_return(aml_call1(s, aml_name("_UID"))));
aml_append(dev, method);
method = aml_method("_PXM", 0, AML_NOTSERIALIZED);
s = BASEPATH MEMORY_SLOT_PROXIMITY_METHOD;
aml_append(method, aml_return(aml_call1(s, aml_name("_UID"))));
aml_append(dev, method);
method = aml_method("_OST", 3, AML_NOTSERIALIZED);
s = BASEPATH MEMORY_SLOT_OST_METHOD;
aml_append(method, aml_return(aml_call4(
s, aml_name("_UID"), aml_arg(0), aml_arg(1), aml_arg(2)
)));
aml_append(dev, method);
method = aml_method("_EJ0", 1, AML_NOTSERIALIZED);
s = BASEPATH MEMORY_SLOT_EJECT_METHOD;
aml_append(method, aml_return(aml_call2(
s, aml_name("_UID"), aml_arg(0))));
aml_append(dev, method);
aml_append(sb_scope, dev);
}
/* build Method(MEMORY_SLOT_NOTIFY_METHOD, 2) {
* If (LEqual(Arg0, 0x00)) {Notify(MP00, Arg1)} ... }
*/
method = aml_method(MEMORY_SLOT_NOTIFY_METHOD, 2, AML_NOTSERIALIZED);
for (i = 0; i < nr_mem; i++) {
ifctx = aml_if(aml_equal(aml_arg(0), aml_int(i)));
aml_append(ifctx,
aml_notify(aml_name("MP%.02X", i), aml_arg(1))
);
aml_append(method, ifctx);
}
aml_append(sb_scope, method);
}
static void build_hpet_aml(Aml *table)
{
Aml *crs;
@ -2049,8 +1920,7 @@ build_dsdt(GArray *table_data, BIOSLinker *linker,
build_cpus_aml(dsdt, machine, opts, pm->cpu_hp_io_base,
"\\_SB.PCI0", "\\_GPE._E02");
}
build_memory_hotplug_aml(dsdt, nr_mem, pm->mem_hp_io_base,
pm->mem_hp_io_len);
build_memory_hotplug_aml(dsdt, nr_mem, "\\_SB.PCI0", "\\_GPE._E03");
scope = aml_scope("_GPE");
{
@ -2065,10 +1935,6 @@ build_dsdt(GArray *table_data, BIOSLinker *linker,
aml_append(scope, method);
}
method = aml_method("_E03", 0, AML_NOTSERIALIZED);
aml_append(method, aml_call0(MEMORY_HOTPLUG_HANDLER_PATH));
aml_append(scope, method);
if (pcms->acpi_nvdimm_state.is_enabled) {
method = aml_method("_E04", 0, AML_NOTSERIALIZED);
aml_append(method, aml_notify(aml_name("\\_SB.NVDR"),
@ -2321,45 +2187,40 @@ build_dsdt(GArray *table_data, BIOSLinker *linker,
sb_scope = aml_scope("\\_SB");
{
build_memory_devices(sb_scope, nr_mem, pm->mem_hp_io_base,
pm->mem_hp_io_len);
Object *pci_host;
PCIBus *bus = NULL;
{
Object *pci_host;
PCIBus *bus = NULL;
pci_host = acpi_get_i386_pci_host();
if (pci_host) {
bus = PCI_HOST_BRIDGE(pci_host)->bus;
}
if (bus) {
Aml *scope = aml_scope("PCI0");
/* Scan all PCI buses. Generate tables to support hotplug. */
build_append_pci_bus_devices(scope, bus, pm->pcihp_bridge_en);
if (misc->tpm_version != TPM_VERSION_UNSPEC) {
dev = aml_device("ISA.TPM");
aml_append(dev, aml_name_decl("_HID", aml_eisaid("PNP0C31")));
aml_append(dev, aml_name_decl("_STA", aml_int(0xF)));
crs = aml_resource_template();
aml_append(crs, aml_memory32_fixed(TPM_TIS_ADDR_BASE,
TPM_TIS_ADDR_SIZE, AML_READ_WRITE));
/*
FIXME: TPM_TIS_IRQ=5 conflicts with PNP0C0F irqs,
Rewrite to take IRQ from TPM device model and
fix default IRQ value there to use some unused IRQ
*/
/* aml_append(crs, aml_irq_no_flags(TPM_TIS_IRQ)); */
aml_append(dev, aml_name_decl("_CRS", crs));
aml_append(scope, dev);
}
aml_append(sb_scope, scope);
}
pci_host = acpi_get_i386_pci_host();
if (pci_host) {
bus = PCI_HOST_BRIDGE(pci_host)->bus;
}
if (bus) {
Aml *scope = aml_scope("PCI0");
/* Scan all PCI buses. Generate tables to support hotplug. */
build_append_pci_bus_devices(scope, bus, pm->pcihp_bridge_en);
if (misc->tpm_version != TPM_VERSION_UNSPEC) {
dev = aml_device("ISA.TPM");
aml_append(dev, aml_name_decl("_HID", aml_eisaid("PNP0C31")));
aml_append(dev, aml_name_decl("_STA", aml_int(0xF)));
crs = aml_resource_template();
aml_append(crs, aml_memory32_fixed(TPM_TIS_ADDR_BASE,
TPM_TIS_ADDR_SIZE, AML_READ_WRITE));
/*
FIXME: TPM_TIS_IRQ=5 conflicts with PNP0C0F irqs,
Rewrite to take IRQ from TPM device model and
fix default IRQ value there to use some unused IRQ
*/
/* aml_append(crs, aml_irq_no_flags(TPM_TIS_IRQ)); */
aml_append(dev, aml_name_decl("_CRS", crs));
aml_append(scope, dev);
}
aml_append(sb_scope, scope);
}
aml_append(dsdt, sb_scope);
}
aml_append(dsdt, sb_scope);
/* copy AML table into ACPI tables blob and patch header there */
g_array_append_vals(table_data, dsdt->buf->data, dsdt->buf->len);
@ -2575,6 +2436,7 @@ build_dmar_q35(GArray *table_data, BIOSLinker *linker)
AcpiTableDmar *dmar;
AcpiDmarHardwareUnit *drhd;
AcpiDmarRootPortATS *atsr;
uint8_t dmar_flags = 0;
X86IOMMUState *iommu = x86_iommu_get_default();
AcpiDmarDeviceScope *scope = NULL;
@ -2608,6 +2470,14 @@ build_dmar_q35(GArray *table_data, BIOSLinker *linker)
scope->path[0].device = PCI_SLOT(Q35_PSEUDO_DEVFN_IOAPIC);
scope->path[0].function = PCI_FUNC(Q35_PSEUDO_DEVFN_IOAPIC);
if (iommu->dt_supported) {
atsr = acpi_data_push(table_data, sizeof(*atsr));
atsr->type = cpu_to_le16(ACPI_DMAR_TYPE_ATSR);
atsr->length = cpu_to_le16(sizeof(*atsr));
atsr->flags = ACPI_DMAR_ATSR_ALL_PORTS;
atsr->pci_segment = cpu_to_le16(0);
}
build_header(linker, table_data, (void *)(table_data->data + dmar_start),
"DMAR", table_data->len - dmar_start, 1, NULL, NULL);
}


@ -738,11 +738,18 @@ static int vtd_dev_to_context_entry(IntelIOMMUState *s, uint8_t bus_num,
"context-entry hi 0x%"PRIx64 " lo 0x%"PRIx64,
ce->hi, ce->lo);
return -VTD_FR_CONTEXT_ENTRY_INV;
} else if (ce->lo & VTD_CONTEXT_ENTRY_TT) {
VTD_DPRINTF(GENERAL, "error: unsupported Translation Type in "
"context-entry hi 0x%"PRIx64 " lo 0x%"PRIx64,
ce->hi, ce->lo);
return -VTD_FR_CONTEXT_ENTRY_INV;
} else {
switch (ce->lo & VTD_CONTEXT_ENTRY_TT) {
case VTD_CONTEXT_TT_MULTI_LEVEL:
/* fall through */
case VTD_CONTEXT_TT_DEV_IOTLB:
break;
default:
VTD_DPRINTF(GENERAL, "error: unsupported Translation Type in "
"context-entry hi 0x%"PRIx64 " lo 0x%"PRIx64,
ce->hi, ce->lo);
return -VTD_FR_CONTEXT_ENTRY_INV;
}
}
return 0;
}
@ -1438,7 +1445,61 @@ static bool vtd_process_inv_iec_desc(IntelIOMMUState *s,
vtd_iec_notify_all(s, !inv_desc->iec.granularity,
inv_desc->iec.index,
inv_desc->iec.index_mask);
return true;
}
static bool vtd_process_device_iotlb_desc(IntelIOMMUState *s,
VTDInvDesc *inv_desc)
{
VTDAddressSpace *vtd_dev_as;
IOMMUTLBEntry entry;
struct VTDBus *vtd_bus;
hwaddr addr;
uint64_t sz;
uint16_t sid;
uint8_t devfn;
bool size;
uint8_t bus_num;
addr = VTD_INV_DESC_DEVICE_IOTLB_ADDR(inv_desc->hi);
sid = VTD_INV_DESC_DEVICE_IOTLB_SID(inv_desc->lo);
devfn = sid & 0xff;
bus_num = sid >> 8;
size = VTD_INV_DESC_DEVICE_IOTLB_SIZE(inv_desc->hi);
if ((inv_desc->lo & VTD_INV_DESC_DEVICE_IOTLB_RSVD_LO) ||
(inv_desc->hi & VTD_INV_DESC_DEVICE_IOTLB_RSVD_HI)) {
VTD_DPRINTF(GENERAL, "error: non-zero reserved field in Device "
"IOTLB Invalidate Descriptor hi 0x%"PRIx64 " lo 0x%"PRIx64,
inv_desc->hi, inv_desc->lo);
return false;
}
vtd_bus = vtd_find_as_from_bus_num(s, bus_num);
if (!vtd_bus) {
goto done;
}
vtd_dev_as = vtd_bus->dev_as[devfn];
if (!vtd_dev_as) {
goto done;
}
if (size) {
sz = 1 << (ctz64(~(addr | (VTD_PAGE_MASK_4K - 1))) + 1);
addr &= ~(sz - 1);
} else {
sz = VTD_PAGE_SIZE;
}
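/* Size decoding sketch (assumption, following the VT-d Device-TLB
 * invalidate descriptor format): with the size bit set, the run of
 * 1s in addr above bit 11 selects the range, e.g. bit 12 clear -> 8K,
 * bit 12 set with bit 13 clear -> 16K; addr is then aligned to it. */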
entry.target_as = &vtd_dev_as->as;
entry.addr_mask = sz - 1;
entry.iova = addr;
entry.perm = IOMMU_NONE;
entry.translated_addr = 0;
memory_region_notify_iommu(entry.target_as->root, entry);
done:
return true;
}
@ -1490,6 +1551,14 @@ static bool vtd_process_inv_desc(IntelIOMMUState *s)
}
break;
case VTD_INV_DESC_DEVICE:
VTD_DPRINTF(INV, "Device IOTLB Invalidation Descriptor hi 0x%"PRIx64
" lo 0x%"PRIx64, inv_desc.hi, inv_desc.lo);
if (!vtd_process_device_iotlb_desc(s, &inv_desc)) {
return false;
}
break;
default:
VTD_DPRINTF(GENERAL, "error: unknown Invalidation Descriptor type "
"hi 0x%"PRIx64 " lo 0x%"PRIx64 " type %"PRIu8,
@ -1996,7 +2065,27 @@ static void vtd_iommu_notify_flag_changed(MemoryRegion *iommu,
static const VMStateDescription vtd_vmstate = {
.name = "iommu-intel",
.unmigratable = 1,
.version_id = 1,
.minimum_version_id = 1,
.priority = MIG_PRI_IOMMU,
.fields = (VMStateField[]) {
VMSTATE_UINT64(root, IntelIOMMUState),
VMSTATE_UINT64(intr_root, IntelIOMMUState),
VMSTATE_UINT64(iq, IntelIOMMUState),
VMSTATE_UINT32(intr_size, IntelIOMMUState),
VMSTATE_UINT16(iq_head, IntelIOMMUState),
VMSTATE_UINT16(iq_tail, IntelIOMMUState),
VMSTATE_UINT16(iq_size, IntelIOMMUState),
VMSTATE_UINT16(next_frcd_reg, IntelIOMMUState),
VMSTATE_UINT8_ARRAY(csr, IntelIOMMUState, DMAR_REG_SIZE),
VMSTATE_UINT8(iq_last_desc_type, IntelIOMMUState),
VMSTATE_BOOL(root_extended, IntelIOMMUState),
VMSTATE_BOOL(dmar_enabled, IntelIOMMUState),
VMSTATE_BOOL(qi_enabled, IntelIOMMUState),
VMSTATE_BOOL(intr_enabled, IntelIOMMUState),
VMSTATE_BOOL(intr_eime, IntelIOMMUState),
VMSTATE_END_OF_LIST()
}
};
static const MemoryRegionOps vtd_mem_ops = {
@ -2324,19 +2413,22 @@ VTDAddressSpace *vtd_find_add_as(IntelIOMMUState *s, PCIBus *bus, int devfn)
uintptr_t key = (uintptr_t)bus;
VTDBus *vtd_bus = g_hash_table_lookup(s->vtd_as_by_busptr, &key);
VTDAddressSpace *vtd_dev_as;
char name[128];
if (!vtd_bus) {
uintptr_t *new_key = g_malloc(sizeof(*new_key));
*new_key = (uintptr_t)bus;
/* No corresponding free() */
vtd_bus = g_malloc0(sizeof(VTDBus) + sizeof(VTDAddressSpace *) * \
X86_IOMMU_PCI_DEVFN_MAX);
vtd_bus->bus = bus;
key = (uintptr_t)bus;
g_hash_table_insert(s->vtd_as_by_busptr, &key, vtd_bus);
g_hash_table_insert(s->vtd_as_by_busptr, new_key, vtd_bus);
}
vtd_dev_as = vtd_bus->dev_as[devfn];
if (!vtd_dev_as) {
snprintf(name, sizeof(name), "intel_iommu_devfn_%d", devfn);
vtd_bus->dev_as[devfn] = vtd_dev_as = g_malloc0(sizeof(VTDAddressSpace));
vtd_dev_as->bus = bus;
@ -2351,7 +2443,7 @@ VTDAddressSpace *vtd_find_add_as(IntelIOMMUState *s, PCIBus *bus, int devfn)
memory_region_add_subregion(&vtd_dev_as->iommu, VTD_INTERRUPT_ADDR_FIRST,
&vtd_dev_as->iommu_ir);
address_space_init(&vtd_dev_as->as,
&vtd_dev_as->iommu, "intel_iommu");
&vtd_dev_as->iommu, name);
}
return vtd_dev_as;
}
@ -2392,6 +2484,10 @@ static void vtd_init(IntelIOMMUState *s)
assert(s->intr_eim != ON_OFF_AUTO_AUTO);
}
if (x86_iommu->dt_supported) {
s->ecap |= VTD_ECAP_DT;
}
vtd_reset_context_cache(s);
vtd_reset_iotlb(s);


@ -183,6 +183,7 @@
/* (offset >> 4) << 8 */
#define VTD_ECAP_IRO (DMAR_IOTLB_REG_OFFSET << 4)
#define VTD_ECAP_QI (1ULL << 1)
#define VTD_ECAP_DT (1ULL << 2)
/* Interrupt Remapping support */
#define VTD_ECAP_IR (1ULL << 3)
#define VTD_ECAP_EIM (1ULL << 4)
@ -326,6 +327,7 @@ typedef union VTDInvDesc VTDInvDesc;
#define VTD_INV_DESC_TYPE 0xf
#define VTD_INV_DESC_CC 0x1 /* Context-cache Invalidate Desc */
#define VTD_INV_DESC_IOTLB 0x2
#define VTD_INV_DESC_DEVICE 0x3
#define VTD_INV_DESC_IEC 0x4 /* Interrupt Entry Cache
Invalidate Descriptor */
#define VTD_INV_DESC_WAIT 0x5 /* Invalidation Wait Descriptor */
@ -361,6 +363,13 @@ typedef union VTDInvDesc VTDInvDesc;
#define VTD_INV_DESC_IOTLB_RSVD_LO 0xffffffff0000ff00ULL
#define VTD_INV_DESC_IOTLB_RSVD_HI 0xf80ULL
/* Mask for Device IOTLB Invalidate Descriptor */
#define VTD_INV_DESC_DEVICE_IOTLB_ADDR(val) ((val) & 0xfffffffffffff000ULL)
#define VTD_INV_DESC_DEVICE_IOTLB_SIZE(val) ((val) & 0x1)
#define VTD_INV_DESC_DEVICE_IOTLB_SID(val) (((val) >> 32) & 0xFFFFULL)
#define VTD_INV_DESC_DEVICE_IOTLB_RSVD_HI 0xffeULL
#define VTD_INV_DESC_DEVICE_IOTLB_RSVD_LO 0xffff0000ffe0fff8
/* Information about page-selective IOTLB invalidate */
struct VTDIOTLBPageInvInfo {
uint16_t domain_id;
@ -399,8 +408,8 @@ typedef struct VTDRootEntry VTDRootEntry;
#define VTD_CONTEXT_ENTRY_FPD (1ULL << 1) /* Fault Processing Disable */
#define VTD_CONTEXT_ENTRY_TT (3ULL << 2) /* Translation Type */
#define VTD_CONTEXT_TT_MULTI_LEVEL 0
#define VTD_CONTEXT_TT_DEV_IOTLB 1
#define VTD_CONTEXT_TT_PASS_THROUGH 2
#define VTD_CONTEXT_TT_DEV_IOTLB (1ULL << 2)
#define VTD_CONTEXT_TT_PASS_THROUGH (2ULL << 2)
/* Second Level Page Translation Pointer*/
#define VTD_CONTEXT_ENTRY_SLPTPTR (~0xfffULL)
#define VTD_CONTEXT_ENTRY_RSVD_LO (0xff0ULL | ~VTD_HAW_MASK)


@ -106,6 +106,18 @@ static void x86_iommu_intremap_prop_set(Object *o, bool value, Error **errp)
s->intr_supported = value;
}
static bool x86_iommu_device_iotlb_prop_get(Object *o, Error **errp)
{
X86IOMMUState *s = X86_IOMMU_DEVICE(o);
return s->dt_supported;
}
static void x86_iommu_device_iotlb_prop_set(Object *o, bool value, Error **errp)
{
X86IOMMUState *s = X86_IOMMU_DEVICE(o);
s->dt_supported = value;
}
static void x86_iommu_instance_init(Object *o)
{
X86IOMMUState *s = X86_IOMMU_DEVICE(o);
@ -114,6 +126,11 @@ static void x86_iommu_instance_init(Object *o)
s->intr_supported = false;
object_property_add_bool(o, "intremap", x86_iommu_intremap_prop_get,
x86_iommu_intremap_prop_set, NULL);
s->dt_supported = false;
object_property_add_bool(o, "device-iotlb",
x86_iommu_device_iotlb_prop_get,
x86_iommu_device_iotlb_prop_set,
NULL);
}
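With the property registered, device IOTLB support becomes switchable from the command line, e.g. -device intel-iommu,device-iotlb=on (illustrative usage; as initialized above, it defaults to off).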
static const TypeInfo x86_iommu_info = {


@ -472,7 +472,8 @@ static void e1000e_pci_realize(PCIDevice *pci_dev, Error **errp)
hw_error("Failed to initialize PM capability");
}
if (pcie_aer_init(pci_dev, e1000e_aer_offset, PCI_ERR_SIZEOF) < 0) {
if (pcie_aer_init(pci_dev, PCI_ERR_VER, e1000e_aer_offset,
PCI_ERR_SIZEOF, NULL) < 0) {
hw_error("Failed to initialize AER capability");
}


@ -51,6 +51,7 @@ static const int kernel_feature_bits[] = {
VIRTIO_RING_F_EVENT_IDX,
VIRTIO_NET_F_MRG_RXBUF,
VIRTIO_F_VERSION_1,
VIRTIO_NET_F_MTU,
VHOST_INVALID_FEATURE_BIT
};
@ -74,6 +75,7 @@ static const int user_feature_bits[] = {
VIRTIO_NET_F_HOST_ECN,
VIRTIO_NET_F_HOST_UFO,
VIRTIO_NET_F_MRG_RXBUF,
VIRTIO_NET_F_MTU,
/* This bit implies RARP isn't sent by QEMU out of band */
VIRTIO_NET_F_GUEST_ANNOUNCE,
@ -435,6 +437,17 @@ int vhost_set_vring_enable(NetClientState *nc, int enable)
return 0;
}
int vhost_net_set_mtu(struct vhost_net *net, uint16_t mtu)
{
const VhostOps *vhost_ops = net->dev.vhost_ops;
if (!vhost_ops->vhost_net_set_mtu) {
return 0;
}
return vhost_ops->vhost_net_set_mtu(&net->dev, mtu);
}
#else
uint64_t vhost_net_get_max_queues(VHostNetState *net)
{
@ -501,4 +514,9 @@ int vhost_set_vring_enable(NetClientState *nc, int enable)
{
return 0;
}
int vhost_net_set_mtu(struct vhost_net *net, uint16_t mtu)
{
return 0;
}
#endif


@ -55,6 +55,8 @@ static VirtIOFeature feature_sizes[] = {
.end = endof(struct virtio_net_config, status)},
{.flags = 1 << VIRTIO_NET_F_MQ,
.end = endof(struct virtio_net_config, max_virtqueue_pairs)},
{.flags = 1 << VIRTIO_NET_F_MTU,
.end = endof(struct virtio_net_config, mtu)},
{}
};
@ -81,6 +83,7 @@ static void virtio_net_get_config(VirtIODevice *vdev, uint8_t *config)
virtio_stw_p(vdev, &netcfg.status, n->status);
virtio_stw_p(vdev, &netcfg.max_virtqueue_pairs, n->max_queues);
virtio_stw_p(vdev, &netcfg.mtu, n->net_conf.mtu);
memcpy(netcfg.mac, n->mac, ETH_ALEN);
memcpy(config, &netcfg, n->config_size);
}
@ -152,6 +155,16 @@ static void virtio_net_vhost_status(VirtIONet *n, uint8_t status)
qemu_net_queue_purge(qnc->incoming_queue, qnc->peer);
}
if (virtio_has_feature(vdev->guest_features, VIRTIO_NET_F_MTU)) {
r = vhost_net_set_mtu(get_vhost_net(nc->peer), n->net_conf.mtu);
if (r < 0) {
error_report("%uBytes MTU not supported by the backend",
n->net_conf.mtu);
return;
}
}
n->vhost_started = 1;
r = vhost_net_start(vdev, n->nic->ncs, queues);
if (r < 0) {
@ -218,6 +231,14 @@ static void virtio_net_vnet_endian_status(VirtIONet *n, uint8_t status)
}
}
static void virtio_net_drop_tx_queue_data(VirtIODevice *vdev, VirtQueue *vq)
{
unsigned int dropped = virtqueue_drop_all(vq);
if (dropped) {
virtio_notify(vdev, vq);
}
}
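This helper is shared by the link-down paths below: virtio_net_set_status() and both TX handlers call it to discard pending packets and, via virtio_notify(), hand the buffers back to the guest.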
static void virtio_net_set_status(struct VirtIODevice *vdev, uint8_t status)
{
VirtIONet *n = VIRTIO_NET(vdev);
@ -262,6 +283,14 @@ static void virtio_net_set_status(struct VirtIODevice *vdev, uint8_t status)
} else {
qemu_bh_cancel(q->tx_bh);
}
if ((n->status & VIRTIO_NET_S_LINK_UP) == 0 &&
(queue_status & VIRTIO_CONFIG_S_DRIVER_OK)) {
/* if tx is waiting, we likely have some packets in the tx queue
* and disabled notification */
q->tx_waiting = 0;
virtio_queue_set_notification(q->tx_vq, 1);
virtio_net_drop_tx_queue_data(vdev, q->tx_vq);
}
}
}
}
@ -1323,6 +1352,11 @@ static void virtio_net_handle_tx_timer(VirtIODevice *vdev, VirtQueue *vq)
VirtIONet *n = VIRTIO_NET(vdev);
VirtIONetQueue *q = &n->vqs[vq2q(virtio_get_queue_index(vq))];
if (unlikely((n->status & VIRTIO_NET_S_LINK_UP) == 0)) {
virtio_net_drop_tx_queue_data(vdev, vq);
return;
}
/* This happens when device was stopped but VCPU wasn't. */
if (!vdev->vm_running) {
q->tx_waiting = 1;
@ -1349,6 +1383,11 @@ static void virtio_net_handle_tx_bh(VirtIODevice *vdev, VirtQueue *vq)
VirtIONet *n = VIRTIO_NET(vdev);
VirtIONetQueue *q = &n->vqs[vq2q(virtio_get_queue_index(vq))];
if (unlikely((n->status & VIRTIO_NET_S_LINK_UP) == 0)) {
virtio_net_drop_tx_queue_data(vdev, vq);
return;
}
if (unlikely(q->tx_waiting)) {
return;
}
@ -1695,6 +1734,7 @@ static void virtio_net_set_config_size(VirtIONet *n, uint64_t host_features)
{
int i, config_size = 0;
virtio_add_feature(&host_features, VIRTIO_NET_F_MAC);
for (i = 0; feature_sizes[i].flags != 0; i++) {
if (host_features & feature_sizes[i].flags) {
config_size = MAX(feature_sizes[i].end, config_size);
@ -1724,6 +1764,10 @@ static void virtio_net_device_realize(DeviceState *dev, Error **errp)
NetClientState *nc;
int i;
if (n->net_conf.mtu) {
n->host_features |= (0x1 << VIRTIO_NET_F_MTU);
}
virtio_net_set_config_size(n, n->host_features);
virtio_init(vdev, "virtio-net", VIRTIO_ID_NET, n->config_size);
@ -1922,6 +1966,7 @@ static Property virtio_net_properties[] = {
DEFINE_PROP_STRING("tx", VirtIONet, net_conf.tx),
DEFINE_PROP_UINT16("rx_queue_size", VirtIONet, net_conf.rx_queue_size,
VIRTIO_NET_RX_QUEUE_DEFAULT_SIZE),
DEFINE_PROP_UINT16("host_mtu", VirtIONet, net_conf.mtu, 0),
DEFINE_PROP_END_OF_LIST(),
};
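As an illustrative usage, a guest-visible MTU could be advertised with -device virtio-net-pci,host_mtu=9000; the realize hook above sets VIRTIO_NET_F_MTU whenever this property is non-zero.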


@ -135,8 +135,10 @@ static int ioh3420_initfn(PCIDevice *d)
goto err_pcie_cap;
}
rc = pcie_aer_init(d, IOH_EP_AER_OFFSET, PCI_ERR_SIZEOF);
rc = pcie_aer_init(d, PCI_ERR_VER, IOH_EP_AER_OFFSET,
PCI_ERR_SIZEOF, &err);
if (rc < 0) {
error_report_err(err);
goto err;
}
pcie_aer_root_init(d);


@ -97,8 +97,10 @@ static int xio3130_downstream_initfn(PCIDevice *d)
goto err_pcie_cap;
}
rc = pcie_aer_init(d, XIO3130_AER_OFFSET, PCI_ERR_SIZEOF);
rc = pcie_aer_init(d, PCI_ERR_VER, XIO3130_AER_OFFSET,
PCI_ERR_SIZEOF, &err);
if (rc < 0) {
error_report_err(err);
goto err;
}


@ -85,8 +85,10 @@ static int xio3130_upstream_initfn(PCIDevice *d)
pcie_cap_flr_init(d);
pcie_cap_deverr_init(d);
rc = pcie_aer_init(d, XIO3130_AER_OFFSET, PCI_ERR_SIZEOF);
rc = pcie_aer_init(d, PCI_ERR_VER, XIO3130_AER_OFFSET,
PCI_ERR_SIZEOF, &err);
if (rc < 0) {
error_report_err(err);
goto err;
}


@ -717,3 +717,18 @@ void pcie_dev_ser_num_init(PCIDevice *dev, uint16_t offset, uint64_t ser_num)
PCI_EXT_CAP_DSN_SIZEOF);
pci_set_quad(dev->config + offset + pci_dsn_cap, ser_num);
}
void pcie_ats_init(PCIDevice *dev, uint16_t offset)
{
pcie_add_capability(dev, PCI_EXT_CAP_ID_ATS, 0x1,
offset, PCI_EXT_CAP_ATS_SIZEOF);
dev->exp.ats_cap = offset;
/* Invalidate Queue Depth 0, Page Aligned Request 0 */
pci_set_word(dev->config + offset + PCI_ATS_CAP, 0);
/* STU 0, Disabled by default */
pci_set_word(dev->config + offset + PCI_ATS_CTRL, 0);
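/* wmask 0x800f: the Enable bit (bit 15) and the low bits of the STU
 * field (bits 3:0) are guest-writable; the rest stays read-only. */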
pci_set_word(dev->wmask + dev->exp.ats_cap + PCI_ATS_CTRL, 0x800f);
}


@ -29,6 +29,7 @@
#include "hw/pci/msi.h"
#include "hw/pci/pci_bus.h"
#include "hw/pci/pcie_regs.h"
#include "qapi/error.h"
//#define DEBUG_PCIE
#ifdef DEBUG_PCIE
@ -96,21 +97,17 @@ static void aer_log_clear_all_err(PCIEAERLog *aer_log)
aer_log->log_num = 0;
}
int pcie_aer_init(PCIDevice *dev, uint16_t offset, uint16_t size)
int pcie_aer_init(PCIDevice *dev, uint8_t cap_ver, uint16_t offset,
uint16_t size, Error **errp)
{
PCIExpressDevice *exp;
pcie_add_capability(dev, PCI_EXT_CAP_ID_ERR, PCI_ERR_VER,
pcie_add_capability(dev, PCI_EXT_CAP_ID_ERR, cap_ver,
offset, size);
exp = &dev->exp;
exp->aer_cap = offset;
dev->exp.aer_cap = offset;
/* log_max is property */
if (dev->exp.aer_log.log_max == PCIE_AER_LOG_MAX_UNSET) {
dev->exp.aer_log.log_max = PCIE_AER_LOG_MAX_DEFAULT;
}
/* clip down the value to avoid unreasobale memory usage */
/* clip down the value to avoid unreasonable memory usage */
if (dev->exp.aer_log.log_max > PCIE_AER_LOG_MAX_LIMIT) {
error_setg(errp, "Invalid aer_log_max %d. The max number of aer log "
"is %d", dev->exp.aer_log.log_max, PCIE_AER_LOG_MAX_LIMIT);
return -EINVAL;
}
dev->exp.aer_log.log = g_malloc0(sizeof dev->exp.aer_log.log[0] *


@ -198,12 +198,14 @@ static void *virtio_scsi_load_request(QEMUFile *f, SCSIRequest *sreq)
SCSIBus *bus = sreq->bus;
VirtIOSCSI *s = container_of(bus, VirtIOSCSI, bus);
VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(s);
VirtIODevice *vdev = VIRTIO_DEVICE(s);
VirtIOSCSIReq *req;
uint32_t n;
qemu_get_be32s(f, &n);
assert(n < vs->conf.num_queues);
req = qemu_get_virtqueue_element(f, sizeof(VirtIOSCSIReq) + vs->cdb_size);
req = qemu_get_virtqueue_element(vdev, f,
sizeof(VirtIOSCSIReq) + vs->cdb_size);
virtio_scsi_init_req(s, vs->cmd_vqs[n], req);
if (virtio_scsi_parse_req(req, sizeof(VirtIOSCSICmdReq) + vs->cdb_size,


@ -15,6 +15,8 @@ virtio_rng_pushed(void *rng, size_t len) "rng %p: %zd bytes pushed"
virtio_rng_request(void *rng, size_t size, unsigned quota) "rng %p: %zd bytes requested, %u bytes quota left"
# hw/virtio/virtio-balloon.c
#
virtio_balloon_bad_addr(uint64_t gpa) "%"PRIx64
virtio_balloon_handle_output(const char *name, uint64_t gpa) "section name: %s gpa: %"PRIx64
virtio_balloon_get_config(uint32_t num_pages, uint32_t actual) "num_pages: %d actual: %d"
virtio_balloon_set_config(uint32_t actual, uint32_t oldactual) "actual: %d oldactual: %d"


@ -32,6 +32,7 @@ enum VhostUserProtocolFeature {
VHOST_USER_PROTOCOL_F_LOG_SHMFD = 1,
VHOST_USER_PROTOCOL_F_RARP = 2,
VHOST_USER_PROTOCOL_F_REPLY_ACK = 3,
VHOST_USER_PROTOCOL_F_NET_MTU = 4,
VHOST_USER_PROTOCOL_F_MAX
};
@ -59,6 +60,7 @@ typedef enum VhostUserRequest {
VHOST_USER_GET_QUEUE_NUM = 17,
VHOST_USER_SET_VRING_ENABLE = 18,
VHOST_USER_SEND_RARP = 19,
VHOST_USER_NET_SET_MTU = 20,
VHOST_USER_MAX
} VhostUserRequest;
@ -186,6 +188,7 @@ static bool vhost_user_one_time_request(VhostUserRequest request)
case VHOST_USER_RESET_OWNER:
case VHOST_USER_SET_MEM_TABLE:
case VHOST_USER_GET_QUEUE_NUM:
case VHOST_USER_NET_SET_MTU:
return true;
default:
return false;
@ -685,6 +688,36 @@ static bool vhost_user_can_merge(struct vhost_dev *dev,
return mfd == rfd;
}
static int vhost_user_net_set_mtu(struct vhost_dev *dev, uint16_t mtu)
{
VhostUserMsg msg;
bool reply_supported = virtio_has_feature(dev->protocol_features,
VHOST_USER_PROTOCOL_F_REPLY_ACK);
if (!(dev->protocol_features & (1ULL << VHOST_USER_PROTOCOL_F_NET_MTU))) {
return 0;
}
msg.request = VHOST_USER_NET_SET_MTU;
msg.payload.u64 = mtu;
msg.size = sizeof(msg.payload.u64);
msg.flags = VHOST_USER_VERSION;
if (reply_supported) {
msg.flags |= VHOST_USER_NEED_REPLY_MASK;
}
if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
return -1;
}
/* If reply_ack is supported, the slave has to ack that the specified MTU is valid */
if (reply_supported) {
return process_message_reply(dev, msg.request);
}
return 0;
}
const VhostOps user_ops = {
.backend_type = VHOST_BACKEND_TYPE_USER,
.vhost_backend_init = vhost_user_init,
@ -708,4 +741,5 @@ const VhostOps user_ops = {
.vhost_requires_shm_log = vhost_user_requires_shm_log,
.vhost_migration_done = vhost_user_migration_done,
.vhost_backend_can_merge = vhost_user_can_merge,
.vhost_net_set_mtu = vhost_user_net_set_mtu,
};
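Callers reach this op through the generic vhost_net_set_mtu() wrapper (declared in net/vhost_net.h later in this diff). A hedged sketch of a NIC propagating its host MTU, where my_set_backend_mtu() is a made-up helper:

    /* Hypothetical helper: "nc" is the NIC's NetClientState. */
    static int my_set_backend_mtu(NetClientState *nc, uint16_t mtu)
    {
        VHostNetState *net = get_vhost_net(nc->peer);

        if (!net) {
            return 0;   /* no vhost backend attached */
        }
        /* vhost_user_net_set_mtu() above returns 0 when the backend lacks
         * VHOST_USER_PROTOCOL_F_NET_MTU, so no extra feature check here. */
        return vhost_net_set_mtu(net, mtu);
    }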

View file

@ -993,6 +993,7 @@ static void vhost_virtqueue_stop(struct vhost_dev *dev,
virtio_queue_set_last_avail_idx(vdev, idx, state.num);
}
virtio_queue_invalidate_signalled_used(vdev, idx);
virtio_queue_update_used_idx(vdev, idx);
/* In the cross-endian case, we need to reset the vring endianness to
* native as legacy devices expect so by default.

View file

@ -228,8 +228,13 @@ static void virtio_balloon_handle_output(VirtIODevice *vdev, VirtQueue *vq)
/* FIXME: remove get_system_memory(), but how? */
section = memory_region_find(get_system_memory(), pa, 1);
if (!int128_nz(section.size) || !memory_region_is_ram(section.mr))
if (!int128_nz(section.size) ||
!memory_region_is_ram(section.mr) ||
memory_region_is_rom(section.mr) ||
memory_region_is_romd(section.mr)) {
trace_virtio_balloon_bad_addr(pa);
continue;
}
trace_virtio_balloon_handle_output(memory_region_name(section.mr),
pa);

View file

@ -28,6 +28,7 @@
#include "hw/qdev.h"
#include "hw/virtio/virtio-bus.h"
#include "hw/virtio/virtio.h"
#include "exec/address-spaces.h"
/* #define DEBUG_VIRTIO_BUS */
@ -61,6 +62,13 @@ void virtio_bus_device_plugged(VirtIODevice *vdev, Error **errp)
if (klass->device_plugged != NULL) {
klass->device_plugged(qbus->parent, errp);
}
if (klass->get_dma_as != NULL &&
virtio_host_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM)) {
vdev->dma_as = klass->get_dma_as(qbus->parent);
} else {
vdev->dma_as = &address_space_memory;
}
}
/* Reset the virtio_bus */

View file

@ -31,6 +31,11 @@ static void virtio_crypto_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
VirtIOCryptoPCI *vcrypto = VIRTIO_CRYPTO_PCI(vpci_dev);
DeviceState *vdev = DEVICE(&vcrypto->vdev);
if (vcrypto->vdev.conf.cryptodev == NULL) {
error_setg(errp, "'cryptodev' parameter expects a valid object");
return;
}
qdev_set_parent_bus(vdev, BUS(&vpci_dev->bus));
virtio_pci_force_virtio_1(vpci_dev);
object_property_set_bool(OBJECT(vdev), true, "realized", errp);
@ -48,7 +53,6 @@ static void virtio_crypto_pci_class_init(ObjectClass *klass, void *data)
k->realize = virtio_crypto_pci_realize;
set_bit(DEVICE_CATEGORY_MISC, dc->categories);
dc->props = virtio_crypto_pci_properties;
dc->hotpluggable = false;
pcidev_k->class_id = PCI_CLASS_OTHERS;
}

View file

@ -337,7 +337,18 @@ static void virtio_crypto_free_request(VirtIOCryptoReq *req)
{
if (req) {
if (req->flags == CRYPTODEV_BACKEND_ALG_SYM) {
g_free(req->u.sym_op_info);
size_t max_len;
CryptoDevBackendSymOpInfo *op_info = req->u.sym_op_info;
max_len = op_info->iv_len +
op_info->aad_len +
op_info->src_len +
op_info->dst_len +
op_info->digest_result_len;
/* Zeroize and free request data structure */
memset(op_info, 0, sizeof(*op_info) + max_len);
g_free(op_info);
}
g_free(req);
}
@ -355,7 +366,7 @@ virtio_crypto_sym_input_data_helper(VirtIODevice *vdev,
return;
}
len = sym_op_info->dst_len;
len = sym_op_info->src_len;
/* Save the cipher result */
s = iov_from_buf(req->in_iov, req->in_num, 0, sym_op_info->dst, len);
if (s != len) {
@ -416,7 +427,7 @@ virtio_crypto_sym_op_helper(VirtIODevice *vdev,
uint32_t hash_start_src_offset = 0, len_to_hash = 0;
uint32_t cipher_start_src_offset = 0, len_to_cipher = 0;
size_t max_len, curr_size = 0;
uint64_t max_len, curr_size = 0;
size_t s;
/* Plain cipher */
@ -441,7 +452,7 @@ virtio_crypto_sym_op_helper(VirtIODevice *vdev,
return NULL;
}
max_len = iv_len + aad_len + src_len + dst_len + hash_result_len;
max_len = (uint64_t)iv_len + aad_len + src_len + dst_len + hash_result_len;
if (unlikely(max_len > vcrypto->conf.max_size)) {
virtio_error(vdev, "virtio-crypto too big length");
return NULL;
@ -732,7 +743,7 @@ static void virtio_crypto_reset(VirtIODevice *vdev)
VirtIOCrypto *vcrypto = VIRTIO_CRYPTO(vdev);
/* multiqueue is disabled by default */
vcrypto->curr_queues = 1;
if (!vcrypto->cryptodev->ready) {
if (!cryptodev_backend_is_ready(vcrypto->cryptodev)) {
vcrypto->status &= ~VIRTIO_CRYPTO_S_HW_READY;
} else {
vcrypto->status |= VIRTIO_CRYPTO_S_HW_READY;
@ -792,13 +803,14 @@ static void virtio_crypto_device_realize(DeviceState *dev, Error **errp)
}
vcrypto->ctrl_vq = virtio_add_queue(vdev, 64, virtio_crypto_handle_ctrl);
if (!vcrypto->cryptodev->ready) {
if (!cryptodev_backend_is_ready(vcrypto->cryptodev)) {
vcrypto->status &= ~VIRTIO_CRYPTO_S_HW_READY;
} else {
vcrypto->status |= VIRTIO_CRYPTO_S_HW_READY;
}
virtio_crypto_init_config(vdev);
cryptodev_backend_set_used(vcrypto->cryptodev, true);
}
static void virtio_crypto_device_unrealize(DeviceState *dev, Error **errp)
@ -818,6 +830,7 @@ static void virtio_crypto_device_unrealize(DeviceState *dev, Error **errp)
g_free(vcrypto->vqs);
virtio_cleanup(vdev);
cryptodev_backend_set_used(vcrypto->cryptodev, false);
}
static const VMStateDescription vmstate_virtio_crypto = {
@ -875,6 +888,20 @@ static void virtio_crypto_class_init(ObjectClass *klass, void *data)
vdc->reset = virtio_crypto_reset;
}
static void
virtio_crypto_check_cryptodev_is_used(Object *obj, const char *name,
Object *val, Error **errp)
{
if (cryptodev_backend_is_used(CRYPTODEV_BACKEND(val))) {
char *path = object_get_canonical_path_component(val);
error_setg(errp,
"can't use already used cryptodev backend: %s", path);
g_free(path);
} else {
qdev_prop_allow_set_link_before_realize(obj, name, val, errp);
}
}
static void virtio_crypto_instance_init(Object *obj)
{
VirtIOCrypto *vcrypto = VIRTIO_CRYPTO(obj);
@ -888,7 +915,7 @@ static void virtio_crypto_instance_init(Object *obj)
object_property_add_link(obj, "cryptodev",
TYPE_CRYPTODEV_BACKEND,
(Object **)&vcrypto->conf.cryptodev,
qdev_prop_allow_set_link_before_realize,
virtio_crypto_check_cryptodev_is_used,
OBJ_PROP_LINK_UNREF_ON_RELEASE, NULL);
}

View file

@ -1144,6 +1144,14 @@ static int virtio_pci_query_nvectors(DeviceState *d)
return proxy->nvectors;
}
static AddressSpace *virtio_pci_get_dma_as(DeviceState *d)
{
VirtIOPCIProxy *proxy = VIRTIO_PCI(d);
PCIDevice *dev = &proxy->pci_dev;
return pci_get_address_space(dev);
}
static int virtio_pci_add_mem_cap(VirtIOPCIProxy *proxy,
struct virtio_pci_cap *cap)
{
@ -1601,6 +1609,11 @@ static void virtio_pci_device_plugged(DeviceState *d, Error **errp)
}
if (legacy) {
if (virtio_host_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM)) {
error_setg(errp, "VIRTIO_F_IOMMU_PLATFORM was supported by"
"neither legacy nor transitional device.");
return ;
}
/* legacy and transitional */
pci_set_word(config + PCI_SUBSYSTEM_VENDOR_ID,
pci_get_word(config + PCI_VENDOR_ID));
@ -1802,6 +1815,11 @@ static void virtio_pci_realize(PCIDevice *pci_dev, Error **errp)
* PCI Power Management Interface Specification.
*/
pci_set_word(pci_dev->config + pos + PCI_PM_PMC, 0x3);
if (proxy->flags & VIRTIO_PCI_FLAG_ATS) {
pcie_ats_init(pci_dev, 256);
}
} else {
/*
* make future invocations of pci_is_express() return false
@ -1855,6 +1873,8 @@ static Property virtio_pci_properties[] = {
VIRTIO_PCI_FLAG_PAGE_PER_VQ_BIT, false),
DEFINE_PROP_BOOL("x-ignore-backend-features", VirtIOPCIProxy,
ignore_backend_features, false),
DEFINE_PROP_BIT("ats", VirtIOPCIProxy, flags,
VIRTIO_PCI_FLAG_ATS_BIT, false),
DEFINE_PROP_END_OF_LIST(),
};
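Together with the iommu_platform feature bit added to virtio.h below, this property should allow ATS to be switched on per device. An illustrative command line (the machine type, the intel-iommu device-iotlb option from elsewhere in this series, and the disable-legacy setting are examples, not requirements):

    qemu-system-x86_64 -M q35 \
        -device intel-iommu,device-iotlb=on \
        -device virtio-net-pci,ats=on,iommu_platform=on,disable-legacy=on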
@ -2520,6 +2540,7 @@ static void virtio_pci_bus_class_init(ObjectClass *klass, void *data)
k->query_nvectors = virtio_pci_query_nvectors;
k->ioeventfd_enabled = virtio_pci_ioeventfd_enabled;
k->ioeventfd_assign = virtio_pci_ioeventfd_assign;
k->get_dma_as = virtio_pci_get_dma_as;
}
static const TypeInfo virtio_pci_bus_info = {

View file

@ -72,6 +72,7 @@ enum {
VIRTIO_PCI_FLAG_MODERN_PIO_NOTIFY_BIT,
VIRTIO_PCI_FLAG_DISABLE_PCIE_BIT,
VIRTIO_PCI_FLAG_PAGE_PER_VQ_BIT,
VIRTIO_PCI_FLAG_ATS_BIT,
};
/* Need to activate work-arounds for buggy guests at vmstate load. */
@ -96,6 +97,9 @@ enum {
#define VIRTIO_PCI_FLAG_PAGE_PER_VQ \
(1 << VIRTIO_PCI_FLAG_PAGE_PER_VQ_BIT)
/* address space translation service */
#define VIRTIO_PCI_FLAG_ATS (1 << VIRTIO_PCI_FLAG_ATS_BIT)
typedef struct {
MSIMessage msg;
int virq;

View file

@ -23,6 +23,7 @@
#include "hw/virtio/virtio-bus.h"
#include "migration/migration.h"
#include "hw/virtio/virtio-access.h"
#include "sysemu/dma.h"
/*
* The alignment to use between consumer and producer parts of vring.
@ -92,7 +93,7 @@ struct VirtQueue
uint16_t queue_index;
int inuse;
unsigned int inuse;
uint16_t vector;
VirtIOHandleOutput handle_output;
@ -121,7 +122,7 @@ void virtio_queue_update_rings(VirtIODevice *vdev, int n)
static void vring_desc_read(VirtIODevice *vdev, VRingDesc *desc,
hwaddr desc_pa, int i)
{
address_space_read(&address_space_memory, desc_pa + i * sizeof(VRingDesc),
address_space_read(vdev->dma_as, desc_pa + i * sizeof(VRingDesc),
MEMTXATTRS_UNSPECIFIED, (void *)desc, sizeof(VRingDesc));
virtio_tswap64s(vdev, &desc->addr);
virtio_tswap32s(vdev, &desc->len);
@ -163,7 +164,7 @@ static inline void vring_used_write(VirtQueue *vq, VRingUsedElem *uelem,
virtio_tswap32s(vq->vdev, &uelem->id);
virtio_tswap32s(vq->vdev, &uelem->len);
pa = vq->vring.used + offsetof(VRingUsed, ring[i]);
address_space_write(&address_space_memory, pa, MEMTXATTRS_UNSPECIFIED,
address_space_write(vq->vdev->dma_as, pa, MEMTXATTRS_UNSPECIFIED,
(void *)uelem, sizeof(VRingUsedElem));
}
@ -249,6 +250,7 @@ int virtio_queue_empty(VirtQueue *vq)
static void virtqueue_unmap_sg(VirtQueue *vq, const VirtQueueElement *elem,
unsigned int len)
{
AddressSpace *dma_as = vq->vdev->dma_as;
unsigned int offset;
int i;
@ -256,17 +258,18 @@ static void virtqueue_unmap_sg(VirtQueue *vq, const VirtQueueElement *elem,
for (i = 0; i < elem->in_num; i++) {
size_t size = MIN(len - offset, elem->in_sg[i].iov_len);
cpu_physical_memory_unmap(elem->in_sg[i].iov_base,
elem->in_sg[i].iov_len,
1, size);
dma_memory_unmap(dma_as, elem->in_sg[i].iov_base,
elem->in_sg[i].iov_len,
DMA_DIRECTION_FROM_DEVICE, size);
offset += size;
}
for (i = 0; i < elem->out_num; i++)
cpu_physical_memory_unmap(elem->out_sg[i].iov_base,
elem->out_sg[i].iov_len,
0, elem->out_sg[i].iov_len);
dma_memory_unmap(dma_as, elem->out_sg[i].iov_base,
elem->out_sg[i].iov_len,
DMA_DIRECTION_TO_DEVICE,
elem->out_sg[i].iov_len);
}
/* virtqueue_detach_element:
@ -560,7 +563,10 @@ static bool virtqueue_map_desc(VirtIODevice *vdev, unsigned int *p_num_sg,
goto out;
}
iov[num_sg].iov_base = cpu_physical_memory_map(pa, &len, is_write);
iov[num_sg].iov_base = dma_memory_map(vdev->dma_as, pa, &len,
is_write ?
DMA_DIRECTION_FROM_DEVICE :
DMA_DIRECTION_TO_DEVICE);
if (!iov[num_sg].iov_base) {
virtio_error(vdev, "virtio: bogus descriptor or out of resources");
goto out;
@ -597,9 +603,9 @@ static void virtqueue_undo_map_desc(unsigned int out_num, unsigned int in_num,
}
}
static void virtqueue_map_iovec(struct iovec *sg, hwaddr *addr,
unsigned int *num_sg, unsigned int max_size,
int is_write)
static void virtqueue_map_iovec(VirtIODevice *vdev, struct iovec *sg,
hwaddr *addr, unsigned int *num_sg,
unsigned int max_size, int is_write)
{
unsigned int i;
hwaddr len;
@ -618,7 +624,10 @@ static void virtqueue_map_iovec(struct iovec *sg, hwaddr *addr,
for (i = 0; i < *num_sg; i++) {
len = sg[i].iov_len;
sg[i].iov_base = cpu_physical_memory_map(addr[i], &len, is_write);
sg[i].iov_base = dma_memory_map(vdev->dma_as,
addr[i], &len, is_write ?
DMA_DIRECTION_FROM_DEVICE :
DMA_DIRECTION_TO_DEVICE);
if (!sg[i].iov_base) {
error_report("virtio: error trying to map MMIO memory");
exit(1);
@ -630,12 +639,15 @@ static void virtqueue_map_iovec(struct iovec *sg, hwaddr *addr,
}
}
void virtqueue_map(VirtQueueElement *elem)
void virtqueue_map(VirtIODevice *vdev, VirtQueueElement *elem)
{
virtqueue_map_iovec(elem->in_sg, elem->in_addr, &elem->in_num,
VIRTQUEUE_MAX_SIZE, 1);
virtqueue_map_iovec(elem->out_sg, elem->out_addr, &elem->out_num,
VIRTQUEUE_MAX_SIZE, 0);
virtqueue_map_iovec(vdev, elem->in_sg, elem->in_addr, &elem->in_num,
MIN(ARRAY_SIZE(elem->in_sg), ARRAY_SIZE(elem->in_addr)),
1);
virtqueue_map_iovec(vdev, elem->out_sg, elem->out_addr, &elem->out_num,
MIN(ARRAY_SIZE(elem->out_sg),
ARRAY_SIZE(elem->out_addr)),
0);
}
static void *virtqueue_alloc_element(size_t sz, unsigned out_num, unsigned in_num)
@ -771,6 +783,44 @@ err_undo_map:
return NULL;
}
/* virtqueue_drop_all:
* @vq: The #VirtQueue
* Drops all queued buffers and indicates them to the guest
* as if they were done. Useful when buffers cannot be
* processed but must be returned to the guest.
*/
unsigned int virtqueue_drop_all(VirtQueue *vq)
{
unsigned int dropped = 0;
VirtQueueElement elem = {};
VirtIODevice *vdev = vq->vdev;
bool fEventIdx = virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);
if (unlikely(vdev->broken)) {
return 0;
}
while (!virtio_queue_empty(vq) && vq->inuse < vq->vring.num) {
/* works similarly to virtqueue_pop() but does not map buffers
* and does not allocate any memory */
smp_rmb();
if (!virtqueue_get_head(vq, vq->last_avail_idx, &elem.index)) {
break;
}
vq->inuse++;
vq->last_avail_idx++;
if (fEventIdx) {
vring_set_avail_event(vq, vq->last_avail_idx);
}
/* immediately push the element, nothing to unmap
* as both in_num and out_num are set to 0 */
virtqueue_push(vq, &elem, 0);
dropped++;
}
return dropped;
}
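Elsewhere in this series ("net: virtio-net discards TX data after link down") this helper flushes a TX queue once the link drops. A condensed sketch of that pattern, with my_drop_tx_on_link_down() as a stand-in name:

    /* Drop pending TX buffers when the guest-visible link goes down so the
     * guest is not left waiting for completions that will never arrive. */
    static void my_drop_tx_on_link_down(VirtIODevice *vdev, VirtQueue *tx_vq,
                                        uint16_t link_status)
    {
        if (!(link_status & VIRTIO_NET_S_LINK_UP)) {
            if (virtqueue_drop_all(tx_vq)) {
                virtio_notify(vdev, tx_vq);
            }
        }
    }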
/* Reading and writing a structure directly to QEMUFile is *awful*, but
* it is what QEMU has always done by mistake. We can change it sooner
* or later by bumping the version number of the affected vm states.
@ -788,7 +838,7 @@ typedef struct VirtQueueElementOld {
struct iovec out_sg[VIRTQUEUE_MAX_SIZE];
} VirtQueueElementOld;
void *qemu_get_virtqueue_element(QEMUFile *f, size_t sz)
void *qemu_get_virtqueue_element(VirtIODevice *vdev, QEMUFile *f, size_t sz)
{
VirtQueueElement *elem;
VirtQueueElementOld data;
@ -819,7 +869,7 @@ void *qemu_get_virtqueue_element(QEMUFile *f, size_t sz)
elem->out_sg[i].iov_len = data.out_sg[i].iov_len;
}
virtqueue_map(elem);
virtqueue_map(vdev, elem);
return elem;
}
@ -878,6 +928,11 @@ static int virtio_validate_features(VirtIODevice *vdev)
{
VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
if (virtio_host_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM) &&
!virtio_vdev_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM)) {
return -EFAULT;
}
if (k->validate_features) {
return k->validate_features(vdev);
} else {
@ -1861,9 +1916,11 @@ int virtio_load(VirtIODevice *vdev, QEMUFile *f, int version_id)
/*
* Some devices migrate VirtQueueElements that have been popped
* from the avail ring but not yet returned to the used ring.
* Since max ring size < UINT16_MAX it's safe to use modulo
* UINT16_MAX + 1 subtraction.
*/
vdev->vq[i].inuse = vdev->vq[i].last_avail_idx -
vdev->vq[i].used_idx;
vdev->vq[i].inuse = (uint16_t)(vdev->vq[i].last_avail_idx -
vdev->vq[i].used_idx);
if (vdev->vq[i].inuse > vdev->vq[i].vring.num) {
error_report("VQ %d size 0x%x < last_avail_idx 0x%x - "
"used_idx 0x%x",
@ -2001,6 +2058,11 @@ void virtio_queue_set_last_avail_idx(VirtIODevice *vdev, int n, uint16_t idx)
vdev->vq[n].shadow_avail_idx = idx;
}
void virtio_queue_update_used_idx(VirtIODevice *vdev, int n)
{
vdev->vq[n].used_idx = vring_used_idx(&vdev->vq[n]);
}
void virtio_queue_invalidate_signalled_used(VirtIODevice *vdev, int n)
{
vdev->vq[n].signalled_used_valid = false;

View file

@ -628,6 +628,9 @@ static inline bool memory_region_is_romd(MemoryRegion *mr)
*/
static inline bool memory_region_is_iommu(MemoryRegion *mr)
{
if (mr->alias) {
return memory_region_is_iommu(mr->alias);
}
return mr->iommu_ops;
}
@ -1537,6 +1540,11 @@ void stl_le_phys_cached(MemoryRegionCache *cache, hwaddr addr, uint32_t val);
void stl_be_phys_cached(MemoryRegionCache *cache, hwaddr addr, uint32_t val);
void stq_le_phys_cached(MemoryRegionCache *cache, hwaddr addr, uint64_t val);
void stq_be_phys_cached(MemoryRegionCache *cache, hwaddr addr, uint64_t val);
/* address_space_get_iotlb_entry: translate an address into an IOTLB
* entry. Should be called from an RCU critical section.
*/
IOMMUTLBEntry address_space_get_iotlb_entry(AddressSpace *as, hwaddr addr,
bool is_write);
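The "RCU critical section" requirement means callers bracket the lookup with rcu_read_lock()/rcu_read_unlock(). A minimal sketch, with my_translate() and its arguments as placeholders:

    /* Translate one guest address; returns false if no valid mapping. */
    static bool my_translate(AddressSpace *as, hwaddr gpa, hwaddr *xlat)
    {
        IOMMUTLBEntry entry;

        rcu_read_lock();
        entry = address_space_get_iotlb_entry(as, gpa, true /* is_write */);
        rcu_read_unlock();

        if (entry.perm == IOMMU_NONE) {
            return false;
        }
        *xlat = entry.translated_addr | (gpa & entry.addr_mask);
        return true;
    }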
/* address_space_translate: translate an address range into an address space
* into a MemoryRegion and an address range into that section. Should be

View file

@ -627,8 +627,20 @@ struct AcpiDmarHardwareUnit {
} QEMU_PACKED;
typedef struct AcpiDmarHardwareUnit AcpiDmarHardwareUnit;
/* Type 2: Root Port ATS Capability Reporting Structure */
struct AcpiDmarRootPortATS {
uint16_t type;
uint16_t length;
uint8_t flags;
uint8_t reserved;
uint16_t pci_segment;
AcpiDmarDeviceScope scope[0];
} QEMU_PACKED;
typedef struct AcpiDmarRootPortATS AcpiDmarRootPortATS;
/* Masks for Flags field above */
#define ACPI_DMAR_INCLUDE_PCI_ALL 1
#define ACPI_DMAR_ATSR_ALL_PORTS 1
/*
* Input Output Remapping Table (IORT)

View file

@ -30,7 +30,7 @@ typedef struct MemHotplugState {
} MemHotplugState;
void acpi_memory_hotplug_init(MemoryRegion *as, Object *owner,
MemHotplugState *state);
MemHotplugState *state, uint16_t io_base);
void acpi_memory_plug_cb(HotplugHandler *hotplug_dev, MemHotplugState *mem_st,
DeviceState *dev, Error **errp);
@ -47,11 +47,7 @@ extern const VMStateDescription vmstate_memory_hotplug;
void acpi_memory_ospm_status(MemHotplugState *mem_st, ACPIOSTInfoList ***list);
#define MEMORY_HOTPLUG_DEVICE "MHPD"
#define MEMORY_SLOT_SCAN_METHOD "MSCN"
#define MEMORY_HOTPLUG_HANDLER_PATH "\\_SB.PCI0." \
MEMORY_HOTPLUG_DEVICE "." MEMORY_SLOT_SCAN_METHOD
void build_memory_hotplug_aml(Aml *ctx, uint32_t nr_mem,
uint16_t io_base, uint16_t io_len);
void build_memory_hotplug_aml(Aml *table, uint32_t nr_mem,
const char *res_root,
const char *event_handler_method);
#endif

View file

@ -29,29 +29,6 @@
#define PIIX4_CPU_HOTPLUG_IO_BASE 0xaf00
#define CPU_HOTPLUG_RESOURCE_DEVICE PRES
#define ACPI_MEMORY_HOTPLUG_IO_LEN 24
#define ACPI_MEMORY_HOTPLUG_BASE 0x0a00
#define MEMORY_SLOTS_NUMBER "MDNR"
#define MEMORY_HOTPLUG_IO_REGION "HPMR"
#define MEMORY_SLOT_ADDR_LOW "MRBL"
#define MEMORY_SLOT_ADDR_HIGH "MRBH"
#define MEMORY_SLOT_SIZE_LOW "MRLL"
#define MEMORY_SLOT_SIZE_HIGH "MRLH"
#define MEMORY_SLOT_PROXIMITY "MPX"
#define MEMORY_SLOT_ENABLED "MES"
#define MEMORY_SLOT_INSERT_EVENT "MINS"
#define MEMORY_SLOT_REMOVE_EVENT "MRMV"
#define MEMORY_SLOT_EJECT "MEJ"
#define MEMORY_SLOT_SLECTOR "MSEL"
#define MEMORY_SLOT_OST_EVENT "MOEV"
#define MEMORY_SLOT_OST_STATUS "MOSC"
#define MEMORY_SLOT_LOCK "MLCK"
#define MEMORY_SLOT_STATUS_METHOD "MRST"
#define MEMORY_SLOT_CRS_METHOD "MCRS"
#define MEMORY_SLOT_OST_METHOD "MOST"
#define MEMORY_SLOT_PROXIMITY_METHOD "MPXM"
#define MEMORY_SLOT_EJECT_METHOD "MEJ0"
#define MEMORY_SLOT_NOTIFY_METHOD "MTFY"
#endif

View file

@ -73,6 +73,7 @@ typedef struct IEC_Notifier IEC_Notifier;
struct X86IOMMUState {
SysBusDevice busdev;
bool intr_supported; /* Whether vIOMMU supports IR */
bool dt_supported; /* Whether vIOMMU supports DT */
IommuType type; /* IOMMU type - AMD/Intel */
QLIST_HEAD(, IEC_Notifier) iec_notifiers; /* IEC notify list */
};

View file

@ -74,6 +74,9 @@ struct PCIExpressDevice {
/* AER */
uint16_t aer_cap;
PCIEAERLog aer_log;
/* Offset of ATS capability in config space */
uint16_t ats_cap;
};
#define COMPAT_PROP_PCP "power_controller_present"
@ -120,6 +123,7 @@ void pcie_add_capability(PCIDevice *dev,
void pcie_ari_init(PCIDevice *dev, uint16_t offset, uint16_t nextfn);
void pcie_dev_ser_num_init(PCIDevice *dev, uint16_t offset, uint64_t ser_num);
void pcie_ats_init(PCIDevice *dev, uint16_t offset);
extern const VMStateDescription vmstate_pcie_device;

View file

@ -44,7 +44,6 @@ struct PCIEAERLog {
*/
#define PCIE_AER_LOG_MAX_DEFAULT 8
#define PCIE_AER_LOG_MAX_LIMIT 128
#define PCIE_AER_LOG_MAX_UNSET 0xffff
uint16_t log_max;
/* Error log. log_max-sized array */
@ -87,7 +86,8 @@ struct PCIEAERErr {
extern const VMStateDescription vmstate_pcie_aer_log;
int pcie_aer_init(PCIDevice *dev, uint16_t offset, uint16_t size);
int pcie_aer_init(PCIDevice *dev, uint8_t cap_ver, uint16_t offset,
uint16_t size, Error **errp);
void pcie_aer_exit(PCIDevice *dev);
void pcie_aer_write_config(PCIDevice *dev,
uint32_t addr, uint32_t val, int len);

View file

@ -32,6 +32,7 @@ typedef int (*vhost_backend_memslots_limit)(struct vhost_dev *dev);
typedef int (*vhost_net_set_backend_op)(struct vhost_dev *dev,
struct vhost_vring_file *file);
typedef int (*vhost_net_set_mtu_op)(struct vhost_dev *dev, uint16_t mtu);
typedef int (*vhost_scsi_set_endpoint_op)(struct vhost_dev *dev,
struct vhost_scsi_target *target);
typedef int (*vhost_scsi_clear_endpoint_op)(struct vhost_dev *dev,
@ -83,6 +84,7 @@ typedef struct VhostOps {
vhost_backend_cleanup vhost_backend_cleanup;
vhost_backend_memslots_limit vhost_backend_memslots_limit;
vhost_net_set_backend_op vhost_net_set_backend;
vhost_net_set_mtu_op vhost_net_set_mtu;
vhost_scsi_set_endpoint_op vhost_scsi_set_endpoint;
vhost_scsi_clear_endpoint_op vhost_scsi_clear_endpoint;
vhost_scsi_get_abi_version_op vhost_scsi_get_abi_version;

View file

@ -17,6 +17,7 @@
#define QEMU_VIRTIO_ACCESS_H
#include "hw/virtio/virtio.h"
#include "hw/virtio/virtio-bus.h"
#include "exec/address-spaces.h"
#if defined(TARGET_PPC64) || defined(TARGET_ARM)
@ -40,45 +41,55 @@ static inline bool virtio_access_is_big_endian(VirtIODevice *vdev)
static inline uint16_t virtio_lduw_phys(VirtIODevice *vdev, hwaddr pa)
{
AddressSpace *dma_as = vdev->dma_as;
if (virtio_access_is_big_endian(vdev)) {
return lduw_be_phys(&address_space_memory, pa);
return lduw_be_phys(dma_as, pa);
}
return lduw_le_phys(&address_space_memory, pa);
return lduw_le_phys(dma_as, pa);
}
static inline uint32_t virtio_ldl_phys(VirtIODevice *vdev, hwaddr pa)
{
AddressSpace *dma_as = vdev->dma_as;
if (virtio_access_is_big_endian(vdev)) {
return ldl_be_phys(&address_space_memory, pa);
return ldl_be_phys(dma_as, pa);
}
return ldl_le_phys(&address_space_memory, pa);
return ldl_le_phys(dma_as, pa);
}
static inline uint64_t virtio_ldq_phys(VirtIODevice *vdev, hwaddr pa)
{
AddressSpace *dma_as = vdev->dma_as;
if (virtio_access_is_big_endian(vdev)) {
return ldq_be_phys(&address_space_memory, pa);
return ldq_be_phys(dma_as, pa);
}
return ldq_le_phys(&address_space_memory, pa);
return ldq_le_phys(dma_as, pa);
}
static inline void virtio_stw_phys(VirtIODevice *vdev, hwaddr pa,
uint16_t value)
{
AddressSpace *dma_as = vdev->dma_as;
if (virtio_access_is_big_endian(vdev)) {
stw_be_phys(&address_space_memory, pa, value);
stw_be_phys(dma_as, pa, value);
} else {
stw_le_phys(&address_space_memory, pa, value);
stw_le_phys(dma_as, pa, value);
}
}
static inline void virtio_stl_phys(VirtIODevice *vdev, hwaddr pa,
uint32_t value)
{
AddressSpace *dma_as = vdev->dma_as;
if (virtio_access_is_big_endian(vdev)) {
stl_be_phys(&address_space_memory, pa, value);
stl_be_phys(dma_as, pa, value);
} else {
stl_le_phys(&address_space_memory, pa, value);
stl_le_phys(dma_as, pa, value);
}
}

View file

@ -88,6 +88,7 @@ typedef struct VirtioBusClass {
* Note that changing this will break migration for this transport.
*/
bool has_variable_vring_alignment;
AddressSpace *(*get_dma_as)(DeviceState *d);
} VirtioBusClass;
struct VirtioBusState {

View file

@ -36,6 +36,7 @@ typedef struct virtio_net_conf
int32_t txburst;
char *tx;
uint16_t rx_queue_size;
uint16_t mtu;
} virtio_net_conf;
/* Maximum packet size we can receive from tap device: header + 64k */

View file

@ -92,6 +92,7 @@ struct VirtIODevice
char *bus_name;
uint8_t device_endian;
bool use_guest_notifier_mask;
AddressSpace *dma_as;
QLIST_HEAD(, VirtQueue) *vector_queues;
};
@ -170,9 +171,10 @@ bool virtqueue_rewind(VirtQueue *vq, unsigned int num);
void virtqueue_fill(VirtQueue *vq, const VirtQueueElement *elem,
unsigned int len, unsigned int idx);
void virtqueue_map(VirtQueueElement *elem);
void virtqueue_map(VirtIODevice *vdev, VirtQueueElement *elem);
void *virtqueue_pop(VirtQueue *vq, size_t sz);
void *qemu_get_virtqueue_element(QEMUFile *f, size_t sz);
unsigned int virtqueue_drop_all(VirtQueue *vq);
void *qemu_get_virtqueue_element(VirtIODevice *vdev, QEMUFile *f, size_t sz);
void qemu_put_virtqueue_element(QEMUFile *f, VirtQueueElement *elem);
int virtqueue_avail_bytes(VirtQueue *vq, unsigned int in_bytes,
unsigned int out_bytes);
@ -255,7 +257,9 @@ typedef struct VirtIORNGConf VirtIORNGConf;
DEFINE_PROP_BIT64("notify_on_empty", _state, _field, \
VIRTIO_F_NOTIFY_ON_EMPTY, true), \
DEFINE_PROP_BIT64("any_layout", _state, _field, \
VIRTIO_F_ANY_LAYOUT, true)
VIRTIO_F_ANY_LAYOUT, true), \
DEFINE_PROP_BIT64("iommu_platform", _state, _field, \
VIRTIO_F_IOMMU_PLATFORM, false)
hwaddr virtio_queue_get_desc_addr(VirtIODevice *vdev, int n);
hwaddr virtio_queue_get_avail_addr(VirtIODevice *vdev, int n);
@ -266,6 +270,7 @@ hwaddr virtio_queue_get_used_size(VirtIODevice *vdev, int n);
uint16_t virtio_queue_get_last_avail_idx(VirtIODevice *vdev, int n);
void virtio_queue_set_last_avail_idx(VirtIODevice *vdev, int n, uint16_t idx);
void virtio_queue_invalidate_signalled_used(VirtIODevice *vdev, int n);
void virtio_queue_update_used_idx(VirtIODevice *vdev, int n);
VirtQueue *virtio_get_queue(VirtIODevice *vdev, int n);
uint16_t virtio_get_queue_index(VirtQueue *vq);
EventNotifier *virtio_queue_get_guest_notifier(VirtQueue *vq);

View file

@ -186,6 +186,12 @@ enum VMStateFlags {
VMS_MULTIPLY_ELEMENTS = 0x4000,
};
typedef enum {
MIG_PRI_DEFAULT = 0,
MIG_PRI_IOMMU, /* Must happen before PCI devices */
MIG_PRI_MAX,
} MigrationPriority;
typedef struct {
const char *name;
size_t offset;
@ -207,6 +213,7 @@ struct VMStateDescription {
int version_id;
int minimum_version_id;
int minimum_version_id_old;
MigrationPriority priority;
LoadStateHandler *load_state_old;
int (*pre_load)(void *opaque);
int (*post_load)(void *opaque, int version_id);

View file

@ -35,4 +35,6 @@ int vhost_set_vring_enable(NetClientState * nc, int enable);
uint64_t vhost_net_get_acked_features(VHostNetState *net);
int vhost_net_set_mtu(struct vhost_net *net, uint16_t mtu);
#endif

View file

@ -678,6 +678,7 @@
#define PCI_EXT_CAP_ID_MAX PCI_EXT_CAP_ID_PTM
#define PCI_EXT_CAP_DSN_SIZEOF 12
#define PCI_EXT_CAP_ATS_SIZEOF 8
#define PCI_EXT_CAP_MCAST_ENDPOINT_SIZEOF 40
/* Advanced Error Reporting */

View file

@ -202,6 +202,8 @@ struct CryptoDevBackend {
Object parent_obj;
bool ready;
/* Tag the cryptodev backend is used by virtio-crypto or not */
bool is_used;
CryptoDevBackendConf conf;
};
@ -295,4 +297,44 @@ int cryptodev_backend_crypto_operation(
void *opaque,
uint32_t queue_index, Error **errp);
/**
* cryptodev_backend_set_used:
* @backend: the cryptodev backend object
* @used: true or false
*
* Set whether the cryptodev backend is used by virtio-crypto
*/
void cryptodev_backend_set_used(CryptoDevBackend *backend, bool used);
/**
* cryptodev_backend_is_used:
* @backend: the cryptodev backend object
*
* Return whether the cryptodev backend is used
* by virtio-crypto
*
* Returns: true if used, false otherwise
*/
bool cryptodev_backend_is_used(CryptoDevBackend *backend);
/**
* cryptodev_backend_set_ready:
* @backend: the cryptodev backend object
* @ready: true or false
*
* Set whether the cryptodev backend is ready; called
* by the children of the cryptodev backend interface.
*/
void cryptodev_backend_set_ready(CryptoDevBackend *backend, bool ready);
/**
* cryptodev_backend_is_ready:
* @backend: the cryptodev backend object
*
* Return whether the cryptodev backend is ready
*
* Returns: true if ready, false otherwise
*/
bool cryptodev_backend_is_ready(CryptoDevBackend *backend);
#endif /* CRYPTODEV_H */

View file

@ -1603,6 +1603,11 @@ static void memory_region_update_iommu_notify_flags(MemoryRegion *mr)
void memory_region_register_iommu_notifier(MemoryRegion *mr,
IOMMUNotifier *n)
{
if (mr->alias) {
memory_region_register_iommu_notifier(mr->alias, n);
return;
}
/* We need to register for at least one bitfield */
assert(n->notifier_flags != IOMMU_NOTIFIER_NONE);
QLIST_INSERT_HEAD(&mr->iommu_notify, n, node);
@ -1643,6 +1648,10 @@ void memory_region_iommu_replay(MemoryRegion *mr, IOMMUNotifier *n,
void memory_region_unregister_iommu_notifier(MemoryRegion *mr,
IOMMUNotifier *n)
{
if (mr->alias) {
memory_region_unregister_iommu_notifier(mr->alias, n);
return;
}
QLIST_REMOVE(n, node);
memory_region_update_iommu_notify_flags(mr);
}

View file

@ -532,6 +532,34 @@ static int calculate_compat_instance_id(const char *idstr)
return instance_id;
}
static inline MigrationPriority save_state_priority(SaveStateEntry *se)
{
if (se->vmsd) {
return se->vmsd->priority;
}
return MIG_PRI_DEFAULT;
}
static void savevm_state_handler_insert(SaveStateEntry *nse)
{
MigrationPriority priority = save_state_priority(nse);
SaveStateEntry *se;
assert(priority <= MIG_PRI_MAX);
QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
if (save_state_priority(se) < priority) {
break;
}
}
if (se) {
QTAILQ_INSERT_BEFORE(se, nse, entry);
} else {
QTAILQ_INSERT_TAIL(&savevm_state.handlers, nse, entry);
}
}
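A device opts in by setting .priority in its VMStateDescription; anything that leaves the field zeroed keeps MIG_PRI_DEFAULT via save_state_priority(). An illustrative (not taken from this series) vIOMMU description:

    /* Restored before PCI devices that depend on it, per MIG_PRI_IOMMU. */
    static const VMStateDescription vmstate_my_iommu = {
        .name = "my-iommu",
        .version_id = 1,
        .minimum_version_id = 1,
        .priority = MIG_PRI_IOMMU,
        .fields = (VMStateField[]) {
            VMSTATE_END_OF_LIST()
        },
    };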
/* TODO: Individual devices generally have very little idea about the rest
of the system, so instance_id should be removed/replaced.
Meanwhile pass -1 as instance_id if you do not already have a clearly
@ -578,8 +606,7 @@ int register_savevm_live(DeviceState *dev,
se->instance_id = instance_id;
}
assert(!se->compat || se->instance_id == 0);
/* add at the end of list */
QTAILQ_INSERT_TAIL(&savevm_state.handlers, se, entry);
savevm_state_handler_insert(se);
return 0;
}
@ -662,8 +689,7 @@ int vmstate_register_with_alias_id(DeviceState *dev, int instance_id,
se->instance_id = instance_id;
}
assert(!se->compat || se->instance_id == 0);
/* add at the end of list */
QTAILQ_INSERT_TAIL(&savevm_state.handlers, se, entry);
savevm_state_handler_insert(se);
return 0;
}

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

View file

@ -867,6 +867,28 @@ static void test_acpi_piix4_tcg_ipmi(void)
free_test_data(&data);
}
static void test_acpi_q35_tcg_memhp(void)
{
test_data data;
memset(&data, 0, sizeof(data));
data.machine = MACHINE_Q35;
data.variant = ".memhp";
test_acpi_one(" -m 128,slots=3,maxmem=1G -numa node", &data);
free_test_data(&data);
}
static void test_acpi_piix4_tcg_memhp(void)
{
test_data data;
memset(&data, 0, sizeof(data));
data.machine = MACHINE_PC;
data.variant = ".memhp";
test_acpi_one(" -m 128,slots=3,maxmem=1G -numa node", &data);
free_test_data(&data);
}
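The new cases run with the rest of the ACPI table tests, typically via "make check-qtest-x86_64", or directly (the binary path assumes the usual in-tree build layout, adjust as needed):

    QTEST_QEMU_BINARY=x86_64-softmmu/qemu-system-x86_64 \
        tests/bios-tables-test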
int main(int argc, char *argv[])
{
const char *arch = qtest_get_arch();
@ -887,6 +909,8 @@ int main(int argc, char *argv[])
qtest_add_func("acpi/q35/ipmi", test_acpi_q35_tcg_ipmi);
qtest_add_func("acpi/piix4/cpuhp", test_acpi_piix4_tcg_cphp);
qtest_add_func("acpi/q35/cpuhp", test_acpi_q35_tcg_cphp);
qtest_add_func("acpi/piix4/memhp", test_acpi_piix4_tcg_memhp);
qtest_add_func("acpi/q35/memhp", test_acpi_q35_tcg_memhp);
}
ret = g_test_run();
boot_sector_cleanup(disk);