Merge remote-tracking branch 'remotes/mst/tags/for_upstream' into staging

virtio, pc: fixes and features

more guest error handling for virtio devices
virtio migration rework
pc fixes

Signed-off-by: Michael S. Tsirkin <mst@redhat.com>

# gpg: Signature made Mon 10 Oct 2016 00:39:11 BST
# gpg:                using RSA key 0x281F0DB8D28D5469
# gpg: Good signature from "Michael S. Tsirkin <mst@kernel.org>"
# gpg:                 aka "Michael S. Tsirkin <mst@redhat.com>"
# Primary key fingerprint: 0270 606B 6F3C DF3D 0B17  0970 C350 3912 AFBE 8E67
#      Subkey fingerprint: 5D09 FD08 71C8 F85B 94CA  8A0D 281F 0DB8 D28D 5469

* remotes/mst/tags/for_upstream: (33 commits)
  intel-iommu: Check IOAPIC's Trigger Mode against the one in IRTE
  virtio: cleanup VMSTATE_VIRTIO_DEVICE
  vhost-vsock: convert VMSTATE_VIRTIO_DEVICE
  virtio-rng: convert VMSTATE_VIRTIO_DEVICE
  virtio-balloon: convert VMSTATE_VIRTIO_DEVICE
  virtio-scsi: convert VMSTATE_VIRTIO_DEVICE
  virtio-input: convert VMSTATE_VIRTIO_DEVICE
  virtio-gpu: convert VMSTATE_VIRTIO_DEVICE
  virtio-serial: convert VMSTATE_VIRTIO_DEVICE
  virtio-9p: convert VMSTATE_VIRTIO_DEVICE
  virtio-net: convert VMSTATE_VIRTIO_DEVICE
  virtio-blk: convert VMSTATE_VIRTIO_DEVICE
  virtio: prepare change VMSTATE_VIRTIO_DEVICE macro
  net: don't poke at chardev internal QemuOpts
  virtio-scsi: handle virtio_scsi_set_config() error
  virtio-scsi: convert virtio_scsi_bad_req() to use virtio_error()
  virtio-net: handle virtio_net_flush_tx() errors
  virtio-net: handle virtio_net_receive() errors
  virtio-net: handle virtio_net_handle_ctrl() error
  virtio-blk: handle virtio_blk_handle_request() errors
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
stable-2.8
Peter Maydell 2016-10-10 16:23:40 +01:00
commit 627eae7d72
34 changed files with 483 additions and 307 deletions
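
The "virtio migration rework" half of this pull converts every virtio device away from the old VMSTATE_VIRTIO_DEVICE(devname, version, load, save) macro, which expanded into a complete VMStateDescription wrapping per-device load/save functions, to an ordinary VMStateDescription that embeds the new VMSTATE_VIRTIO_DEVICE field (backed by virtio_vmstate_info in hw/virtio/virtio.c); the conversion commits also register the new structure as the device class's vmsd. A minimal sketch of the converted pattern, with the "virtio-foo" name and the optional hook purely illustrative:

    /* Sketch only: shape of a device vmsd after the conversion. */
    static int virtio_foo_post_load(void *opaque, int version_id)
    {
        /* Optional: per-device fixups that used to live in the load wrapper. */
        return 0;
    }

    static const VMStateDescription vmstate_virtio_foo = {
        .name = "virtio-foo",
        .minimum_version_id = 1,
        .version_id = 1,
        .fields = (VMStateField[]) {
            VMSTATE_VIRTIO_DEVICE,   /* common virtio state via virtio_vmstate_info */
            VMSTATE_END_OF_LIST()
        },
        .post_load = virtio_foo_post_load,   /* optional, as in virtio-rng/virtio-input */
    };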

hmp.c

@ -1974,6 +1974,7 @@ void hmp_chardev_add(Monitor *mon, const QDict *qdict)
error_setg(&err, "Parsing chardev args failed");
} else {
qemu_chr_new_from_opts(opts, NULL, &err);
qemu_opts_del(opts);
}
hmp_handle_error(mon, &err);
}


@ -41,6 +41,7 @@ static void handle_9p_output(VirtIODevice *vdev, VirtQueue *vq)
V9fsState *s = &v->state;
V9fsPDU *pdu;
ssize_t len;
VirtQueueElement *elem;
while ((pdu = pdu_alloc(s))) {
struct {
@ -48,21 +49,28 @@ static void handle_9p_output(VirtIODevice *vdev, VirtQueue *vq)
uint8_t id;
uint16_t tag_le;
} QEMU_PACKED out;
VirtQueueElement *elem;
elem = virtqueue_pop(vq, sizeof(VirtQueueElement));
if (!elem) {
pdu_free(pdu);
break;
goto out_free_pdu;
}
BUG_ON(elem->out_num == 0 || elem->in_num == 0);
QEMU_BUILD_BUG_ON(sizeof out != 7);
if (elem->in_num == 0) {
virtio_error(vdev,
"The guest sent a VirtFS request without space for "
"the reply");
goto out_free_req;
}
QEMU_BUILD_BUG_ON(sizeof(out) != 7);
v->elems[pdu->idx] = elem;
len = iov_to_buf(elem->out_sg, elem->out_num, 0,
&out, sizeof out);
BUG_ON(len != sizeof out);
&out, sizeof(out));
if (len != sizeof(out)) {
virtio_error(vdev, "The guest sent a malformed VirtFS request: "
"header size is %zd, should be 7", len);
goto out_free_req;
}
pdu->size = le32_to_cpu(out.size_le);
@ -72,6 +80,14 @@ static void handle_9p_output(VirtIODevice *vdev, VirtQueue *vq)
qemu_co_queue_init(&pdu->complete);
pdu_submit(pdu);
}
return;
out_free_req:
virtqueue_detach_element(vq, elem, 0);
g_free(elem);
out_free_pdu:
pdu_free(pdu);
}
static uint64_t virtio_9p_get_features(VirtIODevice *vdev, uint64_t features,
@ -97,11 +113,6 @@ static void virtio_9p_get_config(VirtIODevice *vdev, uint8_t *config)
g_free(cfg);
}
static int virtio_9p_load(QEMUFile *f, void *opaque, size_t size)
{
return virtio_load(VIRTIO_DEVICE(opaque), f, 1);
}
static void virtio_9p_device_realize(DeviceState *dev, Error **errp)
{
VirtIODevice *vdev = VIRTIO_DEVICE(dev);
@ -168,7 +179,15 @@ void virtio_init_iov_from_pdu(V9fsPDU *pdu, struct iovec **piov,
/* virtio-9p device */
VMSTATE_VIRTIO_DEVICE(9p, 1, virtio_9p_load, virtio_vmstate_save);
static const VMStateDescription vmstate_virtio_9p = {
.name = "virtio-9p",
.minimum_version_id = 1,
.version_id = 1,
.fields = (VMStateField[]) {
VMSTATE_VIRTIO_DEVICE,
VMSTATE_END_OF_LIST()
},
};
static Property virtio_9p_properties[] = {
DEFINE_PROP_STRING("mount_tag", V9fsVirtioState, state.fsconf.tag),


@ -4,6 +4,7 @@
#include "qapi/error.h"
#include "qapi-event.h"
#include "trace.h"
#include "sysemu/numa.h"
#define ACPI_CPU_HOTPLUG_REG_LEN 12
#define ACPI_CPU_SELECTOR_OFFSET_WR 0
@ -503,6 +504,7 @@ void build_cpus_aml(Aml *table, MachineState *machine, CPUHotplugFeatures opts,
/* build Processor object for each processor */
for (i = 0; i < arch_ids->len; i++) {
int j;
Aml *dev;
Aml *uid = aml_int(i);
GArray *madt_buf = g_array_new(0, 1, 1);
@ -546,6 +548,16 @@ void build_cpus_aml(Aml *table, MachineState *machine, CPUHotplugFeatures opts,
aml_arg(1), aml_arg(2))
);
aml_append(dev, method);
/* Linux guests discard SRAT info for non-present CPUs
* as a result _PXM is required for all CPUs which might
* be hot-plugged. For simplicity, add it for all CPUs.
*/
j = numa_get_node_for_cpu(i);
if (j < nb_numa_nodes) {
aml_append(dev, aml_name_decl("_PXM", aml_int(j)));
}
aml_append(cpus_dev, dev);
}
}


@ -427,11 +427,9 @@ build_srat(GArray *table_data, BIOSLinker *linker, VirtGuestInfo *guest_info)
uint32_t *cpu_node = g_malloc0(guest_info->smp_cpus * sizeof(uint32_t));
for (i = 0; i < guest_info->smp_cpus; i++) {
for (j = 0; j < nb_numa_nodes; j++) {
if (test_bit(i, numa_info[j].node_cpu)) {
j = numa_get_node_for_cpu(i);
if (j < nb_numa_nodes) {
cpu_node[i] = j;
break;
}
}
}


@ -413,10 +413,9 @@ static void fdt_add_cpu_nodes(const VirtBoardInfo *vbi)
armcpu->mp_affinity);
}
for (i = 0; i < nb_numa_nodes; i++) {
if (test_bit(cpu, numa_info[i].node_cpu)) {
qemu_fdt_setprop_cell(vbi->fdt, nodename, "numa-node-id", i);
}
i = numa_get_node_for_cpu(cpu);
if (i < nb_numa_nodes) {
qemu_fdt_setprop_cell(vbi->fdt, nodename, "numa-node-id", i);
}
g_free(nodename);


@ -29,8 +29,8 @@
#include "hw/virtio/virtio-bus.h"
#include "hw/virtio/virtio-access.h"
void virtio_blk_init_request(VirtIOBlock *s, VirtQueue *vq,
VirtIOBlockReq *req)
static void virtio_blk_init_request(VirtIOBlock *s, VirtQueue *vq,
VirtIOBlockReq *req)
{
req->dev = s;
req->vq = vq;
@ -40,7 +40,7 @@ void virtio_blk_init_request(VirtIOBlock *s, VirtQueue *vq,
req->mr_next = NULL;
}
void virtio_blk_free_request(VirtIOBlockReq *req)
static void virtio_blk_free_request(VirtIOBlockReq *req)
{
if (req) {
g_free(req);
@ -381,7 +381,7 @@ static int multireq_compare(const void *a, const void *b)
}
}
void virtio_blk_submit_multireq(BlockBackend *blk, MultiReqBuffer *mrb)
static void virtio_blk_submit_multireq(BlockBackend *blk, MultiReqBuffer *mrb)
{
int i = 0, start = 0, num_reqs = 0, niov = 0, nb_sectors = 0;
uint32_t max_transfer;
@ -468,30 +468,32 @@ static bool virtio_blk_sect_range_ok(VirtIOBlock *dev,
return true;
}
void virtio_blk_handle_request(VirtIOBlockReq *req, MultiReqBuffer *mrb)
static int virtio_blk_handle_request(VirtIOBlockReq *req, MultiReqBuffer *mrb)
{
uint32_t type;
struct iovec *in_iov = req->elem.in_sg;
struct iovec *iov = req->elem.out_sg;
unsigned in_num = req->elem.in_num;
unsigned out_num = req->elem.out_num;
VirtIOBlock *s = req->dev;
VirtIODevice *vdev = VIRTIO_DEVICE(s);
if (req->elem.out_num < 1 || req->elem.in_num < 1) {
error_report("virtio-blk missing headers");
exit(1);
virtio_error(vdev, "virtio-blk missing headers");
return -1;
}
if (unlikely(iov_to_buf(iov, out_num, 0, &req->out,
sizeof(req->out)) != sizeof(req->out))) {
error_report("virtio-blk request outhdr too short");
exit(1);
virtio_error(vdev, "virtio-blk request outhdr too short");
return -1;
}
iov_discard_front(&iov, &out_num, sizeof(req->out));
if (in_iov[in_num - 1].iov_len < sizeof(struct virtio_blk_inhdr)) {
error_report("virtio-blk request inhdr too short");
exit(1);
virtio_error(vdev, "virtio-blk request inhdr too short");
return -1;
}
/* We always touch the last byte, so just see how big in_iov is. */
@ -529,7 +531,7 @@ void virtio_blk_handle_request(VirtIOBlockReq *req, MultiReqBuffer *mrb)
block_acct_invalid(blk_get_stats(req->dev->blk),
is_write ? BLOCK_ACCT_WRITE : BLOCK_ACCT_READ);
virtio_blk_free_request(req);
return;
return 0;
}
block_acct_start(blk_get_stats(req->dev->blk),
@ -576,6 +578,7 @@ void virtio_blk_handle_request(VirtIOBlockReq *req, MultiReqBuffer *mrb)
virtio_blk_req_complete(req, VIRTIO_BLK_S_UNSUPP);
virtio_blk_free_request(req);
}
return 0;
}
void virtio_blk_handle_vq(VirtIOBlock *s, VirtQueue *vq)
@ -586,7 +589,11 @@ void virtio_blk_handle_vq(VirtIOBlock *s, VirtQueue *vq)
blk_io_plug(s->blk);
while ((req = virtio_blk_get_request(s, vq))) {
virtio_blk_handle_request(req, &mrb);
if (virtio_blk_handle_request(req, &mrb)) {
virtqueue_detach_element(req->vq, &req->elem, 0);
virtio_blk_free_request(req);
break;
}
}
if (mrb.num_reqs) {
@ -625,7 +632,18 @@ static void virtio_blk_dma_restart_bh(void *opaque)
while (req) {
VirtIOBlockReq *next = req->next;
virtio_blk_handle_request(req, &mrb);
if (virtio_blk_handle_request(req, &mrb)) {
/* Device is now broken and won't do any processing until it gets
* reset. Already queued requests will be lost: let's purge them.
*/
while (req) {
next = req->next;
virtqueue_detach_element(req->vq, &req->elem, 0);
virtio_blk_free_request(req);
req = next;
}
break;
}
req = next;
}
@ -665,6 +683,7 @@ static void virtio_blk_reset(VirtIODevice *vdev)
while (s->rq) {
req = s->rq;
s->rq = req->next;
virtqueue_detach_element(req->vq, &req->elem, 0);
virtio_blk_free_request(req);
}
@ -803,13 +822,6 @@ static void virtio_blk_set_status(VirtIODevice *vdev, uint8_t status)
}
}
static void virtio_blk_save(QEMUFile *f, void *opaque, size_t size)
{
VirtIODevice *vdev = VIRTIO_DEVICE(opaque);
virtio_save(vdev, f);
}
static void virtio_blk_save_device(VirtIODevice *vdev, QEMUFile *f)
{
VirtIOBlock *s = VIRTIO_BLK(vdev);
@ -828,14 +840,6 @@ static void virtio_blk_save_device(VirtIODevice *vdev, QEMUFile *f)
qemu_put_sbyte(f, 0);
}
static int virtio_blk_load(QEMUFile *f, void *opaque, size_t size)
{
VirtIOBlock *s = opaque;
VirtIODevice *vdev = VIRTIO_DEVICE(s);
return virtio_load(vdev, f, 2);
}
static int virtio_blk_load_device(VirtIODevice *vdev, QEMUFile *f,
int version_id)
{
@ -956,7 +960,15 @@ static void virtio_blk_instance_init(Object *obj)
DEVICE(obj), NULL);
}
VMSTATE_VIRTIO_DEVICE(blk, 2, virtio_blk_load, virtio_blk_save);
static const VMStateDescription vmstate_virtio_blk = {
.name = "virtio-blk",
.minimum_version_id = 2,
.version_id = 2,
.fields = (VMStateField[]) {
VMSTATE_VIRTIO_DEVICE,
VMSTATE_END_OF_LIST()
},
};
static Property virtio_blk_properties[] = {
DEFINE_BLOCK_PROPERTIES(VirtIOBlock, conf.conf),


@ -75,6 +75,19 @@ static VirtIOSerialPort *find_port_by_name(char *name)
return NULL;
}
static VirtIOSerialPort *find_first_connected_console(VirtIOSerial *vser)
{
VirtIOSerialPort *port;
QTAILQ_FOREACH(port, &vser->ports, next) {
VirtIOSerialPortClass const *vsc = VIRTIO_SERIAL_PORT_GET_CLASS(port);
if (vsc->is_console && port->host_connected) {
return port;
}
}
return NULL;
}
static bool use_multiport(VirtIOSerial *vser)
{
VirtIODevice *vdev = VIRTIO_DEVICE(vser);
@ -132,6 +145,15 @@ static void discard_vq_data(VirtQueue *vq, VirtIODevice *vdev)
virtio_notify(vdev, vq);
}
static void discard_throttle_data(VirtIOSerialPort *port)
{
if (port->elem) {
virtqueue_detach_element(port->ovq, port->elem, 0);
g_free(port->elem);
port->elem = NULL;
}
}
static void do_flush_queued_data(VirtIOSerialPort *port, VirtQueue *vq,
VirtIODevice *vdev)
{
@ -254,6 +276,7 @@ int virtio_serial_close(VirtIOSerialPort *port)
* consume, reset the throttling flag and discard the data.
*/
port->throttled = false;
discard_throttle_data(port);
discard_vq_data(port->ovq, VIRTIO_DEVICE(port->vser));
send_control_event(port->vser, port->id, VIRTIO_CONSOLE_PORT_OPEN, 0);
@ -528,6 +551,7 @@ static uint64_t get_features(VirtIODevice *vdev, uint64_t features,
vser = VIRTIO_SERIAL(vdev);
features |= vser->host_features;
if (vser->bus.max_nr_ports > 1) {
virtio_add_feature(&features, VIRTIO_CONSOLE_F_MULTIPORT);
}
@ -547,6 +571,29 @@ static void get_config(VirtIODevice *vdev, uint8_t *config_data)
vser->serial.max_virtserial_ports);
}
/* Guest sent new config info */
static void set_config(VirtIODevice *vdev, const uint8_t *config_data)
{
VirtIOSerial *vser = VIRTIO_SERIAL(vdev);
struct virtio_console_config *config =
(struct virtio_console_config *)config_data;
uint8_t emerg_wr_lo = le32_to_cpu(config->emerg_wr);
VirtIOSerialPort *port = find_first_connected_console(vser);
VirtIOSerialPortClass *vsc;
if (!config->emerg_wr) {
return;
}
/* Make sure we don't misdetect an emergency write when the guest
* does a short config write after an emergency write. */
config->emerg_wr = 0;
if (!port) {
return;
}
vsc = VIRTIO_SERIAL_PORT_GET_CLASS(port);
(void)vsc->have_data(port, &emerg_wr_lo, 1);
}
static void guest_reset(VirtIOSerial *vser)
{
VirtIOSerialPort *port;
@ -554,6 +601,9 @@ static void guest_reset(VirtIOSerial *vser)
QTAILQ_FOREACH(port, &vser->ports, next) {
vsc = VIRTIO_SERIAL_PORT_GET_CLASS(port);
discard_throttle_data(port);
if (port->guest_connected) {
port->guest_connected = false;
if (vsc->set_guest_connected) {
@ -728,12 +778,6 @@ static int fetch_active_ports_list(QEMUFile *f,
return 0;
}
static int virtio_serial_load(QEMUFile *f, void *opaque, size_t size)
{
/* The virtio device */
return virtio_load(VIRTIO_DEVICE(opaque), f, 3);
}
static int virtio_serial_load_device(VirtIODevice *vdev, QEMUFile *f,
int version_id)
{
@ -864,6 +908,7 @@ static void remove_port(VirtIOSerial *vser, uint32_t port_id)
assert(port);
/* Flush out any unconsumed buffers first */
discard_throttle_data(port);
discard_vq_data(port->ovq, VIRTIO_DEVICE(port->vser));
send_control_event(vser, port->id, VIRTIO_CONSOLE_PORT_REMOVE, 1);
@ -967,6 +1012,7 @@ static void virtio_serial_device_realize(DeviceState *dev, Error **errp)
VirtIODevice *vdev = VIRTIO_DEVICE(dev);
VirtIOSerial *vser = VIRTIO_SERIAL(dev);
uint32_t i, max_supported_ports;
size_t config_size = sizeof(struct virtio_console_config);
if (!vser->serial.max_virtserial_ports) {
error_setg(errp, "Maximum number of serial ports not specified");
@ -981,10 +1027,12 @@ static void virtio_serial_device_realize(DeviceState *dev, Error **errp)
return;
}
/* We don't support emergency write, skip it for now. */
/* TODO: cleaner fix, depending on host features. */
if (!virtio_has_feature(vser->host_features,
VIRTIO_CONSOLE_F_EMERG_WRITE)) {
config_size = offsetof(struct virtio_console_config, emerg_wr);
}
virtio_init(vdev, "virtio-serial", VIRTIO_ID_CONSOLE,
offsetof(struct virtio_console_config, emerg_wr));
config_size);
/* Spawn a new virtio-serial bus on which the ports will ride as devices */
qbus_create_inplace(&vser->bus, sizeof(vser->bus), TYPE_VIRTIO_SERIAL_BUS,
@ -1075,11 +1123,21 @@ static void virtio_serial_device_unrealize(DeviceState *dev, Error **errp)
}
/* Note: 'console' is used for backwards compatibility */
VMSTATE_VIRTIO_DEVICE(console, 3, virtio_serial_load, virtio_vmstate_save);
static const VMStateDescription vmstate_virtio_console = {
.name = "virtio-console",
.minimum_version_id = 3,
.version_id = 3,
.fields = (VMStateField[]) {
VMSTATE_VIRTIO_DEVICE,
VMSTATE_END_OF_LIST()
},
};
static Property virtio_serial_properties[] = {
DEFINE_PROP_UINT32("max_ports", VirtIOSerial, serial.max_virtserial_ports,
31),
DEFINE_PROP_BIT64("emergency-write", VirtIOSerial, host_features,
VIRTIO_CONSOLE_F_EMERG_WRITE, true),
DEFINE_PROP_END_OF_LIST(),
};
@ -1098,6 +1156,7 @@ static void virtio_serial_class_init(ObjectClass *klass, void *data)
vdc->unrealize = virtio_serial_device_unrealize;
vdc->get_features = get_features;
vdc->get_config = get_config;
vdc->set_config = set_config;
vdc->set_status = set_status;
vdc->reset = vser_reset;
vdc->save = virtio_serial_save_device;


@ -990,12 +990,9 @@ static const VMStateDescription vmstate_virtio_gpu_scanouts = {
static void virtio_gpu_save(QEMUFile *f, void *opaque, size_t size)
{
VirtIOGPU *g = opaque;
VirtIODevice *vdev = VIRTIO_DEVICE(g);
struct virtio_gpu_simple_resource *res;
int i;
virtio_save(vdev, f);
/* in 2d mode we should never find unprocessed commands here */
assert(QTAILQ_EMPTY(&g->cmdq));
@ -1020,16 +1017,10 @@ static void virtio_gpu_save(QEMUFile *f, void *opaque, size_t size)
static int virtio_gpu_load(QEMUFile *f, void *opaque, size_t size)
{
VirtIOGPU *g = opaque;
VirtIODevice *vdev = VIRTIO_DEVICE(g);
struct virtio_gpu_simple_resource *res;
struct virtio_gpu_scanout *scanout;
uint32_t resource_id, pformat;
int i, ret;
ret = virtio_load(vdev, f, VIRTIO_GPU_VM_VERSION);
if (ret) {
return ret;
}
int i;
resource_id = qemu_get_be32(f);
while (resource_id != 0) {
@ -1219,8 +1210,32 @@ static void virtio_gpu_reset(VirtIODevice *vdev)
#endif
}
VMSTATE_VIRTIO_DEVICE(gpu, VIRTIO_GPU_VM_VERSION, virtio_gpu_load,
virtio_gpu_save);
/*
* For historical reasons virtio_gpu does not adhere to virtio migration
* scheme as described in doc/virtio-migration.txt, in a sense that no
* save/load callback are provided to the core. Instead the device data
* is saved/loaded after the core data.
*
* Because of this we need a special vmsd.
*/
static const VMStateDescription vmstate_virtio_gpu = {
.name = "virtio-gpu",
.minimum_version_id = VIRTIO_GPU_VM_VERSION,
.version_id = VIRTIO_GPU_VM_VERSION,
.fields = (VMStateField[]) {
VMSTATE_VIRTIO_DEVICE /* core */,
{
.name = "virtio-gpu",
.info = &(const VMStateInfo) {
.name = "virtio-gpu",
.get = virtio_gpu_load,
.put = virtio_gpu_save,
},
.flags = VMS_SINGLE,
} /* device */,
VMSTATE_END_OF_LIST()
},
};
static Property virtio_gpu_properties[] = {
DEFINE_PROP_UINT32("max_outputs", VirtIOGPU, conf.max_outputs, 1),


@ -2410,18 +2410,15 @@ build_srat(GArray *table_data, BIOSLinker *linker, MachineState *machine)
srat->reserved1 = cpu_to_le32(1);
for (i = 0; i < apic_ids->len; i++) {
int j;
int j = numa_get_node_for_cpu(i);
int apic_id = apic_ids->cpus[i].arch_id;
core = acpi_data_push(table_data, sizeof *core);
core->type = ACPI_SRAT_PROCESSOR_APIC;
core->length = sizeof(*core);
core->local_apic_id = apic_id;
for (j = 0; j < nb_numa_nodes; j++) {
if (test_bit(i, numa_info[j].node_cpu)) {
if (j < nb_numa_nodes) {
core->proximity_lo = j;
break;
}
}
memset(core->proximity_hi, 0, 3);
core->local_sapic_eid = 0;


@ -27,6 +27,7 @@
#include "hw/pci/pci.h"
#include "hw/pci/pci_bus.h"
#include "hw/i386/pc.h"
#include "hw/i386/apic-msidef.h"
#include "hw/boards.h"
#include "hw/i386/x86-iommu.h"
#include "hw/pci-host/q35.h"
@ -2209,6 +2210,8 @@ static int vtd_interrupt_remap_msi(IntelIOMMUState *iommu,
}
} else {
uint8_t vector = origin->data & 0xff;
uint8_t trigger_mode = (origin->data >> MSI_DATA_TRIGGER_SHIFT) & 0x1;
VTD_DPRINTF(IR, "received IOAPIC interrupt");
/* IOAPIC entry vector should be aligned with IRTE vector
* (see vt-d spec 5.1.5.1). */
@ -2217,6 +2220,15 @@ static int vtd_interrupt_remap_msi(IntelIOMMUState *iommu,
"entry: %d, IRTE: %d, index: %d",
vector, irq.vector, index);
}
/* The Trigger Mode field must match the Trigger Mode in the IRTE.
* (see vt-d spec 5.1.5.1). */
if (trigger_mode != irq.trigger_mode) {
VTD_DPRINTF(GENERAL, "IOAPIC trigger mode inconsistent: "
"entry: %u, IRTE: %u, index: %d",
trigger_mode, irq.trigger_mode, index);
}
}
/*


@ -779,11 +779,9 @@ static FWCfgState *bochs_bios_init(AddressSpace *as, PCMachineState *pcms)
for (i = 0; i < max_cpus; i++) {
unsigned int apic_id = x86_cpu_apic_id_from_index(i);
assert(apic_id < pcms->apic_id_limit);
for (j = 0; j < nb_numa_nodes; j++) {
if (test_bit(i, numa_info[j].node_cpu)) {
numa_fw_cfg[apic_id + 1] = cpu_to_le64(j);
break;
}
j = numa_get_node_for_cpu(i);
if (j < nb_numa_nodes) {
numa_fw_cfg[apic_id + 1] = cpu_to_le64(j);
}
}
for (i = 0; i < nb_numa_nodes; i++) {


@ -217,19 +217,12 @@ static void virtio_input_reset(VirtIODevice *vdev)
}
}
static int virtio_input_load(QEMUFile *f, void *opaque, size_t size)
static int virtio_input_post_load(void *opaque, int version_id)
{
VirtIOInput *vinput = opaque;
VirtIOInputClass *vic = VIRTIO_INPUT_GET_CLASS(vinput);
VirtIODevice *vdev = VIRTIO_DEVICE(vinput);
int ret;
ret = virtio_load(vdev, f, VIRTIO_INPUT_VM_VERSION);
if (ret) {
return ret;
}
/* post_load() */
vinput->active = vdev->status & VIRTIO_CONFIG_S_DRIVER_OK;
if (vic->change_active) {
vic->change_active(vinput);
@ -296,8 +289,16 @@ static void virtio_input_device_unrealize(DeviceState *dev, Error **errp)
virtio_cleanup(vdev);
}
VMSTATE_VIRTIO_DEVICE(input, VIRTIO_INPUT_VM_VERSION, virtio_input_load,
virtio_vmstate_save);
static const VMStateDescription vmstate_virtio_input = {
.name = "virtio-input",
.minimum_version_id = VIRTIO_INPUT_VM_VERSION,
.version_id = VIRTIO_INPUT_VM_VERSION,
.fields = (VMStateField[]) {
VMSTATE_VIRTIO_DEVICE,
VMSTATE_END_OF_LIST()
},
.post_load = virtio_input_post_load,
};
static Property virtio_input_properties[] = {
DEFINE_PROP_STRING("serial", VirtIOInput, serial),


@ -880,6 +880,7 @@ static int virtio_net_handle_mq(VirtIONet *n, uint8_t cmd,
return VIRTIO_NET_OK;
}
static void virtio_net_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
{
VirtIONet *n = VIRTIO_NET(vdev);
@ -897,8 +898,10 @@ static void virtio_net_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
}
if (iov_size(elem->in_sg, elem->in_num) < sizeof(status) ||
iov_size(elem->out_sg, elem->out_num) < sizeof(ctrl)) {
error_report("virtio-net ctrl missing headers");
exit(1);
virtio_error(vdev, "virtio-net ctrl missing headers");
virtqueue_detach_element(vq, elem, 0);
g_free(elem);
break;
}
iov_cnt = elem->out_num;
@ -1127,21 +1130,24 @@ static ssize_t virtio_net_receive(NetClientState *nc, const uint8_t *buf, size_t
elem = virtqueue_pop(q->rx_vq, sizeof(VirtQueueElement));
if (!elem) {
if (i == 0)
return -1;
error_report("virtio-net unexpected empty queue: "
"i %zd mergeable %d offset %zd, size %zd, "
"guest hdr len %zd, host hdr len %zd "
"guest features 0x%" PRIx64,
i, n->mergeable_rx_bufs, offset, size,
n->guest_hdr_len, n->host_hdr_len,
vdev->guest_features);
exit(1);
if (i) {
virtio_error(vdev, "virtio-net unexpected empty queue: "
"i %zd mergeable %d offset %zd, size %zd, "
"guest hdr len %zd, host hdr len %zd "
"guest features 0x%" PRIx64,
i, n->mergeable_rx_bufs, offset, size,
n->guest_hdr_len, n->host_hdr_len,
vdev->guest_features);
}
return -1;
}
if (elem->in_num < 1) {
error_report("virtio-net receive queue contains no in buffers");
exit(1);
virtio_error(vdev,
"virtio-net receive queue contains no in buffers");
virtqueue_detach_element(q->rx_vq, elem, 0);
g_free(elem);
return -1;
}
sg = elem->in_sg;
@ -1243,15 +1249,19 @@ static int32_t virtio_net_flush_tx(VirtIONetQueue *q)
out_num = elem->out_num;
out_sg = elem->out_sg;
if (out_num < 1) {
error_report("virtio-net header not in first element");
exit(1);
virtio_error(vdev, "virtio-net header not in first element");
virtqueue_detach_element(q->tx_vq, elem, 0);
g_free(elem);
return -EINVAL;
}
if (n->has_vnet_hdr) {
if (iov_to_buf(out_sg, out_num, 0, &mhdr, n->guest_hdr_len) <
n->guest_hdr_len) {
error_report("virtio-net header incorrect");
exit(1);
virtio_error(vdev, "virtio-net header incorrect");
virtqueue_detach_element(q->tx_vq, elem, 0);
g_free(elem);
return -EINVAL;
}
if (n->needs_vnet_hdr_swap) {
virtio_net_hdr_swap(vdev, (void *) &mhdr);
@ -1319,7 +1329,9 @@ static void virtio_net_handle_tx_timer(VirtIODevice *vdev, VirtQueue *vq)
virtio_queue_set_notification(vq, 1);
timer_del(q->tx_timer);
q->tx_waiting = 0;
virtio_net_flush_tx(q);
if (virtio_net_flush_tx(q) == -EINVAL) {
return;
}
} else {
timer_mod(q->tx_timer,
qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + n->tx_timeout);
@ -1390,8 +1402,9 @@ static void virtio_net_tx_bh(void *opaque)
}
ret = virtio_net_flush_tx(q);
if (ret == -EBUSY) {
return; /* Notification re-enable handled by tx_complete */
if (ret == -EBUSY || ret == -EINVAL) {
return; /* Notification re-enable handled by tx_complete or device
* broken */
}
/* If we flush a full burst of packets, assume there are
@ -1406,7 +1419,10 @@ static void virtio_net_tx_bh(void *opaque)
* anything that may have come in while we weren't looking. If
* we find something, assume the guest is still active and reschedule */
virtio_queue_set_notification(q->tx_vq, 1);
if (virtio_net_flush_tx(q) > 0) {
ret = virtio_net_flush_tx(q);
if (ret == -EINVAL) {
return;
} else if (ret > 0) {
virtio_queue_set_notification(q->tx_vq, 0);
qemu_bh_schedule(q->tx_bh);
q->tx_waiting = 1;
@ -1498,17 +1514,6 @@ static void virtio_net_set_multiqueue(VirtIONet *n, int multiqueue)
virtio_net_set_queues(n);
}
static void virtio_net_save(QEMUFile *f, void *opaque, size_t size)
{
VirtIONet *n = opaque;
VirtIODevice *vdev = VIRTIO_DEVICE(n);
/* At this point, backend must be stopped, otherwise
* it might keep writing to memory. */
assert(!n->vhost_started);
virtio_save(vdev, f);
}
static void virtio_net_save_device(VirtIODevice *vdev, QEMUFile *f)
{
VirtIONet *n = VIRTIO_NET(vdev);
@ -1544,14 +1549,6 @@ static void virtio_net_save_device(VirtIODevice *vdev, QEMUFile *f)
}
}
static int virtio_net_load(QEMUFile *f, void *opaque, size_t size)
{
VirtIONet *n = opaque;
VirtIODevice *vdev = VIRTIO_DEVICE(n);
return virtio_load(vdev, f, VIRTIO_NET_VM_VERSION);
}
static int virtio_net_load_device(VirtIODevice *vdev, QEMUFile *f,
int version_id)
{
@ -1854,8 +1851,25 @@ static void virtio_net_instance_init(Object *obj)
DEVICE(n), NULL);
}
VMSTATE_VIRTIO_DEVICE(net, VIRTIO_NET_VM_VERSION, virtio_net_load,
virtio_net_save);
static void virtio_net_pre_save(void *opaque)
{
VirtIONet *n = opaque;
/* At this point, backend must be stopped, otherwise
* it might keep writing to memory. */
assert(!n->vhost_started);
}
static const VMStateDescription vmstate_virtio_net = {
.name = "virtio-net",
.minimum_version_id = VIRTIO_NET_VM_VERSION,
.version_id = VIRTIO_NET_VM_VERSION,
.fields = (VMStateField[]) {
VMSTATE_VIRTIO_DEVICE,
VMSTATE_END_OF_LIST()
},
.pre_save = virtio_net_pre_save,
};
static Property virtio_net_properties[] = {
DEFINE_PROP_BIT("csum", VirtIONet, host_features, VIRTIO_NET_F_CSUM, true),


@ -69,11 +69,9 @@ void spapr_cpu_init(sPAPRMachineState *spapr, PowerPCCPU *cpu, Error **errp)
}
/* Set NUMA node for the added CPUs */
for (i = 0; i < nb_numa_nodes; i++) {
if (test_bit(cs->cpu_index, numa_info[i].node_cpu)) {
i = numa_get_node_for_cpu(cs->cpu_index);
if (i < nb_numa_nodes) {
cs->numa_node = i;
break;
}
}
xics_cpu_setup(spapr->xics, cpu);


@ -81,10 +81,11 @@ static void virtio_scsi_complete_req(VirtIOSCSIReq *req)
virtio_scsi_free_req(req);
}
static void virtio_scsi_bad_req(void)
static void virtio_scsi_bad_req(VirtIOSCSIReq *req)
{
error_report("wrong size for virtio-scsi headers");
exit(1);
virtio_error(VIRTIO_DEVICE(req->dev), "wrong size for virtio-scsi headers");
virtqueue_detach_element(req->vq, &req->elem, 0);
virtio_scsi_free_req(req);
}
static size_t qemu_sgl_concat(VirtIOSCSIReq *req, struct iovec *iov,
@ -387,7 +388,7 @@ static void virtio_scsi_handle_ctrl_req(VirtIOSCSI *s, VirtIOSCSIReq *req)
if (iov_to_buf(req->elem.out_sg, req->elem.out_num, 0,
&type, sizeof(type)) < sizeof(type)) {
virtio_scsi_bad_req();
virtio_scsi_bad_req(req);
return;
}
@ -395,7 +396,8 @@ static void virtio_scsi_handle_ctrl_req(VirtIOSCSI *s, VirtIOSCSIReq *req)
if (type == VIRTIO_SCSI_T_TMF) {
if (virtio_scsi_parse_req(req, sizeof(VirtIOSCSICtrlTMFReq),
sizeof(VirtIOSCSICtrlTMFResp)) < 0) {
virtio_scsi_bad_req();
virtio_scsi_bad_req(req);
return;
} else {
r = virtio_scsi_do_tmf(s, req);
}
@ -404,7 +406,8 @@ static void virtio_scsi_handle_ctrl_req(VirtIOSCSI *s, VirtIOSCSIReq *req)
type == VIRTIO_SCSI_T_AN_SUBSCRIBE) {
if (virtio_scsi_parse_req(req, sizeof(VirtIOSCSICtrlANReq),
sizeof(VirtIOSCSICtrlANResp)) < 0) {
virtio_scsi_bad_req();
virtio_scsi_bad_req(req);
return;
} else {
req->resp.an.event_actual = 0;
req->resp.an.response = VIRTIO_SCSI_S_OK;
@ -521,7 +524,7 @@ static void virtio_scsi_fail_cmd_req(VirtIOSCSIReq *req)
virtio_scsi_complete_cmd_req(req);
}
static bool virtio_scsi_handle_cmd_req_prepare(VirtIOSCSI *s, VirtIOSCSIReq *req)
static int virtio_scsi_handle_cmd_req_prepare(VirtIOSCSI *s, VirtIOSCSIReq *req)
{
VirtIOSCSICommon *vs = &s->parent_obj;
SCSIDevice *d;
@ -532,17 +535,18 @@ static bool virtio_scsi_handle_cmd_req_prepare(VirtIOSCSI *s, VirtIOSCSIReq *req
if (rc < 0) {
if (rc == -ENOTSUP) {
virtio_scsi_fail_cmd_req(req);
return -ENOTSUP;
} else {
virtio_scsi_bad_req();
virtio_scsi_bad_req(req);
return -EINVAL;
}
return false;
}
d = virtio_scsi_device_find(s, req->req.cmd.lun);
if (!d) {
req->resp.cmd.response = VIRTIO_SCSI_S_BAD_TARGET;
virtio_scsi_complete_cmd_req(req);
return false;
return -ENOENT;
}
virtio_scsi_ctx_check(s, d);
req->sreq = scsi_req_new(d, req->req.cmd.tag,
@ -554,11 +558,11 @@ static bool virtio_scsi_handle_cmd_req_prepare(VirtIOSCSI *s, VirtIOSCSIReq *req
req->sreq->cmd.xfer > req->qsgl.size)) {
req->resp.cmd.response = VIRTIO_SCSI_S_OVERRUN;
virtio_scsi_complete_cmd_req(req);
return false;
return -ENOBUFS;
}
scsi_req_ref(req->sreq);
blk_io_plug(d->conf.blk);
return true;
return 0;
}
static void virtio_scsi_handle_cmd_req_submit(VirtIOSCSI *s, VirtIOSCSIReq *req)
@ -574,11 +578,24 @@ static void virtio_scsi_handle_cmd_req_submit(VirtIOSCSI *s, VirtIOSCSIReq *req)
void virtio_scsi_handle_cmd_vq(VirtIOSCSI *s, VirtQueue *vq)
{
VirtIOSCSIReq *req, *next;
int ret;
QTAILQ_HEAD(, VirtIOSCSIReq) reqs = QTAILQ_HEAD_INITIALIZER(reqs);
while ((req = virtio_scsi_pop_req(s, vq))) {
if (virtio_scsi_handle_cmd_req_prepare(s, req)) {
ret = virtio_scsi_handle_cmd_req_prepare(s, req);
if (!ret) {
QTAILQ_INSERT_TAIL(&reqs, req, next);
} else if (ret == -EINVAL) {
/* The device is broken and shouldn't process any request */
while (!QTAILQ_EMPTY(&reqs)) {
req = QTAILQ_FIRST(&reqs);
QTAILQ_REMOVE(&reqs, req, next);
blk_io_unplug(req->sreq->dev->conf.blk);
scsi_req_unref(req->sreq);
virtqueue_detach_element(req->vq, &req->elem, 0);
virtio_scsi_free_req(req);
}
}
}
@ -627,8 +644,9 @@ static void virtio_scsi_set_config(VirtIODevice *vdev,
if ((uint32_t) virtio_ldl_p(vdev, &scsiconf->sense_size) >= 65536 ||
(uint32_t) virtio_ldl_p(vdev, &scsiconf->cdb_size) >= 256) {
error_report("bad data written to virtio-scsi configuration space");
exit(1);
virtio_error(vdev,
"bad data written to virtio-scsi configuration space");
return;
}
vs->sense_size = virtio_ldl_p(vdev, &scsiconf->sense_size);
@ -663,22 +681,6 @@ static void virtio_scsi_reset(VirtIODevice *vdev)
s->events_dropped = false;
}
/* The device does not have anything to save beyond the virtio data.
* Request data is saved with callbacks from SCSI devices.
*/
static void virtio_scsi_save(QEMUFile *f, void *opaque, size_t size)
{
VirtIODevice *vdev = VIRTIO_DEVICE(opaque);
virtio_save(vdev, f);
}
static int virtio_scsi_load(QEMUFile *f, void *opaque, size_t size)
{
VirtIODevice *vdev = VIRTIO_DEVICE(opaque);
return virtio_load(vdev, f, 1);
}
void virtio_scsi_push_event(VirtIOSCSI *s, SCSIDevice *dev,
uint32_t event, uint32_t reason)
{
@ -708,7 +710,8 @@ void virtio_scsi_push_event(VirtIOSCSI *s, SCSIDevice *dev,
}
if (virtio_scsi_parse_req(req, 0, sizeof(VirtIOSCSIEvent))) {
virtio_scsi_bad_req();
virtio_scsi_bad_req(req);
goto out;
}
evt = &req->resp.event;
@ -921,7 +924,15 @@ static Property virtio_scsi_properties[] = {
DEFINE_PROP_END_OF_LIST(),
};
VMSTATE_VIRTIO_DEVICE(scsi, 1, virtio_scsi_load, virtio_scsi_save);
static const VMStateDescription vmstate_virtio_scsi = {
.name = "virtio-scsi",
.minimum_version_id = 1,
.version_id = 1,
.fields = (VMStateField[]) {
VMSTATE_VIRTIO_DEVICE,
VMSTATE_END_OF_LIST()
},
};
static void virtio_scsi_common_class_init(ObjectClass *klass, void *data)
{


@ -236,17 +236,6 @@ out:
g_free(elem);
}
static void vhost_vsock_save(QEMUFile *f, void *opaque, size_t size)
{
VHostVSock *vsock = opaque;
VirtIODevice *vdev = VIRTIO_DEVICE(vsock);
/* At this point, backend must be stopped, otherwise
* it might keep writing to memory. */
assert(!vsock->vhost_dev.started);
virtio_save(vdev, f);
}
static void vhost_vsock_post_load_timer_cleanup(VHostVSock *vsock)
{
if (!vsock->post_load_timer) {
@ -266,16 +255,19 @@ static void vhost_vsock_post_load_timer_cb(void *opaque)
vhost_vsock_send_transport_reset(vsock);
}
static int vhost_vsock_load(QEMUFile *f, void *opaque, size_t size)
static void vhost_vsock_pre_save(void *opaque)
{
VHostVSock *vsock = opaque;
/* At this point, backend must be stopped, otherwise
* it might keep writing to memory. */
assert(!vsock->vhost_dev.started);
}
static int vhost_vsock_post_load(void *opaque, int version_id)
{
VHostVSock *vsock = opaque;
VirtIODevice *vdev = VIRTIO_DEVICE(vsock);
int ret;
ret = virtio_load(vdev, f, VHOST_VSOCK_SAVEVM_VERSION);
if (ret) {
return ret;
}
if (virtio_queue_get_addr(vdev, 2)) {
/* Defer transport reset event to a vm clock timer so that virtqueue
@ -288,12 +280,20 @@ static int vhost_vsock_load(QEMUFile *f, void *opaque, size_t size)
vsock);
timer_mod(vsock->post_load_timer, 1);
}
return 0;
}
VMSTATE_VIRTIO_DEVICE(vhost_vsock, VHOST_VSOCK_SAVEVM_VERSION,
vhost_vsock_load, vhost_vsock_save);
static const VMStateDescription vmstate_virtio_vhost_vsock = {
.name = "virtio-vhost_vsock",
.minimum_version_id = VHOST_VSOCK_SAVEVM_VERSION,
.version_id = VHOST_VSOCK_SAVEVM_VERSION,
.fields = (VMStateField[]) {
VMSTATE_VIRTIO_DEVICE,
VMSTATE_END_OF_LIST()
},
.pre_save = vhost_vsock_pre_save,
.post_load = vhost_vsock_post_load,
};
static void vhost_vsock_device_realize(DeviceState *dev, Error **errp)
{


@ -34,13 +34,11 @@
static void balloon_page(void *addr, int deflate)
{
#if defined(__linux__)
if (!qemu_balloon_is_inhibited() && (!kvm_enabled() ||
kvm_has_sync_mmu())) {
qemu_madvise(addr, BALLOON_PAGE_SIZE,
deflate ? QEMU_MADV_WILLNEED : QEMU_MADV_DONTNEED);
}
#endif
}
static const char *balloon_stat_names[] = {
@ -404,11 +402,6 @@ static void virtio_balloon_save_device(VirtIODevice *vdev, QEMUFile *f)
qemu_put_be32(f, s->actual);
}
static int virtio_balloon_load(QEMUFile *f, void *opaque, size_t size)
{
return virtio_load(VIRTIO_DEVICE(opaque), f, 1);
}
static int virtio_balloon_load_device(VirtIODevice *vdev, QEMUFile *f,
int version_id)
{
@ -494,7 +487,15 @@ static void virtio_balloon_instance_init(Object *obj)
NULL, s, NULL);
}
VMSTATE_VIRTIO_DEVICE(balloon, 1, virtio_balloon_load, virtio_vmstate_save);
static const VMStateDescription vmstate_virtio_balloon = {
.name = "virtio-balloon",
.minimum_version_id = 1,
.version_id = 1,
.fields = (VMStateField[]) {
VMSTATE_VIRTIO_DEVICE,
VMSTATE_END_OF_LIST()
},
};
static Property virtio_balloon_properties[] = {
DEFINE_PROP_BIT("deflate-on-oom", VirtIOBalloon, host_features,


@ -120,15 +120,9 @@ static uint64_t get_features(VirtIODevice *vdev, uint64_t f, Error **errp)
return f;
}
static int virtio_rng_load(QEMUFile *f, void *opaque, size_t size)
static int virtio_rng_post_load(void *opaque, int version_id)
{
VirtIORNG *vrng = opaque;
int ret;
ret = virtio_load(VIRTIO_DEVICE(vrng), f, 1);
if (ret != 0) {
return ret;
}
/* We may have an element ready but couldn't process it due to a quota
* limit. Make sure to try again after live migration when the quota may
@ -216,7 +210,16 @@ static void virtio_rng_device_unrealize(DeviceState *dev, Error **errp)
virtio_cleanup(vdev);
}
VMSTATE_VIRTIO_DEVICE(rng, 1, virtio_rng_load, virtio_vmstate_save);
static const VMStateDescription vmstate_virtio_rng = {
.name = "virtio-rng",
.minimum_version_id = 1,
.version_id = 1,
.fields = (VMStateField[]) {
VMSTATE_VIRTIO_DEVICE,
VMSTATE_END_OF_LIST()
},
.post_load = virtio_rng_post_load,
};
static Property virtio_rng_properties[] = {
/* Set a default rate limit of 2^47 bytes per minute or roughly 2TB/s. If


@ -264,12 +264,35 @@ static void virtqueue_unmap_sg(VirtQueue *vq, const VirtQueueElement *elem,
0, elem->out_sg[i].iov_len);
}
/* virtqueue_detach_element:
* @vq: The #VirtQueue
* @elem: The #VirtQueueElement
* @len: number of bytes written
*
* Detach the element from the virtqueue. This function is suitable for device
* reset or other situations where a #VirtQueueElement is simply freed and will
* not be pushed or discarded.
*/
void virtqueue_detach_element(VirtQueue *vq, const VirtQueueElement *elem,
unsigned int len)
{
vq->inuse--;
virtqueue_unmap_sg(vq, elem, len);
}
/* virtqueue_discard:
* @vq: The #VirtQueue
* @elem: The #VirtQueueElement
* @len: number of bytes written
*
* Pretend the most recent element wasn't popped from the virtqueue. The next
* call to virtqueue_pop() will refetch the element.
*/
void virtqueue_discard(VirtQueue *vq, const VirtQueueElement *elem,
unsigned int len)
{
vq->last_avail_idx--;
vq->inuse--;
virtqueue_unmap_sg(vq, elem, len);
virtqueue_detach_element(vq, elem, len);
}
/* virtqueue_rewind:
@ -1617,11 +1640,26 @@ void virtio_save(VirtIODevice *vdev, QEMUFile *f)
}
/* A wrapper for use as a VMState .put function */
void virtio_vmstate_save(QEMUFile *f, void *opaque, size_t size)
static void virtio_device_put(QEMUFile *f, void *opaque, size_t size)
{
virtio_save(VIRTIO_DEVICE(opaque), f);
}
/* A wrapper for use as a VMState .get function */
static int virtio_device_get(QEMUFile *f, void *opaque, size_t size)
{
VirtIODevice *vdev = VIRTIO_DEVICE(opaque);
DeviceClass *dc = DEVICE_CLASS(VIRTIO_DEVICE_GET_CLASS(vdev));
return virtio_load(vdev, f, dc->vmsd->version_id);
}
const VMStateInfo virtio_vmstate_info = {
.name = "virtio",
.get = virtio_device_get,
.put = virtio_device_put,
};
static int virtio_set_features_nocheck(VirtIODevice *vdev, uint64_t val)
{
VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
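
The guest-error-handling commits in this pull share one shape: instead of error_report() followed by exit(1), a device that receives a malformed request calls virtio_error() to mark itself broken, detaches the popped element with the virtqueue_detach_element() helper added above, frees it, and stops processing until the next reset. A condensed sketch of a handler using that pattern (the "virtio-foo" device and its request layout are illustrative, not from the series):

    static void virtio_foo_handle_output(VirtIODevice *vdev, VirtQueue *vq)
    {
        VirtQueueElement *elem;

        while ((elem = virtqueue_pop(vq, sizeof(VirtQueueElement))) != NULL) {
            if (elem->in_num == 0) {
                /* Malformed request: mark the device broken, don't kill QEMU. */
                virtio_error(vdev, "virtio-foo request without in buffers");
                virtqueue_detach_element(vq, elem, 0);
                g_free(elem);
                return;
            }
            /* ... process the request ... */
            virtqueue_push(vq, elem, 0);
            virtio_notify(vdev, vq);
            g_free(elem);
        }
    }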


@ -6,6 +6,10 @@
.driver = "virtio-pci",\
.property = "page-per-vq",\
.value = "on",\
},{\
.driver = "virtio-serial-device",\
.property = "emergency-write",\
.value = "off",\
},{\
.driver = "ioapic",\
.property = "version",\


@ -80,14 +80,6 @@ typedef struct MultiReqBuffer {
bool is_write;
} MultiReqBuffer;
void virtio_blk_init_request(VirtIOBlock *s, VirtQueue *vq,
VirtIOBlockReq *req);
void virtio_blk_free_request(VirtIOBlockReq *req);
void virtio_blk_handle_request(VirtIOBlockReq *req, MultiReqBuffer *mrb);
void virtio_blk_submit_multireq(BlockBackend *blk, MultiReqBuffer *mrb);
void virtio_blk_handle_vq(VirtIOBlock *s, VirtQueue *vq);
#endif


@ -184,6 +184,8 @@ struct VirtIOSerial {
struct VirtIOSerialPostLoad *post_load;
virtio_serial_conf serial;
uint64_t host_features;
};
/* Interface to the virtio-serial bus */


@ -155,6 +155,8 @@ void *virtqueue_alloc_element(size_t sz, unsigned out_num, unsigned in_num);
void virtqueue_push(VirtQueue *vq, const VirtQueueElement *elem,
unsigned int len);
void virtqueue_flush(VirtQueue *vq, unsigned int count);
void virtqueue_detach_element(VirtQueue *vq, const VirtQueueElement *elem,
unsigned int len);
void virtqueue_discard(VirtQueue *vq, const VirtQueueElement *elem,
unsigned int len);
bool virtqueue_rewind(VirtQueue *vq, unsigned int num);
@ -175,25 +177,14 @@ bool virtio_should_notify(VirtIODevice *vdev, VirtQueue *vq);
void virtio_notify(VirtIODevice *vdev, VirtQueue *vq);
void virtio_save(VirtIODevice *vdev, QEMUFile *f);
void virtio_vmstate_save(QEMUFile *f, void *opaque, size_t size);
#define VMSTATE_VIRTIO_DEVICE(devname, v, getf, putf) \
static const VMStateDescription vmstate_virtio_ ## devname = { \
.name = "virtio-" #devname , \
.minimum_version_id = v, \
.version_id = v, \
.fields = (VMStateField[]) { \
{ \
.name = "virtio", \
.info = &(const VMStateInfo) {\
.name = "virtio", \
.get = getf, \
.put = putf, \
}, \
.flags = VMS_SINGLE, \
}, \
VMSTATE_END_OF_LIST() \
} \
extern const VMStateInfo virtio_vmstate_info;
#define VMSTATE_VIRTIO_DEVICE \
{ \
.name = "virtio", \
.info = &virtio_vmstate_info, \
.flags = VMS_SINGLE, \
}
int virtio_load(VirtIODevice *vdev, QEMUFile *f, int version_id);


@ -9,6 +9,7 @@
#include "qapi/qmp/qobject.h"
#include "qapi/qmp/qstring.h"
#include "qemu/main-loop.h"
#include "qemu/bitmap.h"
/* character device */
@ -58,6 +59,20 @@ struct ParallelIOArg {
typedef void IOEventHandler(void *opaque, int event);
typedef enum {
/* Whether the chardev peer is able to close and
* reopen the data channel, thus requiring support
* for qemu_chr_wait_connected() to wait for a
* valid connection */
QEMU_CHAR_FEATURE_RECONNECTABLE,
/* Whether it is possible to send/recv file descriptors
* over the data channel */
QEMU_CHAR_FEATURE_FD_PASS,
QEMU_CHAR_FEATURE_LAST,
} CharDriverFeature;
struct CharDriverState {
QemuMutex chr_write_lock;
void (*init)(struct CharDriverState *s);
@ -94,8 +109,8 @@ struct CharDriverState {
int is_mux;
int mux_idx;
guint fd_in_tag;
QemuOpts *opts;
bool replay;
DECLARE_BITMAP(features, QEMU_CHAR_FEATURE_LAST);
QTAILQ_ENTRY(CharDriverState) next;
};
@ -438,6 +453,10 @@ int qemu_chr_add_client(CharDriverState *s, int fd);
CharDriverState *qemu_chr_find(const char *name);
bool chr_is_ringbuf(const CharDriverState *chr);
bool qemu_chr_has_feature(CharDriverState *chr,
CharDriverFeature feature);
void qemu_chr_set_feature(CharDriverState *chr,
CharDriverFeature feature);
QemuOpts *qemu_chr_parse_compat(const char *label, const char *filename);
void register_char_driver(const char *name, ChardevBackendKind kind,
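
The chardev-related commits replace sniffing of a chardev's QemuOpts with explicit capability bits: the socket backend sets QEMU_CHAR_FEATURE_RECONNECTABLE (and, for unix sockets, QEMU_CHAR_FEATURE_FD_PASS) via qemu_chr_set_feature(), and users such as vhost-user and colo-compare query qemu_chr_has_feature() instead of parsing options. A small sketch of the consumer side, with the function name illustrative and error handling as in the diff:

    /* Sketch only: validating a chardev the way net/vhost-user.c now does. */
    static int check_vhost_user_chardev(CharDriverState *chr, Error **errp)
    {
        if (!qemu_chr_has_feature(chr, QEMU_CHAR_FEATURE_RECONNECTABLE)) {
            error_setg(errp, "chardev \"%s\" is not reconnectable", chr->label);
            return -1;
        }
        if (!qemu_chr_has_feature(chr, QEMU_CHAR_FEATURE_FD_PASS)) {
            error_setg(errp, "chardev \"%s\" does not support FD passing", chr->label);
            return -1;
        }
        return 0;
    }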


@ -32,4 +32,7 @@ void numa_set_mem_node_id(ram_addr_t addr, uint64_t size, uint32_t node);
void numa_unset_mem_node_id(ram_addr_t addr, uint64_t size, uint32_t node);
uint32_t numa_get_node(ram_addr_t addr, Error **errp);
/* on success returns node index in numa_info,
* on failure returns nb_numa_nodes */
int numa_get_node_for_cpu(int idx);
#endif


@ -564,29 +564,6 @@ static void compare_sec_rs_finalize(SocketReadState *sec_rs)
}
}
static int compare_chardev_opts(void *opaque,
const char *name, const char *value,
Error **errp)
{
CompareChardevProps *props = opaque;
if (strcmp(name, "backend") == 0 &&
strcmp(value, "socket") == 0) {
props->is_socket = true;
return 0;
} else if (strcmp(name, "host") == 0 ||
(strcmp(name, "port") == 0) ||
(strcmp(name, "server") == 0) ||
(strcmp(name, "wait") == 0) ||
(strcmp(name, "path") == 0)) {
return 0;
} else {
error_setg(errp,
"COLO-compare does not support a chardev with option %s=%s",
name, value);
return -1;
}
}
/*
* Return 0 is success.
@ -606,12 +583,9 @@ static int find_and_check_chardev(CharDriverState **chr,
}
memset(&props, 0, sizeof(props));
if (qemu_opt_foreach((*chr)->opts, compare_chardev_opts, &props, errp)) {
return 1;
}
if (!props.is_socket) {
error_setg(errp, "chardev \"%s\" is not a tcp socket",
if (!qemu_chr_has_feature(*chr, QEMU_CHAR_FEATURE_RECONNECTABLE)) {
error_setg(errp, "chardev \"%s\" is not reconnectable",
chr_name);
return 1;
}


@ -27,11 +27,6 @@ typedef struct VhostUserState {
bool started;
} VhostUserState;
typedef struct VhostUserChardevProps {
bool is_socket;
bool is_unix;
} VhostUserChardevProps;
VHostNetState *vhost_user_get_vhost_net(NetClientState *nc)
{
VhostUserState *s = DO_UPCAST(VhostUserState, nc, nc);
@ -278,45 +273,23 @@ static int net_vhost_user_init(NetClientState *peer, const char *device,
return 0;
}
static int net_vhost_chardev_opts(void *opaque,
const char *name, const char *value,
Error **errp)
{
VhostUserChardevProps *props = opaque;
if (strcmp(name, "backend") == 0 && strcmp(value, "socket") == 0) {
props->is_socket = true;
} else if (strcmp(name, "path") == 0) {
props->is_unix = true;
} else if (strcmp(name, "server") == 0) {
} else {
error_setg(errp,
"vhost-user does not support a chardev with option %s=%s",
name, value);
return -1;
}
return 0;
}
static CharDriverState *net_vhost_parse_chardev(
static CharDriverState *net_vhost_claim_chardev(
const NetdevVhostUserOptions *opts, Error **errp)
{
CharDriverState *chr = qemu_chr_find(opts->chardev);
VhostUserChardevProps props;
if (chr == NULL) {
error_setg(errp, "chardev \"%s\" not found", opts->chardev);
return NULL;
}
/* inspect chardev opts */
memset(&props, 0, sizeof(props));
if (qemu_opt_foreach(chr->opts, net_vhost_chardev_opts, &props, errp)) {
if (!qemu_chr_has_feature(chr, QEMU_CHAR_FEATURE_RECONNECTABLE)) {
error_setg(errp, "chardev \"%s\" is not reconnectable",
opts->chardev);
return NULL;
}
if (!props.is_socket || !props.is_unix) {
error_setg(errp, "chardev \"%s\" is not a unix socket",
if (!qemu_chr_has_feature(chr, QEMU_CHAR_FEATURE_FD_PASS)) {
error_setg(errp, "chardev \"%s\" does not support FD passing",
opts->chardev);
return NULL;
}
@ -357,7 +330,7 @@ int net_init_vhost_user(const Netdev *netdev, const char *name,
assert(netdev->type == NET_CLIENT_DRIVER_VHOST_USER);
vhost_user_opts = &netdev->u.vhost_user;
chr = net_vhost_parse_chardev(vhost_user_opts, errp);
chr = net_vhost_claim_chardev(vhost_user_opts, errp);
if (!chr) {
return -1;
}

numa.c

@ -550,3 +550,15 @@ MemdevList *qmp_query_memdev(Error **errp)
object_child_foreach(obj, query_memdev, &list);
return list;
}
int numa_get_node_for_cpu(int idx)
{
int i;
for (i = 0; i < nb_numa_nodes; i++) {
if (test_bit(idx, numa_info[i].node_cpu)) {
break;
}
}
return i;
}
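
As the new comment in include/sysemu/numa.h states, the helper returns nb_numa_nodes when the CPU has no node assignment, so every converted caller in this pull tests the result with a "< nb_numa_nodes" guard rather than a sentinel constant. A one-line usage sketch (variable names illustrative):

    int node = numa_get_node_for_cpu(cpu_index);
    if (node < nb_numa_nodes) {
        /* CPU belongs to NUMA node 'node'. */
    } else {
        /* No NUMA assignment for this CPU; keep the default behaviour. */
    }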


@ -4005,7 +4005,6 @@ CharDriverState *qemu_chr_new_from_opts(QemuOpts *opts,
}
chr = qemu_chr_find(id);
chr->opts = opts;
qapi_out:
qapi_free_ChardevBackend(backend);
@ -4014,7 +4013,6 @@ qapi_out:
return chr;
err:
qemu_opts_del(opts);
return NULL;
}
@ -4042,6 +4040,7 @@ CharDriverState *qemu_chr_new_noreplay(const char *label, const char *filename,
qemu_chr_fe_claim_no_fail(chr);
monitor_init(chr, MONITOR_USE_READLINE);
}
qemu_opts_del(opts);
return chr;
}
@ -4141,7 +4140,6 @@ static void qemu_chr_free_common(CharDriverState *chr)
{
g_free(chr->filename);
g_free(chr->label);
qemu_opts_del(chr->opts);
if (chr->logfd != -1) {
close(chr->logfd);
}
@ -4522,6 +4520,11 @@ static CharDriverState *qmp_chardev_open_socket(const char *id,
s->addr = QAPI_CLONE(SocketAddress, sock->addr);
qemu_chr_set_feature(chr, QEMU_CHAR_FEATURE_RECONNECTABLE);
if (s->is_unix) {
qemu_chr_set_feature(chr, QEMU_CHAR_FEATURE_FD_PASS);
}
chr->opaque = s;
chr->chr_wait_connected = tcp_chr_wait_connected;
chr->chr_write = tcp_chr_write;
@ -4605,6 +4608,19 @@ static CharDriverState *qmp_chardev_open_udp(const char *id,
return qemu_chr_open_udp(sioc, common, errp);
}
bool qemu_chr_has_feature(CharDriverState *chr,
CharDriverFeature feature)
{
return test_bit(feature, chr->features);
}
void qemu_chr_set_feature(CharDriverState *chr,
CharDriverFeature feature)
{
return set_bit(feature, chr->features);
}
ChardevReturn *qmp_chardev_add(const char *id, ChardevBackend *backend,
Error **errp)
{

4 binary files changed (contents not shown)


@ -811,7 +811,8 @@ static void test_acpi_piix4_tcg_cphp(void)
memset(&data, 0, sizeof(data));
data.machine = MACHINE_PC;
data.variant = ".cphp";
test_acpi_one("-smp 2,cores=3,sockets=2,maxcpus=6",
test_acpi_one("-smp 2,cores=3,sockets=2,maxcpus=6"
" -numa node -numa node",
&data);
free_test_data(&data);
}
@ -823,7 +824,8 @@ static void test_acpi_q35_tcg_cphp(void)
memset(&data, 0, sizeof(data));
data.machine = MACHINE_Q35;
data.variant = ".cphp";
test_acpi_one(" -smp 2,cores=3,sockets=2,maxcpus=6",
test_acpi_one(" -smp 2,cores=3,sockets=2,maxcpus=6"
" -numa node -numa node",
&data);
free_test_data(&data);
}