Merge remote-tracking branch 'remotes/mst/tags/for_upstream' into staging

vhost, virtio, pci, pxe

Fixes all over the place.
New tests for pxe.

Signed-off-by: Michael S. Tsirkin <mst@redhat.com>

# gpg: Signature made Thu 18 Feb 2016 15:46:39 GMT using RSA key ID D28D5469
# gpg: Good signature from "Michael S. Tsirkin <mst@kernel.org>"
# gpg:                 aka "Michael S. Tsirkin <mst@redhat.com>"

* remotes/mst/tags/for_upstream:
  tests/vhost-user-bridge: add scattering of incoming packets
  vhost-user interrupt management fixes
  rules: filter out irrelevant files
  change type of pci_bridge_initfn() to void
  dec: convert to realize()
  tests: add pxe e1000 and virtio-pci tests
  msix: fix msix_vector_masked
  virtio: optimize virtio_access_is_big_endian() for little-endian targets
  vhost: simplify vhost_needs_vring_endian()
  vhost: move virtio 1.0 check to cross-endian helper
  virtio: move cross-endian helper to vhost
  vhost-net: revert support of cross-endian vnet headers
  virtio-net: use the backend cross-endian capabilities

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Committed by Peter Maydell on 2016-02-19 10:50:37 +00:00
commit 09125c5e76
25 changed files with 428 additions and 209 deletions


@ -36,7 +36,6 @@
#include "standard-headers/linux/virtio_ring.h"
#include "hw/virtio/vhost.h"
#include "hw/virtio/virtio-bus.h"
#include "hw/virtio/virtio-access.h"
struct vhost_net {
struct vhost_dev dev;
@ -197,27 +196,6 @@ static void vhost_net_set_vq_index(struct vhost_net *net, int vq_index)
net->dev.vq_index = vq_index;
}
static int vhost_net_set_vnet_endian(VirtIODevice *dev, NetClientState *peer,
bool set)
{
int r = 0;
if (virtio_vdev_has_feature(dev, VIRTIO_F_VERSION_1) ||
(virtio_legacy_is_cross_endian(dev) && !virtio_is_big_endian(dev))) {
r = qemu_set_vnet_le(peer, set);
if (r) {
error_report("backend does not support LE vnet headers");
}
} else if (virtio_legacy_is_cross_endian(dev)) {
r = qemu_set_vnet_be(peer, set);
if (r) {
error_report("backend does not support BE vnet headers");
}
}
return r;
}
static int vhost_net_start_one(struct vhost_net *net,
VirtIODevice *dev)
{
@ -298,25 +276,32 @@ int vhost_net_start(VirtIODevice *dev, NetClientState *ncs,
BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(dev)));
VirtioBusState *vbus = VIRTIO_BUS(qbus);
VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(vbus);
int r, e, i, j;
int r, e, i;
if (!k->set_guest_notifiers) {
error_report("binding does not support guest notifiers");
return -ENOSYS;
}
for (j = 0; j < total_queues; j++) {
r = vhost_net_set_vnet_endian(dev, ncs[j].peer, true);
if (r < 0) {
goto err_endian;
for (i = 0; i < total_queues; i++) {
struct vhost_net *net;
net = get_vhost_net(ncs[i].peer);
vhost_net_set_vq_index(net, i * 2);
/* Suppress guest notifier masking on vhost-user,
* because vhost-user does not handle interrupt
* masking/unmasking properly.
*/
if (net->nc->info->type == NET_CLIENT_OPTIONS_KIND_VHOST_USER) {
dev->use_guest_notifier_mask = false;
}
vhost_net_set_vq_index(get_vhost_net(ncs[j].peer), j * 2);
}
}
r = k->set_guest_notifiers(qbus->parent, total_queues * 2, true);
if (r < 0) {
error_report("Error binding guest notifier: %d", -r);
goto err_endian;
goto err;
}
for (i = 0; i < total_queues; i++) {
@ -338,10 +323,7 @@ err_start:
fprintf(stderr, "vhost guest notifier cleanup failed: %d\n", e);
fflush(stderr);
}
err_endian:
while (--j >= 0) {
vhost_net_set_vnet_endian(dev, ncs[j].peer, false);
}
err:
return r;
}
@ -363,8 +345,6 @@ void vhost_net_stop(VirtIODevice *dev, NetClientState *ncs,
fflush(stderr);
}
assert(r >= 0);
assert(vhost_net_set_vnet_endian(dev, ncs[0].peer, false) >= 0);
}
void vhost_net_cleanup(struct vhost_net *net)


@ -129,6 +129,13 @@ static void virtio_net_vhost_status(VirtIONet *n, uint8_t status)
if (!n->vhost_started) {
int r, i;
if (n->needs_vnet_hdr_swap) {
error_report("backend does not support %s vnet headers; "
"falling back on userspace virtio",
virtio_is_big_endian(vdev) ? "BE" : "LE");
return;
}
/* Any packets outstanding? Purge them to avoid touching rings
* when vhost is running.
*/
@ -153,6 +160,59 @@ static void virtio_net_vhost_status(VirtIONet *n, uint8_t status)
}
}
static int virtio_net_set_vnet_endian_one(VirtIODevice *vdev,
NetClientState *peer,
bool enable)
{
if (virtio_is_big_endian(vdev)) {
return qemu_set_vnet_be(peer, enable);
} else {
return qemu_set_vnet_le(peer, enable);
}
}
static bool virtio_net_set_vnet_endian(VirtIODevice *vdev, NetClientState *ncs,
int queues, bool enable)
{
int i;
for (i = 0; i < queues; i++) {
if (virtio_net_set_vnet_endian_one(vdev, ncs[i].peer, enable) < 0 &&
enable) {
while (--i >= 0) {
virtio_net_set_vnet_endian_one(vdev, ncs[i].peer, false);
}
return true;
}
}
return false;
}
static void virtio_net_vnet_endian_status(VirtIONet *n, uint8_t status)
{
VirtIODevice *vdev = VIRTIO_DEVICE(n);
int queues = n->multiqueue ? n->max_queues : 1;
if (virtio_net_started(n, status)) {
/* Before using the device, we tell the network backend about the
* endianness to use when parsing vnet headers. If the backend
* can't do it, we fall back to fixing the headers in the core
* virtio-net code.
*/
n->needs_vnet_hdr_swap = virtio_net_set_vnet_endian(vdev, n->nic->ncs,
queues, true);
} else if (virtio_net_started(n, vdev->status)) {
/* After using the device, we need to reset the network backend to
* the default (guest native endianness), otherwise the guest may
* lose network connectivity if it is rebooted into a different
* endianness.
*/
virtio_net_set_vnet_endian(vdev, n->nic->ncs, queues, false);
}
}
static void virtio_net_set_status(struct VirtIODevice *vdev, uint8_t status)
{
VirtIONet *n = VIRTIO_NET(vdev);
@ -160,6 +220,7 @@ static void virtio_net_set_status(struct VirtIODevice *vdev, uint8_t status)
int i;
uint8_t queue_status;
virtio_net_vnet_endian_status(n, status);
virtio_net_vhost_status(n, status);
for (i = 0; i < n->max_queues; i++) {
@ -963,7 +1024,10 @@ static void receive_header(VirtIONet *n, const struct iovec *iov, int iov_cnt,
void *wbuf = (void *)buf;
work_around_broken_dhclient(wbuf, wbuf + n->host_hdr_len,
size - n->host_hdr_len);
virtio_net_hdr_swap(VIRTIO_DEVICE(n), wbuf);
if (n->needs_vnet_hdr_swap) {
virtio_net_hdr_swap(VIRTIO_DEVICE(n), wbuf);
}
iov_from_buf(iov, iov_cnt, 0, buf, sizeof(struct virtio_net_hdr));
} else {
struct virtio_net_hdr hdr = {
@ -1184,7 +1248,7 @@ static int32_t virtio_net_flush_tx(VirtIONetQueue *q)
error_report("virtio-net header incorrect");
exit(1);
}
if (virtio_needs_swap(vdev)) {
if (n->needs_vnet_hdr_swap) {
virtio_net_hdr_swap(vdev, (void *) &mhdr);
sg2[0].iov_base = &mhdr;
sg2[0].iov_len = n->guest_hdr_len;
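
For context on the fallback path used above: when needs_vnet_hdr_swap is set, virtio-net converts the multi-byte vnet header fields between guest and host byte order itself, since the backend could not be told which endianness to use. The helper it calls already exists in hw/net/virtio-net.c and is untouched by this series; a rough sketch (the exact body may differ):

static void virtio_net_hdr_swap(VirtIODevice *vdev, struct virtio_net_hdr *hdr)
{
    /* Byte-swap every multi-byte header field between guest and host order. */
    virtio_tswap16s(vdev, &hdr->hdr_len);
    virtio_tswap16s(vdev, &hdr->gso_size);
    virtio_tswap16s(vdev, &hdr->csum_start);
    virtio_tswap16s(vdev, &hdr->csum_offset);
}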


@ -52,9 +52,9 @@ static int dec_map_irq(PCIDevice *pci_dev, int irq_num)
return irq_num;
}
static int dec_pci_bridge_initfn(PCIDevice *pci_dev)
static void dec_pci_bridge_realize(PCIDevice *pci_dev, Error **errp)
{
return pci_bridge_initfn(pci_dev, TYPE_PCI_BUS);
pci_bridge_initfn(pci_dev, TYPE_PCI_BUS);
}
static void dec_21154_pci_bridge_class_init(ObjectClass *klass, void *data)
@ -62,7 +62,7 @@ static void dec_21154_pci_bridge_class_init(ObjectClass *klass, void *data)
DeviceClass *dc = DEVICE_CLASS(klass);
PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
k->init = dec_pci_bridge_initfn;
k->realize = dec_pci_bridge_realize;
k->exit = pci_bridge_exitfn;
k->vendor_id = PCI_VENDOR_ID_DEC;
k->device_id = PCI_DEVICE_ID_DEC_21154;


@ -62,10 +62,7 @@ static int i82801b11_bridge_initfn(PCIDevice *d)
{
int rc;
rc = pci_bridge_initfn(d, TYPE_PCI_BUS);
if (rc < 0) {
return rc;
}
pci_bridge_initfn(d, TYPE_PCI_BUS);
rc = pci_bridge_ssvid_init(d, I82801ba_SSVID_OFFSET,
I82801ba_SSVID_SVID, I82801ba_SSVID_SSID);


@ -98,11 +98,7 @@ static int ioh3420_initfn(PCIDevice *d)
PCIESlot *s = PCIE_SLOT(d);
int rc;
rc = pci_bridge_initfn(d, TYPE_PCIE_BUS);
if (rc < 0) {
return rc;
}
pci_bridge_initfn(d, TYPE_PCIE_BUS);
pcie_port_init_reg(d);
rc = pci_bridge_ssvid_init(d, IOH_EP_SSVID_OFFSET,


@ -53,10 +53,8 @@ static int pci_bridge_dev_initfn(PCIDevice *dev)
PCIBridgeDev *bridge_dev = PCI_BRIDGE_DEV(dev);
int err;
err = pci_bridge_initfn(dev, TYPE_PCI_BUS);
if (err) {
goto bridge_error;
}
pci_bridge_initfn(dev, TYPE_PCI_BUS);
if (bridge_dev->flags & (1 << PCI_BRIDGE_DEV_F_SHPC_REQ)) {
dev->config[PCI_INTERRUPT_PIN] = 0x1;
memory_region_init(&bridge_dev->bar, OBJECT(dev), "shpc-bar",
@ -95,7 +93,7 @@ slotid_error:
}
shpc_error:
pci_bridge_exitfn(dev);
bridge_error:
return err;
}


@ -61,11 +61,7 @@ static int xio3130_downstream_initfn(PCIDevice *d)
PCIESlot *s = PCIE_SLOT(d);
int rc;
rc = pci_bridge_initfn(d, TYPE_PCIE_BUS);
if (rc < 0) {
return rc;
}
pci_bridge_initfn(d, TYPE_PCIE_BUS);
pcie_port_init_reg(d);
rc = msi_init(d, XIO3130_MSI_OFFSET, XIO3130_MSI_NR_VECTOR,


@ -57,11 +57,7 @@ static int xio3130_upstream_initfn(PCIDevice *d)
PCIEPort *p = PCIE_PORT(d);
int rc;
rc = pci_bridge_initfn(d, TYPE_PCIE_BUS);
if (rc < 0) {
return rc;
}
pci_bridge_initfn(d, TYPE_PCIE_BUS);
pcie_port_init_reg(d);
rc = msi_init(d, XIO3130_MSI_OFFSET, XIO3130_MSI_NR_VECTOR,


@ -635,12 +635,7 @@ static void pci_apb_set_irq(void *opaque, int irq_num, int level)
static int apb_pci_bridge_initfn(PCIDevice *dev)
{
int rc;
rc = pci_bridge_initfn(dev, TYPE_PCI_BUS);
if (rc < 0) {
return rc;
}
pci_bridge_initfn(dev, TYPE_PCI_BUS);
/*
* command register:


@ -80,10 +80,10 @@ static void msix_clr_pending(PCIDevice *dev, int vector)
static bool msix_vector_masked(PCIDevice *dev, unsigned int vector, bool fmask)
{
unsigned offset = vector * PCI_MSIX_ENTRY_SIZE;
uint32_t *data = (uint32_t *)&dev->msix_table[offset + PCI_MSIX_ENTRY_DATA];
uint8_t *data = &dev->msix_table[offset + PCI_MSIX_ENTRY_DATA];
/* MSIs on Xen can be remapped into pirqs. In those cases, masking
* and unmasking go through the PV evtchn path. */
if (xen_is_pirq_msi(*data)) {
if (xen_enabled() && xen_is_pirq_msi(pci_get_long(data))) {
return false;
}
return fmask || dev->msix_table[offset + PCI_MSIX_ENTRY_VECTOR_CTRL] &
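
Two things changed in msix_vector_masked(): the Xen pirq check is now gated on xen_enabled(), and the 32-bit MSI-X message data is read through pci_get_long() from a byte pointer instead of dereferencing a cast uint32_t pointer, so the value is read in little-endian byte order regardless of host endianness. For reference, pci_get_long() is roughly the following (shown here only for context, not part of this diff):

static inline uint32_t pci_get_long(const uint8_t *config)
{
    /* MSI-X table entries are stored little-endian. */
    return ldl_le_p(config);
}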


@ -333,7 +333,7 @@ void pci_bridge_reset(DeviceState *qdev)
}
/* default qdev initialization function for PCI-to-PCI bridge */
int pci_bridge_initfn(PCIDevice *dev, const char *typename)
void pci_bridge_initfn(PCIDevice *dev, const char *typename)
{
PCIBus *parent = dev->bus;
PCIBridge *br = PCI_BRIDGE(dev);
@ -379,7 +379,6 @@ int pci_bridge_initfn(PCIDevice *dev, const char *typename)
br->windows = pci_bridge_region_init(br);
QLIST_INIT(&sec_bus->child);
QLIST_INSERT_HEAD(&parent->child, sec_bus, sibling);
return 0;
}
/* default qdev clean up function for PCI-to-PCI bridge */


@ -749,6 +749,27 @@ static void vhost_log_stop(MemoryListener *listener,
/* FIXME: implement */
}
/* The vhost driver natively knows how to handle the vrings of non
* cross-endian legacy devices and modern devices. Only legacy devices
* exposed to a bi-endian guest may require the vhost driver to use a
* specific endianness.
*/
static inline bool vhost_needs_vring_endian(VirtIODevice *vdev)
{
if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
return false;
}
#ifdef TARGET_IS_BIENDIAN
#ifdef HOST_WORDS_BIGENDIAN
return vdev->device_endian == VIRTIO_DEVICE_ENDIAN_LITTLE;
#else
return vdev->device_endian == VIRTIO_DEVICE_ENDIAN_BIG;
#endif
#else
return false;
#endif
}
static int vhost_virtqueue_set_vring_endian_legacy(struct vhost_dev *dev,
bool is_big_endian,
int vhost_vq_index)
@ -799,8 +820,7 @@ static int vhost_virtqueue_start(struct vhost_dev *dev,
return -errno;
}
if (!virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1) &&
virtio_legacy_is_cross_endian(vdev)) {
if (vhost_needs_vring_endian(vdev)) {
r = vhost_virtqueue_set_vring_endian_legacy(dev,
virtio_is_big_endian(vdev),
vhost_vq_index);
@ -855,6 +875,14 @@ static int vhost_virtqueue_start(struct vhost_dev *dev,
/* Clear and discard previous events if any. */
event_notifier_test_and_clear(&vq->masked_notifier);
/* Init vring in unmasked state, unless guest_notifier_mask
* will do it later.
*/
if (!vdev->use_guest_notifier_mask) {
/* TODO: check and handle errors. */
vhost_virtqueue_mask(dev, vdev, idx, false);
}
return 0;
fail_kick:
@ -896,8 +924,7 @@ static void vhost_virtqueue_stop(struct vhost_dev *dev,
/* In the cross-endian case, we need to reset the vring endianness to
* native as legacy devices expect so by default.
*/
if (!virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1) &&
virtio_legacy_is_cross_endian(vdev)) {
if (vhost_needs_vring_endian(vdev)) {
r = vhost_virtqueue_set_vring_endian_legacy(dev,
!virtio_is_big_endian(vdev),
vhost_vq_index);
@ -1148,6 +1175,7 @@ void vhost_virtqueue_mask(struct vhost_dev *hdev, VirtIODevice *vdev, int n,
struct vhost_vring_file file;
if (mask) {
assert(vdev->use_guest_notifier_mask);
file.fd = event_notifier_get_fd(&hdev->vqs[index].masked_notifier);
} else {
file.fd = event_notifier_get_fd(virtio_queue_get_guest_notifier(vvq));
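
vhost_needs_vring_endian() keys off vdev->device_endian, which tracks the endianness the guest driver is using. As an example of the one case it targets: on a bi-endian target such as ppc64, a big-endian host running a little-endian guest sees device_endian == VIRTIO_DEVICE_ENDIAN_LITTLE, so the helper returns true and the legacy VHOST_SET_VRING_ENDIAN path is taken. The enum itself lives elsewhere in the tree and looks roughly like this (for reference only, not part of this diff):

enum virtio_device_endian {
    VIRTIO_DEVICE_ENDIAN_UNKNOWN,
    VIRTIO_DEVICE_ENDIAN_LITTLE,
    VIRTIO_DEVICE_ENDIAN_BIG,
};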


@ -806,7 +806,7 @@ static int kvm_virtio_pci_vector_use(VirtIOPCIProxy *proxy, int nvqs)
/* If guest supports masking, set up irqfd now.
* Otherwise, delay until unmasked in the frontend.
*/
if (k->guest_notifier_mask) {
if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
ret = kvm_virtio_pci_irqfd_use(proxy, queue_no, vector);
if (ret < 0) {
kvm_virtio_pci_vq_vector_release(proxy, vector);
@ -822,7 +822,7 @@ undo:
if (vector >= msix_nr_vectors_allocated(dev)) {
continue;
}
if (k->guest_notifier_mask) {
if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
kvm_virtio_pci_irqfd_release(proxy, queue_no, vector);
}
kvm_virtio_pci_vq_vector_release(proxy, vector);
@ -849,7 +849,7 @@ static void kvm_virtio_pci_vector_release(VirtIOPCIProxy *proxy, int nvqs)
/* If guest supports masking, clean up irqfd now.
* Otherwise, it was cleaned when masked in the frontend.
*/
if (k->guest_notifier_mask) {
if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
kvm_virtio_pci_irqfd_release(proxy, queue_no, vector);
}
kvm_virtio_pci_vq_vector_release(proxy, vector);
@ -882,7 +882,7 @@ static int virtio_pci_vq_vector_unmask(VirtIOPCIProxy *proxy,
/* If guest supports masking, irqfd is already setup, unmask it.
* Otherwise, set it up now.
*/
if (k->guest_notifier_mask) {
if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
k->guest_notifier_mask(vdev, queue_no, false);
/* Test after unmasking to avoid losing events. */
if (k->guest_notifier_pending &&
@ -905,7 +905,7 @@ static void virtio_pci_vq_vector_mask(VirtIOPCIProxy *proxy,
/* If guest supports masking, keep irqfd but mask it.
* Otherwise, clean it up now.
*/
if (k->guest_notifier_mask) {
if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
k->guest_notifier_mask(vdev, queue_no, true);
} else {
kvm_virtio_pci_irqfd_release(proxy, queue_no, vector);
@ -1022,7 +1022,9 @@ static int virtio_pci_set_guest_notifier(DeviceState *d, int n, bool assign,
event_notifier_cleanup(notifier);
}
if (!msix_enabled(&proxy->pci_dev) && vdc->guest_notifier_mask) {
if (!msix_enabled(&proxy->pci_dev) &&
vdev->use_guest_notifier_mask &&
vdc->guest_notifier_mask) {
vdc->guest_notifier_mask(vdev, n, !assign);
}
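
The same guard now appears at every irqfd setup, release, mask and unmask site in this file. Purely as an illustration (this helper is hypothetical and not part of the patch), the repeated condition reads:

/* Hypothetical helper: use the mask/unmask machinery only if the transport
 * provides the hook and the device has not opted out (vhost-user backends
 * clear use_guest_notifier_mask in vhost_net_start()). */
static bool virtio_pci_can_mask_notifiers(VirtIODevice *vdev, VirtioDeviceClass *k)
{
    return vdev->use_guest_notifier_mask && k->guest_notifier_mask;
}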


@ -1677,6 +1677,7 @@ void virtio_init(VirtIODevice *vdev, const char *name,
vdev->vmstate = qemu_add_vm_change_state_handler(virtio_vmstate_change,
vdev);
vdev->device_endian = virtio_default_endian();
vdev->use_guest_notifier_mask = true;
}
hwaddr virtio_queue_get_desc_addr(VirtIODevice *vdev, int n)


@ -48,7 +48,7 @@ void pci_bridge_disable_base_limit(PCIDevice *dev);
void pci_bridge_reset_reg(PCIDevice *dev);
void pci_bridge_reset(DeviceState *qdev);
int pci_bridge_initfn(PCIDevice *pci_dev, const char *typename);
void pci_bridge_initfn(PCIDevice *pci_dev, const char *typename);
void pci_bridge_exitfn(PCIDevice *pci_dev);


@ -19,32 +19,19 @@
static inline bool virtio_access_is_big_endian(VirtIODevice *vdev)
{
#if defined(TARGET_IS_BIENDIAN)
return virtio_is_big_endian(vdev);
#elif defined(TARGET_WORDS_BIGENDIAN)
if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
/* Devices conforming to VIRTIO 1.0 or later are always LE. */
return false;
}
#if defined(TARGET_IS_BIENDIAN)
return virtio_is_big_endian(vdev);
#elif defined(TARGET_WORDS_BIGENDIAN)
return true;
#else
return false;
#endif
}
static inline bool virtio_legacy_is_cross_endian(VirtIODevice *vdev)
{
#ifdef TARGET_IS_BIENDIAN
#ifdef HOST_WORDS_BIGENDIAN
return !virtio_is_big_endian(vdev);
#else
return virtio_is_big_endian(vdev);
#endif
#else
return false;
#endif
}
static inline uint16_t virtio_lduw_phys(VirtIODevice *vdev, hwaddr pa)
{
if (virtio_access_is_big_endian(vdev)) {
@ -143,15 +130,6 @@ static inline uint64_t virtio_ldq_p(VirtIODevice *vdev, const void *ptr)
}
}
static inline bool virtio_needs_swap(VirtIODevice *vdev)
{
#ifdef HOST_WORDS_BIGENDIAN
return virtio_access_is_big_endian(vdev) ? false : true;
#else
return virtio_access_is_big_endian(vdev) ? true : false;
#endif
}
static inline uint16_t virtio_tswap16(VirtIODevice *vdev, uint16_t s)
{
#ifdef HOST_WORDS_BIGENDIAN
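
virtio_tswap16() itself is unchanged by this series; it simply byte-swaps when the access endianness differs from the host, roughly as follows (a sketch for reference, the exact body may differ):

static inline uint16_t virtio_tswap16(VirtIODevice *vdev, uint16_t s)
{
#ifdef HOST_WORDS_BIGENDIAN
    return virtio_access_is_big_endian(vdev) ? s : bswap16(s);
#else
    return virtio_access_is_big_endian(vdev) ? bswap16(s) : s;
#endif
}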


@ -94,6 +94,7 @@ typedef struct VirtIONet {
uint64_t curr_guest_offloads;
QEMUTimer *announce_timer;
int announce_counter;
bool needs_vnet_hdr_swap;
} VirtIONet;
void virtio_net_set_netclient_name(VirtIONet *n, const char *name,


@ -90,6 +90,7 @@ struct VirtIODevice
VMChangeStateEntry *vmstate;
char *bus_name;
uint8_t device_endian;
bool use_guest_notifier_mask;
QLIST_HEAD(, VirtQueue) *vector_queues;
};


@ -102,7 +102,7 @@ LD_REL := $(CC) -nostdlib -Wl,-r
modules:
%$(EXESUF): %.o
$(call LINK,$^)
$(call LINK,$(filter %.o %.a %.mo, $^))
%.a:
$(call quiet-command,rm -f $@ && $(AR) rcs $@ $^," AR $(TARGET_DIR)$@")


@ -173,6 +173,7 @@ check-qtest-i386-y += tests/hd-geo-test$(EXESUF)
gcov-files-i386-y += hw/block/hd-geometry.c
check-qtest-i386-y += tests/boot-order-test$(EXESUF)
check-qtest-i386-y += tests/bios-tables-test$(EXESUF)
check-qtest-i386-y += tests/pxe-test$(EXESUF)
check-qtest-i386-y += tests/rtc-test$(EXESUF)
check-qtest-i386-y += tests/ipmi-kcs-test$(EXESUF)
check-qtest-i386-y += tests/ipmi-bt-test$(EXESUF)
@ -518,7 +519,9 @@ tests/ipmi-kcs-test$(EXESUF): tests/ipmi-kcs-test.o
tests/ipmi-bt-test$(EXESUF): tests/ipmi-bt-test.o
tests/hd-geo-test$(EXESUF): tests/hd-geo-test.o
tests/boot-order-test$(EXESUF): tests/boot-order-test.o $(libqos-obj-y)
tests/bios-tables-test$(EXESUF): tests/bios-tables-test.o $(libqos-obj-y)
tests/bios-tables-test$(EXESUF): tests/bios-tables-test.o \
tests/boot-sector.o $(libqos-obj-y)
tests/pxe-test$(EXESUF): tests/pxe-test.o tests/boot-sector.o $(libqos-obj-y)
tests/tmp105-test$(EXESUF): tests/tmp105-test.o $(libqos-omap-obj-y)
tests/ds1338-test$(EXESUF): tests/ds1338-test.o $(libqos-imx-obj-y)
tests/i440fx-test$(EXESUF): tests/i440fx-test.o $(libqos-pc-obj-y)


@ -18,6 +18,7 @@
#include "hw/acpi/acpi-defs.h"
#include "hw/smbios/smbios.h"
#include "qemu/bitmap.h"
#include "boot-sector.h"
#define MACHINE_PC "pc"
#define MACHINE_Q35 "q35"
@ -51,13 +52,6 @@ typedef struct {
struct smbios_21_entry_point smbios_ep_table;
} test_data;
#define LOW(x) ((x) & 0xff)
#define HIGH(x) ((x) >> 8)
#define SIGNATURE 0xdead
#define SIGNATURE_OFFSET 0x10
#define BOOT_SECTOR_ADDRESS 0x7c00
#define ACPI_READ_FIELD(field, addr) \
do { \
switch (sizeof(field)) { \
@ -117,35 +111,6 @@ typedef struct {
g_assert_cmpstr(ACPI_ASSERT_CMP_str, ==, expected); \
} while (0)
/* Boot sector code: write SIGNATURE into memory,
* then halt.
* Q35 machine requires a minimum 0x7e000 bytes disk.
* (bug or feature?)
*/
static uint8_t boot_sector[0x7e000] = {
/* 7c00: mov $0xdead,%ax */
[0x00] = 0xb8,
[0x01] = LOW(SIGNATURE),
[0x02] = HIGH(SIGNATURE),
/* 7c03: mov %ax,0x7c10 */
[0x03] = 0xa3,
[0x04] = LOW(BOOT_SECTOR_ADDRESS + SIGNATURE_OFFSET),
[0x05] = HIGH(BOOT_SECTOR_ADDRESS + SIGNATURE_OFFSET),
/* 7c06: cli */
[0x06] = 0xfa,
/* 7c07: hlt */
[0x07] = 0xf4,
/* 7c08: jmp 0x7c07=0x7c0a-3 */
[0x08] = 0xeb,
[0x09] = LOW(-3),
/* We mov 0xdead here: set value to make debugging easier */
[SIGNATURE_OFFSET] = LOW(0xface),
[SIGNATURE_OFFSET + 1] = HIGH(0xface),
/* End of boot sector marker */
[0x1FE] = 0x55,
[0x1FF] = 0xAA,
};
static const char *disk = "tests/acpi-test-disk.raw";
static const char *data_dir = "tests/acpi-test-data";
#ifdef CONFIG_IASL
@ -737,10 +702,6 @@ static void test_smbios_structs(test_data *data)
static void test_acpi_one(const char *params, test_data *data)
{
char *args;
uint8_t signature_low;
uint8_t signature_high;
uint16_t signature;
int i;
args = g_strdup_printf("-net none -display none %s "
"-drive id=hd0,if=none,file=%s,format=raw "
@ -749,24 +710,7 @@ static void test_acpi_one(const char *params, test_data *data)
qtest_start(args);
/* Wait at most 1 minute */
#define TEST_DELAY (1 * G_USEC_PER_SEC / 10)
#define TEST_CYCLES MAX((60 * G_USEC_PER_SEC / TEST_DELAY), 1)
/* Poll until code has run and modified memory. Once it has we know BIOS
* initialization is done. TODO: check that IP reached the halt
* instruction.
*/
for (i = 0; i < TEST_CYCLES; ++i) {
signature_low = readb(BOOT_SECTOR_ADDRESS + SIGNATURE_OFFSET);
signature_high = readb(BOOT_SECTOR_ADDRESS + SIGNATURE_OFFSET + 1);
signature = (signature_high << 8) | signature_low;
if (signature == SIGNATURE) {
break;
}
g_usleep(TEST_DELAY);
}
g_assert_cmphex(signature, ==, SIGNATURE);
boot_sector_test();
test_acpi_rsdp_address(data);
test_acpi_rsdp_table(data);
@ -840,15 +784,11 @@ static void test_acpi_q35_tcg_bridge(void)
int main(int argc, char *argv[])
{
const char *arch = qtest_get_arch();
FILE *f = fopen(disk, "w");
int ret;
if (!f) {
fprintf(stderr, "Couldn't open \"%s\": %s", disk, strerror(errno));
return 1;
}
fwrite(boot_sector, 1, sizeof boot_sector, f);
fclose(f);
ret = boot_sector_init(disk);
if (ret) {
    return ret;
}
g_test_init(&argc, &argv, NULL);
@ -859,6 +799,6 @@ int main(int argc, char *argv[])
qtest_add_func("acpi/q35/tcg/bridge", test_acpi_q35_tcg_bridge);
}
ret = g_test_run();
unlink(disk);
boot_sector_cleanup(disk);
return ret;
}

tests/boot-sector.c (new file, 119 lines)

@ -0,0 +1,119 @@
/*
* QEMU boot sector testing helpers.
*
* Copyright (c) 2016 Red Hat Inc.
*
* Authors:
* Michael S. Tsirkin <mst@redhat.com>
* Victor Kaplansky <victork@redhat.com>
*
* This work is licensed under the terms of the GNU GPL, version 2 or later.
* See the COPYING file in the top-level directory.
*/
#include "boot-sector.h"
#include <string.h>
#include <stdio.h>
#include "qemu-common.h"
#include "libqtest.h"
#define LOW(x) ((x) & 0xff)
#define HIGH(x) ((x) >> 8)
#define SIGNATURE 0xdead
#define SIGNATURE_OFFSET 0x10
#define BOOT_SECTOR_ADDRESS 0x7c00
/* Boot sector code: write SIGNATURE into memory,
* then halt.
* Q35 machine requires a minimum 0x7e000 bytes disk.
* (bug or feature?)
*/
static uint8_t boot_sector[0x7e000] = {
/* The first sector will be placed at RAM address 00007C00, and
* the BIOS transfers control to 00007C00
*/
/* Data Segment register should be initialized, since pxe
* boot loader can leave it dirty.
*/
/* 7c00: mov $0x0000,%ax */
[0x00] = 0xb8,
[0x01] = 0x00,
[0x02] = 0x00,
/* 7c03: mov %ax,%ds */
[0x03] = 0x8e,
[0x04] = 0xd8,
/* 7c05: mov $0xdead,%ax */
[0x05] = 0xb8,
[0x06] = LOW(SIGNATURE),
[0x07] = HIGH(SIGNATURE),
/* 7c08: mov %ax,0x7c10 */
[0x08] = 0xa3,
[0x09] = LOW(BOOT_SECTOR_ADDRESS + SIGNATURE_OFFSET),
[0x0a] = HIGH(BOOT_SECTOR_ADDRESS + SIGNATURE_OFFSET),
/* 7c0b: cli */
[0x0b] = 0xfa,
/* 7c0c: hlt */
[0x0c] = 0xf4,
/* 7c0d: jmp 0x7c0c=0x7c0f-3 */
[0x0d] = 0xeb,
[0x0e] = LOW(-3),
/* We mov 0xdead here: set value to make debugging easier */
[SIGNATURE_OFFSET] = LOW(0xface),
[SIGNATURE_OFFSET + 1] = HIGH(0xface),
/* End of boot sector marker */
[0x1FE] = 0x55,
[0x1FF] = 0xAA,
};
/* Create boot disk file. */
int boot_sector_init(const char *fname)
{
FILE *f = fopen(fname, "w");
if (!f) {
fprintf(stderr, "Couldn't open \"%s\": %s", fname, strerror(errno));
return 1;
}
fwrite(boot_sector, 1, sizeof boot_sector, f);
fclose(f);
return 0;
}
/* Loop until signature in memory is OK. */
void boot_sector_test(void)
{
uint8_t signature_low;
uint8_t signature_high;
uint16_t signature;
int i;
/* Wait at most 1 minute */
#define TEST_DELAY (1 * G_USEC_PER_SEC / 10)
#define TEST_CYCLES MAX((60 * G_USEC_PER_SEC / TEST_DELAY), 1)
/* Poll until code has run and modified memory. Once it has we know BIOS
* initialization is done. TODO: check that IP reached the halt
* instruction.
*/
for (i = 0; i < TEST_CYCLES; ++i) {
signature_low = readb(BOOT_SECTOR_ADDRESS + SIGNATURE_OFFSET);
signature_high = readb(BOOT_SECTOR_ADDRESS + SIGNATURE_OFFSET + 1);
signature = (signature_high << 8) | signature_low;
if (signature == SIGNATURE) {
break;
}
g_usleep(TEST_DELAY);
}
g_assert_cmphex(signature, ==, SIGNATURE);
}
/* unlink boot disk file. */
void boot_sector_cleanup(const char *fname)
{
unlink(fname);
}

tests/boot-sector.h (new file, 26 lines)

@ -0,0 +1,26 @@
/*
* QEMU boot sector testing helpers.
*
* Copyright (c) 2016 Red Hat Inc.
*
* Authors:
* Michael S. Tsirkin <mst@redhat.com>
* Victor Kaplansky <victork@redhat.com>
*
* This work is licensed under the terms of the GNU GPL, version 2 or later.
* See the COPYING file in the top-level directory.
*/
#ifndef TEST_BOOT_SECTOR
#define TEST_BOOT_SECTOR
/* Create boot disk file. */
int boot_sector_init(const char *fname);
/* Loop until signature in memory is OK. */
void boot_sector_test(void);
/* unlink boot disk file. */
void boot_sector_cleanup(const char *fname);
#endif /* TEST_BOOT_SECTOR */

tests/pxe-test.c (new file, 69 lines)

@ -0,0 +1,69 @@
/*
* PXE test cases.
*
* Copyright (c) 2016 Red Hat Inc.
*
* Authors:
* Michael S. Tsirkin <mst@redhat.com>,
* Victor Kaplansky <victork@redhat.com>
*
* This work is licensed under the terms of the GNU GPL, version 2 or later.
* See the COPYING file in the top-level directory.
*/
#include <string.h>
#include <stdio.h>
#include <glib.h>
#include <glib/gstdio.h>
#include "qemu-common.h"
#include "libqtest.h"
#include "boot-sector.h"
#define NETNAME "net0"
static const char *disk = "tests/pxe-test-disk.raw";
static void test_pxe_one(const char *params)
{
char *args;
args = g_strdup_printf("-machine accel=tcg "
"-netdev user,id=" NETNAME ",tftp=./,bootfile=%s "
"%s ",
disk, params);
qtest_start(args);
boot_sector_test();
qtest_quit(global_qtest);
g_free(args);
}
static void test_pxe_e1000(void)
{
test_pxe_one("-device e1000,netdev=" NETNAME);
}
static void test_pxe_virtio_pci(void)
{
test_pxe_one("-device virtio-net-pci,netdev=" NETNAME);
}
int main(int argc, char *argv[])
{
int ret;
const char *arch = qtest_get_arch();
ret = boot_sector_init(disk);
if (ret) {
    return ret;
}
g_test_init(&argc, &argv, NULL);
if (strcmp(arch, "i386") == 0 || strcmp(arch, "x86_64") == 0) {
qtest_add_func("pxe/e1000", test_pxe_e1000);
qtest_add_func("pxe/virtio", test_pxe_virtio_pci);
}
ret = g_test_run();
boot_sector_cleanup(disk);
return ret;
}
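
With the tests/Makefile change above, pxe-test is part of the i386 qtest set, so it runs under "make check-qtest-i386" (and typically under check-qtest-x86_64 as well, since that list reuses the i386 one); it can also be run standalone with QTEST_QEMU_BINARY pointing at a system emulator binary.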


@ -37,6 +37,7 @@
#include <sys/eventfd.h>
#include <arpa/inet.h>
#include <netdb.h>
#include <qemu/osdep.h>
#include <linux/vhost.h>
@ -176,6 +177,8 @@ typedef struct VubrVirtq {
#define VHOST_MEMORY_MAX_NREGIONS 8
#define VHOST_USER_F_PROTOCOL_FEATURES 30
/* v1.0 compliant. */
#define VIRTIO_F_VERSION_1 32
#define VHOST_LOG_PAGE 4096
@ -284,6 +287,7 @@ typedef struct VubrDev {
struct sockaddr_in backend_udp_dest;
int ready;
uint64_t features;
int hdrlen;
} VubrDev;
static const char *vubr_request_str[] = {
@ -474,7 +478,8 @@ vubr_backend_udp_recvbuf(VubrDev *dev, uint8_t *buf, size_t buflen)
static void
vubr_consume_raw_packet(VubrDev *dev, uint8_t *buf, uint32_t len)
{
int hdrlen = sizeof(struct virtio_net_hdr_v1);
int hdrlen = dev->hdrlen;
DPRINT(" hdrlen = %d\n", dev->hdrlen);
if (VHOST_USER_BRIDGE_DEBUG) {
print_buffer(buf, len);
@ -536,6 +541,7 @@ vubr_post_buffer(VubrDev *dev, VubrVirtq *vq, uint8_t *buf, int32_t len)
struct vring_avail *avail = vq->avail;
struct vring_used *used = vq->used;
uint64_t log_guest_addr = vq->log_guest_addr;
int32_t remaining_len = len;
unsigned int size = vq->size;
@ -550,36 +556,49 @@ vubr_post_buffer(VubrDev *dev, VubrVirtq *vq, uint8_t *buf, int32_t len)
uint16_t d_index = avail->ring[a_index];
int i = d_index;
uint32_t written_len = 0;
DPRINT("Post packet to guest on vq:\n");
DPRINT(" size = %d\n", vq->size);
DPRINT(" last_avail_index = %d\n", vq->last_avail_index);
DPRINT(" last_used_index = %d\n", vq->last_used_index);
DPRINT(" a_index = %d\n", a_index);
DPRINT(" u_index = %d\n", u_index);
DPRINT(" d_index = %d\n", d_index);
DPRINT(" desc[%d].addr = 0x%016"PRIx64"\n", i, desc[i].addr);
DPRINT(" desc[%d].len = %d\n", i, desc[i].len);
DPRINT(" desc[%d].flags = %d\n", i, desc[i].flags);
DPRINT(" avail->idx = %d\n", avail_index);
DPRINT(" used->idx = %d\n", used->idx);
do {
DPRINT("Post packet to guest on vq:\n");
DPRINT(" size = %d\n", vq->size);
DPRINT(" last_avail_index = %d\n", vq->last_avail_index);
DPRINT(" last_used_index = %d\n", vq->last_used_index);
DPRINT(" a_index = %d\n", a_index);
DPRINT(" u_index = %d\n", u_index);
DPRINT(" d_index = %d\n", d_index);
DPRINT(" desc[%d].addr = 0x%016"PRIx64"\n", i, desc[i].addr);
DPRINT(" desc[%d].len = %d\n", i, desc[i].len);
DPRINT(" desc[%d].flags = %d\n", i, desc[i].flags);
DPRINT(" avail->idx = %d\n", avail_index);
DPRINT(" used->idx = %d\n", used->idx);
if (!(desc[i].flags & VRING_DESC_F_WRITE)) {
/* FIXME: we should find writable descriptor. */
fprintf(stderr, "Error: descriptor is not writable. Exiting.\n");
exit(1);
}
if (!(desc[i].flags & VRING_DESC_F_WRITE)) {
/* FIXME: we should find writable descriptor. */
fprintf(stderr, "Error: descriptor is not writable. Exiting.\n");
exit(1);
}
void *chunk_start = (void *)gpa_to_va(dev, desc[i].addr);
uint32_t chunk_len = desc[i].len;
void *chunk_start = (void *)gpa_to_va(dev, desc[i].addr);
uint32_t chunk_len = desc[i].len;
uint32_t chunk_write_len = MIN(remaining_len, chunk_len);
if (len <= chunk_len) {
memcpy(chunk_start, buf, len);
vubr_log_write(dev, desc[i].addr, len);
} else {
fprintf(stderr,
"Received too long packet from the backend. Dropping...\n");
return;
memcpy(chunk_start, buf + written_len, chunk_write_len);
vubr_log_write(dev, desc[i].addr, chunk_write_len);
remaining_len -= chunk_write_len;
written_len += chunk_write_len;
if ((remaining_len == 0) || !(desc[i].flags & VRING_DESC_F_NEXT)) {
break;
}
i = desc[i].next;
} while (1);
if (remaining_len > 0) {
fprintf(stderr,
"Too long packet for RX, remaining_len = %d, Dropping...\n",
remaining_len);
return;
}
/* Add descriptor to the used ring. */
@ -687,7 +706,7 @@ vubr_backend_recv_cb(int sock, void *ctx)
VubrVirtq *rx_vq = &dev->vq[0];
uint8_t buf[4096];
struct virtio_net_hdr_v1 *hdr = (struct virtio_net_hdr_v1 *)buf;
int hdrlen = sizeof(struct virtio_net_hdr_v1);
int hdrlen = dev->hdrlen;
int buflen = sizeof(buf);
int len;
@ -696,6 +715,7 @@ vubr_backend_recv_cb(int sock, void *ctx)
}
DPRINT("\n\n *** IN UDP RECEIVE CALLBACK ***\n\n");
DPRINT(" hdrlen = %d\n", hdrlen);
uint16_t avail_index = atomic_mb_read(&rx_vq->avail->idx);
@ -707,10 +727,12 @@ vubr_backend_recv_cb(int sock, void *ctx)
return;
}
memset(buf, 0, hdrlen);
/* TODO: support mergeable buffers. */
if (hdrlen == 12)
hdr->num_buffers = 1;
len = vubr_backend_udp_recvbuf(dev, buf + hdrlen, buflen - hdrlen);
*hdr = (struct virtio_net_hdr_v1) { };
hdr->num_buffers = 1;
vubr_post_buffer(dev, rx_vq, buf, len + hdrlen);
}
@ -758,7 +780,15 @@ static int
vubr_set_features_exec(VubrDev *dev, VhostUserMsg *vmsg)
{
DPRINT("u64: 0x%016"PRIx64"\n", vmsg->payload.u64);
dev->features = vmsg->payload.u64;
if ((dev->features & (1ULL << VIRTIO_F_VERSION_1)) ||
(dev->features & (1ULL << VIRTIO_NET_F_MRG_RXBUF))) {
dev->hdrlen = 12;
} else {
dev->hdrlen = 10;
}
return 0;
}
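
The 10- vs 12-byte choice above corresponds to the two vnet header layouts from the virtio spec (see standard-headers/linux/virtio_net.h); the field types are simplified here and this is shown for reference only:

/* Legacy header, no VIRTIO_NET_F_MRG_RXBUF negotiated: 10 bytes. */
struct virtio_net_hdr {
    uint8_t  flags;
    uint8_t  gso_type;
    uint16_t hdr_len;
    uint16_t gso_size;
    uint16_t csum_start;
    uint16_t csum_offset;
};

/* VIRTIO 1.0, or legacy with mergeable RX buffers: 12 bytes. */
struct virtio_net_hdr_v1 {
    uint8_t  flags;
    uint8_t  gso_type;
    uint16_t hdr_len;
    uint16_t gso_size;
    uint16_t csum_start;
    uint16_t csum_offset;
    uint16_t num_buffers;   /* the extra field vubr_backend_recv_cb() fills in */
};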