qemu-patch-raspberry4/hw/virtio-balloon.c
aliguori 99b3718ee1 Use the default subsystem vendor ID for virtio devices (Mark McLoughlin)
A subsystem vendor ID of zero isn't allowed, so we use our
default ID.

Gerd points out that although the PCI subsystem vendor ID is
treated by the guest as the virtio vendor ID:

   /* we use the subsystem vendor/device id as the virtio vendor/device
    * id.  this allows us to use the same PCI vendor/device id for all
    * virtio devices and to identify the particular virtio driver by
    * the subsytem ids */
    vp_dev->vdev.id.vendor = pci_dev->subsystem_vendor;
    vp_dev->vdev.id.device = pci_dev->subsystem_device;

it looks like only the device ID is used right now:

   # grep virtio modules.alias
   alias virtio:d00000001v* virtio_net
   alias virtio:d00000002v* virtio_blk
   alias virtio:d00000003v* virtio_console
   alias virtio:d00000004v* virtio-rng
   alias virtio:d00000005v* virtio_balloon
   alias pci:v00001AF4d*sv*sd*bc*sc*i* virtio_pci
   alias virtio:d00000009v* 9pnet_virtio

so setting the subsystem vendor id to something != zero shouldn't cause
trouble.
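
For illustration, a guest virtio driver normally matches on the device ID
alone and accepts any vendor, roughly like the id_table in the in-tree
virtio_balloon driver (sketch, not a verbatim excerpt):

    static struct virtio_device_id id_table[] = {
        { VIRTIO_ID_BALLOON, VIRTIO_DEV_ANY_ID },
        { 0 },
    };

which is where the "v*" wildcard in the aliases above comes from.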

Signed-off-by: Mark McLoughlin <markmc@redhat.com>
Signed-off-by: Anthony Liguori <aliguori@us.ibm.com>


git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@6440 c046a42c-6fe2-441c-8c8c-71466251a162
2009-01-26 15:22:57 +00:00


/*
 * Virtio Balloon Device
 *
 * Copyright IBM, Corp. 2008
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */
#include "qemu-common.h"
#include "virtio.h"
#include "pc.h"
#include "sysemu.h"
#include "cpu.h"
#include "balloon.h"
#include "virtio-balloon.h"
#include "kvm.h"
#if defined(__linux__)
#include <sys/mman.h>
#endif
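
/* Per-device state.  ivq and dvq are the inflate and deflate virtqueues;
 * num_pages is the number of guest pages the host wants ballooned (the
 * target) and actual is the number of pages the guest reports it has
 * actually given up. */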
typedef struct VirtIOBalloon
{
    VirtIODevice vdev;
    VirtQueue *ivq, *dvq;
    uint32_t num_pages;
    uint32_t actual;
} VirtIOBalloon;

static VirtIOBalloon *to_virtio_balloon(VirtIODevice *vdev)
{
    return (VirtIOBalloon *)vdev;
}
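
/* Tell the host kernel about one ballooned page.  Inflating uses
 * MADV_DONTNEED so the backing memory can be reclaimed; deflating uses
 * MADV_WILLNEED to fault it back in.  With KVM this is only safe when the
 * kernel keeps the shadow MMU in sync (kvm_has_sync_mmu()). */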
static void balloon_page(void *addr, int deflate)
{
#if defined(__linux__)
    if (!kvm_enabled() || kvm_has_sync_mmu())
        madvise(addr, TARGET_PAGE_SIZE,
                deflate ? MADV_WILLNEED : MADV_DONTNEED);
#endif
}
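
/* Copy 'size' bytes starting at byte 'offset' of the scatter-gather list
 * 'iov' into 'data'.  Returns the number of bytes actually copied. */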
/* FIXME: once we do a virtio refactoring, this will get subsumed into common
 * code */
static size_t memcpy_from_iovector(void *data, size_t offset, size_t size,
                                   struct iovec *iov, int iovlen)
{
    int i;
    uint8_t *ptr = data;
    size_t iov_off = 0;
    size_t data_off = 0;

    for (i = 0; i < iovlen && size; i++) {
        if (offset < (iov_off + iov[i].iov_len)) {
            size_t len = MIN((iov_off + iov[i].iov_len) - offset, size);

            memcpy(ptr + data_off, iov[i].iov_base + (offset - iov_off), len);

            data_off += len;
            offset += len;
            size -= len;
        }

        iov_off += iov[i].iov_len;
    }

    return data_off;
}
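
/* Both the inflate and the deflate queue use this handler.  Each request
 * is an array of 32-bit little-endian page frame numbers; every PFN is
 * translated to a guest physical address, checked to be backed by RAM and
 * then passed to balloon_page().  Deflation is detected by the queue the
 * request arrived on. */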
static void virtio_balloon_handle_output(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOBalloon *s = to_virtio_balloon(vdev);
    VirtQueueElement elem;

    while (virtqueue_pop(vq, &elem)) {
        size_t offset = 0;
        uint32_t pfn;

        while (memcpy_from_iovector(&pfn, offset, 4,
                                    elem.out_sg, elem.out_num) == 4) {
            ram_addr_t pa;
            ram_addr_t addr;

            pa = (ram_addr_t)ldl_p(&pfn) << VIRTIO_BALLOON_PFN_SHIFT;
            offset += 4;

            addr = cpu_get_physical_page_desc(pa);
            if ((addr & ~TARGET_PAGE_MASK) != IO_MEM_RAM)
                continue;

            balloon_page(phys_ram_base + addr, !!(vq == s->dvq));
        }

        virtqueue_push(vq, &elem, offset);
        virtio_notify(vdev, vq);
    }
}
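
/* The device configuration space is 8 bytes: a little-endian 'num_pages'
 * (the target the host asks for) followed by a little-endian 'actual'
 * (what the guest has ballooned so far, written back by the guest). */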
static void virtio_balloon_get_config(VirtIODevice *vdev, uint8_t *config_data)
{
    VirtIOBalloon *dev = to_virtio_balloon(vdev);
    struct virtio_balloon_config config;

    config.num_pages = cpu_to_le32(dev->num_pages);
    config.actual = cpu_to_le32(dev->actual);

    memcpy(config_data, &config, 8);
}

static void virtio_balloon_set_config(VirtIODevice *vdev,
                                      const uint8_t *config_data)
{
    VirtIOBalloon *dev = to_virtio_balloon(vdev);
    struct virtio_balloon_config config;

    memcpy(&config, config_data, 8);
    /* the guest writes 'actual' in little-endian, mirroring get_config() */
    dev->actual = le32_to_cpu(config.actual);
}

static uint32_t virtio_balloon_get_features(VirtIODevice *vdev)
{
    return 0;
}
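
/* Callback registered below with qemu_add_balloon_handler() and driven by
 * the monitor's "balloon" command.  'target' is the desired guest memory
 * size in bytes; the difference to ram_size is turned into a page count
 * and announced to the guest through the config space.  Returns the
 * current effective size based on what the guest reports as ballooned. */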
static ram_addr_t virtio_balloon_to_target(void *opaque, ram_addr_t target)
{
    VirtIOBalloon *dev = opaque;

    if (target > ram_size)
        target = ram_size;

    if (target) {
        dev->num_pages = (ram_size - target) >> VIRTIO_BALLOON_PFN_SHIFT;
        virtio_notify_config(&dev->vdev);
    }

    return ram_size - (dev->actual << VIRTIO_BALLOON_PFN_SHIFT);
}
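
/* Live migration support: the virtio core state is saved first, followed
 * by the two balloon counters in big-endian (savevm section version 1). */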
static void virtio_balloon_save(QEMUFile *f, void *opaque)
{
    VirtIOBalloon *s = opaque;

    virtio_save(&s->vdev, f);

    qemu_put_be32(f, s->num_pages);
    qemu_put_be32(f, s->actual);
}

static int virtio_balloon_load(QEMUFile *f, void *opaque, int version_id)
{
    VirtIOBalloon *s = opaque;

    if (version_id != 1)
        return -EINVAL;

    virtio_load(&s->vdev, f);

    s->num_pages = qemu_get_be32(f);
    s->actual = qemu_get_be32(f);

    return 0;
}
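
/* Create the PCI virtio-balloon device.  As described in the commit
 * message above, the PCI subsystem vendor ID is now the default
 * PCI_VENDOR_ID_REDHAT_QUMRANET rather than zero, while the subsystem
 * device ID VIRTIO_ID_BALLOON is what the guest uses to pick the driver.
 * Class code 0x05 marks the device as a memory controller and the config
 * space is the 8 bytes used by get_config()/set_config() above. */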
void *virtio_balloon_init(PCIBus *bus)
{
    VirtIOBalloon *s;

    s = (VirtIOBalloon *)virtio_init_pci(bus, "virtio-balloon",
                                         PCI_VENDOR_ID_REDHAT_QUMRANET,
                                         PCI_DEVICE_ID_VIRTIO_BALLOON,
                                         PCI_VENDOR_ID_REDHAT_QUMRANET,
                                         VIRTIO_ID_BALLOON,
                                         0x05, 0x00, 0x00,
                                         8, sizeof(VirtIOBalloon));
    if (s == NULL)
        return NULL;

    s->vdev.get_config = virtio_balloon_get_config;
    s->vdev.set_config = virtio_balloon_set_config;
    s->vdev.get_features = virtio_balloon_get_features;

    s->ivq = virtio_add_queue(&s->vdev, 128, virtio_balloon_handle_output);
    s->dvq = virtio_add_queue(&s->vdev, 128, virtio_balloon_handle_output);

    qemu_add_balloon_handler(virtio_balloon_to_target, s);

    register_savevm("virtio-balloon", -1, 1, virtio_balloon_save,
                    virtio_balloon_load, s);

    return &s->vdev;
}