diff --git a/hw/virtio/trace-events b/hw/virtio/trace-events
index e83500bee9..6427a0047d 100644
--- a/hw/virtio/trace-events
+++ b/hw/virtio/trace-events
@@ -5,7 +5,8 @@ vhost_commit(bool started, bool changed) "Started: %d Changed: %d"
 vhost_region_add_section(const char *name, uint64_t gpa, uint64_t size, uint64_t host) "%s: 0x%"PRIx64"+0x%"PRIx64" @ 0x%"PRIx64
 vhost_region_add_section_merge(const char *name, uint64_t new_size, uint64_t gpa, uint64_t owr) "%s: size: 0x%"PRIx64 " gpa: 0x%"PRIx64 " owr: 0x%"PRIx64
 vhost_region_add_section_aligned(const char *name, uint64_t gpa, uint64_t size, uint64_t host) "%s: 0x%"PRIx64"+0x%"PRIx64" @ 0x%"PRIx64
-vhost_section(const char *name, int r) "%s:%d"
+vhost_section(const char *name) "%s"
+vhost_reject_section(const char *name, int d) "%s:%d"
 vhost_iotlb_miss(void *dev, int step) "%p step %d"
 
 # vhost-user.c
diff --git a/hw/virtio/vhost.c b/hw/virtio/vhost.c
index aff98a0ede..e3e2181290 100644
--- a/hw/virtio/vhost.c
+++ b/hw/virtio/vhost.c
@@ -27,6 +27,7 @@
 #include "migration/blocker.h"
 #include "migration/qemu-file-types.h"
 #include "sysemu/dma.h"
+#include "sysemu/tcg.h"
 #include "trace.h"
 
 /* enabled until disconnected backend stabilizes */
@@ -403,26 +404,50 @@ static int vhost_verify_ring_mappings(struct vhost_dev *dev,
     return r;
 }
 
+/*
+ * vhost_section: identify sections needed for vhost access
+ *
+ * We only care about RAM sections here (where virtqueue and guest
+ * internals accessed by virtio might live). If we find one we still
+ * allow the backend to potentially filter it out of our list.
+ */
 static bool vhost_section(struct vhost_dev *dev, MemoryRegionSection *section)
 {
-    bool result;
-    bool log_dirty = memory_region_get_dirty_log_mask(section->mr) &
-                     ~(1 << DIRTY_MEMORY_MIGRATION);
-    result = memory_region_is_ram(section->mr) &&
-        !memory_region_is_rom(section->mr);
+    MemoryRegion *mr = section->mr;
 
-    /* Vhost doesn't handle any block which is doing dirty-tracking other
-     * than migration; this typically fires on VGA areas.
-     */
-    result &= !log_dirty;
+    if (memory_region_is_ram(mr) && !memory_region_is_rom(mr)) {
+        uint8_t dirty_mask = memory_region_get_dirty_log_mask(mr);
+        uint8_t handled_dirty;
 
-    if (result && dev->vhost_ops->vhost_backend_mem_section_filter) {
-        result &=
-            dev->vhost_ops->vhost_backend_mem_section_filter(dev, section);
+        /*
+         * Kernel-based vhost doesn't handle any block which is doing
+         * dirty-tracking other than migration, for which it has
+         * specific logging support. However for TCG the kernel never
+         * gets involved anyway, so we can also ignore its
+         * self-modifying code detection flags. However a vhost-user
+         * client could still confuse a TCG guest if it re-writes
+         * executable memory that has already been translated.
+         */
+        handled_dirty = (1 << DIRTY_MEMORY_MIGRATION) |
+            (1 << DIRTY_MEMORY_CODE);
+
+        if (dirty_mask & ~handled_dirty) {
+            trace_vhost_reject_section(mr->name, 1);
+            return false;
+        }
+
+        if (dev->vhost_ops->vhost_backend_mem_section_filter &&
+            !dev->vhost_ops->vhost_backend_mem_section_filter(dev, section)) {
+            trace_vhost_reject_section(mr->name, 2);
+            return false;
+        }
+
+        trace_vhost_section(mr->name);
+        return true;
+    } else {
+        trace_vhost_reject_section(mr->name, 3);
+        return false;
     }
-
-    trace_vhost_section(section->mr->name, result);
-    return result;
 }
 
 static void vhost_begin(MemoryListener *listener)
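
The core of the change is the dirty-log-mask test: a RAM section is acceptable as long as every dirty-tracking bit set on it is one vhost can cope with. The following is a minimal standalone sketch (not part of the patch) of that test in isolation; the helper name vhost_can_handle_dirty_mask and the locally defined DIRTY_MEMORY_* bit positions are illustrative stand-ins for QEMU's real definitions, and only the mask arithmetic is meant to mirror the patched vhost_section().

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins for QEMU's DIRTY_MEMORY_* bit positions. */
enum {
    DIRTY_MEMORY_VGA       = 0,   /* e.g. VGA framebuffer dirty tracking */
    DIRTY_MEMORY_CODE      = 1,   /* TCG self-modifying code detection   */
    DIRTY_MEMORY_MIGRATION = 2,   /* migration dirty logging             */
};

/*
 * Sketch of the patched check: the section is usable by vhost only if
 * its dirty-log mask contains nothing beyond migration logging and the
 * TCG code bit (which the kernel never sees anyway).
 */
static bool vhost_can_handle_dirty_mask(uint8_t dirty_mask)
{
    const uint8_t handled_dirty = (1 << DIRTY_MEMORY_MIGRATION) |
                                  (1 << DIRTY_MEMORY_CODE);

    return (dirty_mask & ~handled_dirty) == 0;
}

int main(void)
{
    /* TCG guest during migration: CODE + MIGRATION bits -> accepted (1). */
    printf("tcg+migration: %d\n",
           vhost_can_handle_dirty_mask((1 << DIRTY_MEMORY_CODE) |
                                       (1 << DIRTY_MEMORY_MIGRATION)));

    /* Region doing VGA-style dirty tracking -> rejected (0). */
    printf("vga:           %d\n",
           vhost_can_handle_dirty_mask(1 << DIRTY_MEMORY_VGA));

    return 0;
}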