migration/next for 20150316

-----BEGIN PGP SIGNATURE-----
 Version: GnuPG v1
 
 iQIcBAABCAAGBQJVBtxlAAoJEPSH7xhYctcjokAQALpZdRqCAS89MSFfFMj0uSE+
 YL2jELoMHYoOutq7quc/dF5wpQpYeSWYAzcHBpdo63j9VlnoKfQ0CaOGdevnwhZT
 KUg9S+9UtvcqJUteZyd/JtY6HA83jXElJB6siTUMHJmdgYY4i5HSfUeYe/jqsfdT
 W2TYJQPMu1ecGFAjKJQafZRO0q5gYfR2XeEiAsdynIVvccPFKMKnE5CtoPnJtTPb
 83O36K3wi4lg0AgB7ti8QcNSf3JS2WuluKXG1TxrgoxfWAVQ0SQ79pq7xVZ7c4IZ
 BErm7ihhlOP/DlGaNsPzcYwVsP6uksm1TE/PO63QoWBxaVjmLPkV0ZLKbGkR8Cr0
 ulk/BUSAjXzMDoloVqz27e2SCrCA0p7tpGJ9nw/2xv01GNcVO6EFAJDHFLv8HmnH
 yVZDQnqO5wJkrUu5U5WQ1tKIktRZFRoyR0D6nzhAKSOWqFCOKMID6TH8ixmGSxA7
 HHzSxs48GHu9pztZ1tNqON1r1Ets5I+Zy760wC60DPeOrSlkcEvgRzfNF26V9cY/
 VSp8QedX5RE509HYvh73fVT5+brwPHMs36OSe/LSc4tomXZq83RjfDRjwYrAAAjW
 nBugr0hyBrATLiPQSYgt+GNly4Qg09jwkTvRcQqcNf22zKdFM4zjf+wvrbH/+9xa
 ZTuPQUSpHMNS3mv/Gj3b
 =DZVy
 -----END PGP SIGNATURE-----

Merge remote-tracking branch 'remotes/juanquintela/tags/migration/20150316' into staging

migration/next for 20150316

# gpg: Signature made Mon Mar 16 13:36:37 2015 GMT using RSA key ID 5872D723
# gpg: Can't check signature: public key not found

* remotes/juanquintela/tags/migration/20150316:
  pc: Disable vmdesc submission for old machines
  migration: Allow to suppress vmdesc submission
  migration: Read JSON VM description on incoming migration
  rename save_block_hdr to save_page_header
  save_block_hdr: we can recalculate the cont parameter here
  save_xbzrle_page: change calling convention
  ram_save_page: change calling convention
  ram_find_and_save_block: change calling convention
  ram: make all save_page functions take a uint64_t parameter
  Add migrate_incoming
  Add -incoming defer

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
commit bc68d2e512
Peter Maydell, 2015-03-16 14:45:09 +00:00
17 changed files with 292 additions and 85 deletions
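
For orientation before the diffs: the series adds a deferred-incoming workflow
('-incoming defer' plus the new migrate_incoming / migrate-incoming commands).
A minimal usage sketch follows; the destination command line is illustrative,
and the QMP exchange mirrors the example added to qmp-commands.hx below.

    # start the destination without binding the migration channel yet
    qemu-system-x86_64 ... -incoming defer

    # later, after tuning migration parameters, start listening (QMP):
    -> { "execute": "migrate-incoming", "arguments": { "uri": "tcp::4446" } }
    <- { "return": {} }

    # or from the HMP monitor:
    (qemu) migrate_incoming tcp::4446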


@@ -305,23 +305,6 @@ uint64_t xbzrle_mig_pages_overflow(void)
return acct_info.xbzrle_overflows;
}
static size_t save_block_hdr(QEMUFile *f, RAMBlock *block, ram_addr_t offset,
int cont, int flag)
{
size_t size;
qemu_put_be64(f, offset | cont | flag);
size = 8;
if (!cont) {
qemu_put_byte(f, strlen(block->idstr));
qemu_put_buffer(f, (uint8_t *)block->idstr,
strlen(block->idstr));
size += 1 + strlen(block->idstr);
}
return size;
}
/* This is the last block that we have visited serching for dirty pages
*/
static RAMBlock *last_seen_block;
@@ -333,6 +316,39 @@ static uint64_t migration_dirty_pages;
static uint32_t last_version;
static bool ram_bulk_stage;
/**
* save_page_header: Write page header to wire
*
* If this is the 1st block, it also writes the block identification
*
* Returns: Number of bytes written
*
* @f: QEMUFile where to send the data
* @block: block that contains the page we want to send
* @offset: offset inside the block for the page
* in the lower bits, it contains flags
*/
static size_t save_page_header(QEMUFile *f, RAMBlock *block, ram_addr_t offset)
{
size_t size;
if (block == last_sent_block) {
offset |= RAM_SAVE_FLAG_CONTINUE;
}
qemu_put_be64(f, offset);
size = 8;
if (block != last_sent_block) {
qemu_put_byte(f, strlen(block->idstr));
qemu_put_buffer(f, (uint8_t *)block->idstr,
strlen(block->idstr));
size += 1 + strlen(block->idstr);
last_sent_block = block;
}
return size;
}
/* Update the xbzrle cache to reflect a page that's been sent as all 0.
* The important thing is that a stale (not-yet-0'd) page be replaced
* by the new data.
@@ -353,11 +369,27 @@ static void xbzrle_cache_zero_page(ram_addr_t current_addr)
#define ENCODING_FLAG_XBZRLE 0x1
/**
* save_xbzrle_page: compress and send current page
*
* Returns: 1 means that we wrote the page
* 0 means that page is identical to the one already sent
* -1 means that xbzrle would be longer than normal
*
* @f: QEMUFile where to send the data
* @current_data:
* @current_addr:
* @block: block that contains the page we want to send
* @offset: offset inside the block for the page
* @last_stage: if we are at the completion stage
* @bytes_transferred: increase it with the number of transferred bytes
*/
static int save_xbzrle_page(QEMUFile *f, uint8_t **current_data,
ram_addr_t current_addr, RAMBlock *block,
ram_addr_t offset, int cont, bool last_stage)
ram_addr_t offset, bool last_stage,
uint64_t *bytes_transferred)
{
int encoded_len = 0, bytes_sent = -1;
int encoded_len = 0, bytes_xbzrle;
uint8_t *prev_cached_page;
if (!cache_is_cached(XBZRLE.cache, current_addr, bitmap_sync_count)) {
@@ -404,15 +436,16 @@ static int save_xbzrle_page(QEMUFile *f, uint8_t **current_data,
}
/* Send XBZRLE based compressed page */
bytes_sent = save_block_hdr(f, block, offset, cont, RAM_SAVE_FLAG_XBZRLE);
bytes_xbzrle = save_page_header(f, block, offset | RAM_SAVE_FLAG_XBZRLE);
qemu_put_byte(f, ENCODING_FLAG_XBZRLE);
qemu_put_be16(f, encoded_len);
qemu_put_buffer(f, XBZRLE.encoded_buf, encoded_len);
bytes_sent += encoded_len + 1 + 2;
bytes_xbzrle += encoded_len + 1 + 2;
acct_info.xbzrle_pages++;
acct_info.xbzrle_bytes += bytes_sent;
acct_info.xbzrle_bytes += bytes_xbzrle;
*bytes_transferred += bytes_xbzrle;
return bytes_sent;
return 1;
}
static inline
@@ -575,55 +608,64 @@ static void migration_bitmap_sync(void)
}
}
/*
/**
* ram_save_page: Send the given page to the stream
*
* Returns: Number of bytes written.
* Returns: Number of pages written.
*
* @f: QEMUFile where to send the data
* @block: block that contains the page we want to send
* @offset: offset inside the block for the page
* @last_stage: if we are at the completion stage
* @bytes_transferred: increase it with the number of transferred bytes
*/
static int ram_save_page(QEMUFile *f, RAMBlock* block, ram_addr_t offset,
bool last_stage)
bool last_stage, uint64_t *bytes_transferred)
{
int bytes_sent;
int cont;
int pages = -1;
uint64_t bytes_xmit;
ram_addr_t current_addr;
MemoryRegion *mr = block->mr;
uint8_t *p;
int ret;
bool send_async = true;
cont = (block == last_sent_block) ? RAM_SAVE_FLAG_CONTINUE : 0;
p = memory_region_get_ram_ptr(mr) + offset;
/* In doubt sent page as normal */
bytes_sent = -1;
bytes_xmit = 0;
ret = ram_control_save_page(f, block->offset,
offset, TARGET_PAGE_SIZE, &bytes_sent);
offset, TARGET_PAGE_SIZE, &bytes_xmit);
if (bytes_xmit) {
*bytes_transferred += bytes_xmit;
pages = 1;
}
XBZRLE_cache_lock();
current_addr = block->offset + offset;
if (ret != RAM_SAVE_CONTROL_NOT_SUPP) {
if (ret != RAM_SAVE_CONTROL_DELAYED) {
if (bytes_sent > 0) {
if (bytes_xmit > 0) {
acct_info.norm_pages++;
} else if (bytes_sent == 0) {
} else if (bytes_xmit == 0) {
acct_info.dup_pages++;
}
}
} else if (is_zero_range(p, TARGET_PAGE_SIZE)) {
acct_info.dup_pages++;
bytes_sent = save_block_hdr(f, block, offset, cont,
RAM_SAVE_FLAG_COMPRESS);
*bytes_transferred += save_page_header(f, block,
offset | RAM_SAVE_FLAG_COMPRESS);
qemu_put_byte(f, 0);
bytes_sent++;
*bytes_transferred += 1;
pages = 1;
/* Must let xbzrle know, otherwise a previous (now 0'd) cached
* page would be stale
*/
xbzrle_cache_zero_page(current_addr);
} else if (!ram_bulk_stage && migrate_use_xbzrle()) {
bytes_sent = save_xbzrle_page(f, &p, current_addr, block,
offset, cont, last_stage);
pages = save_xbzrle_page(f, &p, current_addr, block,
offset, last_stage, bytes_transferred);
if (!last_stage) {
/* Can't send this cached data async, since the cache page
* might get updated before it gets to the wire
@@ -633,37 +675,44 @@ static int ram_save_page(QEMUFile *f, RAMBlock* block, ram_addr_t offset,
}
/* XBZRLE overflow or normal page */
if (bytes_sent == -1) {
bytes_sent = save_block_hdr(f, block, offset, cont, RAM_SAVE_FLAG_PAGE);
if (pages == -1) {
*bytes_transferred += save_page_header(f, block,
offset | RAM_SAVE_FLAG_PAGE);
if (send_async) {
qemu_put_buffer_async(f, p, TARGET_PAGE_SIZE);
} else {
qemu_put_buffer(f, p, TARGET_PAGE_SIZE);
}
bytes_sent += TARGET_PAGE_SIZE;
*bytes_transferred += TARGET_PAGE_SIZE;
pages = 1;
acct_info.norm_pages++;
}
XBZRLE_cache_unlock();
return bytes_sent;
return pages;
}
/*
* ram_find_and_save_block: Finds a page to send and sends it to f
/**
* ram_find_and_save_block: Finds a dirty page and sends it to f
*
* Called within an RCU critical section.
*
* Returns: The number of bytes written.
* Returns: The number of pages written
* 0 means no dirty pages
*
* @f: QEMUFile where to send the data
* @last_stage: if we are at the completion stage
* @bytes_transferred: increase it with the number of transferred bytes
*/
static int ram_find_and_save_block(QEMUFile *f, bool last_stage)
static int ram_find_and_save_block(QEMUFile *f, bool last_stage,
uint64_t *bytes_transferred)
{
RAMBlock *block = last_seen_block;
ram_addr_t offset = last_offset;
bool complete_round = false;
int bytes_sent = 0;
int pages = 0;
MemoryRegion *mr;
if (!block)
@@ -685,11 +734,11 @@ static int ram_find_and_save_block(QEMUFile *f, bool last_stage)
ram_bulk_stage = false;
}
} else {
bytes_sent = ram_save_page(f, block, offset, last_stage);
pages = ram_save_page(f, block, offset, last_stage,
bytes_transferred);
/* if page is unmodified, continue to the next */
if (bytes_sent > 0) {
last_sent_block = block;
if (pages > 0) {
break;
}
}
@@ -697,7 +746,8 @@ static int ram_find_and_save_block(QEMUFile *f, bool last_stage)
last_seen_block = block;
last_offset = offset;
return bytes_sent;
return pages;
}
static uint64_t bytes_transferred;
@@ -881,7 +931,7 @@ static int ram_save_iterate(QEMUFile *f, void *opaque)
int ret;
int i;
int64_t t0;
int total_sent = 0;
int pages_sent = 0;
rcu_read_lock();
if (ram_list.version != last_version) {
@@ -896,14 +946,14 @@ static int ram_save_iterate(QEMUFile *f, void *opaque)
t0 = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
i = 0;
while ((ret = qemu_file_rate_limit(f)) == 0) {
int bytes_sent;
int pages;
bytes_sent = ram_find_and_save_block(f, false);
/* no more blocks to sent */
if (bytes_sent == 0) {
pages = ram_find_and_save_block(f, false, &bytes_transferred);
/* no more pages to sent */
if (pages == 0) {
break;
}
total_sent += bytes_sent;
pages_sent += pages;
acct_info.iterations++;
check_guest_throttling();
/* we want to check in the 1st loop, just in case it was the 1st time
@@ -929,12 +979,6 @@ static int ram_save_iterate(QEMUFile *f, void *opaque)
*/
ram_control_after_iterate(f, RAM_CONTROL_ROUND);
bytes_transferred += total_sent;
/*
* Do not count these 8 bytes into total_sent, so that we can
* return 0 if no page had been dirtied.
*/
qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
bytes_transferred += 8;
@@ -943,7 +987,7 @@ static int ram_save_iterate(QEMUFile *f, void *opaque)
return ret;
}
return total_sent;
return pages_sent;
}
/* Called with iothread lock */
@@ -959,14 +1003,13 @@ static int ram_save_complete(QEMUFile *f, void *opaque)
/* flush all remaining blocks regardless of rate limiting */
while (true) {
int bytes_sent;
int pages;
bytes_sent = ram_find_and_save_block(f, true);
pages = ram_find_and_save_block(f, true, &bytes_transferred);
/* no more blocks to sent */
if (bytes_sent == 0) {
if (pages == 0) {
break;
}
bytes_transferred += bytes_sent;
}
ram_control_after_iterate(f, RAM_CONTROL_FINISH);


@@ -919,6 +919,22 @@ STEXI
@findex migrate_cancel
Cancel the current VM migration.
ETEXI
{
.name = "migrate_incoming",
.args_type = "uri:s",
.params = "uri",
.help = "Continue an incoming migration from an -incoming defer",
.mhandler.cmd = hmp_migrate_incoming,
},
STEXI
@item migrate_incoming @var{uri}
@findex migrate_incoming
Continue an incoming migration using the @var{uri} (that has the same syntax
as the -incoming option).
ETEXI
{

hmp.c

@@ -1116,6 +1116,20 @@ void hmp_migrate_cancel(Monitor *mon, const QDict *qdict)
qmp_migrate_cancel(NULL);
}
void hmp_migrate_incoming(Monitor *mon, const QDict *qdict)
{
Error *err = NULL;
const char *uri = qdict_get_str(qdict, "uri");
qmp_migrate_incoming(uri, &err);
if (err) {
monitor_printf(mon, "%s\n", error_get_pretty(err));
error_free(err);
return;
}
}
void hmp_migrate_set_downtime(Monitor *mon, const QDict *qdict)
{
double value = qdict_get_double(qdict, "value");

hmp.h

@@ -60,6 +60,7 @@ void hmp_snapshot_delete_blkdev_internal(Monitor *mon, const QDict *qdict);
void hmp_drive_mirror(Monitor *mon, const QDict *qdict);
void hmp_drive_backup(Monitor *mon, const QDict *qdict);
void hmp_migrate_cancel(Monitor *mon, const QDict *qdict);
void hmp_migrate_incoming(Monitor *mon, const QDict *qdict);
void hmp_migrate_set_downtime(Monitor *mon, const QDict *qdict);
void hmp_migrate_set_speed(Monitor *mon, const QDict *qdict);
void hmp_migrate_set_capability(Monitor *mon, const QDict *qdict);


@@ -254,6 +254,20 @@ static void machine_set_iommu(Object *obj, bool value, Error **errp)
ms->iommu = value;
}
static void machine_set_suppress_vmdesc(Object *obj, bool value, Error **errp)
{
MachineState *ms = MACHINE(obj);
ms->suppress_vmdesc = value;
}
static bool machine_get_suppress_vmdesc(Object *obj, Error **errp)
{
MachineState *ms = MACHINE(obj);
return ms->suppress_vmdesc;
}
static int error_on_sysbus_device(SysBusDevice *sbdev, void *opaque)
{
error_report("Option '-device %s' cannot be handled by this machine",
@@ -377,6 +391,12 @@ static void machine_initfn(Object *obj)
object_property_set_description(obj, "iommu",
"Set on/off to enable/disable Intel IOMMU (VT-d)",
NULL);
object_property_add_bool(obj, "suppress-vmdesc",
machine_get_suppress_vmdesc,
machine_set_suppress_vmdesc, NULL);
object_property_set_description(obj, "suppress-vmdesc",
"Set on to disable self-describing migration",
NULL);
/* Register notifier when init is done for sysbus sanity checks */
ms->sysbus_notifier.notify = machine_init_notify;


@@ -335,6 +335,7 @@ static void pc_compat_2_2(MachineState *machine)
CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_RTM, 0);
x86_cpu_compat_set_features("Broadwell", FEAT_7_0_EBX,
CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_RTM, 0);
machine->suppress_vmdesc = true;
}
static void pc_compat_2_1(MachineState *machine)


@@ -314,6 +314,7 @@ static void pc_compat_2_2(MachineState *machine)
CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_RTM, 0);
x86_cpu_compat_set_features("Broadwell", FEAT_7_0_EBX,
CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_RTM, 0);
machine->suppress_vmdesc = true;
}
static void pc_compat_2_1(MachineState *machine)


@@ -143,6 +143,7 @@ struct MachineState {
bool usb;
char *firmware;
bool iommu;
bool suppress_vmdesc;
ram_addr_t ram_size;
ram_addr_t maxram_size;


@@ -169,6 +169,6 @@ void ram_control_load_hook(QEMUFile *f, uint64_t flags);
size_t ram_control_save_page(QEMUFile *f, ram_addr_t block_offset,
ram_addr_t offset, size_t size,
int *bytes_sent);
uint64_t *bytes_sent);
#endif


@@ -82,7 +82,7 @@ typedef size_t (QEMURamSaveFunc)(QEMUFile *f, void *opaque,
ram_addr_t block_offset,
ram_addr_t offset,
size_t size,
int *bytes_sent);
uint64_t *bytes_sent);
/*
* Stop any read or write (depending on flags) on the underlying


@@ -49,6 +49,8 @@ enum {
static NotifierList migration_state_notifiers =
NOTIFIER_LIST_INITIALIZER(migration_state_notifiers);
static bool deferred_incoming;
/* When we add fault tolerance, we could have several
migrations at once. For now we don't need to add
dynamic creation of migration */
@@ -65,25 +67,40 @@ MigrationState *migrate_get_current(void)
return &current_migration;
}
/*
* Called on -incoming with a defer: uri.
* The migration can be started later after any parameters have been
* changed.
*/
static void deferred_incoming_migration(Error **errp)
{
if (deferred_incoming) {
error_setg(errp, "Incoming migration already deferred");
}
deferred_incoming = true;
}
void qemu_start_incoming_migration(const char *uri, Error **errp)
{
const char *p;
if (strstart(uri, "tcp:", &p))
if (!strcmp(uri, "defer")) {
deferred_incoming_migration(errp);
} else if (strstart(uri, "tcp:", &p)) {
tcp_start_incoming_migration(p, errp);
#ifdef CONFIG_RDMA
else if (strstart(uri, "rdma:", &p))
} else if (strstart(uri, "rdma:", &p)) {
rdma_start_incoming_migration(p, errp);
#endif
#if !defined(WIN32)
else if (strstart(uri, "exec:", &p))
} else if (strstart(uri, "exec:", &p)) {
exec_start_incoming_migration(p, errp);
else if (strstart(uri, "unix:", &p))
} else if (strstart(uri, "unix:", &p)) {
unix_start_incoming_migration(p, errp);
else if (strstart(uri, "fd:", &p))
} else if (strstart(uri, "fd:", &p)) {
fd_start_incoming_migration(p, errp);
#endif
else {
} else {
error_setg(errp, "unknown migration protocol: %s", uri);
}
}
@@ -415,6 +432,25 @@ void migrate_del_blocker(Error *reason)
migration_blockers = g_slist_remove(migration_blockers, reason);
}
void qmp_migrate_incoming(const char *uri, Error **errp)
{
Error *local_err = NULL;
if (!deferred_incoming) {
error_setg(errp, "'-incoming defer' is required for migrate_incoming");
return;
}
qemu_start_incoming_migration(uri, &local_err);
if (local_err) {
error_propagate(errp, local_err);
return;
}
deferred_incoming = false;
}
void qmp_migrate(const char *uri, bool has_blk, bool blk,
bool has_inc, bool inc, bool has_detach, bool detach,
Error **errp)


@@ -161,7 +161,8 @@ void ram_control_load_hook(QEMUFile *f, uint64_t flags)
}
size_t ram_control_save_page(QEMUFile *f, ram_addr_t block_offset,
ram_addr_t offset, size_t size, int *bytes_sent)
ram_addr_t offset, size_t size,
uint64_t *bytes_sent)
{
if (f->ops->save_page) {
int ret = f->ops->save_page(f, f->opaque, block_offset,


@@ -2654,7 +2654,7 @@ static int qemu_rdma_close(void *opaque)
*/
static size_t qemu_rdma_save_page(QEMUFile *f, void *opaque,
ram_addr_t block_offset, ram_addr_t offset,
size_t size, int *bytes_sent)
size_t size, uint64_t *bytes_sent)
{
QEMUFileRDMA *rfile = opaque;
RDMAContext *rdma = rfile->rdma;


@@ -1738,6 +1738,21 @@
{ 'command': 'migrate',
'data': {'uri': 'str', '*blk': 'bool', '*inc': 'bool', '*detach': 'bool' } }
##
# @migrate-incoming
#
# Start an incoming migration, the qemu must have been started
# with -incoming defer
#
# @uri: The Uniform Resource Identifier identifying the source or
# address to listen on
#
# Returns: nothing on success
#
# Since: 2.3
##
{ 'command': 'migrate-incoming', 'data': {'uri': 'str' } }
# @xen-save-devices-state:
#
# Save the state of all devices to file. The RAM and the block devices


@@ -39,7 +39,8 @@ DEF("machine", HAS_ARG, QEMU_OPTION_machine, \
" mem-merge=on|off controls memory merge support (default: on)\n"
" iommu=on|off controls emulated Intel IOMMU (VT-d) support (default=off)\n"
" aes-key-wrap=on|off controls support for AES key wrapping (default=on)\n"
" dea-key-wrap=on|off controls support for DEA key wrapping (default=on)\n",
" dea-key-wrap=on|off controls support for DEA key wrapping (default=on)\n"
" suppress-vmdesc=on|off disables self-describing migration (default=off)\n",
QEMU_ARCH_ALL)
STEXI
@item -machine [type=]@var{name}[,prop=@var{value}[,...]]
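
For reference, the new machine property documented above would be set on the
command line roughly like this (machine type chosen for illustration only):

    qemu-system-x86_64 -machine pc,suppress-vmdesc=on ...

For the pre-2.3 PC machine types patched earlier in this series (pc_compat_2_2),
the compat code forces suppress_vmdesc on, so no flag is needed there.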


@@ -661,7 +661,36 @@ Example:
<- { "return": {} }
EQMP
{
{
.name = "migrate-incoming",
.args_type = "uri:s",
.mhandler.cmd_new = qmp_marshal_input_migrate_incoming,
},
SQMP
migrate-incoming
----------------
Continue an incoming migration
Arguments:
- "uri": Source/listening URI (json-string)
Example:
-> { "execute": "migrate-incoming", "arguments": { "uri": "tcp::4446" } }
<- { "return": {} }
Notes:
(1) QEMU must be started with -incoming defer to allow migrate-incoming to
be used
(2) The uri format is the same as to -incoming
EQMP
{
.name = "migrate-set-cache-size",
.args_type = "value:o",
.mhandler.cmd_new = qmp_marshal_input_migrate_set_cache_size,


@@ -710,6 +710,12 @@ int qemu_savevm_state_iterate(QEMUFile *f)
return ret;
}
static bool should_send_vmdesc(void)
{
MachineState *machine = MACHINE(qdev_get_machine());
return !machine->suppress_vmdesc;
}
void qemu_savevm_state_complete(QEMUFile *f)
{
QJSON *vmdesc;
@@ -782,9 +788,11 @@ void qemu_savevm_state_complete(QEMUFile *f)
qjson_finish(vmdesc);
vmdesc_len = strlen(qjson_get_str(vmdesc));
qemu_put_byte(f, QEMU_VM_VMDESCRIPTION);
qemu_put_be32(f, vmdesc_len);
qemu_put_buffer(f, (uint8_t *)qjson_get_str(vmdesc), vmdesc_len);
if (should_send_vmdesc()) {
qemu_put_byte(f, QEMU_VM_VMDESCRIPTION);
qemu_put_be32(f, vmdesc_len);
qemu_put_buffer(f, (uint8_t *)qjson_get_str(vmdesc), vmdesc_len);
}
object_unref(OBJECT(vmdesc));
qemu_fflush(f);
@@ -930,6 +938,7 @@ int qemu_loadvm_state(QEMUFile *f)
uint8_t section_type;
unsigned int v;
int ret;
int file_error_after_eof = -1;
if (qemu_savevm_state_blocked(&local_err)) {
error_report("%s", error_get_pretty(local_err));
@@ -1035,6 +1044,24 @@ int qemu_loadvm_state(QEMUFile *f)
}
}
file_error_after_eof = qemu_file_get_error(f);
/*
* Try to read in the VMDESC section as well, so that dumping tools that
* intercept our migration stream have the chance to see it.
*/
if (qemu_get_byte(f) == QEMU_VM_VMDESCRIPTION) {
uint32_t size = qemu_get_be32(f);
uint8_t *buf = g_malloc(0x1000);
while (size > 0) {
uint32_t read_chunk = MIN(size, 0x1000);
qemu_get_buffer(f, buf, read_chunk);
size -= read_chunk;
}
g_free(buf);
}
cpu_synchronize_all_post_init();
ret = 0;
@@ -1046,7 +1073,8 @@ out:
}
if (ret == 0) {
ret = qemu_file_get_error(f);
/* We may not have a VMDESC section, so ignore relative errors */
ret = file_error_after_eof;
}
return ret;