Merge commit 'quintela/migration-next-v5' into staging

* commit '6c779f22a93cc6e4565b940ef616e3efc5b50ba5':
  Change ram_save_block to return -1 if there are no more changes
  ram: save_live_setup() we don't need to synchronize the dirty bitmap.
  ram: iterate phase
  ram: save_live_complete() only do one loop
  ram: save_live_setup() don't need to sent pages
  savevm: split save_live into stage2 and stage3
  savevm: split save_live_setup from save_live_state
  savevm: introduce is_active method
  savevm: Refactor cancel operation in its own operation
  savevm: remove SaveLiveStateHandler
  savevm: remove SaveSetParamsHandler
  savevm: Live migration handlers register the struct directly
  savevm: Use a struct to pass all handlers
Anthony Liguori 2012-07-30 09:58:41 -05:00
commit e6a7671998
6 changed files with 260 additions and 162 deletions
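
The series replaces the single stage-driven save_live callback with a SaveVMHandlers structure whose setup/iterate/complete hooks the savevm core calls explicitly. As a quick orientation before the diffs, here is a minimal sketch of what registration looks like after the merge. The SaveVMHandlers fields and the register_savevm_live() signature are the ones introduced below; the foo_* callbacks and foo_mig_init() are hypothetical names for illustration only and are not part of this commit.

/* Hypothetical example: foo_* names are made up; the struct fields and
 * register_savevm_live() follow the interface added by this merge. */
#include "vmstate.h"

static int foo_save_setup(QEMUFile *f, void *opaque)
{
    /* one-time work: announce sizes, start dirty tracking, ... */
    return 0;
}

static int foo_save_iterate(QEMUFile *f, void *opaque)
{
    /* send another chunk; return > 0 once the remaining data fits the
     * allowed downtime, 0 to be called again, < 0 on error */
    return 1;
}

static int foo_save_complete(QEMUFile *f, void *opaque)
{
    /* guest is stopped: flush whatever is left */
    return 0;
}

static int foo_load(QEMUFile *f, void *opaque, int version_id)
{
    return 0;
}

static void foo_cancel(void *opaque)
{
    /* migration aborted: undo what setup did */
}

static SaveVMHandlers savevm_foo_handlers = {
    .save_live_setup    = foo_save_setup,
    .save_live_iterate  = foo_save_iterate,
    .save_live_complete = foo_save_complete,
    .load_state         = foo_load,
    .cancel             = foo_cancel,
};

void foo_mig_init(void *foo_state)
{
    register_savevm_live(NULL, "foo", 0, 1, &savevm_foo_handlers, foo_state);
}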

arch_init.c

@@ -186,11 +186,19 @@ static void save_block_hdr(QEMUFile *f, RAMBlock *block, ram_addr_t offset,
 static RAMBlock *last_block;
 static ram_addr_t last_offset;
 
+/*
+ * ram_save_block: Writes a page of memory to the stream f
+ *
+ * Returns:  0: if the page hasn't changed
+ *          -1: if there are no more dirty pages
+ *           n: the amount of bytes written in other case
+ */
 static int ram_save_block(QEMUFile *f)
 {
     RAMBlock *block = last_block;
     ram_addr_t offset = last_offset;
-    int bytes_sent = 0;
+    int bytes_sent = -1;
     MemoryRegion *mr;
 
     if (!block)
@@ -298,50 +306,55 @@ static void migration_end(void)
     memory_global_dirty_log_stop();
 }
 
+static void ram_migration_cancel(void *opaque)
+{
+    migration_end();
+}
+
 #define MAX_WAIT 50     /* ms, half buffered_file limit */
 
-int ram_save_live(QEMUFile *f, int stage, void *opaque)
+static int ram_save_setup(QEMUFile *f, void *opaque)
 {
     ram_addr_t addr;
+    RAMBlock *block;
+
+    bytes_transferred = 0;
+    last_block = NULL;
+    last_offset = 0;
+    sort_ram_list();
+
+    /* Make sure all dirty bits are set */
+    QLIST_FOREACH(block, &ram_list.blocks, next) {
+        for (addr = 0; addr < block->length; addr += TARGET_PAGE_SIZE) {
+            if (!memory_region_get_dirty(block->mr, addr, TARGET_PAGE_SIZE,
+                                         DIRTY_MEMORY_MIGRATION)) {
+                memory_region_set_dirty(block->mr, addr, TARGET_PAGE_SIZE);
+            }
+        }
+    }
+
+    memory_global_dirty_log_start();
+
+    qemu_put_be64(f, ram_bytes_total() | RAM_SAVE_FLAG_MEM_SIZE);
+
+    QLIST_FOREACH(block, &ram_list.blocks, next) {
+        qemu_put_byte(f, strlen(block->idstr));
+        qemu_put_buffer(f, (uint8_t *)block->idstr, strlen(block->idstr));
+        qemu_put_be64(f, block->length);
+    }
+
+    qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
+
+    return 0;
+}
+
+static int ram_save_iterate(QEMUFile *f, void *opaque)
+{
     uint64_t bytes_transferred_last;
     double bwidth = 0;
     int ret;
     int i;
+    uint64_t expected_time;
 
-    if (stage < 0) {
-        migration_end();
-        return 0;
-    }
-
-    memory_global_sync_dirty_bitmap(get_system_memory());
-
-    if (stage == 1) {
-        RAMBlock *block;
-        bytes_transferred = 0;
-        last_block = NULL;
-        last_offset = 0;
-        sort_ram_list();
-
-        /* Make sure all dirty bits are set */
-        QLIST_FOREACH(block, &ram_list.blocks, next) {
-            for (addr = 0; addr < block->length; addr += TARGET_PAGE_SIZE) {
-                if (!memory_region_get_dirty(block->mr, addr, TARGET_PAGE_SIZE,
-                                             DIRTY_MEMORY_MIGRATION)) {
-                    memory_region_set_dirty(block->mr, addr, TARGET_PAGE_SIZE);
-                }
-            }
-        }
-
-        memory_global_dirty_log_start();
-
-        qemu_put_be64(f, ram_bytes_total() | RAM_SAVE_FLAG_MEM_SIZE);
-
-        QLIST_FOREACH(block, &ram_list.blocks, next) {
-            qemu_put_byte(f, strlen(block->idstr));
-            qemu_put_buffer(f, (uint8_t *)block->idstr, strlen(block->idstr));
-            qemu_put_be64(f, block->length);
-        }
-    }
-
     bytes_transferred_last = bytes_transferred;
     bwidth = qemu_get_clock_ns(rt_clock);
@@ -351,10 +364,11 @@ int ram_save_live(QEMUFile *f, int stage, void *opaque)
         int bytes_sent;
 
         bytes_sent = ram_save_block(f);
-        bytes_transferred += bytes_sent;
-        if (bytes_sent == 0) { /* no more blocks */
+        /* no more blocks to sent */
+        if (bytes_sent < 0) {
             break;
         }
+        bytes_transferred += bytes_sent;
         /* we want to check in the 1st loop, just in case it was the 1st time
            and we had to sync the dirty bitmap.
            qemu_get_clock_ns() is a bit expensive, so we only check each some
@@ -384,31 +398,46 @@ int ram_save_live(QEMUFile *f, int stage, void *opaque)
         bwidth = 0.000001;
     }
 
-    /* try transferring iterative blocks of memory */
-    if (stage == 3) {
-        int bytes_sent;
-
-        /* flush all remaining blocks regardless of rate limiting */
-        while ((bytes_sent = ram_save_block(f)) != 0) {
-            bytes_transferred += bytes_sent;
-        }
-        memory_global_dirty_log_stop();
-    }
-
     qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
 
-    if (stage == 2) {
-        uint64_t expected_time;
-        expected_time = ram_save_remaining() * TARGET_PAGE_SIZE / bwidth;
+    expected_time = ram_save_remaining() * TARGET_PAGE_SIZE / bwidth;
 
-        DPRINTF("ram_save_live: expected(%" PRIu64 ") <= max(%" PRIu64 ")?\n",
-                expected_time, migrate_max_downtime());
+    DPRINTF("ram_save_live: expected(%" PRIu64 ") <= max(%" PRIu64 ")?\n",
+            expected_time, migrate_max_downtime());
 
-        if (expected_time <= migrate_max_downtime()) {
-            memory_global_sync_dirty_bitmap(get_system_memory());
-            expected_time = ram_save_remaining() * TARGET_PAGE_SIZE / bwidth;
+    if (expected_time <= migrate_max_downtime()) {
+        memory_global_sync_dirty_bitmap(get_system_memory());
+        expected_time = ram_save_remaining() * TARGET_PAGE_SIZE / bwidth;
 
-            return expected_time <= migrate_max_downtime();
-        }
-        return 0;
-    }
+        return expected_time <= migrate_max_downtime();
+    }
     return 0;
 }
 
+static int ram_save_complete(QEMUFile *f, void *opaque)
+{
+    memory_global_sync_dirty_bitmap(get_system_memory());
+
+    /* try transferring iterative blocks of memory */
+
+    /* flush all remaining blocks regardless of rate limiting */
+    while (true) {
+        int bytes_sent;
+
+        bytes_sent = ram_save_block(f);
+        /* no more blocks to sent */
+        if (bytes_sent < 0) {
+            break;
+        }
+        bytes_transferred += bytes_sent;
+    }
+    memory_global_dirty_log_stop();
+
+    qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
+
+    return 0;
+}
+
 static inline void *host_from_stream_offset(QEMUFile *f,
                                             ram_addr_t offset,
                                             int flags)
@@ -439,7 +468,7 @@ static inline void *host_from_stream_offset(QEMUFile *f,
     return NULL;
 }
 
-int ram_load(QEMUFile *f, void *opaque, int version_id)
+static int ram_load(QEMUFile *f, void *opaque, int version_id)
 {
     ram_addr_t addr;
     int flags, ret = 0;
@@ -536,6 +565,14 @@ done:
     return ret;
 }
 
+SaveVMHandlers savevm_ram_handlers = {
+    .save_live_setup = ram_save_setup,
+    .save_live_iterate = ram_save_iterate,
+    .save_live_complete = ram_save_complete,
+    .load_state = ram_load,
+    .cancel = ram_migration_cancel,
+};
+
 #ifdef HAS_AUDIO
 struct soundhw {
     const char *name;

block-migration.c

@@ -536,30 +536,22 @@ static void blk_mig_cleanup(void)
     }
 }
 
-static int block_save_live(QEMUFile *f, int stage, void *opaque)
+static void block_migration_cancel(void *opaque)
+{
+    blk_mig_cleanup();
+}
+
+static int block_save_setup(QEMUFile *f, void *opaque)
 {
     int ret;
 
-    DPRINTF("Enter save live stage %d submitted %d transferred %d\n",
-            stage, block_mig_state.submitted, block_mig_state.transferred);
+    DPRINTF("Enter save live setup submitted %d transferred %d\n",
+            block_mig_state.submitted, block_mig_state.transferred);
 
-    if (stage < 0) {
-        blk_mig_cleanup();
-        return 0;
-    }
-
-    if (block_mig_state.blk_enable != 1) {
-        /* no need to migrate storage */
-        qemu_put_be64(f, BLK_MIG_FLAG_EOS);
-        return 1;
-    }
-
-    if (stage == 1) {
-        init_blk_migration(f);
+    init_blk_migration(f);
 
-        /* start track dirty blocks */
-        set_dirty_tracking(1);
-    }
+    /* start track dirty blocks */
+    set_dirty_tracking(1);
 
     flush_blks(f);
@@ -571,56 +563,98 @@ static int block_save_live(QEMUFile *f, int stage, void *opaque)
 
     blk_mig_reset_dirty_cursor();
 
-    if (stage == 2) {
-        /* control the rate of transfer */
-        while ((block_mig_state.submitted +
-                block_mig_state.read_done) * BLOCK_SIZE <
-               qemu_file_get_rate_limit(f)) {
-            if (block_mig_state.bulk_completed == 0) {
-                /* first finish the bulk phase */
-                if (blk_mig_save_bulked_block(f) == 0) {
-                    /* finished saving bulk on all devices */
-                    block_mig_state.bulk_completed = 1;
-                }
-            } else {
-                if (blk_mig_save_dirty_block(f, 1) == 0) {
-                    /* no more dirty blocks */
-                    break;
-                }
+    qemu_put_be64(f, BLK_MIG_FLAG_EOS);
+
+    return 0;
+}
+
+static int block_save_iterate(QEMUFile *f, void *opaque)
+{
+    int ret;
+
+    DPRINTF("Enter save live iterate submitted %d transferred %d\n",
+            block_mig_state.submitted, block_mig_state.transferred);
+
+    flush_blks(f);
+
+    ret = qemu_file_get_error(f);
+    if (ret) {
+        blk_mig_cleanup();
+        return ret;
+    }
+
+    blk_mig_reset_dirty_cursor();
+
+    /* control the rate of transfer */
+    while ((block_mig_state.submitted +
+            block_mig_state.read_done) * BLOCK_SIZE <
+           qemu_file_get_rate_limit(f)) {
+        if (block_mig_state.bulk_completed == 0) {
+            /* first finish the bulk phase */
+            if (blk_mig_save_bulked_block(f) == 0) {
+                /* finished saving bulk on all devices */
+                block_mig_state.bulk_completed = 1;
+            }
+        } else {
+            if (blk_mig_save_dirty_block(f, 1) == 0) {
+                /* no more dirty blocks */
+                break;
             }
         }
-
-        flush_blks(f);
-
-        ret = qemu_file_get_error(f);
-        if (ret) {
-            blk_mig_cleanup();
-            return ret;
-        }
     }
 
-    if (stage == 3) {
-        /* we know for sure that save bulk is completed and
-           all async read completed */
-        assert(block_mig_state.submitted == 0);
+    flush_blks(f);
 
-        while (blk_mig_save_dirty_block(f, 0) != 0);
+    ret = qemu_file_get_error(f);
+    if (ret) {
         blk_mig_cleanup();
+        return ret;
+    }
 
-        /* report completion */
-        qemu_put_be64(f, (100 << BDRV_SECTOR_BITS) | BLK_MIG_FLAG_PROGRESS);
+    qemu_put_be64(f, BLK_MIG_FLAG_EOS);
 
-        ret = qemu_file_get_error(f);
-        if (ret) {
-            return ret;
-        }
+    return is_stage2_completed();
+}
 
-        DPRINTF("Block migration completed\n");
+static int block_save_complete(QEMUFile *f, void *opaque)
+{
+    int ret;
+
+    DPRINTF("Enter save live complete submitted %d transferred %d\n",
+            block_mig_state.submitted, block_mig_state.transferred);
+
+    flush_blks(f);
+
+    ret = qemu_file_get_error(f);
+    if (ret) {
+        blk_mig_cleanup();
+        return ret;
     }
 
-    qemu_put_be64(f, BLK_MIG_FLAG_EOS);
+    blk_mig_reset_dirty_cursor();
 
-    return ((stage == 2) && is_stage2_completed());
+    /* we know for sure that save bulk is completed and
+       all async read completed */
+    assert(block_mig_state.submitted == 0);
+
+    while (blk_mig_save_dirty_block(f, 0) != 0) {
+        /* Do nothing */
+    }
+    blk_mig_cleanup();
+
+    /* report completion */
+    qemu_put_be64(f, (100 << BDRV_SECTOR_BITS) | BLK_MIG_FLAG_PROGRESS);
+
+    ret = qemu_file_get_error(f);
+    if (ret) {
+        return ret;
+    }
+
+    DPRINTF("Block migration completed\n");
+
+    qemu_put_be64(f, BLK_MIG_FLAG_EOS);
+
+    return 0;
 }
 
 static int block_load(QEMUFile *f, void *opaque, int version_id)
@@ -709,11 +743,26 @@ static void block_set_params(const MigrationParams *params, void *opaque)
     block_mig_state.blk_enable |= params->shared;
 }
 
+static bool block_is_active(void *opaque)
+{
+    return block_mig_state.blk_enable == 1;
+}
+
+SaveVMHandlers savevm_block_handlers = {
+    .set_params = block_set_params,
+    .save_live_setup = block_save_setup,
+    .save_live_iterate = block_save_iterate,
+    .save_live_complete = block_save_complete,
+    .load_state = block_load,
+    .cancel = block_migration_cancel,
+    .is_active = block_is_active,
+};
+
 void blk_mig_init(void)
 {
     QSIMPLEQ_INIT(&block_mig_state.bmds_list);
     QSIMPLEQ_INIT(&block_mig_state.blk_list);
 
-    register_savevm_live(NULL, "block", 0, 1, block_set_params,
-                         block_save_live, NULL, block_load, &block_mig_state);
+    register_savevm_live(NULL, "block", 0, 1, &savevm_block_handlers,
+                         &block_mig_state);
 }

migration.h

@@ -18,6 +18,7 @@
 #include "qemu-common.h"
 #include "notify.h"
 #include "error.h"
+#include "vmstate.h"
 
 struct MigrationParams {
     bool blk;
@@ -81,8 +82,7 @@ uint64_t ram_bytes_remaining(void);
 uint64_t ram_bytes_transferred(void);
 uint64_t ram_bytes_total(void);
 
-int ram_save_live(QEMUFile *f, int stage, void *opaque);
-int ram_load(QEMUFile *f, void *opaque, int version_id);
+extern SaveVMHandlers savevm_ram_handlers;
 
 /**
  * @migrate_add_blocker - prevent migration from proceeding

savevm.c

@@ -1171,10 +1171,7 @@ typedef struct SaveStateEntry {
     int alias_id;
     int version_id;
     int section_id;
-    SaveSetParamsHandler *set_params;
-    SaveLiveStateHandler *save_live_state;
-    SaveStateHandler *save_state;
-    LoadStateHandler *load_state;
+    SaveVMHandlers *ops;
     const VMStateDescription *vmsd;
     void *opaque;
     CompatEntry *compat;
@@ -1226,10 +1223,7 @@ int register_savevm_live(DeviceState *dev,
                          const char *idstr,
                          int instance_id,
                          int version_id,
-                         SaveSetParamsHandler *set_params,
-                         SaveLiveStateHandler *save_live_state,
-                         SaveStateHandler *save_state,
-                         LoadStateHandler *load_state,
+                         SaveVMHandlers *ops,
                          void *opaque)
 {
     SaveStateEntry *se;
@@ -1237,15 +1231,12 @@ int register_savevm_live(DeviceState *dev,
     se = g_malloc0(sizeof(SaveStateEntry));
     se->version_id = version_id;
     se->section_id = global_section_id++;
-    se->set_params = set_params;
-    se->save_live_state = save_live_state;
-    se->save_state = save_state;
-    se->load_state = load_state;
+    se->ops = ops;
     se->opaque = opaque;
     se->vmsd = NULL;
     se->no_migrate = 0;
     /* if this is a live_savem then set is_ram */
-    if (save_live_state != NULL) {
+    if (ops->save_live_setup != NULL) {
         se->is_ram = 1;
     }
@@ -1284,8 +1275,11 @@ int register_savevm(DeviceState *dev,
                     LoadStateHandler *load_state,
                     void *opaque)
 {
+    SaveVMHandlers *ops = g_malloc0(sizeof(SaveVMHandlers));
+    ops->save_state = save_state;
+    ops->load_state = load_state;
     return register_savevm_live(dev, idstr, instance_id, version_id,
-                                NULL, NULL, save_state, load_state, opaque);
+                                ops, opaque);
 }
 
 void unregister_savevm(DeviceState *dev, const char *idstr, void *opaque)
@@ -1309,6 +1303,7 @@ void unregister_savevm(DeviceState *dev, const char *idstr, void *opaque)
         if (se->compat) {
             g_free(se->compat);
         }
+        g_free(se->ops);
         g_free(se);
     }
 }
@@ -1327,9 +1322,6 @@ int vmstate_register_with_alias_id(DeviceState *dev, int instance_id,
     se = g_malloc0(sizeof(SaveStateEntry));
     se->version_id = vmsd->version_id;
     se->section_id = global_section_id++;
-    se->save_live_state = NULL;
-    se->save_state = NULL;
-    se->load_state = NULL;
     se->opaque = opaque;
     se->vmsd = vmsd;
     se->alias_id = alias_id;
@@ -1524,7 +1516,7 @@ void vmstate_save_state(QEMUFile *f, const VMStateDescription *vmsd,
 static int vmstate_load(QEMUFile *f, SaveStateEntry *se, int version_id)
 {
     if (!se->vmsd) {         /* Old style */
-        return se->load_state(f, se->opaque, version_id);
+        return se->ops->load_state(f, se->opaque, version_id);
     }
     return vmstate_load_state(f, se->vmsd, se->opaque, version_id);
 }
@@ -1532,7 +1524,7 @@ static int vmstate_load(QEMUFile *f, SaveStateEntry *se, int version_id)
 static void vmstate_save(QEMUFile *f, SaveStateEntry *se)
 {
     if (!se->vmsd) {         /* Old style */
-        se->save_state(f, se->opaque);
+        se->ops->save_state(f, se->opaque);
         return;
     }
     vmstate_save_state(f,se->vmsd, se->opaque);
@@ -1569,10 +1561,10 @@ int qemu_savevm_state_begin(QEMUFile *f,
     int ret;
 
     QTAILQ_FOREACH(se, &savevm_handlers, entry) {
-        if(se->set_params == NULL) {
+        if (!se->ops || !se->ops->set_params) {
             continue;
         }
-        se->set_params(params, se->opaque);
+        se->ops->set_params(params, se->opaque);
     }
 
     qemu_put_be32(f, QEMU_VM_FILE_MAGIC);
@@ -1581,9 +1573,14 @@ int qemu_savevm_state_begin(QEMUFile *f,
     QTAILQ_FOREACH(se, &savevm_handlers, entry) {
         int len;
 
-        if (se->save_live_state == NULL)
+        if (!se->ops || !se->ops->save_live_setup) {
             continue;
+        }
+        if (se->ops && se->ops->is_active) {
+            if (!se->ops->is_active(se->opaque)) {
+                continue;
+            }
+        }
         /* Section type */
         qemu_put_byte(f, QEMU_VM_SECTION_START);
         qemu_put_be32(f, se->section_id);
@@ -1596,7 +1593,7 @@ int qemu_savevm_state_begin(QEMUFile *f,
         qemu_put_be32(f, se->instance_id);
         qemu_put_be32(f, se->version_id);
 
-        ret = se->save_live_state(f, QEMU_VM_SECTION_START, se->opaque);
+        ret = se->ops->save_live_setup(f, se->opaque);
         if (ret < 0) {
             qemu_savevm_state_cancel(f);
             return ret;
@@ -1623,9 +1620,14 @@ int qemu_savevm_state_iterate(QEMUFile *f)
     int ret = 1;
 
     QTAILQ_FOREACH(se, &savevm_handlers, entry) {
-        if (se->save_live_state == NULL)
+        if (!se->ops || !se->ops->save_live_iterate) {
             continue;
+        }
+        if (se->ops && se->ops->is_active) {
+            if (!se->ops->is_active(se->opaque)) {
+                continue;
+            }
+        }
         if (qemu_file_rate_limit(f)) {
             return 0;
         }
@@ -1634,7 +1636,7 @@ int qemu_savevm_state_iterate(QEMUFile *f)
         qemu_put_byte(f, QEMU_VM_SECTION_PART);
         qemu_put_be32(f, se->section_id);
 
-        ret = se->save_live_state(f, QEMU_VM_SECTION_PART, se->opaque);
+        ret = se->ops->save_live_iterate(f, se->opaque);
         trace_savevm_section_end(se->section_id);
 
         if (ret <= 0) {
@@ -1663,15 +1665,20 @@ int qemu_savevm_state_complete(QEMUFile *f)
     cpu_synchronize_all_states();
 
     QTAILQ_FOREACH(se, &savevm_handlers, entry) {
-        if (se->save_live_state == NULL)
+        if (!se->ops || !se->ops->save_live_complete) {
             continue;
+        }
+        if (se->ops && se->ops->is_active) {
+            if (!se->ops->is_active(se->opaque)) {
+                continue;
+            }
+        }
 
         trace_savevm_section_start();
         /* Section type */
         qemu_put_byte(f, QEMU_VM_SECTION_END);
         qemu_put_be32(f, se->section_id);
 
-        ret = se->save_live_state(f, QEMU_VM_SECTION_END, se->opaque);
+        ret = se->ops->save_live_complete(f, se->opaque);
         trace_savevm_section_end(se->section_id);
         if (ret < 0) {
             return ret;
@@ -1681,9 +1688,9 @@ int qemu_savevm_state_complete(QEMUFile *f)
     QTAILQ_FOREACH(se, &savevm_handlers, entry) {
         int len;
 
-        if (se->save_state == NULL && se->vmsd == NULL)
+        if ((!se->ops || !se->ops->save_state) && !se->vmsd) {
             continue;
+        }
 
         trace_savevm_section_start();
         /* Section type */
         qemu_put_byte(f, QEMU_VM_SECTION_FULL);
@@ -1711,8 +1718,8 @@ void qemu_savevm_state_cancel(QEMUFile *f)
     SaveStateEntry *se;
 
     QTAILQ_FOREACH(se, &savevm_handlers, entry) {
-        if (se->save_live_state) {
-            se->save_live_state(f, -1, se->opaque);
+        if (se->ops && se->ops->cancel) {
+            se->ops->cancel(se->opaque);
         }
     }
 }
@@ -1765,7 +1772,7 @@ static int qemu_save_device_state(QEMUFile *f)
         if (se->is_ram) {
            continue;
        }
-        if (se->save_state == NULL && se->vmsd == NULL) {
+        if ((!se->ops || !se->ops->save_state) && !se->vmsd) {
            continue;
        }

vl.c

@@ -3437,8 +3437,7 @@ int main(int argc, char **argv, char **envp)
     default_drive(default_sdcard, snapshot, machine->use_scsi,
                   IF_SD, 0, SD_OPTS);
 
-    register_savevm_live(NULL, "ram", 0, 4, NULL, ram_save_live, NULL,
-                         ram_load, NULL);
+    register_savevm_live(NULL, "ram", 0, 4, &savevm_ram_handlers, NULL);
 
     if (nb_numa_nodes > 0) {
         int i;

vmstate.h

@@ -26,11 +26,20 @@
 #ifndef QEMU_VMSTATE_H
 #define QEMU_VMSTATE_H 1
 
-typedef void SaveSetParamsHandler(const MigrationParams *params, void * opaque);
 typedef void SaveStateHandler(QEMUFile *f, void *opaque);
-typedef int SaveLiveStateHandler(QEMUFile *f, int stage, void *opaque);
 typedef int LoadStateHandler(QEMUFile *f, void *opaque, int version_id);
 
+typedef struct SaveVMHandlers {
+    void (*set_params)(const MigrationParams *params, void * opaque);
+    SaveStateHandler *save_state;
+    int (*save_live_setup)(QEMUFile *f, void *opaque);
+    int (*save_live_iterate)(QEMUFile *f, void *opaque);
+    int (*save_live_complete)(QEMUFile *f, void *opaque);
+    void (*cancel)(void *opaque);
+    LoadStateHandler *load_state;
+    bool (*is_active)(void *opaque);
+} SaveVMHandlers;
+
 int register_savevm(DeviceState *dev,
                     const char *idstr,
                     int instance_id,
@@ -43,10 +52,7 @@ int register_savevm_live(DeviceState *dev,
                          const char *idstr,
                          int instance_id,
                          int version_id,
-                         SaveSetParamsHandler *set_params,
-                         SaveLiveStateHandler *save_live_state,
-                         SaveStateHandler *save_state,
-                         LoadStateHandler *load_state,
+                         SaveVMHandlers *ops,
                          void *opaque);
 
 void unregister_savevm(DeviceState *dev, const char *idstr, void *opaque);
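
Taken together, the savevm.c and vmstate.h changes above mean the core now drives every registered section through the same sequence: an optional is_active() gate, one save_live_setup() call (SECTION_START), repeated save_live_iterate() calls (SECTION_PART) until the handler reports it has converged, then save_live_complete() (SECTION_END), with cancel() used when migration is aborted. The sketch below condenses that flow for a single section; drive_one_section() is an invented helper, and the real code in qemu_savevm_state_begin/iterate/complete additionally interleaves all sections, writes the section headers and obeys the rate limiter.

/* Condensed illustration of the calling contract, not actual QEMU code. */
static int drive_one_section(QEMUFile *f, SaveVMHandlers *ops, void *opaque)
{
    int ret;

    if (ops->is_active && !ops->is_active(opaque)) {
        return 0;                                 /* e.g. block migration when not requested */
    }

    ret = ops->save_live_setup(f, opaque);        /* QEMU_VM_SECTION_START */
    if (ret < 0) {
        if (ops->cancel) {
            ops->cancel(opaque);
        }
        return ret;
    }

    do {
        ret = ops->save_live_iterate(f, opaque);  /* QEMU_VM_SECTION_PART */
    } while (ret == 0);                           /* 0 means "call me again" */
    if (ret < 0) {
        return ret;
    }

    return ops->save_live_complete(f, opaque);    /* QEMU_VM_SECTION_END */
}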