Merge remote-tracking branch 'remotes/jnsnow/tags/bitmaps-pull-request' into staging

-----BEGIN PGP SIGNATURE-----

iQIcBAABAgAGBQJaqD6PAAoJEH3vgQaq/DkO9FIP/3pAW3xJUDGYsONiebX1IbhA
VpoQCcjks3cHD18AUoVHufayJBUVfed1LhYPP8xoDuSRmKs1xU1O9FknxMQaL+Dw
kbliBY7GjN8A2EcCjW+ZwyNT/KpjyXXwuZ2PSnOSSiN3JK6wrLCzeZyKyOYewLCS
u9fKscnqWkg+awbCfDlVs92AaBAKoOP9loOq6e2J/jVY8HSDGb2owRnsxaWg8gJ8
J9BlnXENQ14jEwickD3sluPfWkhu9xh7cCocH8cfgXL5veGUELz0Ugx4RHcsAF9Q
SVDg/EhRRN11cvOkLnlggETaLbGtEE64AL4HhjxzCLraHsnEazPDwFgetB9mOhhF
Nqu8HuGcVvRgn89au89mxAvTSWX9KFq4oF8Vi+FZZHkLilRx6NJnMpUpd9zkSJDq
yjR2/BV0A9Ep1gvWX/rhpPrN5dALYHcaxoiSB497Yj4SI2ZSyzfrneteYdPv4EEc
3CSJ3l6NCGAE2dNXuVZTVqHyXOSl7mJQQmT53dtsSNipCMEsVr0mOx3DPNY26LIc
DUdnX6JOyZPU0wzOj8xjFNV72/gBEkqVZ5p9UJ+lrIYwOsTobpzfDtYquu4asda8
IN44mcbRCZRFIiZZOGEdnwf34vIpQKMiZAtszAaan9KXwTXV9LbipaomBEN88vUD
IgI5XsZTfiD2uIjnREWv
=ISfR
-----END PGP SIGNATURE-----

# gpg: Signature made Tue 13 Mar 2018 21:11:43 GMT
# gpg:                using RSA key 7DEF8106AAFC390E
# gpg: Good signature from "John Snow (John Huston) <jsnow@redhat.com>"
# Primary key fingerprint: FAEB 9711 A12C F475 812F  18F2 88A9 064D 1835 61EB
#      Subkey fingerprint: F9B7 ABDB BCAC DF95 BE76  CBD0 7DEF 8106 AAFC 390E

* remotes/jnsnow/tags/bitmaps-pull-request:
  iotests: add dirty bitmap postcopy test
  iotests: add dirty bitmap migration test
  migration: add postcopy migration of dirty bitmaps
  migration: allow qmp command migrate-start-postcopy for any postcopy
  migration: add is_active_iterate handler
  migration/qemu-file: add qemu_put_counted_string()
  migration: include migrate_dirty_bitmaps in migrate_postcopy
  qapi: add dirty-bitmaps migration capability
  migration: introduce postcopy-only pending
  dirty-bitmap: add locked state
  block/dirty-bitmap: add _locked version of bdrv_reclaim_dirty_bitmap
  block/dirty-bitmap: fix locking in bdrv_reclaim_dirty_bitmap
  block/dirty-bitmap: add bdrv_dirty_bitmap_enable_successor()

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Peter Maydell, 2018-03-16 14:15:18 +00:00
commit 9cc7d0cf6a
26 changed files with 1277 additions and 66 deletions

@@ -40,6 +40,8 @@ struct BdrvDirtyBitmap {
     QemuMutex *mutex;
     HBitmap *bitmap;            /* Dirty bitmap implementation */
     HBitmap *meta;              /* Meta dirty bitmap */
+    bool qmp_locked;            /* Bitmap is locked, it can't be modified
+                                   through QMP */
     BdrvDirtyBitmap *successor; /* Anonymous child; implies frozen status */
     char *name;                 /* Optional non-empty unique ID */
     int64_t size;               /* Size of the bitmap, in bytes */
@@ -183,6 +185,18 @@ bool bdrv_dirty_bitmap_frozen(BdrvDirtyBitmap *bitmap)
     return bitmap->successor;
 }
 
+void bdrv_dirty_bitmap_set_qmp_locked(BdrvDirtyBitmap *bitmap, bool qmp_locked)
+{
+    qemu_mutex_lock(bitmap->mutex);
+    bitmap->qmp_locked = qmp_locked;
+    qemu_mutex_unlock(bitmap->mutex);
+}
+
+bool bdrv_dirty_bitmap_qmp_locked(BdrvDirtyBitmap *bitmap)
+{
+    return bitmap->qmp_locked;
+}
+
 /* Called with BQL taken.  */
 bool bdrv_dirty_bitmap_enabled(BdrvDirtyBitmap *bitmap)
 {
@@ -194,6 +208,8 @@ DirtyBitmapStatus bdrv_dirty_bitmap_status(BdrvDirtyBitmap *bitmap)
 {
     if (bdrv_dirty_bitmap_frozen(bitmap)) {
         return DIRTY_BITMAP_STATUS_FROZEN;
+    } else if (bdrv_dirty_bitmap_qmp_locked(bitmap)) {
+        return DIRTY_BITMAP_STATUS_LOCKED;
     } else if (!bdrv_dirty_bitmap_enabled(bitmap)) {
         return DIRTY_BITMAP_STATUS_DISABLED;
     } else {
@@ -234,6 +250,59 @@ int bdrv_dirty_bitmap_create_successor(BlockDriverState *bs,
     return 0;
 }
 
+/* Called with BQL taken. */
+void bdrv_dirty_bitmap_enable_successor(BdrvDirtyBitmap *bitmap)
+{
+    qemu_mutex_lock(bitmap->mutex);
+    bdrv_enable_dirty_bitmap(bitmap->successor);
+    qemu_mutex_unlock(bitmap->mutex);
+}
+
+/* Called within bdrv_dirty_bitmap_lock..unlock */
+static void bdrv_do_release_matching_dirty_bitmap_locked(
+    BlockDriverState *bs, BdrvDirtyBitmap *bitmap,
+    bool (*cond)(BdrvDirtyBitmap *bitmap))
+{
+    BdrvDirtyBitmap *bm, *next;
+
+    QLIST_FOREACH_SAFE(bm, &bs->dirty_bitmaps, list, next) {
+        if ((!bitmap || bm == bitmap) && (!cond || cond(bm))) {
+            assert(!bm->active_iterators);
+            assert(!bdrv_dirty_bitmap_frozen(bm));
+            assert(!bm->meta);
+            QLIST_REMOVE(bm, list);
+            hbitmap_free(bm->bitmap);
+            g_free(bm->name);
+            g_free(bm);
+
+            if (bitmap) {
+                return;
+            }
+        }
+    }
+
+    if (bitmap) {
+        abort();
+    }
+}
+
+/* Called with BQL taken. */
+static void bdrv_do_release_matching_dirty_bitmap(
+    BlockDriverState *bs, BdrvDirtyBitmap *bitmap,
+    bool (*cond)(BdrvDirtyBitmap *bitmap))
+{
+    bdrv_dirty_bitmaps_lock(bs);
+    bdrv_do_release_matching_dirty_bitmap_locked(bs, bitmap, cond);
+    bdrv_dirty_bitmaps_unlock(bs);
+}
+
+/* Called within bdrv_dirty_bitmap_lock..unlock */
+static void bdrv_release_dirty_bitmap_locked(BlockDriverState *bs,
+                                             BdrvDirtyBitmap *bitmap)
+{
+    bdrv_do_release_matching_dirty_bitmap_locked(bs, bitmap, NULL);
+}
+
 /**
  * For a bitmap with a successor, yield our name to the successor,
  * delete the old bitmap, and return a handle to the new bitmap.
@@ -267,11 +336,11 @@ BdrvDirtyBitmap *bdrv_dirty_bitmap_abdicate(BlockDriverState *bs,
  * In cases of failure where we can no longer safely delete the parent,
  * we may wish to re-join the parent and child/successor.
  * The merged parent will be un-frozen, but not explicitly re-enabled.
- * Called with BQL taken.
+ * Called within bdrv_dirty_bitmap_lock..unlock and with BQL taken.
  */
-BdrvDirtyBitmap *bdrv_reclaim_dirty_bitmap(BlockDriverState *bs,
-                                           BdrvDirtyBitmap *parent,
-                                           Error **errp)
+BdrvDirtyBitmap *bdrv_reclaim_dirty_bitmap_locked(BlockDriverState *bs,
+                                                  BdrvDirtyBitmap *parent,
+                                                  Error **errp)
 {
     BdrvDirtyBitmap *successor = parent->successor;
 
@@ -284,12 +353,26 @@ BdrvDirtyBitmap *bdrv_reclaim_dirty_bitmap(BlockDriverState *bs,
         error_setg(errp, "Merging of parent and successor bitmap failed");
         return NULL;
     }
-    bdrv_release_dirty_bitmap(bs, successor);
+    bdrv_release_dirty_bitmap_locked(bs, successor);
     parent->successor = NULL;
 
     return parent;
 }
 
+/* Called with BQL taken. */
+BdrvDirtyBitmap *bdrv_reclaim_dirty_bitmap(BlockDriverState *bs,
+                                           BdrvDirtyBitmap *parent,
+                                           Error **errp)
+{
+    BdrvDirtyBitmap *ret;
+
+    qemu_mutex_lock(parent->mutex);
+    ret = bdrv_reclaim_dirty_bitmap_locked(bs, parent, errp);
+    qemu_mutex_unlock(parent->mutex);
+
+    return ret;
+}
+
 /**
  * Truncates _all_ bitmaps attached to a BDS.
  * Called with BQL taken.
@@ -313,36 +396,6 @@ static bool bdrv_dirty_bitmap_has_name(BdrvDirtyBitmap *bitmap)
     return !!bdrv_dirty_bitmap_name(bitmap);
 }
 
-/* Called with BQL taken.  */
-static void bdrv_do_release_matching_dirty_bitmap(
-    BlockDriverState *bs, BdrvDirtyBitmap *bitmap,
-    bool (*cond)(BdrvDirtyBitmap *bitmap))
-{
-    BdrvDirtyBitmap *bm, *next;
-
-    bdrv_dirty_bitmaps_lock(bs);
-    QLIST_FOREACH_SAFE(bm, &bs->dirty_bitmaps, list, next) {
-        if ((!bitmap || bm == bitmap) && (!cond || cond(bm))) {
-            assert(!bm->active_iterators);
-            assert(!bdrv_dirty_bitmap_frozen(bm));
-            assert(!bm->meta);
-            QLIST_REMOVE(bm, list);
-            hbitmap_free(bm->bitmap);
-            g_free(bm->name);
-            g_free(bm);
-
-            if (bitmap) {
-                goto out;
-            }
-        }
-    }
-
-    if (bitmap) {
-        abort();
-    }
-
-out:
-    bdrv_dirty_bitmaps_unlock(bs);
-}
-
 /* Called with BQL taken.  */
 void bdrv_release_dirty_bitmap(BlockDriverState *bs, BdrvDirtyBitmap *bitmap)
 {
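
For illustration only, a hypothetical sketch that is not part of this commit: the qmp_locked flag added above is set by a long-running internal user of a bitmap and checked by the QMP layer before clearing, removing or backing up from the bitmap, which is what the migration code later in this series does. do_long_running_work() below is a made-up placeholder.

    /* Hypothetical sketch: keep QMP away from a bitmap while it is in use. */
    static void use_bitmap_locked_against_qmp(BdrvDirtyBitmap *bitmap)
    {
        assert(!bdrv_dirty_bitmap_qmp_locked(bitmap));

        bdrv_dirty_bitmap_set_qmp_locked(bitmap, true);   /* QMP now refuses to modify it */
        do_long_running_work(bitmap);                     /* placeholder for the real work */
        bdrv_dirty_bitmap_set_qmp_locked(bitmap, false);  /* back under QMP control */
    }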

@@ -2118,6 +2118,9 @@ static void block_dirty_bitmap_clear_prepare(BlkActionState *common,
     if (bdrv_dirty_bitmap_frozen(state->bitmap)) {
         error_setg(errp, "Cannot modify a frozen bitmap");
         return;
+    } else if (bdrv_dirty_bitmap_qmp_locked(state->bitmap)) {
+        error_setg(errp, "Cannot modify a locked bitmap");
+        return;
     } else if (!bdrv_dirty_bitmap_enabled(state->bitmap)) {
         error_setg(errp, "Cannot clear a disabled bitmap");
         return;
@@ -2862,6 +2865,11 @@ void qmp_block_dirty_bitmap_remove(const char *node, const char *name,
                    "Bitmap '%s' is currently frozen and cannot be removed",
                    name);
         return;
+    } else if (bdrv_dirty_bitmap_qmp_locked(bitmap)) {
+        error_setg(errp,
+                   "Bitmap '%s' is currently locked and cannot be removed",
+                   name);
+        return;
     }
 
     if (bdrv_dirty_bitmap_get_persistance(bitmap)) {
@@ -2896,6 +2904,11 @@ void qmp_block_dirty_bitmap_clear(const char *node, const char *name,
                    "Bitmap '%s' is currently frozen and cannot be modified",
                    name);
         return;
+    } else if (bdrv_dirty_bitmap_qmp_locked(bitmap)) {
+        error_setg(errp,
+                   "Bitmap '%s' is currently locked and cannot be modified",
+                   name);
+        return;
     } else if (!bdrv_dirty_bitmap_enabled(bitmap)) {
         error_setg(errp,
                    "Bitmap '%s' is currently disabled and cannot be cleared",
@@ -3370,6 +3383,12 @@ static BlockJob *do_drive_backup(DriveBackup *backup, BlockJobTxn *txn,
             bdrv_unref(target_bs);
             goto out;
         }
+        if (bdrv_dirty_bitmap_qmp_locked(bmap)) {
+            error_setg(errp,
+                       "Bitmap '%s' is currently locked and cannot be used for "
+                       "backup", backup->bitmap);
+            goto out;
+        }
     }
 
     job = backup_job_create(backup->job_id, bs, target_bs, backup->speed,

@@ -183,15 +183,16 @@ static int cmma_save_setup(QEMUFile *f, void *opaque)
 }
 
 static void cmma_save_pending(QEMUFile *f, void *opaque, uint64_t max_size,
-                              uint64_t *non_postcopiable_pending,
-                              uint64_t *postcopiable_pending)
+                              uint64_t *res_precopy_only,
+                              uint64_t *res_compatible,
+                              uint64_t *res_postcopy_only)
 {
     S390StAttribState *sas = S390_STATTRIB(opaque);
     S390StAttribClass *sac = S390_STATTRIB_GET_CLASS(sas);
     long long res = sac->get_dirtycount(sas);
 
     if (res >= 0) {
-        *non_postcopiable_pending += res;
+        *res_precopy_only += res;
     }
 }

@@ -21,6 +21,7 @@ BdrvDirtyBitmap *bdrv_dirty_bitmap_abdicate(BlockDriverState *bs,
 BdrvDirtyBitmap *bdrv_reclaim_dirty_bitmap(BlockDriverState *bs,
                                            BdrvDirtyBitmap *bitmap,
                                            Error **errp);
+void bdrv_dirty_bitmap_enable_successor(BdrvDirtyBitmap *bitmap);
 BdrvDirtyBitmap *bdrv_find_dirty_bitmap(BlockDriverState *bs,
                                         const char *name);
 void bdrv_dirty_bitmap_make_anon(BdrvDirtyBitmap *bitmap);
@@ -68,6 +69,8 @@ void bdrv_dirty_bitmap_deserialize_finish(BdrvDirtyBitmap *bitmap);
 void bdrv_dirty_bitmap_set_readonly(BdrvDirtyBitmap *bitmap, bool value);
 void bdrv_dirty_bitmap_set_persistance(BdrvDirtyBitmap *bitmap,
                                        bool persistent);
+void bdrv_dirty_bitmap_set_qmp_locked(BdrvDirtyBitmap *bitmap, bool qmp_locked);
+
 
 /* Functions that require manual locking.  */
 void bdrv_dirty_bitmap_lock(BdrvDirtyBitmap *bitmap);
@@ -87,10 +90,14 @@ bool bdrv_dirty_bitmap_readonly(const BdrvDirtyBitmap *bitmap);
 bool bdrv_has_readonly_bitmaps(BlockDriverState *bs);
 bool bdrv_dirty_bitmap_get_autoload(const BdrvDirtyBitmap *bitmap);
 bool bdrv_dirty_bitmap_get_persistance(BdrvDirtyBitmap *bitmap);
+bool bdrv_dirty_bitmap_qmp_locked(BdrvDirtyBitmap *bitmap);
 bool bdrv_has_changed_persistent_bitmaps(BlockDriverState *bs);
 BdrvDirtyBitmap *bdrv_dirty_bitmap_next(BlockDriverState *bs,
                                         BdrvDirtyBitmap *bitmap);
 char *bdrv_dirty_bitmap_sha256(const BdrvDirtyBitmap *bitmap, Error **errp);
 int64_t bdrv_dirty_bitmap_next_zero(BdrvDirtyBitmap *bitmap, uint64_t start);
+BdrvDirtyBitmap *bdrv_reclaim_dirty_bitmap_locked(BlockDriverState *bs,
+                                                  BdrvDirtyBitmap *bitmap,
+                                                  Error **errp);
 
 #endif

@@ -56,4 +56,7 @@ bool migration_has_failed(MigrationState *);
 bool migration_in_postcopy_after_devices(MigrationState *);
 void migration_global_dump(Monitor *mon);
 
+/* migration/block-dirty-bitmap.c */
+void dirty_bitmap_mig_init(void);
+
 #endif

@@ -26,6 +26,15 @@ typedef struct SaveVMHandlers {
     bool (*is_active)(void *opaque);
     bool (*has_postcopy)(void *opaque);
 
+    /* is_active_iterate
+     * If it is not NULL then qemu_savevm_state_iterate will skip iteration if
+     * it returns false. For example, it is needed for only-postcopy-states,
+     * which needs to be handled by qemu_savevm_state_setup and
+     * qemu_savevm_state_pending, but do not need iterations until not in
+     * postcopy stage.
+     */
+    bool (*is_active_iterate)(void *opaque);
+
     /* This runs outside the iothread lock in the migration case, and
      * within the lock in the savevm case.  The callback had better only
      * use data that is local to the migration thread or protected
@@ -37,8 +46,21 @@ typedef struct SaveVMHandlers {
     int (*save_setup)(QEMUFile *f, void *opaque);
     void (*save_live_pending)(QEMUFile *f, void *opaque,
                               uint64_t threshold_size,
-                              uint64_t *non_postcopiable_pending,
-                              uint64_t *postcopiable_pending);
+                              uint64_t *res_precopy_only,
+                              uint64_t *res_compatible,
+                              uint64_t *res_postcopy_only);
+    /* Note for save_live_pending:
+     * - res_precopy_only is for data which must be migrated in precopy phase
+     *     or in stopped state, in other words - before target vm start
+     * - res_compatible is for data which may be migrated in any phase
+     * - res_postcopy_only is for data which must be migrated in postcopy phase
+     *     or in stopped state, in other words - after source vm stop
+     *
+     * Sum of res_precopy_only, res_compatible and res_postcopy_only is the
+     * whole amount of pending data.
+     */
+
     LoadStateHandler *load_state;
     int (*load_setup)(QEMUFile *f, void *opaque);
     int (*load_cleanup)(void *opaque);
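
For illustration only, a hypothetical sketch that is not part of this commit: with the save_live_pending signature above, a handler splits its pending data into three buckets instead of two. A handler whose data can go on either side of the postcopy switchover would account for it roughly as below; my_device_pending_bytes() is a made-up placeholder.

    static void my_save_pending(QEMUFile *f, void *opaque, uint64_t threshold_size,
                                uint64_t *res_precopy_only,
                                uint64_t *res_compatible,
                                uint64_t *res_postcopy_only)
    {
        uint64_t remaining = my_device_pending_bytes(opaque);

        if (migrate_postcopy_ram()) {
            /* May be sent either before or after the switchover. */
            *res_compatible += remaining;
        } else {
            /* Without postcopy, everything must go in the precopy phase. */
            *res_precopy_only += remaining;
        }
    }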

@@ -6,6 +6,7 @@ common-obj-y += qemu-file.o global_state.o
 common-obj-y += qemu-file-channel.o
 common-obj-y += xbzrle.o postcopy-ram.o
 common-obj-y += qjson.o
+common-obj-y += block-dirty-bitmap.o
 
 common-obj-$(CONFIG_RDMA) += rdma.o

@@ -0,0 +1,746 @@
/*
* Block dirty bitmap postcopy migration
*
* Copyright IBM, Corp. 2009
* Copyright (c) 2016-2017 Virtuozzo International GmbH. All rights reserved.
*
* Authors:
* Liran Schour <lirans@il.ibm.com>
* Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
*
* This work is licensed under the terms of the GNU GPL, version 2. See
* the COPYING file in the top-level directory.
* This file is derived from migration/block.c, so its author and IBM copyright
* are here, although content is quite different.
*
* Contributions after 2012-01-13 are licensed under the terms of the
* GNU GPL, version 2 or (at your option) any later version.
*
* ***
*
* Here postcopy migration of dirty bitmaps is realized. Only QMP-addressable
* bitmaps are migrated.
*
* Bitmap migration implies creating bitmap with the same name and granularity
* in destination QEMU. If the bitmap with the same name (for the same node)
* already exists on destination an error will be generated.
*
* format of migration:
*
* # Header (shared for different chunk types)
* 1, 2 or 4 bytes: flags (see qemu_{get,put}_bitmap_flags)
* [ 1 byte: node name size ] \ flags & DEVICE_NAME
* [ n bytes: node name ] /
* [ 1 byte: bitmap name size ] \ flags & BITMAP_NAME
* [ n bytes: bitmap name ] /
*
* # Start of bitmap migration (flags & START)
* header
* be64: granularity
* 1 byte: bitmap flags (corresponds to BdrvDirtyBitmap)
* bit 0 - bitmap is enabled
* bit 1 - bitmap is persistent
* bit 2 - bitmap is autoloading
* bits 3-7 - reserved, must be zero
*
* # Complete of bitmap migration (flags & COMPLETE)
* header
*
* # Data chunk of bitmap migration
* header
* be64: start sector
* be32: number of sectors
* [ be64: buffer size ] \ ! (flags & ZEROES)
* [ n bytes: buffer ] /
*
* The last chunk in stream should contain flags & EOS. The chunk may skip
* device and/or bitmap names, assuming them to be the same with the previous
* chunk.
*/
#include "qemu/osdep.h"
#include "block/block.h"
#include "block/block_int.h"
#include "sysemu/block-backend.h"
#include "qemu/main-loop.h"
#include "qemu/error-report.h"
#include "migration/misc.h"
#include "migration/migration.h"
#include "migration/qemu-file.h"
#include "migration/vmstate.h"
#include "migration/register.h"
#include "qemu/hbitmap.h"
#include "sysemu/sysemu.h"
#include "qemu/cutils.h"
#include "qapi/error.h"
#include "trace.h"
#define CHUNK_SIZE (1 << 10)
/* Flags occupy one, two or four bytes (Big Endian). The size is determined as
* follows:
* in first (most significant) byte bit 8 is clear --> one byte
* in first byte bit 8 is set --> two or four bytes, depending on second
* byte:
* | in second byte bit 8 is clear --> two bytes
* | in second byte bit 8 is set --> four bytes
*/
#define DIRTY_BITMAP_MIG_FLAG_EOS 0x01
#define DIRTY_BITMAP_MIG_FLAG_ZEROES 0x02
#define DIRTY_BITMAP_MIG_FLAG_BITMAP_NAME 0x04
#define DIRTY_BITMAP_MIG_FLAG_DEVICE_NAME 0x08
#define DIRTY_BITMAP_MIG_FLAG_START 0x10
#define DIRTY_BITMAP_MIG_FLAG_COMPLETE 0x20
#define DIRTY_BITMAP_MIG_FLAG_BITS 0x40
#define DIRTY_BITMAP_MIG_EXTRA_FLAGS 0x80
#define DIRTY_BITMAP_MIG_START_FLAG_ENABLED 0x01
#define DIRTY_BITMAP_MIG_START_FLAG_PERSISTENT 0x02
/* 0x04 was the "AUTOLOAD" flag in older versions, now it is ignored */
#define DIRTY_BITMAP_MIG_START_FLAG_RESERVED_MASK 0xf8
typedef struct DirtyBitmapMigBitmapState {
/* Written during setup phase. */
BlockDriverState *bs;
const char *node_name;
BdrvDirtyBitmap *bitmap;
uint64_t total_sectors;
uint64_t sectors_per_chunk;
QSIMPLEQ_ENTRY(DirtyBitmapMigBitmapState) entry;
uint8_t flags;
/* For bulk phase. */
bool bulk_completed;
uint64_t cur_sector;
} DirtyBitmapMigBitmapState;
typedef struct DirtyBitmapMigState {
QSIMPLEQ_HEAD(dbms_list, DirtyBitmapMigBitmapState) dbms_list;
bool bulk_completed;
bool no_bitmaps;
/* for send_bitmap_bits() */
BlockDriverState *prev_bs;
BdrvDirtyBitmap *prev_bitmap;
} DirtyBitmapMigState;
typedef struct DirtyBitmapLoadState {
uint32_t flags;
char node_name[256];
char bitmap_name[256];
BlockDriverState *bs;
BdrvDirtyBitmap *bitmap;
} DirtyBitmapLoadState;
static DirtyBitmapMigState dirty_bitmap_mig_state;
typedef struct DirtyBitmapLoadBitmapState {
BlockDriverState *bs;
BdrvDirtyBitmap *bitmap;
bool migrated;
} DirtyBitmapLoadBitmapState;
static GSList *enabled_bitmaps;
QemuMutex finish_lock;
void init_dirty_bitmap_incoming_migration(void)
{
qemu_mutex_init(&finish_lock);
}
static uint32_t qemu_get_bitmap_flags(QEMUFile *f)
{
uint8_t flags = qemu_get_byte(f);
if (flags & DIRTY_BITMAP_MIG_EXTRA_FLAGS) {
flags = flags << 8 | qemu_get_byte(f);
if (flags & DIRTY_BITMAP_MIG_EXTRA_FLAGS) {
flags = flags << 16 | qemu_get_be16(f);
}
}
return flags;
}
static void qemu_put_bitmap_flags(QEMUFile *f, uint32_t flags)
{
/* The code currently does not send flags of more than one byte */
assert(!(flags & (0xffffff00 | DIRTY_BITMAP_MIG_EXTRA_FLAGS)));
qemu_put_byte(f, flags);
}
static void send_bitmap_header(QEMUFile *f, DirtyBitmapMigBitmapState *dbms,
uint32_t additional_flags)
{
BlockDriverState *bs = dbms->bs;
BdrvDirtyBitmap *bitmap = dbms->bitmap;
uint32_t flags = additional_flags;
trace_send_bitmap_header_enter();
if (bs != dirty_bitmap_mig_state.prev_bs) {
dirty_bitmap_mig_state.prev_bs = bs;
flags |= DIRTY_BITMAP_MIG_FLAG_DEVICE_NAME;
}
if (bitmap != dirty_bitmap_mig_state.prev_bitmap) {
dirty_bitmap_mig_state.prev_bitmap = bitmap;
flags |= DIRTY_BITMAP_MIG_FLAG_BITMAP_NAME;
}
qemu_put_bitmap_flags(f, flags);
if (flags & DIRTY_BITMAP_MIG_FLAG_DEVICE_NAME) {
qemu_put_counted_string(f, dbms->node_name);
}
if (flags & DIRTY_BITMAP_MIG_FLAG_BITMAP_NAME) {
qemu_put_counted_string(f, bdrv_dirty_bitmap_name(bitmap));
}
}
static void send_bitmap_start(QEMUFile *f, DirtyBitmapMigBitmapState *dbms)
{
send_bitmap_header(f, dbms, DIRTY_BITMAP_MIG_FLAG_START);
qemu_put_be32(f, bdrv_dirty_bitmap_granularity(dbms->bitmap));
qemu_put_byte(f, dbms->flags);
}
static void send_bitmap_complete(QEMUFile *f, DirtyBitmapMigBitmapState *dbms)
{
send_bitmap_header(f, dbms, DIRTY_BITMAP_MIG_FLAG_COMPLETE);
}
static void send_bitmap_bits(QEMUFile *f, DirtyBitmapMigBitmapState *dbms,
uint64_t start_sector, uint32_t nr_sectors)
{
/* align for buffer_is_zero() */
uint64_t align = 4 * sizeof(long);
uint64_t unaligned_size =
bdrv_dirty_bitmap_serialization_size(
dbms->bitmap, start_sector << BDRV_SECTOR_BITS,
(uint64_t)nr_sectors << BDRV_SECTOR_BITS);
uint64_t buf_size = QEMU_ALIGN_UP(unaligned_size, align);
uint8_t *buf = g_malloc0(buf_size);
uint32_t flags = DIRTY_BITMAP_MIG_FLAG_BITS;
bdrv_dirty_bitmap_serialize_part(
dbms->bitmap, buf, start_sector << BDRV_SECTOR_BITS,
(uint64_t)nr_sectors << BDRV_SECTOR_BITS);
if (buffer_is_zero(buf, buf_size)) {
g_free(buf);
buf = NULL;
flags |= DIRTY_BITMAP_MIG_FLAG_ZEROES;
}
trace_send_bitmap_bits(flags, start_sector, nr_sectors, buf_size);
send_bitmap_header(f, dbms, flags);
qemu_put_be64(f, start_sector);
qemu_put_be32(f, nr_sectors);
/* if a block is zero we need to flush here since the network
* bandwidth is now a lot higher than the storage device bandwidth.
* thus if we queue zero blocks we slow down the migration. */
if (flags & DIRTY_BITMAP_MIG_FLAG_ZEROES) {
qemu_fflush(f);
} else {
qemu_put_be64(f, buf_size);
qemu_put_buffer(f, buf, buf_size);
}
g_free(buf);
}
/* Called with iothread lock taken. */
static void dirty_bitmap_mig_cleanup(void)
{
DirtyBitmapMigBitmapState *dbms;
while ((dbms = QSIMPLEQ_FIRST(&dirty_bitmap_mig_state.dbms_list)) != NULL) {
QSIMPLEQ_REMOVE_HEAD(&dirty_bitmap_mig_state.dbms_list, entry);
bdrv_dirty_bitmap_set_qmp_locked(dbms->bitmap, false);
bdrv_unref(dbms->bs);
g_free(dbms);
}
}
/* Called with iothread lock taken. */
static int init_dirty_bitmap_migration(void)
{
BlockDriverState *bs;
BdrvDirtyBitmap *bitmap;
DirtyBitmapMigBitmapState *dbms;
BdrvNextIterator it;
dirty_bitmap_mig_state.bulk_completed = false;
dirty_bitmap_mig_state.prev_bs = NULL;
dirty_bitmap_mig_state.prev_bitmap = NULL;
dirty_bitmap_mig_state.no_bitmaps = false;
for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
const char *drive_name = bdrv_get_device_or_node_name(bs);
/* skip automatically inserted nodes */
while (bs && bs->drv && bs->implicit) {
bs = backing_bs(bs);
}
for (bitmap = bdrv_dirty_bitmap_next(bs, NULL); bitmap;
bitmap = bdrv_dirty_bitmap_next(bs, bitmap))
{
if (!bdrv_dirty_bitmap_name(bitmap)) {
continue;
}
if (drive_name == NULL) {
error_report("Found bitmap '%s' in unnamed node %p. It can't "
"be migrated", bdrv_dirty_bitmap_name(bitmap), bs);
goto fail;
}
if (bdrv_dirty_bitmap_frozen(bitmap)) {
error_report("Can't migrate frozen dirty bitmap: '%s",
bdrv_dirty_bitmap_name(bitmap));
goto fail;
}
if (bdrv_dirty_bitmap_qmp_locked(bitmap)) {
error_report("Can't migrate locked dirty bitmap: '%s",
bdrv_dirty_bitmap_name(bitmap));
goto fail;
}
bdrv_ref(bs);
bdrv_dirty_bitmap_set_qmp_locked(bitmap, true);
dbms = g_new0(DirtyBitmapMigBitmapState, 1);
dbms->bs = bs;
dbms->node_name = drive_name;
dbms->bitmap = bitmap;
dbms->total_sectors = bdrv_nb_sectors(bs);
dbms->sectors_per_chunk = CHUNK_SIZE * 8 *
bdrv_dirty_bitmap_granularity(bitmap) >> BDRV_SECTOR_BITS;
if (bdrv_dirty_bitmap_enabled(bitmap)) {
dbms->flags |= DIRTY_BITMAP_MIG_START_FLAG_ENABLED;
}
if (bdrv_dirty_bitmap_get_persistance(bitmap)) {
dbms->flags |= DIRTY_BITMAP_MIG_START_FLAG_PERSISTENT;
}
QSIMPLEQ_INSERT_TAIL(&dirty_bitmap_mig_state.dbms_list,
dbms, entry);
}
}
/* unset persistence here, so that it does not get rolled back */
QSIMPLEQ_FOREACH(dbms, &dirty_bitmap_mig_state.dbms_list, entry) {
bdrv_dirty_bitmap_set_persistance(dbms->bitmap, false);
}
if (QSIMPLEQ_EMPTY(&dirty_bitmap_mig_state.dbms_list)) {
dirty_bitmap_mig_state.no_bitmaps = true;
}
return 0;
fail:
dirty_bitmap_mig_cleanup();
return -1;
}
/* Called with no lock taken. */
static void bulk_phase_send_chunk(QEMUFile *f, DirtyBitmapMigBitmapState *dbms)
{
uint32_t nr_sectors = MIN(dbms->total_sectors - dbms->cur_sector,
dbms->sectors_per_chunk);
send_bitmap_bits(f, dbms, dbms->cur_sector, nr_sectors);
dbms->cur_sector += nr_sectors;
if (dbms->cur_sector >= dbms->total_sectors) {
dbms->bulk_completed = true;
}
}
/* Called with no lock taken. */
static void bulk_phase(QEMUFile *f, bool limit)
{
DirtyBitmapMigBitmapState *dbms;
QSIMPLEQ_FOREACH(dbms, &dirty_bitmap_mig_state.dbms_list, entry) {
while (!dbms->bulk_completed) {
bulk_phase_send_chunk(f, dbms);
if (limit && qemu_file_rate_limit(f)) {
return;
}
}
}
dirty_bitmap_mig_state.bulk_completed = true;
}
/* for SaveVMHandlers */
static void dirty_bitmap_save_cleanup(void *opaque)
{
dirty_bitmap_mig_cleanup();
}
static int dirty_bitmap_save_iterate(QEMUFile *f, void *opaque)
{
trace_dirty_bitmap_save_iterate(migration_in_postcopy());
if (migration_in_postcopy() && !dirty_bitmap_mig_state.bulk_completed) {
bulk_phase(f, true);
}
qemu_put_bitmap_flags(f, DIRTY_BITMAP_MIG_FLAG_EOS);
return dirty_bitmap_mig_state.bulk_completed;
}
/* Called with iothread lock taken. */
static int dirty_bitmap_save_complete(QEMUFile *f, void *opaque)
{
DirtyBitmapMigBitmapState *dbms;
trace_dirty_bitmap_save_complete_enter();
if (!dirty_bitmap_mig_state.bulk_completed) {
bulk_phase(f, false);
}
QSIMPLEQ_FOREACH(dbms, &dirty_bitmap_mig_state.dbms_list, entry) {
send_bitmap_complete(f, dbms);
}
qemu_put_bitmap_flags(f, DIRTY_BITMAP_MIG_FLAG_EOS);
trace_dirty_bitmap_save_complete_finish();
dirty_bitmap_mig_cleanup();
return 0;
}
static void dirty_bitmap_save_pending(QEMUFile *f, void *opaque,
uint64_t max_size,
uint64_t *res_precopy_only,
uint64_t *res_compatible,
uint64_t *res_postcopy_only)
{
DirtyBitmapMigBitmapState *dbms;
uint64_t pending = 0;
qemu_mutex_lock_iothread();
QSIMPLEQ_FOREACH(dbms, &dirty_bitmap_mig_state.dbms_list, entry) {
uint64_t gran = bdrv_dirty_bitmap_granularity(dbms->bitmap);
uint64_t sectors = dbms->bulk_completed ? 0 :
dbms->total_sectors - dbms->cur_sector;
pending += DIV_ROUND_UP(sectors * BDRV_SECTOR_SIZE, gran);
}
qemu_mutex_unlock_iothread();
trace_dirty_bitmap_save_pending(pending, max_size);
*res_postcopy_only += pending;
}
/* First occurrence of this bitmap. It should be created if it doesn't exist */
static int dirty_bitmap_load_start(QEMUFile *f, DirtyBitmapLoadState *s)
{
Error *local_err = NULL;
uint32_t granularity = qemu_get_be32(f);
uint8_t flags = qemu_get_byte(f);
if (s->bitmap) {
error_report("Bitmap with the same name ('%s') already exists on "
"destination", bdrv_dirty_bitmap_name(s->bitmap));
return -EINVAL;
} else {
s->bitmap = bdrv_create_dirty_bitmap(s->bs, granularity,
s->bitmap_name, &local_err);
if (!s->bitmap) {
error_report_err(local_err);
return -EINVAL;
}
}
if (flags & DIRTY_BITMAP_MIG_START_FLAG_RESERVED_MASK) {
error_report("Unknown flags in migrated dirty bitmap header: %x",
flags);
return -EINVAL;
}
if (flags & DIRTY_BITMAP_MIG_START_FLAG_PERSISTENT) {
bdrv_dirty_bitmap_set_persistance(s->bitmap, true);
}
bdrv_disable_dirty_bitmap(s->bitmap);
if (flags & DIRTY_BITMAP_MIG_START_FLAG_ENABLED) {
DirtyBitmapLoadBitmapState *b;
bdrv_dirty_bitmap_create_successor(s->bs, s->bitmap, &local_err);
if (local_err) {
error_report_err(local_err);
return -EINVAL;
}
b = g_new(DirtyBitmapLoadBitmapState, 1);
b->bs = s->bs;
b->bitmap = s->bitmap;
b->migrated = false;
enabled_bitmaps = g_slist_prepend(enabled_bitmaps, b);
}
return 0;
}
void dirty_bitmap_mig_before_vm_start(void)
{
GSList *item;
qemu_mutex_lock(&finish_lock);
for (item = enabled_bitmaps; item; item = g_slist_next(item)) {
DirtyBitmapLoadBitmapState *b = item->data;
if (b->migrated) {
bdrv_enable_dirty_bitmap(b->bitmap);
} else {
bdrv_dirty_bitmap_enable_successor(b->bitmap);
}
g_free(b);
}
g_slist_free(enabled_bitmaps);
enabled_bitmaps = NULL;
qemu_mutex_unlock(&finish_lock);
}
static void dirty_bitmap_load_complete(QEMUFile *f, DirtyBitmapLoadState *s)
{
GSList *item;
trace_dirty_bitmap_load_complete();
bdrv_dirty_bitmap_deserialize_finish(s->bitmap);
qemu_mutex_lock(&finish_lock);
for (item = enabled_bitmaps; item; item = g_slist_next(item)) {
DirtyBitmapLoadBitmapState *b = item->data;
if (b->bitmap == s->bitmap) {
b->migrated = true;
break;
}
}
if (bdrv_dirty_bitmap_frozen(s->bitmap)) {
bdrv_dirty_bitmap_lock(s->bitmap);
if (enabled_bitmaps == NULL) {
/* in postcopy */
bdrv_reclaim_dirty_bitmap_locked(s->bs, s->bitmap, &error_abort);
bdrv_enable_dirty_bitmap(s->bitmap);
} else {
/* target not started, successor must be empty */
int64_t count = bdrv_get_dirty_count(s->bitmap);
BdrvDirtyBitmap *ret = bdrv_reclaim_dirty_bitmap_locked(s->bs,
s->bitmap,
NULL);
/* bdrv_reclaim_dirty_bitmap() can fail only if there is no successor
* (there must be one here) or if the merge fails, and the merge can't
* fail when the second bitmap is empty.
*/
assert(ret == s->bitmap &&
count == bdrv_get_dirty_count(s->bitmap));
}
bdrv_dirty_bitmap_unlock(s->bitmap);
}
qemu_mutex_unlock(&finish_lock);
}
static int dirty_bitmap_load_bits(QEMUFile *f, DirtyBitmapLoadState *s)
{
uint64_t first_byte = qemu_get_be64(f) << BDRV_SECTOR_BITS;
uint64_t nr_bytes = (uint64_t)qemu_get_be32(f) << BDRV_SECTOR_BITS;
trace_dirty_bitmap_load_bits_enter(first_byte >> BDRV_SECTOR_BITS,
nr_bytes >> BDRV_SECTOR_BITS);
if (s->flags & DIRTY_BITMAP_MIG_FLAG_ZEROES) {
trace_dirty_bitmap_load_bits_zeroes();
bdrv_dirty_bitmap_deserialize_zeroes(s->bitmap, first_byte, nr_bytes,
false);
} else {
size_t ret;
uint8_t *buf;
uint64_t buf_size = qemu_get_be64(f);
uint64_t needed_size =
bdrv_dirty_bitmap_serialization_size(s->bitmap,
first_byte, nr_bytes);
if (needed_size > buf_size ||
buf_size > QEMU_ALIGN_UP(needed_size, 4 * sizeof(long))
/* Use the same alignment as in send_bitmap_bits() */
) {
error_report("Migrated bitmap granularity doesn't "
"match the destination bitmap '%s' granularity",
bdrv_dirty_bitmap_name(s->bitmap));
return -EINVAL;
}
buf = g_malloc(buf_size);
ret = qemu_get_buffer(f, buf, buf_size);
if (ret != buf_size) {
error_report("Failed to read bitmap bits");
return -EIO;
}
bdrv_dirty_bitmap_deserialize_part(s->bitmap, buf, first_byte, nr_bytes,
false);
g_free(buf);
}
return 0;
}
static int dirty_bitmap_load_header(QEMUFile *f, DirtyBitmapLoadState *s)
{
Error *local_err = NULL;
bool nothing;
s->flags = qemu_get_bitmap_flags(f);
trace_dirty_bitmap_load_header(s->flags);
nothing = s->flags == (s->flags & DIRTY_BITMAP_MIG_FLAG_EOS);
if (s->flags & DIRTY_BITMAP_MIG_FLAG_DEVICE_NAME) {
if (!qemu_get_counted_string(f, s->node_name)) {
error_report("Unable to read node name string");
return -EINVAL;
}
s->bs = bdrv_lookup_bs(s->node_name, s->node_name, &local_err);
if (!s->bs) {
error_report_err(local_err);
return -EINVAL;
}
} else if (!s->bs && !nothing) {
error_report("Error: block device name is not set");
return -EINVAL;
}
if (s->flags & DIRTY_BITMAP_MIG_FLAG_BITMAP_NAME) {
if (!qemu_get_counted_string(f, s->bitmap_name)) {
error_report("Unable to read bitmap name string");
return -EINVAL;
}
s->bitmap = bdrv_find_dirty_bitmap(s->bs, s->bitmap_name);
/* bitmap may be NULL here, it wouldn't be an error if it is the
* first occurrence of the bitmap */
if (!s->bitmap && !(s->flags & DIRTY_BITMAP_MIG_FLAG_START)) {
error_report("Error: unknown dirty bitmap "
"'%s' for block device '%s'",
s->bitmap_name, s->node_name);
return -EINVAL;
}
} else if (!s->bitmap && !nothing) {
error_report("Error: block device name is not set");
return -EINVAL;
}
return 0;
}
static int dirty_bitmap_load(QEMUFile *f, void *opaque, int version_id)
{
static DirtyBitmapLoadState s;
int ret = 0;
trace_dirty_bitmap_load_enter();
if (version_id != 1) {
return -EINVAL;
}
do {
ret = dirty_bitmap_load_header(f, &s);
if (s.flags & DIRTY_BITMAP_MIG_FLAG_START) {
ret = dirty_bitmap_load_start(f, &s);
} else if (s.flags & DIRTY_BITMAP_MIG_FLAG_COMPLETE) {
dirty_bitmap_load_complete(f, &s);
} else if (s.flags & DIRTY_BITMAP_MIG_FLAG_BITS) {
ret = dirty_bitmap_load_bits(f, &s);
}
if (!ret) {
ret = qemu_file_get_error(f);
}
if (ret) {
return ret;
}
} while (!(s.flags & DIRTY_BITMAP_MIG_FLAG_EOS));
trace_dirty_bitmap_load_success();
return 0;
}
static int dirty_bitmap_save_setup(QEMUFile *f, void *opaque)
{
DirtyBitmapMigBitmapState *dbms = NULL;
if (init_dirty_bitmap_migration() < 0) {
return -1;
}
QSIMPLEQ_FOREACH(dbms, &dirty_bitmap_mig_state.dbms_list, entry) {
send_bitmap_start(f, dbms);
}
qemu_put_bitmap_flags(f, DIRTY_BITMAP_MIG_FLAG_EOS);
return 0;
}
static bool dirty_bitmap_is_active(void *opaque)
{
return migrate_dirty_bitmaps() && !dirty_bitmap_mig_state.no_bitmaps;
}
static bool dirty_bitmap_is_active_iterate(void *opaque)
{
return dirty_bitmap_is_active(opaque) && !runstate_is_running();
}
static bool dirty_bitmap_has_postcopy(void *opaque)
{
return true;
}
static SaveVMHandlers savevm_dirty_bitmap_handlers = {
.save_setup = dirty_bitmap_save_setup,
.save_live_complete_postcopy = dirty_bitmap_save_complete,
.save_live_complete_precopy = dirty_bitmap_save_complete,
.has_postcopy = dirty_bitmap_has_postcopy,
.save_live_pending = dirty_bitmap_save_pending,
.save_live_iterate = dirty_bitmap_save_iterate,
.is_active_iterate = dirty_bitmap_is_active_iterate,
.load_state = dirty_bitmap_load,
.save_cleanup = dirty_bitmap_save_cleanup,
.is_active = dirty_bitmap_is_active,
};
void dirty_bitmap_mig_init(void)
{
QSIMPLEQ_INIT(&dirty_bitmap_mig_state.dbms_list);
register_savevm_live(NULL, "dirty-bitmap", 0, 1,
&savevm_dirty_bitmap_handlers,
&dirty_bitmap_mig_state);
}
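
For illustration only, a small self-contained check that is not part of this commit: every chunk flag this series actually sends fits into a single byte, so the DIRTY_BITMAP_MIG_EXTRA_FLAGS marker described in the header comment never appears on the wire yet; the two- and four-byte forms only matter for future extensions. The arithmetic for a START chunk carrying both names:

    #include <assert.h>
    #include <stdint.h>

    #define DIRTY_BITMAP_MIG_FLAG_BITMAP_NAME 0x04
    #define DIRTY_BITMAP_MIG_FLAG_DEVICE_NAME 0x08
    #define DIRTY_BITMAP_MIG_FLAG_START       0x10
    #define DIRTY_BITMAP_MIG_EXTRA_FLAGS      0x80

    int main(void)
    {
        /* A START chunk that carries both a node name and a bitmap name. */
        uint32_t flags = DIRTY_BITMAP_MIG_FLAG_START |
                         DIRTY_BITMAP_MIG_FLAG_DEVICE_NAME |
                         DIRTY_BITMAP_MIG_FLAG_BITMAP_NAME;

        assert(flags == 0x1c);
        /* The extra-flags bit stays clear, so one byte is enough on the wire. */
        assert(!(flags & DIRTY_BITMAP_MIG_EXTRA_FLAGS));
        return 0;
    }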

@@ -864,8 +864,9 @@ static int block_save_complete(QEMUFile *f, void *opaque)
 }
 
 static void block_save_pending(QEMUFile *f, void *opaque, uint64_t max_size,
-                               uint64_t *non_postcopiable_pending,
-                               uint64_t *postcopiable_pending)
+                               uint64_t *res_precopy_only,
+                               uint64_t *res_compatible,
+                               uint64_t *res_postcopy_only)
 {
     /* Estimate pending number of bytes to send */
     uint64_t pending;
@@ -886,7 +887,7 @@ static void block_save_pending(QEMUFile *f, void *opaque, uint64_t max_size,
     DPRINTF("Enter save live pending  %" PRIu64 "\n", pending);
 
     /* We don't do postcopy */
-    *non_postcopiable_pending += pending;
+    *res_precopy_only += pending;
 }
 
 static int block_load(QEMUFile *f, void *opaque, int version_id)

@@ -157,6 +157,9 @@ MigrationIncomingState *migration_incoming_get_current(void)
         memset(&mis_current, 0, sizeof(MigrationIncomingState));
         qemu_mutex_init(&mis_current.rp_mutex);
         qemu_event_init(&mis_current.main_thread_load_event, false);
+
+        init_dirty_bitmap_incoming_migration();
+
         once = true;
     }
     return &mis_current;
@@ -320,6 +323,8 @@ static void process_incoming_migration_bh(void *opaque)
        state, we need to obey autostart. Any other state is set with
        runstate_set. */
 
+    dirty_bitmap_mig_before_vm_start();
+
     if (!global_state_received() ||
         global_state_get_runstate() == RUN_STATE_RUNNING) {
         if (autostart) {
@@ -1022,7 +1027,7 @@ void qmp_migrate_start_postcopy(Error **errp)
 {
     MigrationState *s = migrate_get_current();
 
-    if (!migrate_postcopy_ram()) {
+    if (!migrate_postcopy()) {
         error_setg(errp, "Enable postcopy with migrate_set_capability before"
                          " the start of migration");
         return;
@@ -1508,7 +1513,7 @@ bool migrate_postcopy_ram(void)
 
 bool migrate_postcopy(void)
 {
-    return migrate_postcopy_ram();
+    return migrate_postcopy_ram() || migrate_dirty_bitmaps();
 }
 
 bool migrate_auto_converge(void)
@@ -1565,6 +1570,15 @@ int migrate_decompress_threads(void)
     return s->parameters.decompress_threads;
 }
 
+bool migrate_dirty_bitmaps(void)
+{
+    MigrationState *s;
+
+    s = migrate_get_current();
+
+    return s->enabled_capabilities[MIGRATION_CAPABILITY_DIRTY_BITMAPS];
+}
+
 bool migrate_use_events(void)
 {
     MigrationState *s;
@@ -2242,20 +2256,20 @@ typedef enum {
  */
 static MigIterateState migration_iteration_run(MigrationState *s)
 {
-    uint64_t pending_size, pend_post, pend_nonpost;
+    uint64_t pending_size, pend_pre, pend_compat, pend_post;
     bool in_postcopy = s->state == MIGRATION_STATUS_POSTCOPY_ACTIVE;
 
-    qemu_savevm_state_pending(s->to_dst_file, s->threshold_size,
-                              &pend_nonpost, &pend_post);
-    pending_size = pend_nonpost + pend_post;
+    qemu_savevm_state_pending(s->to_dst_file, s->threshold_size, &pend_pre,
+                              &pend_compat, &pend_post);
+    pending_size = pend_pre + pend_compat + pend_post;
 
     trace_migrate_pending(pending_size, s->threshold_size,
-                          pend_post, pend_nonpost);
+                          pend_pre, pend_compat, pend_post);
 
     if (pending_size && pending_size >= s->threshold_size) {
         /* Still a significant amount to transfer */
         if (migrate_postcopy() && !in_postcopy &&
-            pend_nonpost <= s->threshold_size &&
+            pend_pre <= s->threshold_size &&
             atomic_read(&s->start_postcopy)) {
             if (postcopy_start(s)) {
                 error_report("%s: postcopy failed to start", __func__);

@@ -205,6 +205,7 @@ bool migrate_postcopy(void);
 bool migrate_release_ram(void);
 bool migrate_postcopy_ram(void);
 bool migrate_zero_blocks(void);
+bool migrate_dirty_bitmaps(void);
 
 bool migrate_auto_converge(void);
 bool migrate_use_multifd(void);
@@ -234,4 +235,7 @@ void migrate_send_rp_pong(MigrationIncomingState *mis,
 int migrate_send_rp_req_pages(MigrationIncomingState *mis, const char* rbname,
                               ram_addr_t start, size_t len);
 
+void dirty_bitmap_mig_before_vm_start(void);
+void init_dirty_bitmap_incoming_migration(void);
+
 #endif

@@ -733,6 +733,19 @@ size_t qemu_get_counted_string(QEMUFile *f, char buf[256])
     return res == len ? res : 0;
 }
 
+/*
+ * Put a string with one preceding byte containing its length. The length of
+ * the string should be less than 256.
+ */
+void qemu_put_counted_string(QEMUFile *f, const char *str)
+{
+    size_t len = strlen(str);
+
+    assert(len < 256);
+    qemu_put_byte(f, len);
+    qemu_put_buffer(f, (const uint8_t *)str, len);
+}
+
 /*
  * Set the blocking state of the QEMUFile.
  * Note: On some transports the OS only keeps a single blocking state for
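
For illustration only, a hypothetical sketch that is not part of this commit: qemu_put_counted_string() is the sending-side counterpart of the pre-existing qemu_get_counted_string() shown above, writing one length byte followed by the string so the receiver can read it back into a fixed 256-byte buffer. The pairing, as it is used for node and bitmap names, looks roughly like this:

    /* Source side: one length byte plus the bytes of the name (< 256 long). */
    static void put_name(QEMUFile *f, const char *name)
    {
        qemu_put_counted_string(f, name);
    }

    /* Destination side: returns false if the string could not be read in full. */
    static bool get_name(QEMUFile *f, char buf[256])
    {
        return qemu_get_counted_string(f, buf) > 0;
    }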

@@ -174,4 +174,6 @@ size_t ram_control_save_page(QEMUFile *f, ram_addr_t block_offset,
                              ram_addr_t offset, size_t size,
                              uint64_t *bytes_sent);
 
+void qemu_put_counted_string(QEMUFile *f, const char *name);
+
 #endif

@@ -2370,8 +2370,9 @@ static int ram_save_complete(QEMUFile *f, void *opaque)
 }
 
 static void ram_save_pending(QEMUFile *f, void *opaque, uint64_t max_size,
-                             uint64_t *non_postcopiable_pending,
-                             uint64_t *postcopiable_pending)
+                             uint64_t *res_precopy_only,
+                             uint64_t *res_compatible,
+                             uint64_t *res_postcopy_only)
 {
     RAMState **temp = opaque;
     RAMState *rs = *temp;
@@ -2391,9 +2392,9 @@ static void ram_save_pending(QEMUFile *f, void *opaque, uint64_t max_size,
 
     if (migrate_postcopy_ram()) {
         /* We can do postcopy, and all the data is postcopiable */
-        *postcopiable_pending += remaining_size;
+        *res_compatible += remaining_size;
     } else {
-        *non_postcopiable_pending += remaining_size;
+        *res_precopy_only += remaining_size;
     }
 }

@@ -1029,6 +1029,11 @@ int qemu_savevm_state_iterate(QEMUFile *f, bool postcopy)
                 continue;
             }
         }
+        if (se->ops && se->ops->is_active_iterate) {
+            if (!se->ops->is_active_iterate(se->opaque)) {
+                continue;
+            }
+        }
         /*
          * In the postcopy phase, any device that doesn't know how to
          * do postcopy should have saved it's state in the _complete
@@ -1221,13 +1226,15 @@ int qemu_savevm_state_complete_precopy(QEMUFile *f, bool iterable_only,
  * for units that can't do postcopy.
  */
 void qemu_savevm_state_pending(QEMUFile *f, uint64_t threshold_size,
-                               uint64_t *res_non_postcopiable,
-                               uint64_t *res_postcopiable)
+                               uint64_t *res_precopy_only,
+                               uint64_t *res_compatible,
+                               uint64_t *res_postcopy_only)
 {
     SaveStateEntry *se;
 
-    *res_non_postcopiable = 0;
-    *res_postcopiable = 0;
+    *res_precopy_only = 0;
+    *res_compatible = 0;
+    *res_postcopy_only = 0;
 
     QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
@@ -1240,7 +1247,8 @@ void qemu_savevm_state_pending(QEMUFile *f, uint64_t threshold_size,
             }
         }
         se->ops->save_live_pending(f, se->opaque, threshold_size,
-                                   res_non_postcopiable, res_postcopiable);
+                                   res_precopy_only, res_compatible,
+                                   res_postcopy_only);
     }
 }
 
@@ -1686,6 +1694,8 @@ static void loadvm_postcopy_handle_run_bh(void *opaque)
 
     trace_loadvm_postcopy_handle_run_vmstart();
 
+    dirty_bitmap_mig_before_vm_start();
+
     if (autostart) {
         /* Hold onto your hats, starting the CPU */
         vm_start();

@@ -38,8 +38,9 @@ void qemu_savevm_state_complete_postcopy(QEMUFile *f);
 int qemu_savevm_state_complete_precopy(QEMUFile *f, bool iterable_only,
                                        bool inactivate_disks);
 void qemu_savevm_state_pending(QEMUFile *f, uint64_t max_size,
-                               uint64_t *res_non_postcopiable,
-                               uint64_t *res_postcopiable);
+                               uint64_t *res_precopy_only,
+                               uint64_t *res_compatible,
+                               uint64_t *res_postcopy_only);
 void qemu_savevm_send_ping(QEMUFile *f, uint32_t value);
 void qemu_savevm_send_open_return_path(QEMUFile *f);
 int qemu_savevm_send_packaged(QEMUFile *f, const uint8_t *buf, size_t len);

@@ -86,7 +86,7 @@ migrate_fd_cleanup(void) ""
 migrate_fd_error(const char *error_desc) "error=%s"
 migrate_fd_cancel(void) ""
 migrate_handle_rp_req_pages(const char *rbname, size_t start, size_t len) "in %s at 0x%zx len 0x%zx"
-migrate_pending(uint64_t size, uint64_t max, uint64_t post, uint64_t nonpost) "pending size %" PRIu64 " max %" PRIu64 " (post=%" PRIu64 " nonpost=%" PRIu64 ")"
+migrate_pending(uint64_t size, uint64_t max, uint64_t pre, uint64_t compat, uint64_t post) "pending size %" PRIu64 " max %" PRIu64 " (pre = %" PRIu64 " compat=%" PRIu64 " post=%" PRIu64 ")"
 migrate_send_rp_message(int msg_type, uint16_t len) "%d: len %d"
 migration_completion_file_err(void) ""
 migration_completion_postcopy_end(void) ""
@@ -227,3 +227,17 @@ colo_vm_state_change(const char *old, const char *new) "Change '%s' => '%s'"
 colo_send_message(const char *msg) "Send '%s' message"
 colo_receive_message(const char *msg) "Receive '%s' message"
 colo_failover_set_state(const char *new_state) "new state %s"
+
+# migration/block-dirty-bitmap.c
+send_bitmap_header_enter(void) ""
+send_bitmap_bits(uint32_t flags, uint64_t start_sector, uint32_t nr_sectors, uint64_t data_size) "flags: 0x%x, start_sector: %" PRIu64 ", nr_sectors: %" PRIu32 ", data_size: %" PRIu64
+dirty_bitmap_save_iterate(int in_postcopy) "in postcopy: %d"
+dirty_bitmap_save_complete_enter(void) ""
+dirty_bitmap_save_complete_finish(void) ""
+dirty_bitmap_save_pending(uint64_t pending, uint64_t max_size) "pending %" PRIu64 " max: %" PRIu64
+dirty_bitmap_load_complete(void) ""
+dirty_bitmap_load_bits_enter(uint64_t first_sector, uint32_t nr_sectors) "chunk: %" PRIu64 " %" PRIu32
+dirty_bitmap_load_bits_zeroes(void) ""
+dirty_bitmap_load_header(uint32_t flags) "flags 0x%x"
+dirty_bitmap_load_enter(void) ""
+dirty_bitmap_load_success(void) ""

@@ -426,10 +426,13 @@
 # @active: The bitmap is actively monitoring for new writes, and can be cleared,
 #          deleted, or used for backup operations.
 #
+# @locked: The bitmap is currently in-use by some operation and can not be
+#          cleared, deleted, or used for backup operations. (Since 2.12)
+#
 # Since: 2.4
 ##
 { 'enum': 'DirtyBitmapStatus',
-  'data': ['active', 'disabled', 'frozen'] }
+  'data': ['active', 'disabled', 'frozen', 'locked'] }
 
 ##
 # @BlockDirtyInfo:

@@ -354,12 +354,16 @@
 #
 # @x-multifd: Use more than one fd for migration (since 2.11)
 #
+# @dirty-bitmaps: If enabled, QEMU will migrate named dirty bitmaps.
+#                 (since 2.12)
+#
 # Since: 1.2
 ##
 { 'enum': 'MigrationCapability',
   'data': ['xbzrle', 'rdma-pin-all', 'auto-converge', 'zero-blocks',
            'compress', 'events', 'postcopy-ram', 'x-colo', 'release-ram',
-           'block', 'return-path', 'pause-before-switchover', 'x-multifd' ] }
+           'block', 'return-path', 'pause-before-switchover', 'x-multifd',
+           'dirty-bitmaps' ] }
 
 ##
 # @MigrationCapabilityStatus:

tests/qemu-iotests/169 (new executable file)

@@ -0,0 +1,156 @@
#!/usr/bin/env python
#
# Tests for dirty bitmaps migration.
#
# Copyright (c) 2016-2017 Virtuozzo International GmbH. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import os
import iotests
import time
import itertools
import operator
import new
from iotests import qemu_img
disk_a = os.path.join(iotests.test_dir, 'disk_a')
disk_b = os.path.join(iotests.test_dir, 'disk_b')
size = '1M'
mig_file = os.path.join(iotests.test_dir, 'mig_file')
class TestDirtyBitmapMigration(iotests.QMPTestCase):
def tearDown(self):
self.vm_a.shutdown()
self.vm_b.shutdown()
os.remove(disk_a)
os.remove(disk_b)
os.remove(mig_file)
def setUp(self):
qemu_img('create', '-f', iotests.imgfmt, disk_a, size)
qemu_img('create', '-f', iotests.imgfmt, disk_b, size)
self.vm_a = iotests.VM(path_suffix='a').add_drive(disk_a)
self.vm_a.launch()
self.vm_b = iotests.VM(path_suffix='b')
self.vm_b.add_incoming("exec: cat '" + mig_file + "'")
def add_bitmap(self, vm, granularity, persistent):
params = {'node': 'drive0',
'name': 'bitmap0',
'granularity': granularity}
if persistent:
params['persistent'] = True
params['autoload'] = True
result = vm.qmp('block-dirty-bitmap-add', **params)
self.assert_qmp(result, 'return', {});
def get_bitmap_hash(self, vm):
result = vm.qmp('x-debug-block-dirty-bitmap-sha256',
node='drive0', name='bitmap0')
return result['return']['sha256']
def check_bitmap(self, vm, sha256):
result = vm.qmp('x-debug-block-dirty-bitmap-sha256',
node='drive0', name='bitmap0')
if sha256:
self.assert_qmp(result, 'return/sha256', sha256);
else:
self.assert_qmp(result, 'error/desc',
"Dirty bitmap 'bitmap0' not found");
def do_test_migration(self, persistent, migrate_bitmaps, online,
shared_storage):
granularity = 512
# regions = ((start, count), ...)
regions = ((0, 0x10000),
(0xf0000, 0x10000),
(0xa0201, 0x1000))
should_migrate = migrate_bitmaps or persistent and shared_storage
self.vm_b.add_drive(disk_a if shared_storage else disk_b)
if online:
os.mkfifo(mig_file)
self.vm_b.launch()
self.add_bitmap(self.vm_a, granularity, persistent)
for r in regions:
self.vm_a.hmp_qemu_io('drive0', 'write %d %d' % r)
sha256 = self.get_bitmap_hash(self.vm_a)
if migrate_bitmaps:
capabilities = [{'capability': 'dirty-bitmaps', 'state': True}]
result = self.vm_a.qmp('migrate-set-capabilities',
capabilities=capabilities)
self.assert_qmp(result, 'return', {})
if online:
result = self.vm_b.qmp('migrate-set-capabilities',
capabilities=capabilities)
self.assert_qmp(result, 'return', {})
result = self.vm_a.qmp('migrate-set-capabilities',
capabilities=[{'capability': 'events',
'state': True}])
self.assert_qmp(result, 'return', {})
result = self.vm_a.qmp('migrate', uri='exec:cat>' + mig_file)
while True:
event = self.vm_a.event_wait('MIGRATION')
if event['data']['status'] == 'completed':
break
if not online:
self.vm_a.shutdown()
self.vm_b.launch()
# TODO enable bitmap capability for vm_b in this case
self.vm_b.event_wait("RESUME", timeout=10.0)
self.check_bitmap(self.vm_b, sha256 if should_migrate else False)
if should_migrate:
self.vm_b.shutdown()
self.vm_b.launch()
self.check_bitmap(self.vm_b, sha256 if persistent else False)
def inject_test_case(klass, name, method, *args, **kwargs):
mc = operator.methodcaller(method, *args, **kwargs)
setattr(klass, 'test_' + name, new.instancemethod(mc, None, klass))
for cmb in list(itertools.product((True, False), repeat=3)):
name = ('_' if cmb[0] else '_not_') + 'persistent_'
name += ('_' if cmb[1] else '_not_') + 'migbitmap_'
name += '_online' if cmb[2] else '_offline'
# TODO fix shared-storage bitmap migration and enable cases for it
args = list(cmb) + [False]
inject_test_case(TestDirtyBitmapMigration, name, 'do_test_migration',
*args)
if __name__ == '__main__':
iotests.main(supported_fmts=['qcow2'])

@@ -0,0 +1,5 @@
........
----------------------------------------------------------------------
Ran 8 tests
OK

tests/qemu-iotests/199 (new executable file)

@@ -0,0 +1,118 @@
#!/usr/bin/env python
#
# Tests for dirty bitmaps postcopy migration.
#
# Copyright (c) 2016-2017 Virtuozzo International GmbH. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import os
import iotests
import time
from iotests import qemu_img
disk_a = os.path.join(iotests.test_dir, 'disk_a')
disk_b = os.path.join(iotests.test_dir, 'disk_b')
size = '256G'
fifo = os.path.join(iotests.test_dir, 'mig_fifo')
class TestDirtyBitmapPostcopyMigration(iotests.QMPTestCase):
def tearDown(self):
self.vm_a.shutdown()
self.vm_b.shutdown()
os.remove(disk_a)
os.remove(disk_b)
os.remove(fifo)
def setUp(self):
os.mkfifo(fifo)
qemu_img('create', '-f', iotests.imgfmt, disk_a, size)
qemu_img('create', '-f', iotests.imgfmt, disk_b, size)
self.vm_a = iotests.VM(path_suffix='a').add_drive(disk_a)
self.vm_b = iotests.VM(path_suffix='b').add_drive(disk_b)
self.vm_b.add_incoming("exec: cat '" + fifo + "'")
self.vm_a.launch()
self.vm_b.launch()
def test_postcopy(self):
write_size = 0x40000000
granularity = 512
chunk = 4096
result = self.vm_a.qmp('block-dirty-bitmap-add', node='drive0',
name='bitmap', granularity=granularity)
self.assert_qmp(result, 'return', {});
s = 0
while s < write_size:
self.vm_a.hmp_qemu_io('drive0', 'write %d %d' % (s, chunk))
s += 0x10000
s = 0x8000
while s < write_size:
self.vm_a.hmp_qemu_io('drive0', 'write %d %d' % (s, chunk))
s += 0x10000
result = self.vm_a.qmp('x-debug-block-dirty-bitmap-sha256',
node='drive0', name='bitmap')
sha256 = result['return']['sha256']
result = self.vm_a.qmp('block-dirty-bitmap-clear', node='drive0',
name='bitmap')
self.assert_qmp(result, 'return', {});
s = 0
while s < write_size:
self.vm_a.hmp_qemu_io('drive0', 'write %d %d' % (s, chunk))
s += 0x10000
bitmaps_cap = {'capability': 'dirty-bitmaps', 'state': True}
events_cap = {'capability': 'events', 'state': True}
result = self.vm_a.qmp('migrate-set-capabilities',
capabilities=[bitmaps_cap, events_cap])
self.assert_qmp(result, 'return', {})
result = self.vm_b.qmp('migrate-set-capabilities',
capabilities=[bitmaps_cap])
self.assert_qmp(result, 'return', {})
result = self.vm_a.qmp('migrate', uri='exec:cat>' + fifo)
self.assert_qmp(result, 'return', {})
result = self.vm_a.qmp('migrate-start-postcopy')
self.assert_qmp(result, 'return', {})
while True:
event = self.vm_a.event_wait('MIGRATION')
if event['data']['status'] == 'completed':
break
s = 0x8000
while s < write_size:
self.vm_b.hmp_qemu_io('drive0', 'write %d %d' % (s, chunk))
s += 0x10000
result = self.vm_b.qmp('query-block');
while len(result['return'][0]['dirty-bitmaps']) > 1:
time.sleep(2)
result = self.vm_b.qmp('query-block');
result = self.vm_b.qmp('x-debug-block-dirty-bitmap-sha256',
node='drive0', name='bitmap')
self.assert_qmp(result, 'return/sha256', sha256);
if __name__ == '__main__':
iotests.main(supported_fmts=['qcow2'], supported_cache_modes=['none'])

@@ -0,0 +1,5 @@
.
----------------------------------------------------------------------
Ran 1 tests
OK

@@ -169,6 +169,7 @@
 162 auto quick
 163 rw auto quick
 165 rw auto quick
+169 rw auto quick
 170 rw auto quick
 171 rw auto quick
 172 auto
@@ -196,6 +197,7 @@
 196 rw auto quick
 197 rw auto quick
 198 rw auto
+199 rw auto
 200 rw auto
 201 rw auto migration
 202 rw auto quick

@@ -537,6 +537,10 @@ def verify_platform(supported_oses=['linux']):
     if True not in [sys.platform.startswith(x) for x in supported_oses]:
         notrun('not suitable for this OS: %s' % sys.platform)
 
+def verify_cache_mode(supported_cache_modes=[]):
+    if supported_cache_modes and (cachemode not in supported_cache_modes):
+        notrun('not suitable for this cache mode: %s' % cachemode)
+
 def supports_quorum():
     return 'quorum' in qemu_img_pipe('--help')
 
@@ -545,7 +549,7 @@ def verify_quorum():
     if not supports_quorum():
         notrun('quorum support missing')
 
-def main(supported_fmts=[], supported_oses=['linux']):
+def main(supported_fmts=[], supported_oses=['linux'], supported_cache_modes=[]):
     '''Run tests'''
 
     global debug
@@ -562,6 +566,7 @@ def main(supported_fmts=[], supported_oses=['linux']):
     verbosity = 1
     verify_image_format(supported_fmts)
     verify_platform(supported_oses)
+    verify_cache_mode(supported_cache_modes)
 
     # We need to filter out the time taken from the output so that qemu-iotest
     # can reliably diff the results against master output.

vl.c

@@ -4503,6 +4503,7 @@ int main(int argc, char **argv, char **envp)
     blk_mig_init();
     ram_mig_init();
+    dirty_bitmap_mig_init();
 
     /* If the currently selected machine wishes to override the units-per-bus
      * property of its default HBA interface type, do so now. */