migrate/ram: remove "ram_bulk_stage" and "fpo_enabled"

The bulk stage is kind of weird: migration_bitmap_find_dirty() may
indicate a dirty page, yet ram_save_host_page() will never save it, because
migration_bitmap_clear_dirty() detects that it is not actually dirty.
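
For illustration, here is a self-contained toy model of that disagreement
(hypothetical names; the real logic lives in migration/ram.c and is visible
in the hunks below):

    #include <stdbool.h>
    #include <stdio.h>

    struct toy_rs {
        bool ram_bulk_stage;   /* pre-patch: "we are in the first round" */
        bool fpo_enabled;      /* pre-patch: free page optimization on */
    };

    /* Pre-patch find: in the bulk stage, report "start + 1" as dirty
     * without ever consulting the bitmap. */
    static unsigned long toy_find_dirty(struct toy_rs *rs, const bool *bmap,
                                        unsigned long size,
                                        unsigned long start)
    {
        if (!rs->fpo_enabled && rs->ram_bulk_stage && start > 0) {
            return start + 1;
        }
        while (start < size && !bmap[start]) {
            start++;
        }
        return start;
    }

    /* Clearing always consults the bitmap, so it can disagree with find. */
    static bool toy_clear_dirty(bool *bmap, unsigned long page)
    {
        bool was_dirty = bmap[page];
        bmap[page] = false;
        return was_dirty;
    }

    int main(void)
    {
        struct toy_rs rs = { .ram_bulk_stage = true, .fpo_enabled = false };
        bool bmap[8] = { false };            /* nothing is actually dirty */
        unsigned long page = toy_find_dirty(&rs, bmap, 8, 1);

        /* Prints: find says page 2 is dirty, clear says it was NOT dirty */
        printf("find says page %lu is dirty, clear says it %s dirty\n",
               page, toy_clear_dirty(bmap, page) ? "was" : "was NOT");
        return 0;
    }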

We already fill the bitmap in ram_list_init_bitmaps() with ones, marking
everything dirty - it didn't use to be that way, which is why an explicit
first bulk stage was needed.
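
A condensed sketch of that initialization, paraphrased from
ram_list_init_bitmaps() (not the verbatim function; the real one also sets
up the clear bitmaps and takes the appropriate locks):

    RAMBlock *block;

    RAMBLOCK_FOREACH_NOT_IGNORED(block) {
        unsigned long pages = block->max_length >> TARGET_PAGE_BITS;

        block->bmap = bitmap_new(pages);       /* allocate... */
        bitmap_set(block->bmap, 0, pages);     /* ...and fill with ones */
    }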

Let's simplify: make the bitmap the single source of truth. Explicitly
handle the "xbzrle_enabled after first round" case.

Regarding XBZRLE (currently handled implicitly via "ram_bulk_stage =
false"), there is now a slight change in behavior:
- COLO: When starting, XBZRLE will be disabled (was implicitly enabled)
  until the first round actually finishes.
- Free page hinting: When starting, XBZRLE will be disabled (was implicitly
  enabled) until the first round actually finishes.
- Snapshots: When starting, XBZRLE will be disabled. We essentially only
  do a single run, so I guess it will never actually get enabled.

Postcopy already skips XBZRLE in ram_save_page() (via the
migration_in_postcopy() check), so there shouldn't really be any change.

Cc: "Michael S. Tsirkin" <mst@redhat.com>
Cc: Juan Quintela <quintela@redhat.com>
Cc: "Dr. David Alan Gilbert" <dgilbert@redhat.com>
Cc: Andrey Gruzdev <andrey.gruzdev@virtuozzo.com>
Cc: Peter Xu <peterx@redhat.com>
Signed-off-by: David Hildenbrand <david@redhat.com>
Message-Id: <20210216105039.40680-1-david@redhat.com>
Signed-off-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
commit 1a37352277 (parent dab59ce031)
Author: David Hildenbrand, 2021-02-16 11:50:39 +01:00
Committer: Dr. David Alan Gilbert
4 changed files with 18 additions and 68 deletions

--- a/hw/virtio/virtio-balloon.c
+++ b/hw/virtio/virtio-balloon.c

@@ -663,9 +663,6 @@ virtio_balloon_free_page_hint_notify(NotifierWithReturn *n, void *data)
     }
 
     switch (pnd->reason) {
-    case PRECOPY_NOTIFY_SETUP:
-        precopy_enable_free_page_optimization();
-        break;
     case PRECOPY_NOTIFY_BEFORE_BITMAP_SYNC:
         virtio_balloon_free_page_stop(dev);
         break;
@@ -685,6 +682,7 @@ virtio_balloon_free_page_hint_notify(NotifierWithReturn *n, void *data)
          */
         virtio_balloon_free_page_done(dev);
         break;
+    case PRECOPY_NOTIFY_SETUP:
     case PRECOPY_NOTIFY_COMPLETE:
         break;
     default:

--- a/hw/virtio/virtio-mem.c
+++ b/hw/virtio/virtio-mem.c

@@ -902,9 +902,6 @@ static int virtio_mem_precopy_notify(NotifierWithReturn *n, void *data)
     PrecopyNotifyData *pnd = data;
 
     switch (pnd->reason) {
-    case PRECOPY_NOTIFY_SETUP:
-        precopy_enable_free_page_optimization();
-        break;
     case PRECOPY_NOTIFY_AFTER_BITMAP_SYNC:
         virtio_mem_precopy_exclude_unplugged(vmem);
         break;

--- a/include/migration/misc.h
+++ b/include/migration/misc.h

@@ -37,7 +37,6 @@ void precopy_infrastructure_init(void);
 void precopy_add_notifier(NotifierWithReturn *n);
 void precopy_remove_notifier(NotifierWithReturn *n);
 int precopy_notify(PrecopyNotifyReason reason, Error **errp);
-void precopy_enable_free_page_optimization(void);
 void ram_mig_init(void);
 void qemu_guest_free_page_hint(void *addr, size_t len);
 

--- a/migration/ram.c
+++ b/migration/ram.c

@@ -311,10 +311,6 @@ struct RAMState {
     ram_addr_t last_page;
     /* last ram version we have seen */
     uint32_t last_version;
-    /* We are in the first round */
-    bool ram_bulk_stage;
-    /* The free page optimization is enabled */
-    bool fpo_enabled;
     /* How many times we have dirty too many pages */
     int dirty_rate_high_cnt;
     /* these variables are used for bitmap sync */
@@ -330,6 +326,8 @@ struct RAMState {
     uint64_t xbzrle_pages_prev;
     /* Amount of xbzrle encoded bytes since the beginning of the period */
     uint64_t xbzrle_bytes_prev;
+    /* Start using XBZRLE (e.g., after the first round). */
+    bool xbzrle_enabled;
     /* compression statistics since the beginning of the period */
     /* amount of count that no free thread to compress data */
     uint64_t compress_thread_busy_prev;
@@ -383,15 +381,6 @@ int precopy_notify(PrecopyNotifyReason reason, Error **errp)
     return notifier_with_return_list_notify(&precopy_notifier_list, &pnd);
 }
 
-void precopy_enable_free_page_optimization(void)
-{
-    if (!ram_state) {
-        return;
-    }
-
-    ram_state->fpo_enabled = true;
-}
-
 uint64_t ram_bytes_remaining(void)
 {
     return ram_state ? (ram_state->migration_dirty_pages * TARGET_PAGE_SIZE) :
@@ -664,7 +653,7 @@ static void mig_throttle_guest_down(uint64_t bytes_dirty_period,
  */
 static void xbzrle_cache_zero_page(RAMState *rs, ram_addr_t current_addr)
 {
-    if (rs->ram_bulk_stage || !migrate_use_xbzrle()) {
+    if (!rs->xbzrle_enabled) {
         return;
     }
 
@@ -792,23 +781,12 @@ unsigned long migration_bitmap_find_dirty(RAMState *rs, RAMBlock *rb,
 {
     unsigned long size = rb->used_length >> TARGET_PAGE_BITS;
     unsigned long *bitmap = rb->bmap;
-    unsigned long next;
 
     if (ramblock_is_ignored(rb)) {
         return size;
     }
 
-    /*
-     * When the free page optimization is enabled, we need to check the bitmap
-     * to send the non-free pages rather than all the pages in the bulk stage.
-     */
-    if (!rs->fpo_enabled && rs->ram_bulk_stage && start > 0) {
-        next = start + 1;
-    } else {
-        next = find_next_bit(bitmap, size, start);
-    }
-
-    return next;
+    return find_next_bit(bitmap, size, start);
 }
 
 static inline bool migration_bitmap_clear_dirty(RAMState *rs,
@@ -1185,8 +1163,7 @@ static int ram_save_page(RAMState *rs, PageSearchStatus *pss, bool last_stage)
     trace_ram_save_page(block->idstr, (uint64_t)offset, p);
 
     XBZRLE_cache_lock();
-    if (!rs->ram_bulk_stage && !migration_in_postcopy() &&
-        migrate_use_xbzrle()) {
+    if (rs->xbzrle_enabled && !migration_in_postcopy()) {
         pages = save_xbzrle_page(rs, &p, current_addr, block,
                                  offset, last_stage);
         if (!last_stage) {
@@ -1386,7 +1363,10 @@ static bool find_dirty_block(RAMState *rs, PageSearchStatus *pss, bool *again)
             pss->block = QLIST_FIRST_RCU(&ram_list.blocks);
             /* Flag that we've looped */
             pss->complete_round = true;
-            rs->ram_bulk_stage = false;
+            /* After the first round, enable XBZRLE. */
+            if (migrate_use_xbzrle()) {
+                rs->xbzrle_enabled = true;
+            }
         }
         /* Didn't find anything this time, but try again on the new block */
         *again = true;
@@ -1800,14 +1780,6 @@ static bool get_queued_page(RAMState *rs, PageSearchStatus *pss)
     }
 
     if (block) {
-        /*
-         * As soon as we start servicing pages out of order, then we have
-         * to kill the bulk stage, since the bulk stage assumes
-         * in (migration_bitmap_find_and_reset_dirty) that every page is
-         * dirty, that's no longer true.
-         */
-        rs->ram_bulk_stage = false;
-
         /*
          * We want the background search to continue from the queued page
          * since the guest is likely to want other pages near to the page
@@ -1920,15 +1892,15 @@ static bool save_page_use_compression(RAMState *rs)
     }
 
     /*
-     * If xbzrle is on, stop using the data compression after first
-     * round of migration even if compression is enabled. In theory,
-     * xbzrle can do better than compression.
+     * If xbzrle is enabled (e.g., after first round of migration), stop
+     * using the data compression. In theory, xbzrle can do better than
+     * compression.
      */
-    if (rs->ram_bulk_stage || !migrate_use_xbzrle()) {
-        return true;
+    if (rs->xbzrle_enabled) {
+        return false;
     }
 
-    return false;
+    return true;
 }
 
 /*
@@ -2235,8 +2207,7 @@ static void ram_state_reset(RAMState *rs)
     rs->last_sent_block = NULL;
     rs->last_page = 0;
     rs->last_version = ram_list.version;
-    rs->ram_bulk_stage = true;
-    rs->fpo_enabled = false;
+    rs->xbzrle_enabled = false;
 }
 
 #define MAX_WAIT 50 /* ms, half buffered_file limit */
@@ -2720,15 +2691,7 @@ static void ram_state_resume_prepare(RAMState *rs, QEMUFile *out)
     /* This may not be aligned with current bitmaps. Recalculate. */
     rs->migration_dirty_pages = pages;
 
-    rs->last_seen_block = NULL;
-    rs->last_sent_block = NULL;
-    rs->last_page = 0;
-    rs->last_version = ram_list.version;
-    /*
-     * Disable the bulk stage, otherwise we'll resend the whole RAM no
-     * matter what we have sent.
-     */
-    rs->ram_bulk_stage = false;
+    ram_state_reset(rs);
 
     /* Update RAMState cache of output QEMUFile */
     rs->f = out;
@@ -3345,16 +3308,9 @@ static void decompress_data_with_multi_threads(QEMUFile *f,
     }
 }
 
-/*
- * we must set ram_bulk_stage to false, otherwise in
- * migation_bitmap_find_dirty the bitmap will be unused and
- * all the pages in ram cache wil be flushed to the ram of
- * secondary VM.
- */
 static void colo_init_ram_state(void)
 {
     ram_state_init(&ram_state);
-    ram_state->ram_bulk_stage = false;
 }
 
 /*