Change the method to calculate dirty-pages-rate

In function cpu_physical_memory_sync_dirty_bitmap, file
include/exec/ram_addr.h:

if (src[idx][offset]) {
    unsigned long bits = atomic_xchg(&src[idx][offset], 0);
    unsigned long new_dirty;
    new_dirty = ~dest[k];
    dest[k] |= bits;
    new_dirty &= bits;
    num_dirty += ctpopl(new_dirty);
}

After this code executes, only the pages that were not already dirty in the
bitmap (dest) but are dirty in dirty_memory[DIRTY_MEMORY_MIGRATION] are counted.
For example:
When ram_list.dirty_memory[DIRTY_MEMORY_MIGRATION] = 0b00001111,
and atomic_rcu_read(&migration_bitmap_rcu)->bmap = 0b00000011,
new_dirty will be 0b00001100, so this function returns 2 instead of the
expected 4.
The dirty pages in dirty_memory[DIRTY_MEMORY_MIGRATION] are all newly dirtied
pages, so they should all be counted as well.

Signed-off-by: Chao Fan <fanc.fnst@cn.fujitsu.com>
Signed-off-by: Li Zhijian <lizhijian@cn.fujitsu.com>
Reviewed-by: Juan Quintela <quintela@redhat.com>
Signed-off-by: Juan Quintela <quintela@redhat.com>
This commit is contained in:
Chao Fan 2017-03-14 09:55:07 +08:00 committed by Juan Quintela
parent 1883ff34b5
commit 1ffb5dfd35
2 changed files with 9 additions and 8 deletions

View file

@@ -355,7 +355,8 @@ static inline void cpu_physical_memory_clear_dirty_range(ram_addr_t start,
 static inline
 uint64_t cpu_physical_memory_sync_dirty_bitmap(unsigned long *dest,
                                                ram_addr_t start,
-                                               ram_addr_t length)
+                                               ram_addr_t length,
+                                               int64_t *real_dirty_pages)
 {
     ram_addr_t addr;
     unsigned long page = BIT_WORD(start >> TARGET_PAGE_BITS);
@@ -379,6 +380,7 @@ uint64_t cpu_physical_memory_sync_dirty_bitmap(unsigned long *dest,
             if (src[idx][offset]) {
                 unsigned long bits = atomic_xchg(&src[idx][offset], 0);
                 unsigned long new_dirty;
+                *real_dirty_pages += ctpopl(bits);
                 new_dirty = ~dest[k];
                 dest[k] |= bits;
                 new_dirty &= bits;
@@ -398,6 +400,7 @@ uint64_t cpu_physical_memory_sync_dirty_bitmap(unsigned long *dest,
                                                       start + addr,
                                                       TARGET_PAGE_SIZE,
                                                       DIRTY_MEMORY_MIGRATION)) {
+                *real_dirty_pages += 1;
                 long k = (start + addr) >> TARGET_PAGE_BITS;
                 if (!test_and_set_bit(k, dest)) {
                     num_dirty++;

View file

@@ -576,18 +576,18 @@ static inline bool migration_bitmap_clear_dirty(ram_addr_t addr)
     return ret;
 }
 
+static int64_t num_dirty_pages_period;
 static void migration_bitmap_sync_range(ram_addr_t start, ram_addr_t length)
 {
     unsigned long *bitmap;
     bitmap = atomic_rcu_read(&migration_bitmap_rcu)->bmap;
-    migration_dirty_pages +=
-        cpu_physical_memory_sync_dirty_bitmap(bitmap, start, length);
+    migration_dirty_pages += cpu_physical_memory_sync_dirty_bitmap(bitmap,
+                                start, length, &num_dirty_pages_period);
 }
 
 /* Fix me: there are too many global variables used in migration process. */
 static int64_t start_time;
 static int64_t bytes_xfer_prev;
-static int64_t num_dirty_pages_period;
 static uint64_t xbzrle_cache_miss_prev;
 static uint64_t iterations_prev;
@@ -620,7 +620,6 @@ uint64_t ram_pagesize_summary(void)
 static void migration_bitmap_sync(void)
 {
     RAMBlock *block;
-    uint64_t num_dirty_pages_init = migration_dirty_pages;
     MigrationState *s = migrate_get_current();
     int64_t end_time;
     int64_t bytes_xfer_now;
@@ -646,9 +645,8 @@ static void migration_bitmap_sync(void)
     rcu_read_unlock();
     qemu_mutex_unlock(&migration_bitmap_mutex);
 
-    trace_migration_bitmap_sync_end(migration_dirty_pages
-                                    - num_dirty_pages_init);
-    num_dirty_pages_period += migration_dirty_pages - num_dirty_pages_init;
+    trace_migration_bitmap_sync_end(num_dirty_pages_period);
     end_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
 
     /* more than 1 second = 1000 millisecons */