memory: use atomic ops for setting dirty memory bits

Use set_bit_atomic() and bitmap_set_atomic() so that multiple threads
can dirty memory without race conditions.

Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Message-Id: <1417519399-3166-4-git-send-email-stefanha@redhat.com>
Reviewed-by: Fam Zheng <famz@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>

@@ -114,7 +114,7 @@ static inline void cpu_physical_memory_set_dirty_flag(ram_addr_t addr,
                                                       unsigned client)
 {
     assert(client < DIRTY_MEMORY_NUM);
-    set_bit(addr >> TARGET_PAGE_BITS, ram_list.dirty_memory[client]);
+    set_bit_atomic(addr >> TARGET_PAGE_BITS, ram_list.dirty_memory[client]);
 }
 
 static inline void cpu_physical_memory_set_dirty_range(ram_addr_t start,
@@ -122,17 +122,18 @@ static inline void cpu_physical_memory_set_dirty_range(ram_addr_t start,
                                                        uint8_t mask)
 {
     unsigned long end, page;
+    unsigned long **d = ram_list.dirty_memory;
 
     end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
     page = start >> TARGET_PAGE_BITS;
     if (likely(mask & (1 << DIRTY_MEMORY_MIGRATION))) {
-        bitmap_set(ram_list.dirty_memory[DIRTY_MEMORY_MIGRATION], page, end - page);
+        bitmap_set_atomic(d[DIRTY_MEMORY_MIGRATION], page, end - page);
     }
     if (unlikely(mask & (1 << DIRTY_MEMORY_VGA))) {
-        bitmap_set(ram_list.dirty_memory[DIRTY_MEMORY_VGA], page, end - page);
+        bitmap_set_atomic(d[DIRTY_MEMORY_VGA], page, end - page);
     }
     if (unlikely(mask & (1 << DIRTY_MEMORY_CODE))) {
-        bitmap_set(ram_list.dirty_memory[DIRTY_MEMORY_CODE], page, end - page);
+        bitmap_set_atomic(d[DIRTY_MEMORY_CODE], page, end - page);
     }
     xen_modified_memory(start, length);
 }
@@ -159,11 +160,12 @@ static inline void cpu_physical_memory_set_dirty_lebitmap(unsigned long *bitmap,
         for (k = 0; k < nr; k++) {
             if (bitmap[k]) {
                 unsigned long temp = leul_to_cpu(bitmap[k]);
+                unsigned long **d = ram_list.dirty_memory;
 
-                ram_list.dirty_memory[DIRTY_MEMORY_MIGRATION][page + k] |= temp;
-                ram_list.dirty_memory[DIRTY_MEMORY_VGA][page + k] |= temp;
+                atomic_or(&d[DIRTY_MEMORY_MIGRATION][page + k], temp);
+                atomic_or(&d[DIRTY_MEMORY_VGA][page + k], temp);
                 if (tcg_enabled()) {
-                    ram_list.dirty_memory[DIRTY_MEMORY_CODE][page + k] |= temp;
+                    atomic_or(&d[DIRTY_MEMORY_CODE][page + k], temp);
                 }
             }
         }
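
Note on the helpers used above: the point of set_bit_atomic(), bitmap_set_atomic() and atomic_or() is that the dirty bit is ORed into the bitmap word with an atomic read-modify-write, so two threads dirtying pages whose bits share the same word cannot overwrite each other's update (which a plain set_bit()/bitmap_set() racing pair can do). A minimal sketch of such a word-wise atomic bit set, built on the GCC/Clang __atomic builtins and with hypothetical example_/EX_ names rather than QEMU's actual definitions, could look like this:

/* Illustrative sketch only, not QEMU's implementation: set one bit in a
 * bitmap with an atomic OR so that concurrent writers touching the same
 * word do not lose each other's bits. */
#include <limits.h>

#define EX_BITS_PER_LONG  (sizeof(unsigned long) * CHAR_BIT)
#define EX_BIT_WORD(nr)   ((nr) / EX_BITS_PER_LONG)
#define EX_BIT_MASK(nr)   (1UL << ((nr) % EX_BITS_PER_LONG))

static inline void example_set_bit_atomic(long nr, unsigned long *addr)
{
    unsigned long *word = addr + EX_BIT_WORD(nr);

    /* Atomic fetch-or (GCC/Clang builtin); relaxed ordering suffices here
     * because only losing the bit itself must be prevented. */
    __atomic_fetch_or(word, EX_BIT_MASK(nr), __ATOMIC_RELAXED);
}

A range-based setter in the spirit of bitmap_set_atomic() can be layered on the same idea by ORing full-word masks atomically across the requested range.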