migration/next for 20140204

-----BEGIN PGP SIGNATURE-----
 Version: GnuPG v1
 
 iQIcBAABCAAGBQJS8QygAAoJEPSH7xhYctcjk/sQAOhogC2u84L/bNO2gSnibK73
 jbqvQGRUSYyUB2O6jmCtO6lQL7KKwZ/hgNuA3bMzIwomdw4nYYgJxV9HqJa4mSku
 H5S5TSeggTAc/Y4U1fxME7E8dsN6k0RvgThFasW7chGZxcLs7DiDd0Qd3V8zL3Ev
 QB7zkDzm2vK2vT6zzW74K6MPCF2x7jDzAnAVJtPXnnJfRQ1o3cCrd1aDbi/I9qAs
 4WACLTsyKrsN7yqMIRsPRmoWyoSm65WqpNY9RnqnfphkJezSCoKEV+Rr0QVGkOUb
 ZVFt8N72vtrs2eGWf0KfnzL80yWE+m7pnZ4oTW3kfHbUeofaEb4Np61cGJA/FZBx
 yfWEpo8xY4ilHQzgwi7m8oLtrOsbVBzwtdFgr+qgIpOOrUW/CWrIVjT6/y/jZG/F
 SWupDg6TEUP7brMtyuNAU5If5RMhksnB46O7Sx9Y90Wwh8w4qudfjCmwNeuU7pzW
 yEAHfpFIWK919Biq3wshKBJIGGojbP/gNSTHThKdDCttsWMJY152G0n1KqFQ2GYH
 xOAC0lIRxl5pn8QKmP8YWTiTpXDNQzVCXIfqd+nRzr8ZD8kMJ9MjPr1z4ZcirnF/
 NN7ZPPj9Qd5OZcXxP+phc03oDkfSQNLuclK+tVI6fxNuBu+5VcQcmfPUz3pdvgQ3
 bXRD6/ov3xU/zGmiMkQL
 =vMR+
 -----END PGP SIGNATURE-----

Merge remote-tracking branch 'remotes/juanquintela/tags/migration/20140204-1' into staging

migration/next for 20140204

# gpg: Signature made Tue 04 Feb 2014 15:52:00 GMT using RSA key ID 5872D723
# gpg: Can't check signature: public key not found

* remotes/juanquintela/tags/migration/20140204-1:
  Don't abort on memory allocation error
  Don't abort on out of memory when creating page cache
  XBZRLE cache size should not be larger than guest memory size
  migration:fix free XBZRLE decoded_buf wrong
  Add check for cache size smaller than page size
  Set xbzrle buffers to NULL after freeing them to avoid double free errors
  exec: fix ram_list dirty map optimization
  vmstate: Make VMSTATE_STRUCT_POINTER take type, not ptr-to-type

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Peter Maydell 2014-02-08 13:12:50 +00:00
commit 3ea3bd6245
9 changed files with 92 additions and 37 deletions
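
The recurring pattern in the RAM-migration and page-cache hunks below is replacing g_malloc()/g_malloc0(), which abort the whole process on failure, with g_try_malloc()/g_try_malloc0() plus an explicit NULL check, so running out of memory fails migration setup instead of killing the guest. A minimal standalone sketch of that pattern, not the series' exact code (the buffer names only mirror the XBZRLE fields touched below):

#include <glib.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096

static uint8_t *encoded_buf;
static uint8_t *current_buf;

static int xbzrle_buffers_init(void)
{
    /* g_try_malloc0() returns NULL on failure instead of aborting */
    encoded_buf = g_try_malloc0(PAGE_SIZE);
    if (!encoded_buf) {
        fprintf(stderr, "Error allocating encoded_buf\n");
        return -1;
    }
    current_buf = g_try_malloc(PAGE_SIZE);
    if (!current_buf) {
        fprintf(stderr, "Error allocating current_buf\n");
        g_free(encoded_buf);            /* roll back the first allocation */
        encoded_buf = NULL;
        return -1;
    }
    return 0;
}

int main(void)
{
    return xbzrle_buffers_init() == 0 ? 0 : 1;
}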

View file

@@ -164,20 +164,22 @@ static struct {
uint8_t *encoded_buf;
/* buffer for storing page content */
uint8_t *current_buf;
/* buffer used for XBZRLE decoding */
uint8_t *decoded_buf;
/* Cache for XBZRLE */
PageCache *cache;
} XBZRLE = {
.encoded_buf = NULL,
.current_buf = NULL,
.decoded_buf = NULL,
.cache = NULL,
};
/* buffer used for XBZRLE decoding */
static uint8_t *xbzrle_decoded_buf;
int64_t xbzrle_cache_resize(int64_t new_size)
{
if (new_size < TARGET_PAGE_SIZE) {
return -1;
}
if (XBZRLE.cache != NULL) {
return cache_resize(XBZRLE.cache, new_size / TARGET_PAGE_SIZE) *
TARGET_PAGE_SIZE;
@@ -282,7 +284,9 @@ static int save_xbzrle_page(QEMUFile *f, uint8_t *current_data,
if (!cache_is_cached(XBZRLE.cache, current_addr)) {
if (!last_stage) {
cache_insert(XBZRLE.cache, current_addr, current_data);
if (cache_insert(XBZRLE.cache, current_addr, current_data) == -1) {
return -1;
}
}
acct_info.xbzrle_cache_miss++;
return -1;
@@ -602,6 +606,12 @@ uint64_t ram_bytes_total(void)
return total;
}
void free_xbzrle_decoded_buf(void)
{
g_free(xbzrle_decoded_buf);
xbzrle_decoded_buf = NULL;
}
static void migration_end(void)
{
if (migration_bitmap) {
@@ -615,8 +625,9 @@ static void migration_end(void)
g_free(XBZRLE.cache);
g_free(XBZRLE.encoded_buf);
g_free(XBZRLE.current_buf);
g_free(XBZRLE.decoded_buf);
XBZRLE.cache = NULL;
XBZRLE.encoded_buf = NULL;
XBZRLE.current_buf = NULL;
}
}
@@ -655,8 +666,22 @@ static int ram_save_setup(QEMUFile *f, void *opaque)
DPRINTF("Error creating cache\n");
return -1;
}
XBZRLE.encoded_buf = g_malloc0(TARGET_PAGE_SIZE);
XBZRLE.current_buf = g_malloc(TARGET_PAGE_SIZE);
/* We prefer not to abort if there is no memory */
XBZRLE.encoded_buf = g_try_malloc0(TARGET_PAGE_SIZE);
if (!XBZRLE.encoded_buf) {
DPRINTF("Error allocating encoded_buf\n");
return -1;
}
XBZRLE.current_buf = g_try_malloc(TARGET_PAGE_SIZE);
if (!XBZRLE.current_buf) {
DPRINTF("Error allocating current_buf\n");
g_free(XBZRLE.encoded_buf);
XBZRLE.encoded_buf = NULL;
return -1;
}
acct_clear();
}
@@ -807,8 +832,8 @@ static int load_xbzrle(QEMUFile *f, ram_addr_t addr, void *host)
unsigned int xh_len;
int xh_flags;
if (!XBZRLE.decoded_buf) {
XBZRLE.decoded_buf = g_malloc(TARGET_PAGE_SIZE);
if (!xbzrle_decoded_buf) {
xbzrle_decoded_buf = g_malloc(TARGET_PAGE_SIZE);
}
/* extract RLE header */
@@ -825,10 +850,10 @@ static int load_xbzrle(QEMUFile *f, ram_addr_t addr, void *host)
return -1;
}
/* load data and decode */
qemu_get_buffer(f, XBZRLE.decoded_buf, xh_len);
qemu_get_buffer(f, xbzrle_decoded_buf, xh_len);
/* decode RLE */
ret = xbzrle_decode_buffer(XBZRLE.decoded_buf, xh_len, host,
ret = xbzrle_decode_buffer(xbzrle_decoded_buf, xh_len, host,
TARGET_PAGE_SIZE);
if (ret == -1) {
fprintf(stderr, "Failed to load XBZRLE page - decode error!\n");

View file

@@ -1448,7 +1448,7 @@ static const VMStateDescription vmstate_pxa2xx_i2c = {
VMSTATE_UINT8(ibmr, PXA2xxI2CState),
VMSTATE_UINT8(data, PXA2xxI2CState),
VMSTATE_STRUCT_POINTER(slave, PXA2xxI2CState,
vmstate_pxa2xx_i2c_slave, PXA2xxI2CSlaveState *),
vmstate_pxa2xx_i2c_slave, PXA2xxI2CSlaveState),
VMSTATE_END_OF_LIST()
}
};

View file

@@ -93,7 +93,8 @@ static inline void cpu_physical_memory_set_dirty_lebitmap(unsigned long *bitmap,
unsigned long page = BIT_WORD(start >> TARGET_PAGE_BITS);
/* start address is aligned at the start of a word? */
if (((page * BITS_PER_LONG) << TARGET_PAGE_BITS) == start) {
if ((((page * BITS_PER_LONG) << TARGET_PAGE_BITS) == start) &&
(hpratio == 1)) {
long k;
long nr = BITS_TO_LONGS(pages);

View file

@@ -27,14 +27,8 @@ void ptimer_stop(ptimer_state *s);
extern const VMStateDescription vmstate_ptimer;
#define VMSTATE_PTIMER(_field, _state) { \
.name = (stringify(_field)), \
.version_id = (1), \
.vmsd = &vmstate_ptimer, \
.size = sizeof(ptimer_state *), \
.flags = VMS_STRUCT|VMS_POINTER, \
.offset = vmstate_offset_pointer(_state, _field, ptimer_state), \
}
#define VMSTATE_PTIMER(_field, _state) \
VMSTATE_STRUCT_POINTER_V(_field, _state, 1, vmstate_ptimer, ptimer_state)
#define VMSTATE_PTIMER_ARRAY(_f, _s, _n) \
VMSTATE_ARRAY_OF_POINTER_TO_STRUCT(_f, _s, _n, 0, \

View file

@@ -109,6 +109,7 @@ MigrationState *migrate_get_current(void);
uint64_t ram_bytes_remaining(void);
uint64_t ram_bytes_transferred(void);
uint64_t ram_bytes_total(void);
void free_xbzrle_decoded_buf(void);
void acct_update_position(QEMUFile *f, size_t size, bool zero);

View file

@@ -60,11 +60,13 @@ uint8_t *get_cached_data(const PageCache *cache, uint64_t addr);
* cache_insert: insert the page into the cache. the page cache
* will dup the data on insert. the previous value will be overwritten
*
* Returns -1 on error
*
* @cache pointer to the PageCache struct
* @addr: page address
* @pdata: pointer to the page
*/
void cache_insert(PageCache *cache, uint64_t addr, uint8_t *pdata);
int cache_insert(PageCache *cache, uint64_t addr, uint8_t *pdata);
/**
* cache_resize: resize the page cache. In case of size reduction the extra

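Callers must now check cache_insert() for failure; the save_xbzrle_page() hunk earlier in this commit does exactly that. A small illustrative caller, assuming the QEMU-internal "migration/page_cache.h" header shown here (the save_page_to_cache() wrapper is hypothetical):

#include <stdbool.h>
#include <stdint.h>
#include "migration/page_cache.h"   /* QEMU-internal header */

/* Returns 0 when the page ends up cached, -1 when the cache could not
 * allocate a backing page; the caller would then skip XBZRLE for this
 * page and send it uncompressed. */
static int save_page_to_cache(PageCache *cache, uint64_t addr, uint8_t *page)
{
    if (!cache_is_cached(cache, addr)) {
        /* cache_insert() now reports allocation failure instead of aborting */
        if (cache_insert(cache, addr, page) == -1) {
            return -1;
        }
    }
    return 0;
}
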
View file

@@ -314,9 +314,9 @@ extern const VMStateInfo vmstate_info_bitmap;
.name = (stringify(_field)), \
.version_id = (_version), \
.vmsd = &(_vmsd), \
.size = sizeof(_type), \
.size = sizeof(_type *), \
.flags = VMS_STRUCT|VMS_POINTER, \
.offset = vmstate_offset_value(_state, _field, _type), \
.offset = vmstate_offset_pointer(_state, _field, _type), \
}
#define VMSTATE_STRUCT_POINTER_TEST_V(_field, _state, _test, _version, _vmsd, _type) { \
@@ -324,9 +324,9 @@ extern const VMStateInfo vmstate_info_bitmap;
.version_id = (_version), \
.field_exists = (_test), \
.vmsd = &(_vmsd), \
.size = sizeof(_type), \
.size = sizeof(_type *), \
.flags = VMS_STRUCT|VMS_POINTER, \
.offset = vmstate_offset_value(_state, _field, _type), \
.offset = vmstate_offset_pointer(_state, _field, _type), \
}
#define VMSTATE_ARRAY_OF_POINTER(_field, _state, _num, _version, _info, _type) {\

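After this change, VMSTATE_STRUCT_POINTER and its variants take the pointed-to struct type and derive the pointer size and field offset themselves, which is why the pxa2xx_i2c caller earlier in this commit drops the "PXA2xxI2CSlaveState *" argument. A minimal sketch of the new call shape for a hypothetical device (ChildState and DummyState are illustrative names; the snippet assumes QEMU's vmstate machinery and only compiles inside the QEMU tree):

#include <stdint.h>
#include "migration/vmstate.h"      /* QEMU-internal header */

typedef struct ChildState {
    uint32_t reg;
} ChildState;

typedef struct DummyState {
    ChildState *child;              /* migrated through a pointer */
} DummyState;

static const VMStateDescription vmstate_child = {
    .name = "child",
    .version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(reg, ChildState),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_dummy = {
    .name = "dummy",
    .version_id = 1,
    .fields = (VMStateField[]) {
        /* before this series this read:
         * VMSTATE_STRUCT_POINTER(child, DummyState, vmstate_child, ChildState *) */
        VMSTATE_STRUCT_POINTER(child, DummyState, vmstate_child, ChildState),
        VMSTATE_END_OF_LIST()
    }
};
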
View file

@@ -105,6 +105,7 @@ static void process_incoming_migration_co(void *opaque)
ret = qemu_loadvm_state(f);
qemu_fclose(f);
free_xbzrle_decoded_buf();
if (ret < 0) {
fprintf(stderr, "load of migration failed\n");
exit(EXIT_FAILURE);
@@ -469,6 +470,7 @@ void qmp_migrate_cancel(Error **errp)
void qmp_migrate_set_cache_size(int64_t value, Error **errp)
{
MigrationState *s = migrate_get_current();
int64_t new_size;
/* Check for truncation */
if (value != (size_t)value) {
@@ -477,7 +479,21 @@ void qmp_migrate_set_cache_size(int64_t value, Error **errp)
return;
}
s->xbzrle_cache_size = xbzrle_cache_resize(value);
/* Cache should not be larger than guest ram size */
if (value > ram_bytes_total()) {
error_set(errp, QERR_INVALID_PARAMETER_VALUE, "cache size",
"exceeds guest ram size ");
return;
}
new_size = xbzrle_cache_resize(value);
if (new_size < 0) {
error_set(errp, QERR_INVALID_PARAMETER_VALUE, "cache size",
"is smaller than page size");
return;
}
s->xbzrle_cache_size = new_size;
}
int64_t qmp_query_migrate_cache_size(Error **errp)

View file

@@ -60,8 +60,12 @@ PageCache *cache_init(int64_t num_pages, unsigned int page_size)
return NULL;
}
cache = g_malloc(sizeof(*cache));
/* We prefer not to abort if there is no memory */
cache = g_try_malloc(sizeof(*cache));
if (!cache) {
DPRINTF("Failed to allocate cache\n");
return NULL;
}
/* round down to the nearest power of 2 */
if (!is_power_of_2(num_pages)) {
num_pages = pow2floor(num_pages);
@@ -74,8 +78,14 @@ PageCache *cache_init(int64_t num_pages, unsigned int page_size)
DPRINTF("Setting cache buckets to %" PRId64 "\n", cache->max_num_items);
cache->page_cache = g_malloc((cache->max_num_items) *
sizeof(*cache->page_cache));
/* We prefer not to abort if there is no memory */
cache->page_cache = g_try_malloc((cache->max_num_items) *
sizeof(*cache->page_cache));
if (!cache->page_cache) {
DPRINTF("Failed to allocate cache->page_cache\n");
g_free(cache);
return NULL;
}
for (i = 0; i < cache->max_num_items; i++) {
cache->page_cache[i].it_data = NULL;
@@ -140,7 +150,7 @@ uint8_t *get_cached_data(const PageCache *cache, uint64_t addr)
return cache_get_by_addr(cache, addr)->it_data;
}
void cache_insert(PageCache *cache, uint64_t addr, uint8_t *pdata)
int cache_insert(PageCache *cache, uint64_t addr, uint8_t *pdata)
{
CacheItem *it = NULL;
@@ -151,16 +161,22 @@ void cache_insert(PageCache *cache, uint64_t addr, uint8_t *pdata)
/* actual update of entry */
it = cache_get_by_addr(cache, addr);
/* free old cached data if any */
g_free(it->it_data);
/* allocate page */
if (!it->it_data) {
it->it_data = g_try_malloc(cache->page_size);
if (!it->it_data) {
DPRINTF("Error allocating page\n");
return -1;
}
cache->num_items++;
}
it->it_data = g_memdup(pdata, cache->page_size);
memcpy(it->it_data, pdata, cache->page_size);
it->it_age = ++cache->max_item_age;
it->it_addr = addr;
return 0;
}
int64_t cache_resize(PageCache *cache, int64_t new_num_pages)