diff --git a/block/backup.c b/block/backup.c
index 99e6bcc748..4a16a37229 100644
--- a/block/backup.c
+++ b/block/backup.c
@@ -40,11 +40,12 @@ typedef struct BackupBlockJob {
     BlockdevOnError on_target_error;
     CoRwlock flush_rwlock;
     uint64_t bytes_read;
-    unsigned long *done_bitmap;
     int64_t cluster_size;
     bool compress;
     NotifierWithReturn before_write;
     QLIST_HEAD(, CowRequest) inflight_reqs;
+
+    HBitmap *copy_bitmap;
 } BackupBlockJob;
 
 /* See if in-flight requests overlap and wait for them to complete */
@@ -109,10 +110,11 @@ static int coroutine_fn backup_do_cow(BackupBlockJob *job,
     cow_request_begin(&cow_request, job, start, end);
 
     for (; start < end; start += job->cluster_size) {
-        if (test_bit(start / job->cluster_size, job->done_bitmap)) {
+        if (!hbitmap_get(job->copy_bitmap, start / job->cluster_size)) {
             trace_backup_do_cow_skip(job, start);
             continue; /* already copied */
         }
+        hbitmap_reset(job->copy_bitmap, start / job->cluster_size, 1);
 
         trace_backup_do_cow_process(job, start);
 
@@ -132,6 +134,7 @@ static int coroutine_fn backup_do_cow(BackupBlockJob *job,
             if (error_is_read) {
                 *error_is_read = true;
             }
+            hbitmap_set(job->copy_bitmap, start / job->cluster_size, 1);
             goto out;
         }
 
@@ -148,11 +151,10 @@ static int coroutine_fn backup_do_cow(BackupBlockJob *job,
             if (error_is_read) {
                 *error_is_read = false;
             }
+            hbitmap_set(job->copy_bitmap, start / job->cluster_size, 1);
             goto out;
         }
 
-        set_bit(start / job->cluster_size, job->done_bitmap);
-
         /* Publish progress, guest I/O counts as progress too. Note that the
          * offset field is an opaque progress value, it is not a disk offset.
          */
@@ -260,7 +262,7 @@ void backup_do_checkpoint(BlockJob *job, Error **errp)
     }
 
     len = DIV_ROUND_UP(backup_job->common.len, backup_job->cluster_size);
-    bitmap_zero(backup_job->done_bitmap, len);
+    hbitmap_set(backup_job->copy_bitmap, 0, len);
 }
 
 void backup_wait_for_overlapping_requests(BlockJob *job, int64_t offset,
@@ -360,64 +362,68 @@ static bool coroutine_fn yield_and_check(BackupBlockJob *job)
 
 static int coroutine_fn backup_run_incremental(BackupBlockJob *job)
 {
+    int ret;
     bool error_is_read;
-    int ret = 0;
-    int clusters_per_iter;
-    uint32_t granularity;
-    int64_t offset;
     int64_t cluster;
-    int64_t end;
-    int64_t last_cluster = -1;
+    HBitmapIter hbi;
+
+    hbitmap_iter_init(&hbi, job->copy_bitmap, 0);
+    while ((cluster = hbitmap_iter_next(&hbi)) != -1) {
+        do {
+            if (yield_and_check(job)) {
+                return 0;
+            }
+            ret = backup_do_cow(job, cluster * job->cluster_size,
+                                job->cluster_size, &error_is_read, false);
+            if (ret < 0 && backup_error_action(job, error_is_read, -ret) ==
+                           BLOCK_ERROR_ACTION_REPORT)
+            {
+                return ret;
+            }
+        } while (ret < 0);
+    }
+
+    return 0;
+}
+
+/* init copy_bitmap from sync_bitmap */
+static void backup_incremental_init_copy_bitmap(BackupBlockJob *job)
+{
     BdrvDirtyBitmapIter *dbi;
+    int64_t offset;
+    int64_t end = DIV_ROUND_UP(bdrv_dirty_bitmap_size(job->sync_bitmap),
+                               job->cluster_size);
 
-    granularity = bdrv_dirty_bitmap_granularity(job->sync_bitmap);
-    clusters_per_iter = MAX((granularity / job->cluster_size), 1);
     dbi = bdrv_dirty_iter_new(job->sync_bitmap);
+    while ((offset = bdrv_dirty_iter_next(dbi)) != -1) {
+        int64_t cluster = offset / job->cluster_size;
+        int64_t next_cluster;
 
-    /* Find the next dirty sector(s) */
-    while ((offset = bdrv_dirty_iter_next(dbi)) >= 0) {
-        cluster = offset / job->cluster_size;
-
-        /* Fake progress updates for any clusters we skipped */
-        if (cluster != last_cluster + 1) {
-            job->common.offset += ((cluster - last_cluster - 1) *
-                                   job->cluster_size);
+        offset += bdrv_dirty_bitmap_granularity(job->sync_bitmap);
+        if (offset >= bdrv_dirty_bitmap_size(job->sync_bitmap)) {
+            hbitmap_set(job->copy_bitmap, cluster, end - cluster);
+            break;
         }
 
-        for (end = cluster + clusters_per_iter; cluster < end; cluster++) {
-            do {
-                if (yield_and_check(job)) {
-                    goto out;
-                }
-                ret = backup_do_cow(job, cluster * job->cluster_size,
-                                    job->cluster_size, &error_is_read,
-                                    false);
-                if ((ret < 0) &&
-                    backup_error_action(job, error_is_read, -ret) ==
-                    BLOCK_ERROR_ACTION_REPORT) {
-                    goto out;
-                }
-            } while (ret < 0);
+        offset = bdrv_dirty_bitmap_next_zero(job->sync_bitmap, offset);
+        if (offset == -1) {
+            hbitmap_set(job->copy_bitmap, cluster, end - cluster);
+            break;
         }
 
-        /* If the bitmap granularity is smaller than the backup granularity,
-         * we need to advance the iterator pointer to the next cluster. */
-        if (granularity < job->cluster_size) {
-            bdrv_set_dirty_iter(dbi, cluster * job->cluster_size);
+        next_cluster = DIV_ROUND_UP(offset, job->cluster_size);
+        hbitmap_set(job->copy_bitmap, cluster, next_cluster - cluster);
+        if (next_cluster >= end) {
+            break;
         }
 
-        last_cluster = cluster - 1;
+        bdrv_set_dirty_iter(dbi, next_cluster * job->cluster_size);
     }
 
-    /* Play some final catchup with the progress meter */
-    end = DIV_ROUND_UP(job->common.len, job->cluster_size);
-    if (last_cluster + 1 < end) {
-        job->common.offset += ((end - last_cluster - 1) * job->cluster_size);
-    }
+    job->common.offset = job->common.len -
+                         hbitmap_count(job->copy_bitmap) * job->cluster_size;
 
-out:
     bdrv_dirty_iter_free(dbi);
-
-    return ret;
 }
 
 static void coroutine_fn backup_run(void *opaque)
@@ -425,19 +431,27 @@
     BackupBlockJob *job = opaque;
     BackupCompleteData *data;
     BlockDriverState *bs = blk_bs(job->common.blk);
-    int64_t offset;
+    int64_t offset, nb_clusters;
     int ret = 0;
 
     QLIST_INIT(&job->inflight_reqs);
     qemu_co_rwlock_init(&job->flush_rwlock);
 
-    job->done_bitmap = bitmap_new(DIV_ROUND_UP(job->common.len,
-                                               job->cluster_size));
+    nb_clusters = DIV_ROUND_UP(job->common.len, job->cluster_size);
+    job->copy_bitmap = hbitmap_alloc(nb_clusters, 0);
+    if (job->sync_mode == MIRROR_SYNC_MODE_INCREMENTAL) {
+        backup_incremental_init_copy_bitmap(job);
+    } else {
+        hbitmap_set(job->copy_bitmap, 0, nb_clusters);
+    }
+
 
     job->before_write.notify = backup_before_write_notify;
     bdrv_add_before_write_notifier(bs, &job->before_write);
 
     if (job->sync_mode == MIRROR_SYNC_MODE_NONE) {
+        /* All bits are set in copy_bitmap to allow any cluster to be copied.
+         * This does not actually require them to be copied. */
         while (!block_job_is_cancelled(&job->common)) {
             /* Yield until the job is cancelled. We just let our before_write
              * notify callback service CoW requests. */
@@ -512,7 +526,7 @@
     /* wait until pending backup_do_cow() calls have completed */
     qemu_co_rwlock_wrlock(&job->flush_rwlock);
     qemu_co_rwlock_unlock(&job->flush_rwlock);
-    g_free(job->done_bitmap);
+    hbitmap_free(job->copy_bitmap);
 
     data = g_malloc(sizeof(*data));
     data->ret = ret;
diff --git a/block/curl.c b/block/curl.c
index 2a244e2439..35cf417f59 100644
--- a/block/curl.c
+++ b/block/curl.c
@@ -89,6 +89,8 @@ static CURLMcode __curl_multi_socket_action(CURLM *multi_handle,
 
 struct BDRVCURLState;
 
+static bool libcurl_initialized;
+
 typedef struct CURLAIOCB {
     Coroutine *co;
     QEMUIOVector *qiov;
@@ -686,14 +688,23 @@ static int curl_open(BlockDriverState *bs, QDict *options, int flags,
     double d;
     const char *secretid;
     const char *protocol_delimiter;
+    int ret;
 
-    static int inited = 0;
 
     if (flags & BDRV_O_RDWR) {
         error_setg(errp, "curl block device does not support writes");
         return -EROFS;
     }
 
+    if (!libcurl_initialized) {
+        ret = curl_global_init(CURL_GLOBAL_ALL);
+        if (ret) {
+            error_setg(errp, "libcurl initialization failed with %d", ret);
+            return -EIO;
+        }
+        libcurl_initialized = true;
+    }
+
     qemu_mutex_init(&s->mutex);
     opts = qemu_opts_create(&runtime_opts, NULL, 0, &error_abort);
     qemu_opts_absorb_qdict(opts, options, &local_err);
@@ -772,11 +783,6 @@ static int curl_open(BlockDriverState *bs, QDict *options, int flags,
         }
     }
 
-    if (!inited) {
-        curl_global_init(CURL_GLOBAL_ALL);
-        inited = 1;
-    }
-
     DPRINTF("CURL: Opening %s\n", file);
     QSIMPLEQ_INIT(&s->free_state_waitq);
     s->aio_context = bdrv_get_aio_context(bs);
@@ -851,6 +857,9 @@ out_noclean:
     qemu_mutex_destroy(&s->mutex);
     g_free(s->cookie);
     g_free(s->url);
+    g_free(s->username);
+    g_free(s->proxyusername);
+    g_free(s->proxypassword);
     qemu_opts_del(opts);
     return -EINVAL;
 }
@@ -949,6 +958,9 @@ static void curl_close(BlockDriverState *bs)
 
     g_free(s->cookie);
     g_free(s->url);
+    g_free(s->username);
+    g_free(s->proxyusername);
+    g_free(s->proxypassword);
 }
 
 static int64_t curl_getlength(BlockDriverState *bs)
diff --git a/block/dirty-bitmap.c b/block/dirty-bitmap.c
index bd04e991b1..7879d13ddb 100644
--- a/block/dirty-bitmap.c
+++ b/block/dirty-bitmap.c
@@ -715,3 +715,8 @@ char *bdrv_dirty_bitmap_sha256(const BdrvDirtyBitmap *bitmap, Error **errp)
 {
     return hbitmap_sha256(bitmap->bitmap, errp);
 }
+
+int64_t bdrv_dirty_bitmap_next_zero(BdrvDirtyBitmap *bitmap, uint64_t offset)
+{
+    return hbitmap_next_zero(bitmap->bitmap, offset);
+}
diff --git a/block/sheepdog.c b/block/sheepdog.c
index 696a71442a..488bad333b 100644
--- a/block/sheepdog.c
+++ b/block/sheepdog.c
@@ -400,7 +400,7 @@ typedef struct BDRVSheepdogReopenState {
     int cache_flags;
 } BDRVSheepdogReopenState;
 
-static const char * sd_strerror(int err)
+static const char *sd_strerror(int err)
 {
     int i;
 
@@ -1632,7 +1632,7 @@ static int sd_open(BlockDriverState *bs, QDict *options, int flags,
     if (!tag) {
         tag = "";
     }
-    if (tag && strlen(tag) >= SD_MAX_VDI_TAG_LEN) {
+    if (strlen(tag) >= SD_MAX_VDI_TAG_LEN) {
         error_setg(errp, "value of parameter 'tag' is too long");
         ret = -EINVAL;
         goto err_no_fd;
@@ -3078,111 +3078,111 @@ static QemuOptsList sd_create_opts = {
 };
 
 static BlockDriver bdrv_sheepdog = {
-    .format_name    = "sheepdog",
-    .protocol_name  = "sheepdog",
-    .instance_size  = sizeof(BDRVSheepdogState),
-    .bdrv_parse_filename    = sd_parse_filename,
-    .bdrv_file_open = sd_open,
-    .bdrv_reopen_prepare    = sd_reopen_prepare,
-    .bdrv_reopen_commit     = sd_reopen_commit,
-    .bdrv_reopen_abort      = sd_reopen_abort,
-    .bdrv_close     = sd_close,
-    .bdrv_create    = sd_create,
-    .bdrv_has_zero_init = bdrv_has_zero_init_1,
-    .bdrv_getlength = sd_getlength,
+    .format_name                  = "sheepdog",
+    .protocol_name                = "sheepdog",
+    .instance_size                = sizeof(BDRVSheepdogState),
+    .bdrv_parse_filename          = sd_parse_filename,
+    .bdrv_file_open               = sd_open,
+    .bdrv_reopen_prepare          = sd_reopen_prepare,
+    .bdrv_reopen_commit           = sd_reopen_commit,
+    .bdrv_reopen_abort            = sd_reopen_abort,
+    .bdrv_close                   = sd_close,
+    .bdrv_create                  = sd_create,
+    .bdrv_has_zero_init           = bdrv_has_zero_init_1,
+    .bdrv_getlength               = sd_getlength,
     .bdrv_get_allocated_file_size = sd_get_allocated_file_size,
-    .bdrv_truncate  = sd_truncate,
+    .bdrv_truncate                = sd_truncate,
 
-    .bdrv_co_readv  = sd_co_readv,
-    .bdrv_co_writev = sd_co_writev,
-    .bdrv_co_flush_to_disk  = sd_co_flush_to_disk,
-    .bdrv_co_pdiscard = sd_co_pdiscard,
-    .bdrv_co_get_block_status = sd_co_get_block_status,
+    .bdrv_co_readv                = sd_co_readv,
+    .bdrv_co_writev               = sd_co_writev,
+    .bdrv_co_flush_to_disk        = sd_co_flush_to_disk,
+    .bdrv_co_pdiscard             = sd_co_pdiscard,
+    .bdrv_co_get_block_status     = sd_co_get_block_status,
 
-    .bdrv_snapshot_create   = sd_snapshot_create,
-    .bdrv_snapshot_goto     = sd_snapshot_goto,
-    .bdrv_snapshot_delete   = sd_snapshot_delete,
-    .bdrv_snapshot_list     = sd_snapshot_list,
+    .bdrv_snapshot_create         = sd_snapshot_create,
+    .bdrv_snapshot_goto           = sd_snapshot_goto,
+    .bdrv_snapshot_delete         = sd_snapshot_delete,
+    .bdrv_snapshot_list           = sd_snapshot_list,
 
-    .bdrv_save_vmstate  = sd_save_vmstate,
-    .bdrv_load_vmstate  = sd_load_vmstate,
+    .bdrv_save_vmstate            = sd_save_vmstate,
+    .bdrv_load_vmstate            = sd_load_vmstate,
 
-    .bdrv_detach_aio_context = sd_detach_aio_context,
-    .bdrv_attach_aio_context = sd_attach_aio_context,
+    .bdrv_detach_aio_context      = sd_detach_aio_context,
+    .bdrv_attach_aio_context      = sd_attach_aio_context,
 
-    .create_opts    = &sd_create_opts,
+    .create_opts                  = &sd_create_opts,
 };
 
 static BlockDriver bdrv_sheepdog_tcp = {
-    .format_name    = "sheepdog",
-    .protocol_name  = "sheepdog+tcp",
-    .instance_size  = sizeof(BDRVSheepdogState),
-    .bdrv_parse_filename    = sd_parse_filename,
-    .bdrv_file_open = sd_open,
-    .bdrv_reopen_prepare    = sd_reopen_prepare,
-    .bdrv_reopen_commit     = sd_reopen_commit,
-    .bdrv_reopen_abort      = sd_reopen_abort,
-    .bdrv_close     = sd_close,
-    .bdrv_create    = sd_create,
-    .bdrv_has_zero_init = bdrv_has_zero_init_1,
-    .bdrv_getlength = sd_getlength,
+    .format_name                  = "sheepdog",
+    .protocol_name                = "sheepdog+tcp",
+    .instance_size                = sizeof(BDRVSheepdogState),
+    .bdrv_parse_filename          = sd_parse_filename,
+    .bdrv_file_open               = sd_open,
+    .bdrv_reopen_prepare          = sd_reopen_prepare,
+    .bdrv_reopen_commit           = sd_reopen_commit,
+    .bdrv_reopen_abort            = sd_reopen_abort,
+    .bdrv_close                   = sd_close,
+    .bdrv_create                  = sd_create,
+    .bdrv_has_zero_init           = bdrv_has_zero_init_1,
+    .bdrv_getlength               = sd_getlength,
     .bdrv_get_allocated_file_size = sd_get_allocated_file_size,
-    .bdrv_truncate  = sd_truncate,
+    .bdrv_truncate                = sd_truncate,
 
-    .bdrv_co_readv  = sd_co_readv,
-    .bdrv_co_writev = sd_co_writev,
-    .bdrv_co_flush_to_disk  = sd_co_flush_to_disk,
-    .bdrv_co_pdiscard = sd_co_pdiscard,
-    .bdrv_co_get_block_status = sd_co_get_block_status,
+    .bdrv_co_readv                = sd_co_readv,
+    .bdrv_co_writev               = sd_co_writev,
+    .bdrv_co_flush_to_disk        = sd_co_flush_to_disk,
+    .bdrv_co_pdiscard             = sd_co_pdiscard,
+    .bdrv_co_get_block_status     = sd_co_get_block_status,
 
-    .bdrv_snapshot_create   = sd_snapshot_create,
-    .bdrv_snapshot_goto     = sd_snapshot_goto,
-    .bdrv_snapshot_delete   = sd_snapshot_delete,
-    .bdrv_snapshot_list     = sd_snapshot_list,
+    .bdrv_snapshot_create         = sd_snapshot_create,
+    .bdrv_snapshot_goto           = sd_snapshot_goto,
+    .bdrv_snapshot_delete         = sd_snapshot_delete,
+    .bdrv_snapshot_list           = sd_snapshot_list,
 
-    .bdrv_save_vmstate  = sd_save_vmstate,
-    .bdrv_load_vmstate  = sd_load_vmstate,
+    .bdrv_save_vmstate            = sd_save_vmstate,
+    .bdrv_load_vmstate            = sd_load_vmstate,
 
-    .bdrv_detach_aio_context = sd_detach_aio_context,
-    .bdrv_attach_aio_context = sd_attach_aio_context,
+    .bdrv_detach_aio_context      = sd_detach_aio_context,
+    .bdrv_attach_aio_context      = sd_attach_aio_context,
 
-    .create_opts    = &sd_create_opts,
+    .create_opts                  = &sd_create_opts,
 };
 
 static BlockDriver bdrv_sheepdog_unix = {
-    .format_name    = "sheepdog",
-    .protocol_name  = "sheepdog+unix",
-    .instance_size  = sizeof(BDRVSheepdogState),
-    .bdrv_parse_filename    = sd_parse_filename,
-    .bdrv_file_open = sd_open,
-    .bdrv_reopen_prepare    = sd_reopen_prepare,
-    .bdrv_reopen_commit     = sd_reopen_commit,
-    .bdrv_reopen_abort      = sd_reopen_abort,
-    .bdrv_close     = sd_close,
-    .bdrv_create    = sd_create,
-    .bdrv_has_zero_init = bdrv_has_zero_init_1,
-    .bdrv_getlength = sd_getlength,
+    .format_name                  = "sheepdog",
+    .protocol_name                = "sheepdog+unix",
+    .instance_size                = sizeof(BDRVSheepdogState),
+    .bdrv_parse_filename          = sd_parse_filename,
+    .bdrv_file_open               = sd_open,
+    .bdrv_reopen_prepare          = sd_reopen_prepare,
+    .bdrv_reopen_commit           = sd_reopen_commit,
+    .bdrv_reopen_abort            = sd_reopen_abort,
+    .bdrv_close                   = sd_close,
+    .bdrv_create                  = sd_create,
+    .bdrv_has_zero_init           = bdrv_has_zero_init_1,
+    .bdrv_getlength               = sd_getlength,
     .bdrv_get_allocated_file_size = sd_get_allocated_file_size,
-    .bdrv_truncate  = sd_truncate,
+    .bdrv_truncate                = sd_truncate,
 
-    .bdrv_co_readv  = sd_co_readv,
-    .bdrv_co_writev = sd_co_writev,
-    .bdrv_co_flush_to_disk  = sd_co_flush_to_disk,
-    .bdrv_co_pdiscard = sd_co_pdiscard,
-    .bdrv_co_get_block_status = sd_co_get_block_status,
+    .bdrv_co_readv                = sd_co_readv,
+    .bdrv_co_writev               = sd_co_writev,
+    .bdrv_co_flush_to_disk        = sd_co_flush_to_disk,
+    .bdrv_co_pdiscard             = sd_co_pdiscard,
+    .bdrv_co_get_block_status     = sd_co_get_block_status,
 
-    .bdrv_snapshot_create   = sd_snapshot_create,
-    .bdrv_snapshot_goto     = sd_snapshot_goto,
-    .bdrv_snapshot_delete   = sd_snapshot_delete,
-    .bdrv_snapshot_list     = sd_snapshot_list,
+    .bdrv_snapshot_create         = sd_snapshot_create,
+    .bdrv_snapshot_goto           = sd_snapshot_goto,
+    .bdrv_snapshot_delete         = sd_snapshot_delete,
+    .bdrv_snapshot_list           = sd_snapshot_list,
 
-    .bdrv_save_vmstate  = sd_save_vmstate,
-    .bdrv_load_vmstate  = sd_load_vmstate,
+    .bdrv_save_vmstate            = sd_save_vmstate,
+    .bdrv_load_vmstate            = sd_load_vmstate,
 
-    .bdrv_detach_aio_context = sd_detach_aio_context,
-    .bdrv_attach_aio_context = sd_attach_aio_context,
+    .bdrv_detach_aio_context      = sd_detach_aio_context,
+    .bdrv_attach_aio_context      = sd_attach_aio_context,
 
-    .create_opts    = &sd_create_opts,
+    .create_opts                  = &sd_create_opts,
 };
 
 static void bdrv_sheepdog_init(void)
diff --git a/blockjob.c b/blockjob.c
index 715c2c2680..6173e4728c 100644
--- a/blockjob.c
+++ b/blockjob.c
@@ -59,6 +59,7 @@ static void __attribute__((__constructor__)) block_job_init(void)
 
 static void block_job_event_cancelled(BlockJob *job);
 static void block_job_event_completed(BlockJob *job, const char *msg);
+static void block_job_enter_cond(BlockJob *job, bool(*fn)(BlockJob *job));
 
 /* Transactional group of block jobs */
 struct BlockJobTxn {
@@ -480,9 +481,16 @@ static void block_job_completed_txn_success(BlockJob *job)
     }
 }
 
+/* Assumes the block_job_mutex is held */
+static bool block_job_timer_pending(BlockJob *job)
+{
+    return timer_pending(&job->sleep_timer);
+}
+
 void block_job_set_speed(BlockJob *job, int64_t speed, Error **errp)
 {
     Error *local_err = NULL;
+    int64_t old_speed = job->speed;
 
     if (!job->driver->set_speed) {
         error_setg(errp, QERR_UNSUPPORTED);
@@ -495,6 +503,12 @@ void block_job_set_speed(BlockJob *job, int64_t speed, Error **errp)
     }
 
     job->speed = speed;
+    if (speed <= old_speed) {
+        return;
+    }
+
+    /* kick only if a timer is pending */
+    block_job_enter_cond(job, block_job_timer_pending);
 }
 
 void block_job_complete(BlockJob *job, Error **errp)
@@ -821,7 +835,11 @@ void block_job_resume_all(void)
     }
 }
 
-void block_job_enter(BlockJob *job)
+/*
+ * Conditionally enter a block_job pending a call to fn() while
+ * under the block_job_lock critical section.
+ */
+static void block_job_enter_cond(BlockJob *job, bool(*fn)(BlockJob *job))
 {
     if (!block_job_started(job)) {
         return;
     }
@@ -836,6 +854,11 @@
         return;
     }
 
+    if (fn && !fn(job)) {
+        block_job_unlock();
+        return;
+    }
+
     assert(!job->deferred_to_main_loop);
     timer_del(&job->sleep_timer);
     job->busy = true;
@@ -843,6 +866,11 @@
     aio_co_wake(job->co);
 }
 
+void block_job_enter(BlockJob *job)
+{
+    block_job_enter_cond(job, NULL);
+}
+
 bool block_job_is_cancelled(BlockJob *job)
 {
     return job->cancelled;
 }
diff --git a/include/block/dirty-bitmap.h b/include/block/dirty-bitmap.h
index 3579a7597c..a591c27213 100644
--- a/include/block/dirty-bitmap.h
+++ b/include/block/dirty-bitmap.h
@@ -91,5 +91,6 @@ bool bdrv_has_changed_persistent_bitmaps(BlockDriverState *bs);
 BdrvDirtyBitmap *bdrv_dirty_bitmap_next(BlockDriverState *bs,
                                         BdrvDirtyBitmap *bitmap);
 char *bdrv_dirty_bitmap_sha256(const BdrvDirtyBitmap *bitmap, Error **errp);
+int64_t bdrv_dirty_bitmap_next_zero(BdrvDirtyBitmap *bitmap, uint64_t start);
 
 #endif
diff --git a/include/qemu/hbitmap.h b/include/qemu/hbitmap.h
index 81e78043d1..6b6490ecad 100644
--- a/include/qemu/hbitmap.h
+++ b/include/qemu/hbitmap.h
@@ -292,6 +292,14 @@ void hbitmap_iter_init(HBitmapIter *hbi, const HBitmap *hb, uint64_t first);
  */
 unsigned long hbitmap_iter_skip_words(HBitmapIter *hbi);
 
+/* hbitmap_next_zero:
+ * @hb: The HBitmap to operate on
+ * @start: The bit to start from.
+ *
+ * Find next not dirty bit.
+ */
+int64_t hbitmap_next_zero(const HBitmap *hb, uint64_t start);
+
 /* hbitmap_create_meta:
  * Create a "meta" hbitmap to track dirtiness of the bits in this HBitmap.
  * The caller owns the created bitmap and must call hbitmap_free_meta(hb) to
diff --git a/tests/test-hbitmap.c b/tests/test-hbitmap.c
index af41642346..9091c639b3 100644
--- a/tests/test-hbitmap.c
+++ b/tests/test-hbitmap.c
@@ -925,6 +925,61 @@ static void test_hbitmap_iter_and_reset(TestHBitmapData *data,
     hbitmap_iter_next(&hbi);
 }
 
+static void test_hbitmap_next_zero_check(TestHBitmapData *data, int64_t start)
+{
+    int64_t ret1 = hbitmap_next_zero(data->hb, start);
+    int64_t ret2 = start;
+    for ( ; ret2 < data->size && hbitmap_get(data->hb, ret2); ret2++) {
+        ;
+    }
+    if (ret2 == data->size) {
+        ret2 = -1;
+    }
+
+    g_assert_cmpint(ret1, ==, ret2);
+}
+
+static void test_hbitmap_next_zero_do(TestHBitmapData *data, int granularity)
+{
+    hbitmap_test_init(data, L3, granularity);
+    test_hbitmap_next_zero_check(data, 0);
+    test_hbitmap_next_zero_check(data, L3 - 1);
+
+    hbitmap_set(data->hb, L2, 1);
+    test_hbitmap_next_zero_check(data, 0);
+    test_hbitmap_next_zero_check(data, L2 - 1);
+    test_hbitmap_next_zero_check(data, L2);
+    test_hbitmap_next_zero_check(data, L2 + 1);
+
+    hbitmap_set(data->hb, L2 + 5, L1);
+    test_hbitmap_next_zero_check(data, 0);
+    test_hbitmap_next_zero_check(data, L2 + 1);
+    test_hbitmap_next_zero_check(data, L2 + 2);
+    test_hbitmap_next_zero_check(data, L2 + 5);
+    test_hbitmap_next_zero_check(data, L2 + L1 - 1);
+    test_hbitmap_next_zero_check(data, L2 + L1);
+
+    hbitmap_set(data->hb, L2 * 2, L3 - L2 * 2);
+    test_hbitmap_next_zero_check(data, L2 * 2 - L1);
+    test_hbitmap_next_zero_check(data, L2 * 2 - 2);
+    test_hbitmap_next_zero_check(data, L2 * 2 - 1);
+    test_hbitmap_next_zero_check(data, L2 * 2);
+    test_hbitmap_next_zero_check(data, L3 - 1);
+
+    hbitmap_set(data->hb, 0, L3);
+    test_hbitmap_next_zero_check(data, 0);
+}
+
+static void test_hbitmap_next_zero_0(TestHBitmapData *data, const void *unused)
+{
+    test_hbitmap_next_zero_do(data, 0);
+}
+
+static void test_hbitmap_next_zero_4(TestHBitmapData *data, const void *unused)
+{
+    test_hbitmap_next_zero_do(data, 4);
+}
+
 int main(int argc, char **argv)
 {
     g_test_init(&argc, &argv, NULL);
@@ -985,6 +1040,12 @@
 
     hbitmap_test_add("/hbitmap/iter/iter_and_reset",
                      test_hbitmap_iter_and_reset);
+
+    hbitmap_test_add("/hbitmap/next_zero/next_zero_0",
+                     test_hbitmap_next_zero_0);
+    hbitmap_test_add("/hbitmap/next_zero/next_zero_4",
+                     test_hbitmap_next_zero_4);
+
     g_test_run();
 
     return 0;
diff --git a/util/hbitmap.c b/util/hbitmap.c
index 2f9d0fdbd0..289778a55c 100644
--- a/util/hbitmap.c
+++ b/util/hbitmap.c
@@ -188,6 +188,45 @@ void hbitmap_iter_init(HBitmapIter *hbi, const HBitmap *hb, uint64_t first)
     }
 }
 
+int64_t hbitmap_next_zero(const HBitmap *hb, uint64_t start)
+{
+    size_t pos = (start >> hb->granularity) >> BITS_PER_LEVEL;
+    unsigned long *last_lev = hb->levels[HBITMAP_LEVELS - 1];
+    uint64_t sz = hb->sizes[HBITMAP_LEVELS - 1];
+    unsigned long cur = last_lev[pos];
+    unsigned start_bit_offset =
+        (start >> hb->granularity) & (BITS_PER_LONG - 1);
+    int64_t res;
+
+    cur |= (1UL << start_bit_offset) - 1;
+    assert((start >> hb->granularity) < hb->size);
+
+    if (cur == (unsigned long)-1) {
+        do {
+            pos++;
+        } while (pos < sz && last_lev[pos] == (unsigned long)-1);
+
+        if (pos >= sz) {
+            return -1;
+        }
+
+        cur = last_lev[pos];
+    }
+
+    res = (pos << BITS_PER_LEVEL) + ctol(cur);
+    if (res >= hb->size) {
+        return -1;
+    }
+
+    res = res << hb->granularity;
+    if (res < start) {
+        assert(((start - res) >> hb->granularity) == 0);
+        return start;
+    }
+
+    return res;
+}
+
 bool hbitmap_empty(const HBitmap *hb)
 {
     return hb->count == 0;