Merge remote-tracking branch 'remotes/maxreitz/tags/pull-block-2020-12-18' into staging

Block patches:
- New block filter: preallocate (which, on writes beyond an image file's
  end, allocates big chunks of data so that such post-EOF writes will
  occur less frequently)
- write-zeroes and block-status support for Quorum
- Implementation of truncate for the nvme block driver similarly to the
  existing implementations for host block devices and iscsi devices
- Block layer refactoring: Drop the tighten_restrictions concept in the
  block permission functions
- iotest fixes

# gpg: Signature made Fri 18 Dec 2020 14:45:30 GMT
# gpg:                using RSA key 91BEB60A30DB3E8857D11829F407DB0061D5CF40
# gpg:                issuer "mreitz@redhat.com"
# gpg: Good signature from "Max Reitz <mreitz@redhat.com>" [full]
# Primary key fingerprint: 91BE B60A 30DB 3E88 57D1  1829 F407 DB00 61D5 CF40

* remotes/maxreitz/tags/pull-block-2020-12-18: (30 commits)
  iotests: Fix _send_qemu_cmd with bash 5.1
  iotests/102: Pass $QEMU_HANDLE to _send_qemu_cmd
  block/nvme: Implement fake truncate() coroutine
  quorum: Implement bdrv_co_pwrite_zeroes()
  quorum: Implement bdrv_co_block_status()
  scripts/simplebench: add bench_prealloc.py
  simplebench/results_to_text: make executable
  simplebench/results_to_text: add difference line to the table
  simplebench/results_to_text: improve view of the table
  simplebench: move results_to_text() into separate file
  simplebench: rename ascii() to results_to_text()
  scripts/simplebench: use standard deviation for +- error
  scripts/simplebench: support iops
  scripts/simplebench: fix grammar: s/successed/succeeded/
  iotests: add 298 to test new preallocate filter driver
  iotests.py: execute_setup_common(): add required_fmts argument
  iotests: qemu_io_silent: support --image-opts
  qemu-io: add preallocate mode parameter for truncate command
  block: introduce preallocate filter
  block: bdrv_check_perm(): process children anyway
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
commit 26f6b15e26
Peter Maydell <peter.maydell@linaro.org>, 2020-12-31 23:26:46 +00:00
48 changed files with 2346 additions and 436 deletions

block.c

@ -1900,10 +1900,9 @@ static int bdrv_fill_options(QDict **options, const char *filename,
static int bdrv_child_check_perm(BdrvChild *c, BlockReopenQueue *q,
uint64_t perm, uint64_t shared,
GSList *ignore_children,
bool *tighten_restrictions, Error **errp);
GSList *ignore_children, Error **errp);
static void bdrv_child_abort_perm_update(BdrvChild *c);
static void bdrv_child_set_perm(BdrvChild *c, uint64_t perm, uint64_t shared);
static void bdrv_child_set_perm(BdrvChild *c);
typedef struct BlockReopenQueueEntry {
bool prepared;
@ -1974,43 +1973,18 @@ static void bdrv_child_perm(BlockDriverState *bs, BlockDriverState *child_bs,
* permissions of all its parents. This involves checking whether all necessary
* permission changes to child nodes can be performed.
*
* Will set *tighten_restrictions to true if and only if new permissions have to
* be taken or currently shared permissions are to be unshared. Otherwise,
* errors are not fatal as long as the caller accepts that the restrictions
* remain tighter than they need to be. The caller still has to abort the
* transaction.
* @tighten_restrictions cannot be used together with @q: When reopening, we may
* encounter fatal errors even though no restrictions are to be tightened. For
* example, changing a node from RW to RO will fail if the WRITE permission is
* to be kept.
*
* A call to this function must always be followed by a call to bdrv_set_perm()
* or bdrv_abort_perm_update().
*/
static int bdrv_check_perm(BlockDriverState *bs, BlockReopenQueue *q,
uint64_t cumulative_perms,
uint64_t cumulative_shared_perms,
GSList *ignore_children,
bool *tighten_restrictions, Error **errp)
GSList *ignore_children, Error **errp)
{
BlockDriver *drv = bs->drv;
BdrvChild *c;
int ret;
assert(!q || !tighten_restrictions);
if (tighten_restrictions) {
uint64_t current_perms, current_shared;
uint64_t added_perms, removed_shared_perms;
bdrv_get_cumulative_perm(bs, &current_perms, &current_shared);
added_perms = cumulative_perms & ~current_perms;
removed_shared_perms = current_shared & ~cumulative_shared_perms;
*tighten_restrictions = added_perms || removed_shared_perms;
}
/* Write permissions never work with read-only images */
if ((cumulative_perms & (BLK_PERM_WRITE | BLK_PERM_WRITE_UNCHANGED)) &&
!bdrv_is_writable_after_reopen(bs, q))
@ -2054,8 +2028,11 @@ static int bdrv_check_perm(BlockDriverState *bs, BlockReopenQueue *q,
}
if (drv->bdrv_check_perm) {
return drv->bdrv_check_perm(bs, cumulative_perms,
cumulative_shared_perms, errp);
ret = drv->bdrv_check_perm(bs, cumulative_perms,
cumulative_shared_perms, errp);
if (ret < 0) {
return ret;
}
}
/* Drivers that never have children can omit .bdrv_child_perm() */
@ -2067,18 +2044,12 @@ static int bdrv_check_perm(BlockDriverState *bs, BlockReopenQueue *q,
/* Check all children */
QLIST_FOREACH(c, &bs->children, next) {
uint64_t cur_perm, cur_shared;
bool child_tighten_restr;
bdrv_child_perm(bs, c->bs, c, c->role, q,
cumulative_perms, cumulative_shared_perms,
&cur_perm, &cur_shared);
ret = bdrv_child_check_perm(c, q, cur_perm, cur_shared, ignore_children,
tighten_restrictions ? &child_tighten_restr
: NULL,
errp);
if (tighten_restrictions) {
*tighten_restrictions |= child_tighten_restr;
}
if (ret < 0) {
return ret;
}
@ -2112,9 +2083,9 @@ static void bdrv_abort_perm_update(BlockDriverState *bs)
}
}
static void bdrv_set_perm(BlockDriverState *bs, uint64_t cumulative_perms,
uint64_t cumulative_shared_perms)
static void bdrv_set_perm(BlockDriverState *bs)
{
uint64_t cumulative_perms, cumulative_shared_perms;
BlockDriver *drv = bs->drv;
BdrvChild *c;
@ -2122,6 +2093,8 @@ static void bdrv_set_perm(BlockDriverState *bs, uint64_t cumulative_perms,
return;
}
bdrv_get_cumulative_perm(bs, &cumulative_perms, &cumulative_shared_perms);
/* Update this node */
if (drv->bdrv_set_perm) {
drv->bdrv_set_perm(bs, cumulative_perms, cumulative_shared_perms);
@ -2135,11 +2108,7 @@ static void bdrv_set_perm(BlockDriverState *bs, uint64_t cumulative_perms,
/* Update all children */
QLIST_FOREACH(c, &bs->children, next) {
uint64_t cur_perm, cur_shared;
bdrv_child_perm(bs, c->bs, c, c->role, NULL,
cumulative_perms, cumulative_shared_perms,
&cur_perm, &cur_shared);
bdrv_child_set_perm(c, cur_perm, cur_shared);
bdrv_child_set_perm(c);
}
}
@ -2203,22 +2172,18 @@ char *bdrv_perm_names(uint64_t perm)
* set, the BdrvChild objects in this list are ignored in the calculations;
* this allows checking permission updates for an existing reference.
*
* See bdrv_check_perm() for the semantics of @tighten_restrictions.
*
* Needs to be followed by a call to either bdrv_set_perm() or
* bdrv_abort_perm_update(). */
static int bdrv_check_update_perm(BlockDriverState *bs, BlockReopenQueue *q,
uint64_t new_used_perm,
uint64_t new_shared_perm,
GSList *ignore_children,
bool *tighten_restrictions,
Error **errp)
{
BdrvChild *c;
uint64_t cumulative_perms = new_used_perm;
uint64_t cumulative_shared_perms = new_shared_perm;
assert(!q || !tighten_restrictions);
/* There is no reason why anyone couldn't tolerate write_unchanged */
assert(new_shared_perm & BLK_PERM_WRITE_UNCHANGED);
@ -2232,10 +2197,6 @@ static int bdrv_check_update_perm(BlockDriverState *bs, BlockReopenQueue *q,
char *user = bdrv_child_user_desc(c);
char *perm_names = bdrv_perm_names(new_used_perm & ~c->shared_perm);
if (tighten_restrictions) {
*tighten_restrictions = true;
}
error_setg(errp, "Conflicts with use by %s as '%s', which does not "
"allow '%s' on %s",
user, c->name, perm_names, bdrv_get_node_name(c->bs));
@ -2248,10 +2209,6 @@ static int bdrv_check_update_perm(BlockDriverState *bs, BlockReopenQueue *q,
char *user = bdrv_child_user_desc(c);
char *perm_names = bdrv_perm_names(c->perm & ~new_shared_perm);
if (tighten_restrictions) {
*tighten_restrictions = true;
}
error_setg(errp, "Conflicts with use by %s as '%s', which uses "
"'%s' on %s",
user, c->name, perm_names, bdrv_get_node_name(c->bs));
@ -2265,21 +2222,19 @@ static int bdrv_check_update_perm(BlockDriverState *bs, BlockReopenQueue *q,
}
return bdrv_check_perm(bs, q, cumulative_perms, cumulative_shared_perms,
ignore_children, tighten_restrictions, errp);
ignore_children, errp);
}
/* Needs to be followed by a call to either bdrv_child_set_perm() or
* bdrv_child_abort_perm_update(). */
static int bdrv_child_check_perm(BdrvChild *c, BlockReopenQueue *q,
uint64_t perm, uint64_t shared,
GSList *ignore_children,
bool *tighten_restrictions, Error **errp)
GSList *ignore_children, Error **errp)
{
int ret;
ignore_children = g_slist_prepend(g_slist_copy(ignore_children), c);
ret = bdrv_check_update_perm(c->bs, q, perm, shared, ignore_children,
tighten_restrictions, errp);
ret = bdrv_check_update_perm(c->bs, q, perm, shared, ignore_children, errp);
g_slist_free(ignore_children);
if (ret < 0) {
@ -2302,18 +2257,11 @@ static int bdrv_child_check_perm(BdrvChild *c, BlockReopenQueue *q,
return 0;
}
static void bdrv_child_set_perm(BdrvChild *c, uint64_t perm, uint64_t shared)
static void bdrv_child_set_perm(BdrvChild *c)
{
uint64_t cumulative_perms, cumulative_shared_perms;
c->has_backup_perm = false;
c->perm = perm;
c->shared_perm = shared;
bdrv_get_cumulative_perm(c->bs, &cumulative_perms,
&cumulative_shared_perms);
bdrv_set_perm(c->bs, cumulative_perms, cumulative_shared_perms);
bdrv_set_perm(c->bs);
}
static void bdrv_child_abort_perm_update(BdrvChild *c)
@ -2327,18 +2275,33 @@ static void bdrv_child_abort_perm_update(BdrvChild *c)
bdrv_abort_perm_update(c->bs);
}
static int bdrv_refresh_perms(BlockDriverState *bs, Error **errp)
{
int ret;
uint64_t perm, shared_perm;
bdrv_get_cumulative_perm(bs, &perm, &shared_perm);
ret = bdrv_check_perm(bs, NULL, perm, shared_perm, NULL, errp);
if (ret < 0) {
bdrv_abort_perm_update(bs);
return ret;
}
bdrv_set_perm(bs);
return 0;
}
int bdrv_child_try_set_perm(BdrvChild *c, uint64_t perm, uint64_t shared,
Error **errp)
{
Error *local_err = NULL;
int ret;
bool tighten_restrictions;
ret = bdrv_child_check_perm(c, NULL, perm, shared, NULL,
&tighten_restrictions, &local_err);
ret = bdrv_child_check_perm(c, NULL, perm, shared, NULL, &local_err);
if (ret < 0) {
bdrv_child_abort_perm_update(c);
if (tighten_restrictions) {
if ((perm & ~c->perm) || (c->shared_perm & ~shared)) {
/* tighten permissions */
error_propagate(errp, local_err);
} else {
/*
@ -2353,7 +2316,7 @@ int bdrv_child_try_set_perm(BdrvChild *c, uint64_t perm, uint64_t shared,
return ret;
}
bdrv_child_set_perm(c, perm, shared);
bdrv_child_set_perm(c);
return 0;
}
@ -2623,7 +2586,6 @@ static void bdrv_replace_child_noperm(BdrvChild *child,
static void bdrv_replace_child(BdrvChild *child, BlockDriverState *new_bs)
{
BlockDriverState *old_bs = child->bs;
uint64_t perm, shared_perm;
/* Asserts that child->frozen == false */
bdrv_replace_child_noperm(child, new_bs);
@ -2637,27 +2599,16 @@ static void bdrv_replace_child(BdrvChild *child, BlockDriverState *new_bs)
* restrictions.
*/
if (new_bs) {
bdrv_get_cumulative_perm(new_bs, &perm, &shared_perm);
bdrv_set_perm(new_bs, perm, shared_perm);
bdrv_set_perm(new_bs);
}
if (old_bs) {
/* Update permissions for old node. This is guaranteed to succeed
* because we're just taking a parent away, so we're loosening
* restrictions. */
bool tighten_restrictions;
int ret;
bdrv_get_cumulative_perm(old_bs, &perm, &shared_perm);
ret = bdrv_check_perm(old_bs, NULL, perm, shared_perm, NULL,
&tighten_restrictions, NULL);
assert(tighten_restrictions == false);
if (ret < 0) {
/* We only tried to loosen restrictions, so errors are not fatal */
bdrv_abort_perm_update(old_bs);
} else {
bdrv_set_perm(old_bs, perm, shared_perm);
}
/*
* Update permissions for old node. We're just taking a parent away, so
* we're loosening restrictions. Errors of permission update are not
* fatal in this case, ignore them.
*/
bdrv_refresh_perms(old_bs, NULL);
/* When the parent requiring a non-default AioContext is removed, the
* node moves back to the main AioContext */
@ -2687,8 +2638,7 @@ BdrvChild *bdrv_root_attach_child(BlockDriverState *child_bs,
Error *local_err = NULL;
int ret;
ret = bdrv_check_update_perm(child_bs, NULL, perm, shared_perm, NULL, NULL,
errp);
ret = bdrv_check_update_perm(child_bs, NULL, perm, shared_perm, NULL, errp);
if (ret < 0) {
bdrv_abort_perm_update(child_bs);
bdrv_unref(child_bs);
@ -3820,7 +3770,7 @@ int bdrv_reopen_multiple(BlockReopenQueue *bs_queue, Error **errp)
QTAILQ_FOREACH(bs_entry, bs_queue, entry) {
BDRVReopenState *state = &bs_entry->state;
ret = bdrv_check_perm(state->bs, bs_queue, state->perm,
state->shared_perm, NULL, NULL, errp);
state->shared_perm, NULL, errp);
if (ret < 0) {
goto cleanup_perm;
}
@ -3832,7 +3782,7 @@ int bdrv_reopen_multiple(BlockReopenQueue *bs_queue, Error **errp)
bs_queue, state->perm, state->shared_perm,
&nperm, &nshared);
ret = bdrv_check_update_perm(state->new_backing_bs, NULL,
nperm, nshared, NULL, NULL, errp);
nperm, nshared, NULL, errp);
if (ret < 0) {
goto cleanup_perm;
}
@ -3863,7 +3813,13 @@ cleanup_perm:
}
if (ret == 0) {
bdrv_set_perm(state->bs, state->perm, state->shared_perm);
uint64_t perm, shared;
bdrv_get_cumulative_perm(state->bs, &perm, &shared);
assert(perm == state->perm);
assert(shared == state->shared_perm);
bdrv_set_perm(state->bs);
} else {
bdrv_abort_perm_update(state->bs);
if (state->replace_backing_bs && state->new_backing_bs) {
@ -4616,7 +4572,7 @@ static void bdrv_replace_node_common(BlockDriverState *from,
/* Check whether the required permissions can be granted on @to, ignoring
* all BdrvChild in @list so that they can't block themselves. */
ret = bdrv_check_update_perm(to, NULL, perm, shared, list, NULL, errp);
ret = bdrv_check_update_perm(to, NULL, perm, shared, list, errp);
if (ret < 0) {
bdrv_abort_perm_update(to);
goto out;
@ -4633,8 +4589,7 @@ static void bdrv_replace_node_common(BlockDriverState *from,
bdrv_unref(from);
}
bdrv_get_cumulative_perm(to, &perm, &shared);
bdrv_set_perm(to, perm, shared);
bdrv_set_perm(to);
out:
g_slist_free(list);
@ -5781,7 +5736,6 @@ void bdrv_init_with_whitelist(void)
int coroutine_fn bdrv_co_invalidate_cache(BlockDriverState *bs, Error **errp)
{
BdrvChild *child, *parent;
uint64_t perm, shared_perm;
Error *local_err = NULL;
int ret;
BdrvDirtyBitmap *bm;
@ -5813,14 +5767,11 @@ int coroutine_fn bdrv_co_invalidate_cache(BlockDriverState *bs, Error **errp)
*/
if (bs->open_flags & BDRV_O_INACTIVE) {
bs->open_flags &= ~BDRV_O_INACTIVE;
bdrv_get_cumulative_perm(bs, &perm, &shared_perm);
ret = bdrv_check_perm(bs, NULL, perm, shared_perm, NULL, NULL, errp);
ret = bdrv_refresh_perms(bs, errp);
if (ret < 0) {
bdrv_abort_perm_update(bs);
bs->open_flags |= BDRV_O_INACTIVE;
return ret;
}
bdrv_set_perm(bs, perm, shared_perm);
if (bs->drv->bdrv_co_invalidate_cache) {
bs->drv->bdrv_co_invalidate_cache(bs, &local_err);
@ -5895,8 +5846,6 @@ static bool bdrv_has_bds_parent(BlockDriverState *bs, bool only_active)
static int bdrv_inactivate_recurse(BlockDriverState *bs)
{
BdrvChild *child, *parent;
bool tighten_restrictions;
uint64_t perm, shared_perm;
int ret;
if (!bs->drv) {
@ -5930,18 +5879,12 @@ static int bdrv_inactivate_recurse(BlockDriverState *bs)
bs->open_flags |= BDRV_O_INACTIVE;
/* Update permissions, they may differ for inactive nodes */
bdrv_get_cumulative_perm(bs, &perm, &shared_perm);
ret = bdrv_check_perm(bs, NULL, perm, shared_perm, NULL,
&tighten_restrictions, NULL);
assert(tighten_restrictions == false);
if (ret < 0) {
/* We only tried to loosen restrictions, so errors are not fatal */
bdrv_abort_perm_update(bs);
} else {
bdrv_set_perm(bs, perm, shared_perm);
}
/*
* Update permissions, they may differ for inactive nodes.
* We only tried to loosen restrictions, so errors are not fatal, ignore
* them.
*/
bdrv_refresh_perms(bs, NULL);
/* Recursively inactivate children */
QLIST_FOREACH(child, &bs->children, next) {

block/file-posix.c

@ -2953,7 +2953,7 @@ raw_do_pwrite_zeroes(BlockDriverState *bs, int64_t offset, int bytes,
assert(bdrv_check_request(req->offset, req->bytes) == 0);
bdrv_mark_request_serialising(req, bs->bl.request_alignment);
bdrv_make_request_serialising(req, bs->bl.request_alignment);
}
#endif

block/io.c

@ -754,55 +754,65 @@ static bool tracked_request_overlaps(BdrvTrackedRequest *req,
return true;
}
static bool coroutine_fn
bdrv_wait_serialising_requests_locked(BlockDriverState *bs,
BdrvTrackedRequest *self)
/* Called with self->bs->reqs_lock held */
static BdrvTrackedRequest *
bdrv_find_conflicting_request(BdrvTrackedRequest *self)
{
BdrvTrackedRequest *req;
bool retry;
bool waited = false;
do {
retry = false;
QLIST_FOREACH(req, &bs->tracked_requests, list) {
if (req == self || (!req->serialising && !self->serialising)) {
continue;
}
if (tracked_request_overlaps(req, self->overlap_offset,
self->overlap_bytes))
{
/* Hitting this means there was a reentrant request, for
* example, a block driver issuing nested requests. This must
* never happen since it means deadlock.
*/
assert(qemu_coroutine_self() != req->co);
QLIST_FOREACH(req, &self->bs->tracked_requests, list) {
if (req == self || (!req->serialising && !self->serialising)) {
continue;
}
if (tracked_request_overlaps(req, self->overlap_offset,
self->overlap_bytes))
{
/*
* Hitting this means there was a reentrant request, for
* example, a block driver issuing nested requests. This must
* never happen since it means deadlock.
*/
assert(qemu_coroutine_self() != req->co);
/* If the request is already (indirectly) waiting for us, or
* will wait for us as soon as it wakes up, then just go on
* (instead of producing a deadlock in the former case). */
if (!req->waiting_for) {
self->waiting_for = req;
qemu_co_queue_wait(&req->wait_queue, &bs->reqs_lock);
self->waiting_for = NULL;
retry = true;
waited = true;
break;
}
/*
* If the request is already (indirectly) waiting for us, or
* will wait for us as soon as it wakes up, then just go on
* (instead of producing a deadlock in the former case).
*/
if (!req->waiting_for) {
return req;
}
}
} while (retry);
}
return NULL;
}
/* Called with self->bs->reqs_lock held */
static bool coroutine_fn
bdrv_wait_serialising_requests_locked(BdrvTrackedRequest *self)
{
BdrvTrackedRequest *req;
bool waited = false;
while ((req = bdrv_find_conflicting_request(self))) {
self->waiting_for = req;
qemu_co_queue_wait(&req->wait_queue, &self->bs->reqs_lock);
self->waiting_for = NULL;
waited = true;
}
return waited;
}
bool bdrv_mark_request_serialising(BdrvTrackedRequest *req, uint64_t align)
/* Called with req->bs->reqs_lock held */
static void tracked_request_set_serialising(BdrvTrackedRequest *req,
uint64_t align)
{
BlockDriverState *bs = req->bs;
int64_t overlap_offset = req->offset & ~(align - 1);
uint64_t overlap_bytes = ROUND_UP(req->offset + req->bytes, align)
- overlap_offset;
bool waited;
qemu_co_mutex_lock(&bs->reqs_lock);
if (!req->serialising) {
qatomic_inc(&req->bs->serialising_in_flight);
req->serialising = true;
@ -810,9 +820,6 @@ bool bdrv_mark_request_serialising(BdrvTrackedRequest *req, uint64_t align)
req->overlap_offset = MIN(req->overlap_offset, overlap_offset);
req->overlap_bytes = MAX(req->overlap_bytes, overlap_bytes);
waited = bdrv_wait_serialising_requests_locked(bs, req);
qemu_co_mutex_unlock(&bs->reqs_lock);
return waited;
}
/**
@ -892,12 +899,27 @@ static bool coroutine_fn bdrv_wait_serialising_requests(BdrvTrackedRequest *self
}
qemu_co_mutex_lock(&bs->reqs_lock);
waited = bdrv_wait_serialising_requests_locked(bs, self);
waited = bdrv_wait_serialising_requests_locked(self);
qemu_co_mutex_unlock(&bs->reqs_lock);
return waited;
}
bool coroutine_fn bdrv_make_request_serialising(BdrvTrackedRequest *req,
uint64_t align)
{
bool waited;
qemu_co_mutex_lock(&req->bs->reqs_lock);
tracked_request_set_serialising(req, align);
waited = bdrv_wait_serialising_requests_locked(req);
qemu_co_mutex_unlock(&req->bs->reqs_lock);
return waited;
}
int bdrv_check_request(int64_t offset, int64_t bytes)
{
if (offset < 0 || bytes < 0) {
@ -1423,7 +1445,7 @@ static int coroutine_fn bdrv_aligned_preadv(BdrvChild *child,
* with each other for the same cluster. For example, in copy-on-read
* it ensures that the CoR read and write operations are atomic and
* guest writes cannot interleave between them. */
bdrv_mark_request_serialising(req, bdrv_get_cluster_size(bs));
bdrv_make_request_serialising(req, bdrv_get_cluster_size(bs));
} else {
bdrv_wait_serialising_requests(req);
}
@ -1827,7 +1849,6 @@ bdrv_co_write_req_prepare(BdrvChild *child, int64_t offset, uint64_t bytes,
BdrvTrackedRequest *req, int flags)
{
BlockDriverState *bs = child->bs;
bool waited;
int64_t end_sector = DIV_ROUND_UP(offset + bytes, BDRV_SECTOR_SIZE);
if (bs->read_only) {
@ -1837,17 +1858,18 @@ bdrv_co_write_req_prepare(BdrvChild *child, int64_t offset, uint64_t bytes,
assert(!(bs->open_flags & BDRV_O_INACTIVE));
assert((bs->open_flags & BDRV_O_NO_IO) == 0);
assert(!(flags & ~BDRV_REQ_MASK));
assert(!((flags & BDRV_REQ_NO_WAIT) && !(flags & BDRV_REQ_SERIALISING)));
if (flags & BDRV_REQ_SERIALISING) {
waited = bdrv_mark_request_serialising(req, bdrv_get_cluster_size(bs));
/*
* For a misaligned request we should have already waited earlier,
* because we come after bdrv_padding_rmw_read which must be called
* with the request already marked as serialising.
*/
assert(!waited ||
(req->offset == req->overlap_offset &&
req->bytes == req->overlap_bytes));
QEMU_LOCK_GUARD(&bs->reqs_lock);
tracked_request_set_serialising(req, bdrv_get_cluster_size(bs));
if ((flags & BDRV_REQ_NO_WAIT) && bdrv_find_conflicting_request(req)) {
return -EBUSY;
}
bdrv_wait_serialising_requests_locked(req);
} else {
bdrv_wait_serialising_requests(req);
}
@ -2013,7 +2035,7 @@ static int coroutine_fn bdrv_co_do_zero_pwritev(BdrvChild *child,
padding = bdrv_init_padding(bs, offset, bytes, &pad);
if (padding) {
bdrv_mark_request_serialising(req, align);
bdrv_make_request_serialising(req, align);
bdrv_padding_rmw_read(child, req, &pad, true);
@ -2127,7 +2149,7 @@ int coroutine_fn bdrv_co_pwritev_part(BdrvChild *child,
}
if (bdrv_pad_request(bs, &qiov, &qiov_offset, &offset, &bytes, &pad)) {
bdrv_mark_request_serialising(&req, align);
bdrv_make_request_serialising(&req, align);
bdrv_padding_rmw_read(child, &req, &pad, false);
}
@ -3248,7 +3270,7 @@ int coroutine_fn bdrv_co_truncate(BdrvChild *child, int64_t offset, bool exact,
* new area, we need to make sure that no write requests are made to it
* concurrently or they might be overwritten by preallocation. */
if (new_bytes) {
bdrv_mark_request_serialising(&req, 1);
bdrv_make_request_serialising(&req, 1);
}
if (bs->read_only) {
error_setg(errp, "Image is read-only");

block/meson.build

@ -12,6 +12,7 @@ block_ss.add(files(
'block-copy.c',
'commit.c',
'copy-on-read.c',
'preallocate.c',
'create.c',
'crypto.c',
'dirty-bitmap.c',

block/nvme.c

@ -1389,6 +1389,29 @@ out:
}
static int coroutine_fn nvme_co_truncate(BlockDriverState *bs, int64_t offset,
bool exact, PreallocMode prealloc,
BdrvRequestFlags flags, Error **errp)
{
int64_t cur_length;
if (prealloc != PREALLOC_MODE_OFF) {
error_setg(errp, "Unsupported preallocation mode '%s'",
PreallocMode_str(prealloc));
return -ENOTSUP;
}
cur_length = nvme_getlength(bs);
if (offset != cur_length && exact) {
error_setg(errp, "Cannot resize NVMe devices");
return -ENOTSUP;
} else if (offset > cur_length) {
error_setg(errp, "Cannot grow NVMe devices");
return -EINVAL;
}
return 0;
}
static int nvme_reopen_prepare(BDRVReopenState *reopen_state,
BlockReopenQueue *queue, Error **errp)
@ -1523,6 +1546,7 @@ static BlockDriver bdrv_nvme = {
.bdrv_close = nvme_close,
.bdrv_getlength = nvme_getlength,
.bdrv_probe_blocksizes = nvme_probe_blocksizes,
.bdrv_co_truncate = nvme_co_truncate,
.bdrv_co_preadv = nvme_co_preadv,
.bdrv_co_pwritev = nvme_co_pwritev,

block/preallocate.c (new file)

@ -0,0 +1,559 @@
/*
* preallocate filter driver
*
* The driver performs the preallocate operation: it is injected above
* some node, and before each write past EOF it issues an additional
* preallocating write-zeroes request.
*
* Copyright (c) 2020 Virtuozzo International GmbH.
*
* Author:
* Sementsov-Ogievskiy Vladimir <vsementsov@virtuozzo.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu/module.h"
#include "qemu/option.h"
#include "qemu/units.h"
#include "block/block_int.h"
typedef struct PreallocateOpts {
int64_t prealloc_size;
int64_t prealloc_align;
} PreallocateOpts;
typedef struct BDRVPreallocateState {
PreallocateOpts opts;
/*
* Track real data end, to crop preallocation on close. If < 0 the status is
* unknown.
*
* @data_end is the maximum of the file size on open (or when we get
* write/resize permissions) and the end offsets of all writes since then.
* So it's safe to truncate to data_end if it is valid.
*/
int64_t data_end;
/*
* Start of trailing preallocated area which reads as zero. May be smaller
* than data_end, if user does over-EOF write zero operation. If < 0 the
* status is unknown.
*
* If both @zero_start and @file_end are valid, the region
* [@zero_start, @file_end) is known to be preallocated zeroes. If @file_end
* is not valid, @zero_start doesn't make much sense.
*/
int64_t zero_start;
/*
* Real end of file. Actually the cache for bdrv_getlength(bs->file->bs),
* to avoid extra lseek() calls on each write operation. If < 0 the status
* is unknown.
*/
int64_t file_end;
/*
* All three states @data_end, @zero_start and @file_end are guaranteed to
* be invalid (< 0) when we don't have both exclusive BLK_PERM_RESIZE and
* BLK_PERM_WRITE permissions on file child.
*/
} BDRVPreallocateState;
#define PREALLOCATE_OPT_PREALLOC_ALIGN "prealloc-align"
#define PREALLOCATE_OPT_PREALLOC_SIZE "prealloc-size"
static QemuOptsList runtime_opts = {
.name = "preallocate",
.head = QTAILQ_HEAD_INITIALIZER(runtime_opts.head),
.desc = {
{
.name = PREALLOCATE_OPT_PREALLOC_ALIGN,
.type = QEMU_OPT_SIZE,
.help = "on preallocation, align file length to this number, "
"default 1M",
},
{
.name = PREALLOCATE_OPT_PREALLOC_SIZE,
.type = QEMU_OPT_SIZE,
.help = "how much to preallocate, default 128M",
},
{ /* end of list */ }
},
};
static bool preallocate_absorb_opts(PreallocateOpts *dest, QDict *options,
BlockDriverState *child_bs, Error **errp)
{
QemuOpts *opts = qemu_opts_create(&runtime_opts, NULL, 0, &error_abort);
if (!qemu_opts_absorb_qdict(opts, options, errp)) {
return false;
}
dest->prealloc_align =
qemu_opt_get_size(opts, PREALLOCATE_OPT_PREALLOC_ALIGN, 1 * MiB);
dest->prealloc_size =
qemu_opt_get_size(opts, PREALLOCATE_OPT_PREALLOC_SIZE, 128 * MiB);
qemu_opts_del(opts);
if (!QEMU_IS_ALIGNED(dest->prealloc_align, BDRV_SECTOR_SIZE)) {
error_setg(errp, "prealloc-align parameter of preallocate filter "
"is not aligned to %llu", BDRV_SECTOR_SIZE);
return false;
}
if (!QEMU_IS_ALIGNED(dest->prealloc_align,
child_bs->bl.request_alignment)) {
error_setg(errp, "prealloc-align parameter of preallocate filter "
"is not aligned to underlying node request alignment "
"(%" PRIi32 ")", child_bs->bl.request_alignment);
return false;
}
return true;
}
static int preallocate_open(BlockDriverState *bs, QDict *options, int flags,
Error **errp)
{
BDRVPreallocateState *s = bs->opaque;
/*
* s->data_end and friends should be initialized on permission update.
* For this to work, mark them invalid.
*/
s->file_end = s->zero_start = s->data_end = -EINVAL;
bs->file = bdrv_open_child(NULL, options, "file", bs, &child_of_bds,
BDRV_CHILD_FILTERED | BDRV_CHILD_PRIMARY,
false, errp);
if (!bs->file) {
return -EINVAL;
}
if (!preallocate_absorb_opts(&s->opts, options, bs->file->bs, errp)) {
return -EINVAL;
}
bs->supported_write_flags = BDRV_REQ_WRITE_UNCHANGED |
(BDRV_REQ_FUA & bs->file->bs->supported_write_flags);
bs->supported_zero_flags = BDRV_REQ_WRITE_UNCHANGED |
((BDRV_REQ_FUA | BDRV_REQ_MAY_UNMAP | BDRV_REQ_NO_FALLBACK) &
bs->file->bs->supported_zero_flags);
return 0;
}
static void preallocate_close(BlockDriverState *bs)
{
int ret;
BDRVPreallocateState *s = bs->opaque;
if (s->data_end < 0) {
return;
}
if (s->file_end < 0) {
s->file_end = bdrv_getlength(bs->file->bs);
if (s->file_end < 0) {
return;
}
}
if (s->data_end < s->file_end) {
ret = bdrv_truncate(bs->file, s->data_end, true, PREALLOC_MODE_OFF, 0,
NULL);
s->file_end = ret < 0 ? ret : s->data_end;
}
}
/*
* Handle reopen.
*
* We must implement reopen handlers, otherwise reopen just doesn't work. Handle
* new options and don't care about preallocation state, as it is handled in
* set/check permission handlers.
*/
static int preallocate_reopen_prepare(BDRVReopenState *reopen_state,
BlockReopenQueue *queue, Error **errp)
{
PreallocateOpts *opts = g_new0(PreallocateOpts, 1);
if (!preallocate_absorb_opts(opts, reopen_state->options,
reopen_state->bs->file->bs, errp)) {
g_free(opts);
return -EINVAL;
}
reopen_state->opaque = opts;
return 0;
}
static void preallocate_reopen_commit(BDRVReopenState *state)
{
BDRVPreallocateState *s = state->bs->opaque;
s->opts = *(PreallocateOpts *)state->opaque;
g_free(state->opaque);
state->opaque = NULL;
}
static void preallocate_reopen_abort(BDRVReopenState *state)
{
g_free(state->opaque);
state->opaque = NULL;
}
static coroutine_fn int preallocate_co_preadv_part(
BlockDriverState *bs, uint64_t offset, uint64_t bytes,
QEMUIOVector *qiov, size_t qiov_offset, int flags)
{
return bdrv_co_preadv_part(bs->file, offset, bytes, qiov, qiov_offset,
flags);
}
static int coroutine_fn preallocate_co_pdiscard(BlockDriverState *bs,
int64_t offset, int bytes)
{
return bdrv_co_pdiscard(bs->file, offset, bytes);
}
static bool can_write_resize(uint64_t perm)
{
return (perm & BLK_PERM_WRITE) && (perm & BLK_PERM_RESIZE);
}
static bool has_prealloc_perms(BlockDriverState *bs)
{
BDRVPreallocateState *s = bs->opaque;
if (can_write_resize(bs->file->perm)) {
assert(!(bs->file->shared_perm & BLK_PERM_WRITE));
assert(!(bs->file->shared_perm & BLK_PERM_RESIZE));
return true;
}
assert(s->data_end < 0);
assert(s->zero_start < 0);
assert(s->file_end < 0);
return false;
}
/*
* Call on each write. Returns true if @want_merge_zero is true and the region
* [offset, offset + bytes) is zeroed (as a result of this call or earlier
* preallocation).
*
* want_merge_zero is used to merge write-zero request with preallocation in
* one bdrv_co_pwrite_zeroes() call.
*/
static bool coroutine_fn handle_write(BlockDriverState *bs, int64_t offset,
int64_t bytes, bool want_merge_zero)
{
BDRVPreallocateState *s = bs->opaque;
int64_t end = offset + bytes;
int64_t prealloc_start, prealloc_end;
int ret;
if (!has_prealloc_perms(bs)) {
/* We don't have the state, nor should we try to recover it */
return false;
}
if (s->data_end < 0) {
s->data_end = bdrv_getlength(bs->file->bs);
if (s->data_end < 0) {
return false;
}
if (s->file_end < 0) {
s->file_end = s->data_end;
}
}
if (end <= s->data_end) {
return false;
}
/* We have valid s->data_end, and request writes beyond it. */
s->data_end = end;
if (s->zero_start < 0 || !want_merge_zero) {
s->zero_start = end;
}
if (s->file_end < 0) {
s->file_end = bdrv_getlength(bs->file->bs);
if (s->file_end < 0) {
return false;
}
}
/* Now s->data_end, s->zero_start and s->file_end are valid. */
if (end <= s->file_end) {
/* No preallocation needed. */
return want_merge_zero && offset >= s->zero_start;
}
/* Now we want new preallocation, as request writes beyond s->file_end. */
prealloc_start = want_merge_zero ? MIN(offset, s->file_end) : s->file_end;
prealloc_end = QEMU_ALIGN_UP(end + s->opts.prealloc_size,
s->opts.prealloc_align);
ret = bdrv_co_pwrite_zeroes(
bs->file, prealloc_start, prealloc_end - prealloc_start,
BDRV_REQ_NO_FALLBACK | BDRV_REQ_SERIALISING | BDRV_REQ_NO_WAIT);
if (ret < 0) {
s->file_end = ret;
return false;
}
s->file_end = prealloc_end;
return want_merge_zero;
}
static int coroutine_fn preallocate_co_pwrite_zeroes(BlockDriverState *bs,
int64_t offset, int bytes, BdrvRequestFlags flags)
{
bool want_merge_zero =
!(flags & ~(BDRV_REQ_ZERO_WRITE | BDRV_REQ_NO_FALLBACK));
if (handle_write(bs, offset, bytes, want_merge_zero)) {
return 0;
}
return bdrv_co_pwrite_zeroes(bs->file, offset, bytes, flags);
}
static coroutine_fn int preallocate_co_pwritev_part(BlockDriverState *bs,
uint64_t offset,
uint64_t bytes,
QEMUIOVector *qiov,
size_t qiov_offset,
int flags)
{
handle_write(bs, offset, bytes, false);
return bdrv_co_pwritev_part(bs->file, offset, bytes, qiov, qiov_offset,
flags);
}
static int coroutine_fn
preallocate_co_truncate(BlockDriverState *bs, int64_t offset,
bool exact, PreallocMode prealloc,
BdrvRequestFlags flags, Error **errp)
{
ERRP_GUARD();
BDRVPreallocateState *s = bs->opaque;
int ret;
if (s->data_end >= 0 && offset > s->data_end) {
if (s->file_end < 0) {
s->file_end = bdrv_getlength(bs->file->bs);
if (s->file_end < 0) {
error_setg(errp, "failed to get file length");
return s->file_end;
}
}
if (prealloc == PREALLOC_MODE_FALLOC) {
/*
* If offset <= s->file_end, the task is already done, just
* update s->data_end, to move part of "filter preallocation"
* to "preallocation requested by user".
* Otherwise just proceed to preallocate missing part.
*/
if (offset <= s->file_end) {
s->data_end = offset;
return 0;
}
} else {
/*
* We have to drop our preallocation, to
* - avoid "Cannot use preallocation for shrinking files" in
* case of offset < file_end
* - give PREALLOC_MODE_OFF a chance to keep small disk
* usage
* - give PREALLOC_MODE_FULL a chance to actually write the
* whole region as user expects
*/
if (s->file_end > s->data_end) {
ret = bdrv_co_truncate(bs->file, s->data_end, true,
PREALLOC_MODE_OFF, 0, errp);
if (ret < 0) {
s->file_end = ret;
error_prepend(errp, "preallocate-filter: failed to drop "
"write-zero preallocation: ");
return ret;
}
s->file_end = s->data_end;
}
}
s->data_end = offset;
}
ret = bdrv_co_truncate(bs->file, offset, exact, prealloc, flags, errp);
if (ret < 0) {
s->file_end = s->zero_start = s->data_end = ret;
return ret;
}
if (has_prealloc_perms(bs)) {
s->file_end = s->zero_start = s->data_end = offset;
}
return 0;
}
static int coroutine_fn preallocate_co_flush(BlockDriverState *bs)
{
return bdrv_co_flush(bs->file->bs);
}
static int64_t preallocate_getlength(BlockDriverState *bs)
{
int64_t ret;
BDRVPreallocateState *s = bs->opaque;
if (s->data_end >= 0) {
return s->data_end;
}
ret = bdrv_getlength(bs->file->bs);
if (has_prealloc_perms(bs)) {
s->file_end = s->zero_start = s->data_end = ret;
}
return ret;
}
static int preallocate_check_perm(BlockDriverState *bs,
uint64_t perm, uint64_t shared, Error **errp)
{
BDRVPreallocateState *s = bs->opaque;
if (s->data_end >= 0 && !can_write_resize(perm)) {
/*
* Lose permissions.
* We should truncate in check_perm, as in set_perm bs->file->perm will
* be already changed, and we should not violate it.
*/
if (s->file_end < 0) {
s->file_end = bdrv_getlength(bs->file->bs);
if (s->file_end < 0) {
error_setg(errp, "Failed to get file length");
return s->file_end;
}
}
if (s->data_end < s->file_end) {
int ret = bdrv_truncate(bs->file, s->data_end, true,
PREALLOC_MODE_OFF, 0, NULL);
if (ret < 0) {
error_setg(errp, "Failed to drop preallocation");
s->file_end = ret;
return ret;
}
s->file_end = s->data_end;
}
}
return 0;
}
static void preallocate_set_perm(BlockDriverState *bs,
uint64_t perm, uint64_t shared)
{
BDRVPreallocateState *s = bs->opaque;
if (can_write_resize(perm)) {
if (s->data_end < 0) {
s->data_end = s->file_end = s->zero_start =
bdrv_getlength(bs->file->bs);
}
} else {
/*
* We are dropping our permissions and also allowing shared
* permissions (see preallocate_child_perm); anyone will be able to
* change the child, so mark all states invalid. We'll regain control if
* we get good permissions back.
*/
s->data_end = s->file_end = s->zero_start = -EINVAL;
}
}
static void preallocate_child_perm(BlockDriverState *bs, BdrvChild *c,
BdrvChildRole role, BlockReopenQueue *reopen_queue,
uint64_t perm, uint64_t shared, uint64_t *nperm, uint64_t *nshared)
{
bdrv_default_perms(bs, c, role, reopen_queue, perm, shared, nperm, nshared);
if (can_write_resize(perm)) {
/* This should come by default, but let's enforce: */
*nperm |= BLK_PERM_WRITE | BLK_PERM_RESIZE;
/*
* Don't share, to keep our states s->file_end, s->data_end and
* s->zero_start valid.
*/
*nshared &= ~(BLK_PERM_WRITE | BLK_PERM_RESIZE);
}
}
BlockDriver bdrv_preallocate_filter = {
.format_name = "preallocate",
.instance_size = sizeof(BDRVPreallocateState),
.bdrv_getlength = preallocate_getlength,
.bdrv_open = preallocate_open,
.bdrv_close = preallocate_close,
.bdrv_reopen_prepare = preallocate_reopen_prepare,
.bdrv_reopen_commit = preallocate_reopen_commit,
.bdrv_reopen_abort = preallocate_reopen_abort,
.bdrv_co_preadv_part = preallocate_co_preadv_part,
.bdrv_co_pwritev_part = preallocate_co_pwritev_part,
.bdrv_co_pwrite_zeroes = preallocate_co_pwrite_zeroes,
.bdrv_co_pdiscard = preallocate_co_pdiscard,
.bdrv_co_flush = preallocate_co_flush,
.bdrv_co_truncate = preallocate_co_truncate,
.bdrv_check_perm = preallocate_check_perm,
.bdrv_set_perm = preallocate_set_perm,
.bdrv_child_perm = preallocate_child_perm,
.has_variable_length = true,
.is_filter = true,
};
static void bdrv_preallocate_init(void)
{
bdrv_register(&bdrv_preallocate_filter);
}
block_init(bdrv_preallocate_init);

block/quorum.c

@ -18,6 +18,7 @@
#include "qemu/module.h"
#include "qemu/option.h"
#include "block/block_int.h"
#include "block/coroutines.h"
#include "block/qdict.h"
#include "qapi/error.h"
#include "qapi/qapi-events-block.h"
@ -691,8 +692,13 @@ static void write_quorum_entry(void *opaque)
QuorumChildRequest *sacb = &acb->qcrs[i];
sacb->bs = s->children[i]->bs;
sacb->ret = bdrv_co_pwritev(s->children[i], acb->offset, acb->bytes,
acb->qiov, acb->flags);
if (acb->flags & BDRV_REQ_ZERO_WRITE) {
sacb->ret = bdrv_co_pwrite_zeroes(s->children[i], acb->offset,
acb->bytes, acb->flags);
} else {
sacb->ret = bdrv_co_pwritev(s->children[i], acb->offset, acb->bytes,
acb->qiov, acb->flags);
}
if (sacb->ret == 0) {
acb->success_count++;
} else {
@ -738,6 +744,14 @@ static int quorum_co_pwritev(BlockDriverState *bs, uint64_t offset,
return ret;
}
static int quorum_co_pwrite_zeroes(BlockDriverState *bs, int64_t offset,
int bytes, BdrvRequestFlags flags)
{
return quorum_co_pwritev(bs, offset, bytes, NULL,
flags | BDRV_REQ_ZERO_WRITE);
}
static int64_t quorum_getlength(BlockDriverState *bs)
{
BDRVQuorumState *s = bs->opaque;
@ -896,6 +910,21 @@ static QemuOptsList quorum_runtime_opts = {
},
};
static void quorum_refresh_flags(BlockDriverState *bs)
{
BDRVQuorumState *s = bs->opaque;
int i;
bs->supported_zero_flags =
BDRV_REQ_FUA | BDRV_REQ_MAY_UNMAP | BDRV_REQ_NO_FALLBACK;
for (i = 0; i < s->num_children; i++) {
bs->supported_zero_flags &= s->children[i]->bs->supported_zero_flags;
}
bs->supported_zero_flags |= BDRV_REQ_WRITE_UNCHANGED;
}
static int quorum_open(BlockDriverState *bs, QDict *options, int flags,
Error **errp)
{
@ -990,6 +1019,7 @@ static int quorum_open(BlockDriverState *bs, QDict *options, int flags,
s->next_child_index = s->num_children;
bs->supported_write_flags = BDRV_REQ_WRITE_UNCHANGED;
quorum_refresh_flags(bs);
g_free(opened);
goto exit;
@ -1061,6 +1091,7 @@ static void quorum_add_child(BlockDriverState *bs, BlockDriverState *child_bs,
}
s->children = g_renew(BdrvChild *, s->children, s->num_children + 1);
s->children[s->num_children++] = child;
quorum_refresh_flags(bs);
out:
bdrv_drained_end(bs);
@ -1105,6 +1136,7 @@ static void quorum_del_child(BlockDriverState *bs, BdrvChild *child,
s->children = g_renew(BdrvChild *, s->children, --s->num_children);
bdrv_unref_child(bs, child);
quorum_refresh_flags(bs);
bdrv_drained_end(bs);
}
@ -1179,6 +1211,56 @@ static void quorum_child_perm(BlockDriverState *bs, BdrvChild *c,
| DEFAULT_PERM_UNCHANGED;
}
/*
* Each one of the children can report different status flags even
* when they contain the same data, so what this function does is
* return BDRV_BLOCK_ZERO if *all* children agree that a certain
* region contains zeroes, and BDRV_BLOCK_DATA otherwise.
*/
static int coroutine_fn quorum_co_block_status(BlockDriverState *bs,
bool want_zero,
int64_t offset, int64_t count,
int64_t *pnum, int64_t *map,
BlockDriverState **file)
{
BDRVQuorumState *s = bs->opaque;
int i, ret;
int64_t pnum_zero = count;
int64_t pnum_data = 0;
for (i = 0; i < s->num_children; i++) {
int64_t bytes;
ret = bdrv_co_common_block_status_above(s->children[i]->bs, NULL, false,
want_zero, offset, count,
&bytes, NULL, NULL, NULL);
if (ret < 0) {
quorum_report_bad(QUORUM_OP_TYPE_READ, offset, count,
s->children[i]->bs->node_name, ret);
pnum_data = count;
break;
}
/*
* Even if all children agree about whether there are zeroes
* or not at @offset they might disagree on the size, so use
* the smallest when reporting BDRV_BLOCK_ZERO and the largest
* when reporting BDRV_BLOCK_DATA.
*/
if (ret & BDRV_BLOCK_ZERO) {
pnum_zero = MIN(pnum_zero, bytes);
} else {
pnum_data = MAX(pnum_data, bytes);
}
}
if (pnum_data) {
*pnum = pnum_data;
return BDRV_BLOCK_DATA;
} else {
*pnum = pnum_zero;
return BDRV_BLOCK_ZERO;
}
}
static const char *const quorum_strong_runtime_opts[] = {
QUORUM_OPT_VOTE_THRESHOLD,
QUORUM_OPT_BLKVERIFY,
@ -1197,6 +1279,7 @@ static BlockDriver bdrv_quorum = {
.bdrv_close = quorum_close,
.bdrv_gather_child_options = quorum_gather_child_options,
.bdrv_dirname = quorum_dirname,
.bdrv_co_block_status = quorum_co_block_status,
.bdrv_co_flush_to_disk = quorum_co_flush,
@ -1204,6 +1287,7 @@ static BlockDriver bdrv_quorum = {
.bdrv_co_preadv = quorum_co_preadv,
.bdrv_co_pwritev = quorum_co_pwritev,
.bdrv_co_pwrite_zeroes = quorum_co_pwrite_zeroes,
.bdrv_add_child = quorum_add_child,
.bdrv_del_child = quorum_del_child,


@ -952,3 +952,29 @@ on host and see if there are locks held by the QEMU process on the image file.
More than one byte could be locked by the QEMU instance, each byte of which
reflects a particular permission that is acquired or protected by the running
block driver.
Filter drivers
~~~~~~~~~~~~~~
QEMU supports several filter drivers, which don't store any data but perform
additional tasks by hooking I/O requests.
.. program:: filter-drivers
.. option:: preallocate
The preallocate filter driver is intended to be inserted between format
and protocol nodes and preallocates some additional space
(expanding the protocol file) when writing past the file's end. This can be
useful for file systems with slow allocation.
Supported options:
.. program:: preallocate
.. option:: prealloc-align
On preallocation, align the file length to this value (in bytes), default 1M.
.. program:: preallocate
.. option:: prealloc-size
How much to preallocate (in bytes), default 128M.
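For illustration only (not part of the documentation above): scripts/simplebench/bench_prealloc.py, added later in this series, inserts the filter between the qcow2 format node and the file node via --image-opts. A minimal Python sketch of that invocation, assuming qemu-img is on PATH and /tmp/test.qcow2 already exists; the image path and the bench parameters (-c/-d/-s) are illustrative assumptions:

import subprocess

# Sketch modelled on scripts/simplebench/bench_prealloc.py from this series.
image = '/tmp/test.qcow2'
subprocess.run(
    ['qemu-img', 'bench', '-c', '1000', '-d', '64', '-s', '16k',
     '-t', 'none', '-n', '-w', '--image-opts',
     'driver=qcow2,file.driver=preallocate,file.file.driver=file,'
     f'file.file.filename={image}'],
    check=True)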

include/block/block.h

@ -63,16 +63,7 @@ typedef enum {
* content. */
BDRV_REQ_WRITE_UNCHANGED = 0x40,
/*
* BDRV_REQ_SERIALISING forces request serialisation for writes.
* It is used to ensure that writes to the backing file of a backup process
* target cannot race with a read of the backup target that defers to the
* backing file.
*
* Note, that BDRV_REQ_SERIALISING is _not_ opposite in meaning to
* BDRV_REQ_NO_SERIALISING. A more descriptive name for the latter might be
* _DO_NOT_WAIT_FOR_SERIALISING, except that is too long.
*/
/* Forces request serialisation. Use only with write requests. */
BDRV_REQ_SERIALISING = 0x80,
/* Execute the request only if the operation can be offloaded or otherwise
@ -86,8 +77,15 @@ typedef enum {
* written to qiov parameter which may be NULL.
*/
BDRV_REQ_PREFETCH = 0x200,
/*
* If we need to wait for other requests, just fail immediately. Used
* only together with BDRV_REQ_SERIALISING.
*/
BDRV_REQ_NO_WAIT = 0x400,
/* Mask of valid flags */
BDRV_REQ_MASK = 0x3ff,
BDRV_REQ_MASK = 0x7ff,
} BdrvRequestFlags;
typedef struct BlockSizes {

include/block/block_int.h

@ -1060,7 +1060,8 @@ extern unsigned int bdrv_drain_all_count;
void bdrv_apply_subtree_drain(BdrvChild *child, BlockDriverState *new_parent);
void bdrv_unapply_subtree_drain(BdrvChild *child, BlockDriverState *old_parent);
bool coroutine_fn bdrv_mark_request_serialising(BdrvTrackedRequest *req, uint64_t align);
bool coroutine_fn bdrv_make_request_serialising(BdrvTrackedRequest *req,
uint64_t align);
BdrvTrackedRequest *coroutine_fn bdrv_co_get_self_request(BlockDriverState *bs);
int get_tmp_filename(char *filename, int size);

qapi/block-core.json

@ -2829,7 +2829,7 @@
'cloop', 'compress', 'copy-on-read', 'dmg', 'file', 'ftp', 'ftps',
'gluster', 'host_cdrom', 'host_device', 'http', 'https', 'iscsi',
'luks', 'nbd', 'nfs', 'null-aio', 'null-co', 'nvme', 'parallels',
'qcow', 'qcow2', 'qed', 'quorum', 'raw', 'rbd',
'preallocate', 'qcow', 'qcow2', 'qed', 'quorum', 'raw', 'rbd',
{ 'name': 'replication', 'if': 'defined(CONFIG_REPLICATION)' },
'sheepdog',
'ssh', 'throttle', 'vdi', 'vhdx', 'vmdk', 'vpc', 'vvfat' ] }
@ -3098,6 +3098,23 @@
'data': { 'aes': 'QCryptoBlockOptionsQCow',
'luks': 'QCryptoBlockOptionsLUKS'} }
##
# @BlockdevOptionsPreallocate:
#
# Filter driver intended to be inserted between format and protocol nodes,
# doing preallocation in the protocol node on write.
#
# @prealloc-align: on preallocation, align file length to this number,
# default 1048576 (1M)
#
# @prealloc-size: how much to preallocate, default 134217728 (128M)
#
# Since: 6.0
##
{ 'struct': 'BlockdevOptionsPreallocate',
'base': 'BlockdevOptionsGenericFormat',
'data': { '*prealloc-align': 'int', '*prealloc-size': 'int' } }
##
# @BlockdevOptionsQcow2:
#
@ -4006,6 +4023,7 @@
'null-co': 'BlockdevOptionsNull',
'nvme': 'BlockdevOptionsNVMe',
'parallels': 'BlockdevOptionsGenericFormat',
'preallocate':'BlockdevOptionsPreallocate',
'qcow2': 'BlockdevOptionsQcow2',
'qcow': 'BlockdevOptionsQcow',
'qed': 'BlockdevOptionsGenericCOWFormat',
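As a purely illustrative reading of the schema addition above, a 'blockdev-add' for a qcow2 node whose file child is filtered by 'preallocate' could take arguments shaped like the following Python dict; the node names, filename and option values are assumptions, with prealloc-align/prealloc-size restating the documented defaults:

# Hypothetical QMP 'blockdev-add' arguments; names and values are examples.
blockdev_add_args = {
    'driver': 'qcow2',
    'node-name': 'fmt0',
    'file': {
        'driver': 'preallocate',
        'node-name': 'prealloc0',
        'prealloc-align': 1048576,    # documented default: 1M
        'prealloc-size': 134217728,   # documented default: 128M
        'file': {
            'driver': 'file',
            'filename': '/tmp/test.qcow2',
        },
    },
}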

qemu-io-cmds.c

@ -1698,13 +1698,42 @@ static const cmdinfo_t flush_cmd = {
.oneline = "flush all in-core file state to disk",
};
static int truncate_f(BlockBackend *blk, int argc, char **argv);
static const cmdinfo_t truncate_cmd = {
.name = "truncate",
.altname = "t",
.cfunc = truncate_f,
.perm = BLK_PERM_WRITE | BLK_PERM_RESIZE,
.argmin = 1,
.argmax = 3,
.args = "[-m prealloc_mode] off",
.oneline = "truncates the current file at the given offset",
};
static int truncate_f(BlockBackend *blk, int argc, char **argv)
{
Error *local_err = NULL;
int64_t offset;
int ret;
int c, ret;
PreallocMode prealloc = PREALLOC_MODE_OFF;
offset = cvtnum(argv[1]);
while ((c = getopt(argc, argv, "m:")) != -1) {
switch (c) {
case 'm':
prealloc = qapi_enum_parse(&PreallocMode_lookup, optarg,
PREALLOC_MODE__MAX, NULL);
if (prealloc == PREALLOC_MODE__MAX) {
error_report("Invalid preallocation mode '%s'", optarg);
return -EINVAL;
}
break;
default:
qemuio_command_usage(&truncate_cmd);
return -EINVAL;
}
}
offset = cvtnum(argv[optind]);
if (offset < 0) {
print_cvtnum_err(offset, argv[1]);
return offset;
@ -1715,7 +1744,7 @@ static int truncate_f(BlockBackend *blk, int argc, char **argv)
* exact=true. It is better to err on the "emit more errors" side
* than to be overly permissive.
*/
ret = blk_truncate(blk, offset, false, PREALLOC_MODE_OFF, 0, &local_err);
ret = blk_truncate(blk, offset, false, prealloc, 0, &local_err);
if (ret < 0) {
error_report_err(local_err);
return ret;
@ -1724,17 +1753,6 @@ static int truncate_f(BlockBackend *blk, int argc, char **argv)
return 0;
}
static const cmdinfo_t truncate_cmd = {
.name = "truncate",
.altname = "t",
.cfunc = truncate_f,
.perm = BLK_PERM_WRITE | BLK_PERM_RESIZE,
.argmin = 1,
.argmax = 1,
.args = "off",
.oneline = "truncates the current file at the given offset",
};
static int length_f(BlockBackend *blk, int argc, char **argv)
{
int64_t size;
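A hedged usage sketch for the new -m option of the qemu-io truncate command (the image path is an assumption; the argument syntax follows the cmdinfo_t definition added above):

import subprocess

# Grow an image to 1G, asking the block layer to preallocate with 'falloc'.
subprocess.run(['qemu-io', '-c', 'truncate -m falloc 1G', '/tmp/test.raw'],
               check=True)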

scripts/simplebench/bench-example.py

@ -19,6 +19,7 @@
#
import simplebench
from results_to_text import results_to_text
from bench_block_job import bench_block_copy, drv_file, drv_nbd
@ -77,4 +78,4 @@ test_envs = [
]
result = simplebench.bench(bench_func, test_envs, test_cases, count=3)
print(simplebench.ascii(result))
print(results_to_text(result))

scripts/simplebench/bench_prealloc.py (new file)

@ -0,0 +1,132 @@
#!/usr/bin/env python3
#
# Benchmark preallocate filter
#
# Copyright (c) 2020 Virtuozzo International GmbH.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import sys
import os
import subprocess
import re
import json
import simplebench
from results_to_text import results_to_text
def qemu_img_bench(args):
p = subprocess.run(args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
universal_newlines=True)
if p.returncode == 0:
try:
m = re.search(r'Run completed in (\d+.\d+) seconds.', p.stdout)
return {'seconds': float(m.group(1))}
except Exception:
return {'error': f'failed to parse qemu-img output: {p.stdout}'}
else:
return {'error': f'qemu-img failed: {p.returncode}: {p.stdout}'}
def bench_func(env, case):
fname = f"{case['dir']}/prealloc-test.qcow2"
try:
os.remove(fname)
except OSError:
pass
subprocess.run([env['qemu-img-binary'], 'create', '-f', 'qcow2', fname,
'16G'], stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL, check=True)
args = [env['qemu-img-binary'], 'bench', '-c', str(case['count']),
'-d', '64', '-s', case['block-size'], '-t', 'none', '-n', '-w']
if env['prealloc']:
args += ['--image-opts',
'driver=qcow2,file.driver=preallocate,file.file.driver=file,'
f'file.file.filename={fname}']
else:
args += ['-f', 'qcow2', fname]
return qemu_img_bench(args)
def auto_count_bench_func(env, case):
case['count'] = 100
while True:
res = bench_func(env, case)
if 'error' in res:
return res
if res['seconds'] >= 1:
break
case['count'] *= 10
if res['seconds'] < 5:
case['count'] = round(case['count'] * 5 / res['seconds'])
res = bench_func(env, case)
if 'error' in res:
return res
res['iops'] = case['count'] / res['seconds']
return res
if __name__ == '__main__':
if len(sys.argv) < 2:
print(f'USAGE: {sys.argv[0]} <qemu-img binary> '
'DISK_NAME:DIR_PATH ...')
exit(1)
qemu_img = sys.argv[1]
envs = [
{
'id': 'no-prealloc',
'qemu-img-binary': qemu_img,
'prealloc': False
},
{
'id': 'prealloc',
'qemu-img-binary': qemu_img,
'prealloc': True
}
]
aligned_cases = []
unaligned_cases = []
for disk in sys.argv[2:]:
name, path = disk.split(':')
aligned_cases.append({
'id': f'{name}, aligned sequential 16k',
'block-size': '16k',
'dir': path
})
unaligned_cases.append({
'id': f'{name}, unaligned sequential 64k',
'block-size': '16k',
'dir': path
})
result = simplebench.bench(auto_count_bench_func, envs,
aligned_cases + unaligned_cases, count=5)
print(results_to_text(result))
with open('results.json', 'w') as f:
json.dump(result, f, indent=4)


@ -26,6 +26,7 @@ import sys
import os
import subprocess
import simplebench
from results_to_text import results_to_text
def bench_func(env, case):
@ -167,4 +168,4 @@ if __name__ == '__main__':
result = simplebench.bench(bench_func, test_envs, test_cases, count=3,
initial_run=False)
print(simplebench.ascii(result))
print(results_to_text(result))

scripts/simplebench/results_to_text.py (new file)

@ -0,0 +1,126 @@
#!/usr/bin/env python3
#
# Simple benchmarking framework
#
# Copyright (c) 2019 Virtuozzo International GmbH.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import math
import tabulate
# We want leading whitespace for difference row cells (see below)
tabulate.PRESERVE_WHITESPACE = True
def format_value(x, stdev):
stdev_pr = stdev / x * 100
if stdev_pr < 1.5:
# don't care too much
return f'{x:.2g}'
else:
return f'{x:.2g} ± {math.ceil(stdev_pr)}%'
def result_to_text(result):
"""Return text representation of bench_one() returned dict."""
if 'average' in result:
s = format_value(result['average'], result['stdev'])
if 'n-failed' in result:
s += '\n({} failed)'.format(result['n-failed'])
return s
else:
return 'FAILED'
def results_dimension(results):
dim = None
for case in results['cases']:
for env in results['envs']:
res = results['tab'][case['id']][env['id']]
if dim is None:
dim = res['dimension']
else:
assert dim == res['dimension']
assert dim in ('iops', 'seconds')
return dim
def results_to_text(results):
"""Return text representation of bench() returned dict."""
n_columns = len(results['envs'])
named_columns = n_columns > 2
dim = results_dimension(results)
tab = []
if named_columns:
# Environment columns are named A, B, ...
tab.append([''] + [chr(ord('A') + i) for i in range(n_columns)])
tab.append([''] + [c['id'] for c in results['envs']])
for case in results['cases']:
row = [case['id']]
case_results = results['tab'][case['id']]
for env in results['envs']:
res = case_results[env['id']]
row.append(result_to_text(res))
tab.append(row)
# Add row of difference between columns. For each column starting from
# B we calculate difference with all previous columns.
row = ['', ''] # case name and first column
for i in range(1, n_columns):
cell = ''
env = results['envs'][i]
res = case_results[env['id']]
if 'average' not in res:
# Failed result
row.append(cell)
continue
for j in range(0, i):
env_j = results['envs'][j]
res_j = case_results[env_j['id']]
cell += ' '
if 'average' not in res_j:
# Failed result
cell += '--'
continue
col_j = tab[0][j + 1] if named_columns else ''
diff_pr = round((res['average'] - res_j['average']) /
res_j['average'] * 100)
cell += f' {col_j}{diff_pr:+}%'
row.append(cell)
tab.append(row)
return f'All results are in {dim}\n\n' + tabulate.tabulate(tab)
if __name__ == '__main__':
import sys
import json
if len(sys.argv) < 2:
print(f'USAGE: {sys.argv[0]} results.json')
exit(1)
with open(sys.argv[1]) as f:
print(results_to_text(json.load(f)))

scripts/simplebench/simplebench.py

@ -18,15 +18,20 @@
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import statistics
def bench_one(test_func, test_env, test_case, count=5, initial_run=True):
"""Benchmark one test-case
test_func -- benchmarking function with prototype
test_func(env, case), which takes test_env and test_case
arguments and returns {'seconds': int} (which is benchmark
result) on success and {'error': str} on error. Returned
dict may contain any other additional fields.
arguments and on success returns dict with 'seconds' or
'iops' (or both) fields, specifying the benchmark result.
If both 'iops' and 'seconds' provided, the 'iops' is
considered the main, and 'seconds' is just an additional
info. On failure test_func should return {'error': str}.
Returned dict may contain any other additional fields.
test_env -- test environment - opaque first argument for test_func
test_case -- test case - opaque second argument for test_func
count -- how many times to call test_func, to calculate average
@ -34,9 +39,10 @@ def bench_one(test_func, test_env, test_case, count=5, initial_run=True):
Returns dict with the following fields:
'runs': list of test_func results
'average': average seconds per run (exists only if at least one run
succeeded)
'delta': maximum delta between test_func result and the average
'dimension': dimension of results, may be 'seconds' or 'iops'
'average': average value (iops or seconds) per run (exists only if at
least one run succeeded)
'stdev': standard deviation of results
(exists only if at least one run succeeded)
'n-failed': number of failed runs (exists only if at least one run
failed)
@ -54,29 +60,25 @@ def bench_one(test_func, test_env, test_case, count=5, initial_run=True):
result = {'runs': runs}
successed = [r for r in runs if ('seconds' in r)]
if successed:
avg = sum(r['seconds'] for r in successed) / len(successed)
result['average'] = avg
result['delta'] = max(abs(r['seconds'] - avg) for r in successed)
succeeded = [r for r in runs if ('seconds' in r or 'iops' in r)]
if succeeded:
if 'iops' in succeeded[0]:
assert all('iops' in r for r in succeeded)
dim = 'iops'
else:
assert all('seconds' in r for r in succeeded)
assert all('iops' not in r for r in succeeded)
dim = 'seconds'
result['dimension'] = dim
result['average'] = statistics.mean(r[dim] for r in succeeded)
result['stdev'] = statistics.stdev(r[dim] for r in succeeded)
if len(successed) < count:
result['n-failed'] = count - len(successed)
if len(succeeded) < count:
result['n-failed'] = count - len(succeeded)
return result
def ascii_one(result):
"""Return ASCII representation of bench_one() returned dict."""
if 'average' in result:
s = '{:.2f} +- {:.2f}'.format(result['average'], result['delta'])
if 'n-failed' in result:
s += '\n({} failed)'.format(result['n-failed'])
return s
else:
return 'FAILED'
def bench(test_func, test_envs, test_cases, *args, **vargs):
"""Fill benchmark table
@ -112,17 +114,3 @@ def bench(test_func, test_envs, test_cases, *args, **vargs):
print('Done')
return results
def ascii(results):
"""Return ASCII representation of bench() returned dict."""
from tabulate import tabulate
tab = [[""] + [c['id'] for c in results['envs']]]
for case in results['cases']:
row = [case['id']]
for env in results['envs']:
row.append(ascii_one(results['tab'][case['id']][env['id']]))
tab.append(row)
return tabulate(tab)
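
A minimal sketch of a test_func that satisfies the bench_one() contract documented in the docstring above, assuming simplebench.py is importable as 'simplebench'; the function body, ids and numbers are illustrative assumptions only:

# Illustrative sketch, not part of the diff above.
import time
import simplebench

def test_func(env, case):
    """Dummy benchmark: sleep for case['delay'] seconds and report both
    'seconds' and 'iops', so 'iops' becomes the main dimension."""
    start = time.time()
    try:
        time.sleep(case['delay'])
    except Exception as e:
        return {'error': str(e)}
    seconds = time.time() - start
    return {'seconds': seconds, 'iops': case['ops'] / seconds}

env = {'id': 'local'}
case = {'id': 'sleep-0.1', 'delay': 0.1, 'ops': 1000}
result = simplebench.bench_one(test_func, env, case, count=5)
# result contains 'runs', 'dimension' ('iops' here), 'average' and 'stdev',
# plus 'n-failed' if any run returned {'error': ...}.
print(result['dimension'], result['average'], result['stdev'])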


@ -12,56 +12,135 @@ Formatting 'TEST_DIR/t.IMGFMT.2', fmt=IMGFMT size=134217728
=== Create a single snapshot on virtio0 ===
{ 'execute': 'blockdev-snapshot-sync', 'arguments': { 'device': 'virtio0', 'snapshot-file':'TEST_DIR/1-snapshot-v0.IMGFMT', 'format': 'IMGFMT' } }
{ 'execute': 'blockdev-snapshot-sync',
'arguments': { 'device': 'virtio0',
'snapshot-file':'TEST_DIR/1-snapshot-v0.IMGFMT',
'format': 'IMGFMT' } }
Formatting 'TEST_DIR/1-snapshot-v0.qcow2', fmt=qcow2 cluster_size=65536 extended_l2=off compression_type=zlib size=134217728 backing_file=TEST_DIR/t.qcow2.1 backing_fmt=qcow2 lazy_refcounts=off refcount_bits=16
{"return": {}}
=== Invalid command - missing device and nodename ===
{ 'execute': 'blockdev-snapshot-sync', 'arguments': { 'snapshot-file':'TEST_DIR/1-snapshot-v0.IMGFMT', 'format': 'IMGFMT' } }
{ 'execute': 'blockdev-snapshot-sync',
'arguments': { 'snapshot-file':'TEST_DIR/1-snapshot-v0.IMGFMT',
'format': 'IMGFMT' } }
{"error": {"class": "GenericError", "desc": "Cannot find device= nor node_name="}}
=== Invalid command - missing snapshot-file ===
{ 'execute': 'blockdev-snapshot-sync', 'arguments': { 'device': 'virtio0', 'format': 'IMGFMT' } }
{ 'execute': 'blockdev-snapshot-sync',
'arguments': { 'device': 'virtio0',
'format': 'IMGFMT' } }
{"error": {"class": "GenericError", "desc": "Parameter 'snapshot-file' is missing"}}
=== Create several transactional group snapshots ===
{ 'execute': 'transaction', 'arguments': {'actions': [ { 'type': 'blockdev-snapshot-sync', 'data' : { 'device': 'virtio0', 'snapshot-file': 'TEST_DIR/2-snapshot-v0.IMGFMT' } }, { 'type': 'blockdev-snapshot-sync', 'data' : { 'device': 'virtio1', 'snapshot-file': 'TEST_DIR/2-snapshot-v1.IMGFMT' } } ] } }
{ 'execute': 'transaction', 'arguments':
{'actions': [
{ 'type': 'blockdev-snapshot-sync', 'data' :
{ 'device': 'virtio0',
'snapshot-file': 'TEST_DIR/2-snapshot-v0.IMGFMT' } },
{ 'type': 'blockdev-snapshot-sync', 'data' :
{ 'device': 'virtio1',
'snapshot-file': 'TEST_DIR/2-snapshot-v1.IMGFMT' } } ]
} }
Formatting 'TEST_DIR/2-snapshot-v0.qcow2', fmt=qcow2 cluster_size=65536 extended_l2=off compression_type=zlib size=134217728 backing_file=TEST_DIR/1-snapshot-v0.qcow2 backing_fmt=qcow2 lazy_refcounts=off refcount_bits=16
Formatting 'TEST_DIR/2-snapshot-v1.qcow2', fmt=qcow2 cluster_size=65536 extended_l2=off compression_type=zlib size=134217728 backing_file=TEST_DIR/t.qcow2.2 backing_fmt=qcow2 lazy_refcounts=off refcount_bits=16
{"return": {}}
{ 'execute': 'transaction', 'arguments': {'actions': [ { 'type': 'blockdev-snapshot-sync', 'data' : { 'device': 'virtio0', 'snapshot-file': 'TEST_DIR/3-snapshot-v0.IMGFMT' } }, { 'type': 'blockdev-snapshot-sync', 'data' : { 'device': 'virtio1', 'snapshot-file': 'TEST_DIR/3-snapshot-v1.IMGFMT' } } ] } }
{ 'execute': 'transaction', 'arguments':
{'actions': [
{ 'type': 'blockdev-snapshot-sync', 'data' :
{ 'device': 'virtio0',
'snapshot-file': 'TEST_DIR/3-snapshot-v0.IMGFMT' } },
{ 'type': 'blockdev-snapshot-sync', 'data' :
{ 'device': 'virtio1',
'snapshot-file': 'TEST_DIR/3-snapshot-v1.IMGFMT' } } ]
} }
Formatting 'TEST_DIR/3-snapshot-v0.qcow2', fmt=qcow2 cluster_size=65536 extended_l2=off compression_type=zlib size=134217728 backing_file=TEST_DIR/2-snapshot-v0.qcow2 backing_fmt=qcow2 lazy_refcounts=off refcount_bits=16
Formatting 'TEST_DIR/3-snapshot-v1.qcow2', fmt=qcow2 cluster_size=65536 extended_l2=off compression_type=zlib size=134217728 backing_file=TEST_DIR/2-snapshot-v1.qcow2 backing_fmt=qcow2 lazy_refcounts=off refcount_bits=16
{"return": {}}
{ 'execute': 'transaction', 'arguments': {'actions': [ { 'type': 'blockdev-snapshot-sync', 'data' : { 'device': 'virtio0', 'snapshot-file': 'TEST_DIR/4-snapshot-v0.IMGFMT' } }, { 'type': 'blockdev-snapshot-sync', 'data' : { 'device': 'virtio1', 'snapshot-file': 'TEST_DIR/4-snapshot-v1.IMGFMT' } } ] } }
{ 'execute': 'transaction', 'arguments':
{'actions': [
{ 'type': 'blockdev-snapshot-sync', 'data' :
{ 'device': 'virtio0',
'snapshot-file': 'TEST_DIR/4-snapshot-v0.IMGFMT' } },
{ 'type': 'blockdev-snapshot-sync', 'data' :
{ 'device': 'virtio1',
'snapshot-file': 'TEST_DIR/4-snapshot-v1.IMGFMT' } } ]
} }
Formatting 'TEST_DIR/4-snapshot-v0.qcow2', fmt=qcow2 cluster_size=65536 extended_l2=off compression_type=zlib size=134217728 backing_file=TEST_DIR/3-snapshot-v0.qcow2 backing_fmt=qcow2 lazy_refcounts=off refcount_bits=16
Formatting 'TEST_DIR/4-snapshot-v1.qcow2', fmt=qcow2 cluster_size=65536 extended_l2=off compression_type=zlib size=134217728 backing_file=TEST_DIR/3-snapshot-v1.qcow2 backing_fmt=qcow2 lazy_refcounts=off refcount_bits=16
{"return": {}}
{ 'execute': 'transaction', 'arguments': {'actions': [ { 'type': 'blockdev-snapshot-sync', 'data' : { 'device': 'virtio0', 'snapshot-file': 'TEST_DIR/5-snapshot-v0.IMGFMT' } }, { 'type': 'blockdev-snapshot-sync', 'data' : { 'device': 'virtio1', 'snapshot-file': 'TEST_DIR/5-snapshot-v1.IMGFMT' } } ] } }
{ 'execute': 'transaction', 'arguments':
{'actions': [
{ 'type': 'blockdev-snapshot-sync', 'data' :
{ 'device': 'virtio0',
'snapshot-file': 'TEST_DIR/5-snapshot-v0.IMGFMT' } },
{ 'type': 'blockdev-snapshot-sync', 'data' :
{ 'device': 'virtio1',
'snapshot-file': 'TEST_DIR/5-snapshot-v1.IMGFMT' } } ]
} }
Formatting 'TEST_DIR/5-snapshot-v0.qcow2', fmt=qcow2 cluster_size=65536 extended_l2=off compression_type=zlib size=134217728 backing_file=TEST_DIR/4-snapshot-v0.qcow2 backing_fmt=qcow2 lazy_refcounts=off refcount_bits=16
Formatting 'TEST_DIR/5-snapshot-v1.qcow2', fmt=qcow2 cluster_size=65536 extended_l2=off compression_type=zlib size=134217728 backing_file=TEST_DIR/4-snapshot-v1.qcow2 backing_fmt=qcow2 lazy_refcounts=off refcount_bits=16
{"return": {}}
{ 'execute': 'transaction', 'arguments': {'actions': [ { 'type': 'blockdev-snapshot-sync', 'data' : { 'device': 'virtio0', 'snapshot-file': 'TEST_DIR/6-snapshot-v0.IMGFMT' } }, { 'type': 'blockdev-snapshot-sync', 'data' : { 'device': 'virtio1', 'snapshot-file': 'TEST_DIR/6-snapshot-v1.IMGFMT' } } ] } }
{ 'execute': 'transaction', 'arguments':
{'actions': [
{ 'type': 'blockdev-snapshot-sync', 'data' :
{ 'device': 'virtio0',
'snapshot-file': 'TEST_DIR/6-snapshot-v0.IMGFMT' } },
{ 'type': 'blockdev-snapshot-sync', 'data' :
{ 'device': 'virtio1',
'snapshot-file': 'TEST_DIR/6-snapshot-v1.IMGFMT' } } ]
} }
Formatting 'TEST_DIR/6-snapshot-v0.qcow2', fmt=qcow2 cluster_size=65536 extended_l2=off compression_type=zlib size=134217728 backing_file=TEST_DIR/5-snapshot-v0.qcow2 backing_fmt=qcow2 lazy_refcounts=off refcount_bits=16
Formatting 'TEST_DIR/6-snapshot-v1.qcow2', fmt=qcow2 cluster_size=65536 extended_l2=off compression_type=zlib size=134217728 backing_file=TEST_DIR/5-snapshot-v1.qcow2 backing_fmt=qcow2 lazy_refcounts=off refcount_bits=16
{"return": {}}
{ 'execute': 'transaction', 'arguments': {'actions': [ { 'type': 'blockdev-snapshot-sync', 'data' : { 'device': 'virtio0', 'snapshot-file': 'TEST_DIR/7-snapshot-v0.IMGFMT' } }, { 'type': 'blockdev-snapshot-sync', 'data' : { 'device': 'virtio1', 'snapshot-file': 'TEST_DIR/7-snapshot-v1.IMGFMT' } } ] } }
{ 'execute': 'transaction', 'arguments':
{'actions': [
{ 'type': 'blockdev-snapshot-sync', 'data' :
{ 'device': 'virtio0',
'snapshot-file': 'TEST_DIR/7-snapshot-v0.IMGFMT' } },
{ 'type': 'blockdev-snapshot-sync', 'data' :
{ 'device': 'virtio1',
'snapshot-file': 'TEST_DIR/7-snapshot-v1.IMGFMT' } } ]
} }
Formatting 'TEST_DIR/7-snapshot-v0.qcow2', fmt=qcow2 cluster_size=65536 extended_l2=off compression_type=zlib size=134217728 backing_file=TEST_DIR/6-snapshot-v0.qcow2 backing_fmt=qcow2 lazy_refcounts=off refcount_bits=16
Formatting 'TEST_DIR/7-snapshot-v1.qcow2', fmt=qcow2 cluster_size=65536 extended_l2=off compression_type=zlib size=134217728 backing_file=TEST_DIR/6-snapshot-v1.qcow2 backing_fmt=qcow2 lazy_refcounts=off refcount_bits=16
{"return": {}}
{ 'execute': 'transaction', 'arguments': {'actions': [ { 'type': 'blockdev-snapshot-sync', 'data' : { 'device': 'virtio0', 'snapshot-file': 'TEST_DIR/8-snapshot-v0.IMGFMT' } }, { 'type': 'blockdev-snapshot-sync', 'data' : { 'device': 'virtio1', 'snapshot-file': 'TEST_DIR/8-snapshot-v1.IMGFMT' } } ] } }
{ 'execute': 'transaction', 'arguments':
{'actions': [
{ 'type': 'blockdev-snapshot-sync', 'data' :
{ 'device': 'virtio0',
'snapshot-file': 'TEST_DIR/8-snapshot-v0.IMGFMT' } },
{ 'type': 'blockdev-snapshot-sync', 'data' :
{ 'device': 'virtio1',
'snapshot-file': 'TEST_DIR/8-snapshot-v1.IMGFMT' } } ]
} }
Formatting 'TEST_DIR/8-snapshot-v0.qcow2', fmt=qcow2 cluster_size=65536 extended_l2=off compression_type=zlib size=134217728 backing_file=TEST_DIR/7-snapshot-v0.qcow2 backing_fmt=qcow2 lazy_refcounts=off refcount_bits=16
Formatting 'TEST_DIR/8-snapshot-v1.qcow2', fmt=qcow2 cluster_size=65536 extended_l2=off compression_type=zlib size=134217728 backing_file=TEST_DIR/7-snapshot-v1.qcow2 backing_fmt=qcow2 lazy_refcounts=off refcount_bits=16
{"return": {}}
{ 'execute': 'transaction', 'arguments': {'actions': [ { 'type': 'blockdev-snapshot-sync', 'data' : { 'device': 'virtio0', 'snapshot-file': 'TEST_DIR/9-snapshot-v0.IMGFMT' } }, { 'type': 'blockdev-snapshot-sync', 'data' : { 'device': 'virtio1', 'snapshot-file': 'TEST_DIR/9-snapshot-v1.IMGFMT' } } ] } }
{ 'execute': 'transaction', 'arguments':
{'actions': [
{ 'type': 'blockdev-snapshot-sync', 'data' :
{ 'device': 'virtio0',
'snapshot-file': 'TEST_DIR/9-snapshot-v0.IMGFMT' } },
{ 'type': 'blockdev-snapshot-sync', 'data' :
{ 'device': 'virtio1',
'snapshot-file': 'TEST_DIR/9-snapshot-v1.IMGFMT' } } ]
} }
Formatting 'TEST_DIR/9-snapshot-v0.qcow2', fmt=qcow2 cluster_size=65536 extended_l2=off compression_type=zlib size=134217728 backing_file=TEST_DIR/8-snapshot-v0.qcow2 backing_fmt=qcow2 lazy_refcounts=off refcount_bits=16
Formatting 'TEST_DIR/9-snapshot-v1.qcow2', fmt=qcow2 cluster_size=65536 extended_l2=off compression_type=zlib size=134217728 backing_file=TEST_DIR/8-snapshot-v1.qcow2 backing_fmt=qcow2 lazy_refcounts=off refcount_bits=16
{"return": {}}
{ 'execute': 'transaction', 'arguments': {'actions': [ { 'type': 'blockdev-snapshot-sync', 'data' : { 'device': 'virtio0', 'snapshot-file': 'TEST_DIR/10-snapshot-v0.IMGFMT' } }, { 'type': 'blockdev-snapshot-sync', 'data' : { 'device': 'virtio1', 'snapshot-file': 'TEST_DIR/10-snapshot-v1.IMGFMT' } } ] } }
{ 'execute': 'transaction', 'arguments':
{'actions': [
{ 'type': 'blockdev-snapshot-sync', 'data' :
{ 'device': 'virtio0',
'snapshot-file': 'TEST_DIR/10-snapshot-v0.IMGFMT' } },
{ 'type': 'blockdev-snapshot-sync', 'data' :
{ 'device': 'virtio1',
'snapshot-file': 'TEST_DIR/10-snapshot-v1.IMGFMT' } } ]
} }
Formatting 'TEST_DIR/10-snapshot-v0.qcow2', fmt=qcow2 cluster_size=65536 extended_l2=off compression_type=zlib size=134217728 backing_file=TEST_DIR/9-snapshot-v0.qcow2 backing_fmt=qcow2 lazy_refcounts=off refcount_bits=16
Formatting 'TEST_DIR/10-snapshot-v1.qcow2', fmt=qcow2 cluster_size=65536 extended_l2=off compression_type=zlib size=134217728 backing_file=TEST_DIR/9-snapshot-v1.qcow2 backing_fmt=qcow2 lazy_refcounts=off refcount_bits=16
{"return": {}}
@ -69,48 +148,84 @@ Formatting 'TEST_DIR/10-snapshot-v1.qcow2', fmt=qcow2 cluster_size=65536 extende
=== Create a couple of snapshots using blockdev-snapshot ===
Formatting 'TEST_DIR/11-snapshot-v0.IMGFMT', fmt=IMGFMT size=134217728 backing_file=TEST_DIR/10-snapshot-v0.IMGFMT backing_fmt=IMGFMT
{ 'execute': 'blockdev-add', 'arguments': { 'driver': 'IMGFMT', 'node-name': 'snap_11', 'backing': null, 'file': { 'driver': 'file', 'filename': 'TEST_DIR/11-snapshot-v0.IMGFMT', 'node-name': 'file_11' } } }
{ 'execute': 'blockdev-add', 'arguments':
{ 'driver': 'IMGFMT', 'node-name': 'snap_11', 'backing': null,
'file':
{ 'driver': 'file', 'filename': 'TEST_DIR/11-snapshot-v0.IMGFMT',
'node-name': 'file_11' } } }
{"return": {}}
{ 'execute': 'blockdev-snapshot', 'arguments': { 'node': 'virtio0', 'overlay':'snap_11' } }
{ 'execute': 'blockdev-snapshot',
'arguments': { 'node': 'virtio0',
'overlay':'snap_11' } }
{"return": {}}
Formatting 'TEST_DIR/12-snapshot-v0.IMGFMT', fmt=IMGFMT size=134217728 backing_file=TEST_DIR/11-snapshot-v0.IMGFMT backing_fmt=IMGFMT
{ 'execute': 'blockdev-add', 'arguments': { 'driver': 'IMGFMT', 'node-name': 'snap_12', 'backing': null, 'file': { 'driver': 'file', 'filename': 'TEST_DIR/12-snapshot-v0.IMGFMT', 'node-name': 'file_12' } } }
{ 'execute': 'blockdev-add', 'arguments':
{ 'driver': 'IMGFMT', 'node-name': 'snap_12', 'backing': null,
'file':
{ 'driver': 'file', 'filename': 'TEST_DIR/12-snapshot-v0.IMGFMT',
'node-name': 'file_12' } } }
{"return": {}}
{ 'execute': 'blockdev-snapshot', 'arguments': { 'node': 'virtio0', 'overlay':'snap_12' } }
{ 'execute': 'blockdev-snapshot',
'arguments': { 'node': 'virtio0',
'overlay':'snap_12' } }
{"return": {}}
=== Invalid command - cannot create a snapshot using a file BDS ===
{ 'execute': 'blockdev-snapshot', 'arguments': { 'node':'virtio0', 'overlay':'file_12' } }
{ 'execute': 'blockdev-snapshot',
'arguments': { 'node':'virtio0',
'overlay':'file_12' }
}
{"error": {"class": "GenericError", "desc": "The overlay is already in use"}}
=== Invalid command - snapshot node used as active layer ===
{ 'execute': 'blockdev-snapshot', 'arguments': { 'node': 'virtio0', 'overlay':'snap_12' } }
{ 'execute': 'blockdev-snapshot',
'arguments': { 'node': 'virtio0',
'overlay':'snap_12' } }
{"error": {"class": "GenericError", "desc": "The overlay is already in use"}}
{ 'execute': 'blockdev-snapshot', 'arguments': { 'node':'virtio0', 'overlay':'virtio0' } }
{ 'execute': 'blockdev-snapshot',
'arguments': { 'node':'virtio0',
'overlay':'virtio0' }
}
{"error": {"class": "GenericError", "desc": "The overlay is already in use"}}
{ 'execute': 'blockdev-snapshot', 'arguments': { 'node':'virtio0', 'overlay':'virtio1' } }
{ 'execute': 'blockdev-snapshot',
'arguments': { 'node':'virtio0',
'overlay':'virtio1' }
}
{"error": {"class": "GenericError", "desc": "The overlay is already in use"}}
=== Invalid command - snapshot node used as backing hd ===
{ 'execute': 'blockdev-snapshot', 'arguments': { 'node': 'virtio0', 'overlay':'snap_11' } }
{ 'execute': 'blockdev-snapshot',
'arguments': { 'node': 'virtio0',
'overlay':'snap_11' } }
{"error": {"class": "GenericError", "desc": "The overlay is already in use"}}
=== Invalid command - snapshot node has a backing image ===
Formatting 'TEST_DIR/t.IMGFMT.base', fmt=IMGFMT size=134217728
Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=134217728 backing_file=TEST_DIR/t.IMGFMT.base backing_fmt=IMGFMT
{ 'execute': 'blockdev-add', 'arguments': { 'driver': 'IMGFMT', 'node-name': 'snap_13', 'file': { 'driver': 'file', 'filename': 'TEST_DIR/t.IMGFMT', 'node-name': 'file_13' } } }
{ 'execute': 'blockdev-add', 'arguments':
{ 'driver': 'IMGFMT', 'node-name': 'snap_13',
'file':
{ 'driver': 'file', 'filename': 'TEST_DIR/t.IMGFMT',
'node-name': 'file_13' } } }
{"return": {}}
{ 'execute': 'blockdev-snapshot', 'arguments': { 'node': 'virtio0', 'overlay':'snap_13' } }
{ 'execute': 'blockdev-snapshot',
'arguments': { 'node': 'virtio0',
'overlay':'snap_13' } }
{"error": {"class": "GenericError", "desc": "The overlay already has a backing image"}}
=== Invalid command - The node does not exist ===
{ 'execute': 'blockdev-snapshot', 'arguments': { 'node': 'virtio0', 'overlay':'snap_14' } }
{ 'execute': 'blockdev-snapshot',
'arguments': { 'node': 'virtio0',
'overlay':'snap_14' } }
{"error": {"class": "GenericError", "desc": "Cannot find device=snap_14 nor node_name=snap_14"}}
{ 'execute': 'blockdev-snapshot', 'arguments': { 'node':'nodevice', 'overlay':'snap_13' } }
{ 'execute': 'blockdev-snapshot',
'arguments': { 'node':'nodevice',
'overlay':'snap_13' }
}
{"error": {"class": "GenericError", "desc": "Cannot find device=nodevice nor node_name=nodevice"}}
*** done


@ -3,13 +3,19 @@ Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=67108864
Formatting 'TEST_DIR/source.IMGFMT', fmt=IMGFMT size=67108864
{'execute': 'qmp_capabilities'}
{"return": {}}
{'execute': 'drive-mirror', 'arguments': {'device': 'src', 'target': 'nbd+unix:///?socket=SOCK_DIR/nbd', 'format': 'nbd', 'sync':'full', 'mode':'existing'}}
{'execute': 'drive-mirror',
'arguments': {'device': 'src',
'target': 'nbd+unix:///?socket=SOCK_DIR/nbd',
'format': 'nbd',
'sync':'full',
'mode':'existing'}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "created", "id": "src"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "src"}}
{"return": {}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "ready", "id": "src"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_READY", "data": {"device": "src", "len": 67108864, "offset": 67108864, "speed": 0, "type": "mirror"}}
{'execute': 'block-job-complete', 'arguments': {'device': 'src'}}
{'execute': 'block-job-complete',
'arguments': {'device': 'src'}}
{"return": {}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "waiting", "id": "src"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "pending", "id": "src"}}


@ -12,7 +12,9 @@ virtual size: 5 MiB (5242880 bytes)
{ 'execute': 'qmp_capabilities' }
{"return": {}}
{ 'execute': 'block-commit', 'arguments': { 'device': 'test', 'top': 'TEST_DIR/t.IMGFMT.snp1' } }
{ 'execute': 'block-commit',
'arguments': { 'device': 'test',
'top': 'TEST_DIR/t.IMGFMT.snp1' } }
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "created", "id": "test"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "test"}}
{"return": {}}


@ -68,7 +68,7 @@ $QEMU_IO -c 'write 0 64k' "$TEST_IMG" | _filter_qemu_io
qemu_comm_method=monitor _launch_qemu -drive if=none,file="$TEST_IMG",id=drv0
# Wait for a prompt to appear (so we know qemu has opened the image)
_send_qemu_cmd '' '(qemu)'
_send_qemu_cmd $QEMU_HANDLE '' '(qemu)'
$QEMU_IMG resize --shrink --image-opts \
"driver=raw,file.driver=file,file.filename=$TEST_IMG,file.locking=off" \


@ -16,8 +16,8 @@ Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=65536
wrote 65536/65536 bytes at offset 0
64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
QEMU X.Y.Z monitor - type 'help' for more information
Image resized.
(qemu)
Image resized.
(qemu) qemu-io drv0 map
64 KiB (0x10000) bytes allocated at offset 0 bytes (0x0)
*** done


@ -6,7 +6,9 @@ Formatting 'TEST_DIR/t.raw.src', fmt=IMGFMT size=67108864
Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=SIZE
{ 'execute': 'qmp_capabilities' }
{"return": {}}
{'execute':'drive-mirror', 'arguments':{ 'device': 'src', 'target': 'TEST_DIR/t.IMGFMT', 'mode': 'existing', 'sync': 'full'}}
{'execute':'drive-mirror', 'arguments':{
'device': 'src', 'target': 'TEST_DIR/t.IMGFMT',
'mode': 'existing', 'sync': 'full'}}
WARNING: Image format was not specified for 'TEST_DIR/t.raw' and probing guessed raw.
Automatically detecting the format is dangerous for raw images, write operations on block 0 will be restricted.
Specify the 'raw' format explicitly to remove the restrictions.
@ -27,7 +29,9 @@ read 512/512 bytes at offset 0
512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
{ 'execute': 'qmp_capabilities' }
{"return": {}}
{'execute':'drive-mirror', 'arguments':{ 'device': 'src', 'target': 'TEST_DIR/t.IMGFMT', 'format': 'IMGFMT', 'mode': 'existing', 'sync': 'full'}}
{'execute':'drive-mirror', 'arguments':{
'device': 'src', 'target': 'TEST_DIR/t.IMGFMT', 'format': 'IMGFMT',
'mode': 'existing', 'sync': 'full'}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "created", "id": "src"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "src"}}
{"return": {}}
@ -51,7 +55,9 @@ Formatting 'TEST_DIR/t.raw.src', fmt=IMGFMT size=67108864
Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=SIZE
{ 'execute': 'qmp_capabilities' }
{"return": {}}
{'execute':'drive-mirror', 'arguments':{ 'device': 'src', 'target': 'TEST_DIR/t.IMGFMT', 'mode': 'existing', 'sync': 'full'}}
{'execute':'drive-mirror', 'arguments':{
'device': 'src', 'target': 'TEST_DIR/t.IMGFMT',
'mode': 'existing', 'sync': 'full'}}
WARNING: Image format was not specified for 'TEST_DIR/t.raw' and probing guessed raw.
Automatically detecting the format is dangerous for raw images, write operations on block 0 will be restricted.
Specify the 'raw' format explicitly to remove the restrictions.
@ -72,7 +78,9 @@ read 512/512 bytes at offset 0
512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
{ 'execute': 'qmp_capabilities' }
{"return": {}}
{'execute':'drive-mirror', 'arguments':{ 'device': 'src', 'target': 'TEST_DIR/t.IMGFMT', 'format': 'IMGFMT', 'mode': 'existing', 'sync': 'full'}}
{'execute':'drive-mirror', 'arguments':{
'device': 'src', 'target': 'TEST_DIR/t.IMGFMT', 'format': 'IMGFMT',
'mode': 'existing', 'sync': 'full'}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "created", "id": "src"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "src"}}
{"return": {}}
@ -96,7 +104,9 @@ Formatting 'TEST_DIR/t.raw.src', fmt=IMGFMT size=67108864
Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=SIZE
{ 'execute': 'qmp_capabilities' }
{"return": {}}
{'execute':'drive-mirror', 'arguments':{ 'device': 'src', 'target': 'TEST_DIR/t.IMGFMT', 'mode': 'existing', 'sync': 'full'}}
{'execute':'drive-mirror', 'arguments':{
'device': 'src', 'target': 'TEST_DIR/t.IMGFMT',
'mode': 'existing', 'sync': 'full'}}
WARNING: Image format was not specified for 'TEST_DIR/t.raw' and probing guessed raw.
Automatically detecting the format is dangerous for raw images, write operations on block 0 will be restricted.
Specify the 'raw' format explicitly to remove the restrictions.
@ -117,7 +127,9 @@ read 512/512 bytes at offset 0
512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
{ 'execute': 'qmp_capabilities' }
{"return": {}}
{'execute':'drive-mirror', 'arguments':{ 'device': 'src', 'target': 'TEST_DIR/t.IMGFMT', 'format': 'IMGFMT', 'mode': 'existing', 'sync': 'full'}}
{'execute':'drive-mirror', 'arguments':{
'device': 'src', 'target': 'TEST_DIR/t.IMGFMT', 'format': 'IMGFMT',
'mode': 'existing', 'sync': 'full'}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "created", "id": "src"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "src"}}
{"return": {}}
@ -141,7 +153,9 @@ Formatting 'TEST_DIR/t.raw.src', fmt=IMGFMT size=67108864
Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=SIZE
{ 'execute': 'qmp_capabilities' }
{"return": {}}
{'execute':'drive-mirror', 'arguments':{ 'device': 'src', 'target': 'TEST_DIR/t.IMGFMT', 'mode': 'existing', 'sync': 'full'}}
{'execute':'drive-mirror', 'arguments':{
'device': 'src', 'target': 'TEST_DIR/t.IMGFMT',
'mode': 'existing', 'sync': 'full'}}
WARNING: Image format was not specified for 'TEST_DIR/t.raw' and probing guessed raw.
Automatically detecting the format is dangerous for raw images, write operations on block 0 will be restricted.
Specify the 'raw' format explicitly to remove the restrictions.
@ -162,7 +176,9 @@ read 512/512 bytes at offset 0
512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
{ 'execute': 'qmp_capabilities' }
{"return": {}}
{'execute':'drive-mirror', 'arguments':{ 'device': 'src', 'target': 'TEST_DIR/t.IMGFMT', 'format': 'IMGFMT', 'mode': 'existing', 'sync': 'full'}}
{'execute':'drive-mirror', 'arguments':{
'device': 'src', 'target': 'TEST_DIR/t.IMGFMT', 'format': 'IMGFMT',
'mode': 'existing', 'sync': 'full'}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "created", "id": "src"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "src"}}
{"return": {}}
@ -186,7 +202,9 @@ Formatting 'TEST_DIR/t.raw.src', fmt=IMGFMT size=67108864
Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=SIZE
{ 'execute': 'qmp_capabilities' }
{"return": {}}
{'execute':'drive-mirror', 'arguments':{ 'device': 'src', 'target': 'TEST_DIR/t.IMGFMT', 'mode': 'existing', 'sync': 'full'}}
{'execute':'drive-mirror', 'arguments':{
'device': 'src', 'target': 'TEST_DIR/t.IMGFMT',
'mode': 'existing', 'sync': 'full'}}
WARNING: Image format was not specified for 'TEST_DIR/t.raw' and probing guessed raw.
Automatically detecting the format is dangerous for raw images, write operations on block 0 will be restricted.
Specify the 'raw' format explicitly to remove the restrictions.
@ -207,7 +225,9 @@ read 512/512 bytes at offset 0
512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
{ 'execute': 'qmp_capabilities' }
{"return": {}}
{'execute':'drive-mirror', 'arguments':{ 'device': 'src', 'target': 'TEST_DIR/t.IMGFMT', 'format': 'IMGFMT', 'mode': 'existing', 'sync': 'full'}}
{'execute':'drive-mirror', 'arguments':{
'device': 'src', 'target': 'TEST_DIR/t.IMGFMT', 'format': 'IMGFMT',
'mode': 'existing', 'sync': 'full'}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "created", "id": "src"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "src"}}
{"return": {}}
@ -231,7 +251,9 @@ Formatting 'TEST_DIR/t.raw.src', fmt=IMGFMT size=67108864
Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=SIZE
{ 'execute': 'qmp_capabilities' }
{"return": {}}
{'execute':'drive-mirror', 'arguments':{ 'device': 'src', 'target': 'TEST_DIR/t.IMGFMT', 'mode': 'existing', 'sync': 'full'}}
{'execute':'drive-mirror', 'arguments':{
'device': 'src', 'target': 'TEST_DIR/t.IMGFMT',
'mode': 'existing', 'sync': 'full'}}
WARNING: Image format was not specified for 'TEST_DIR/t.raw' and probing guessed raw.
Automatically detecting the format is dangerous for raw images, write operations on block 0 will be restricted.
Specify the 'raw' format explicitly to remove the restrictions.
@ -252,7 +274,9 @@ read 512/512 bytes at offset 0
512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
{ 'execute': 'qmp_capabilities' }
{"return": {}}
{'execute':'drive-mirror', 'arguments':{ 'device': 'src', 'target': 'TEST_DIR/t.IMGFMT', 'format': 'IMGFMT', 'mode': 'existing', 'sync': 'full'}}
{'execute':'drive-mirror', 'arguments':{
'device': 'src', 'target': 'TEST_DIR/t.IMGFMT', 'format': 'IMGFMT',
'mode': 'existing', 'sync': 'full'}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "created", "id": "src"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "src"}}
{"return": {}}
@ -275,7 +299,9 @@ Images are identical.
Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=SIZE
{ 'execute': 'qmp_capabilities' }
{"return": {}}
{'execute':'drive-mirror', 'arguments':{ 'device': 'src', 'target': 'TEST_DIR/t.IMGFMT', 'mode': 'existing', 'sync': 'full'}}
{'execute':'drive-mirror', 'arguments':{
'device': 'src', 'target': 'TEST_DIR/t.IMGFMT',
'mode': 'existing', 'sync': 'full'}}
WARNING: Image format was not specified for 'TEST_DIR/t.raw' and probing guessed raw.
Automatically detecting the format is dangerous for raw images, write operations on block 0 will be restricted.
Specify the 'raw' format explicitly to remove the restrictions.
@ -296,7 +322,9 @@ read 512/512 bytes at offset 0
512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
{ 'execute': 'qmp_capabilities' }
{"return": {}}
{'execute':'drive-mirror', 'arguments':{ 'device': 'src', 'target': 'TEST_DIR/t.IMGFMT', 'format': 'IMGFMT', 'mode': 'existing', 'sync': 'full'}}
{'execute':'drive-mirror', 'arguments':{
'device': 'src', 'target': 'TEST_DIR/t.IMGFMT', 'format': 'IMGFMT',
'mode': 'existing', 'sync': 'full'}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "created", "id": "src"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "src"}}
{"return": {}}
@ -319,7 +347,9 @@ Images are identical.
Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=SIZE
{ 'execute': 'qmp_capabilities' }
{"return": {}}
{'execute':'drive-mirror', 'arguments':{ 'device': 'src', 'target': 'TEST_DIR/t.IMGFMT', 'mode': 'existing', 'sync': 'full'}}
{'execute':'drive-mirror', 'arguments':{
'device': 'src', 'target': 'TEST_DIR/t.IMGFMT',
'mode': 'existing', 'sync': 'full'}}
WARNING: Image format was not specified for 'TEST_DIR/t.raw' and probing guessed raw.
Automatically detecting the format is dangerous for raw images, write operations on block 0 will be restricted.
Specify the 'raw' format explicitly to remove the restrictions.
@ -340,7 +370,9 @@ read 512/512 bytes at offset 0
512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
{ 'execute': 'qmp_capabilities' }
{"return": {}}
{'execute':'drive-mirror', 'arguments':{ 'device': 'src', 'target': 'TEST_DIR/t.IMGFMT', 'format': 'IMGFMT', 'mode': 'existing', 'sync': 'full'}}
{'execute':'drive-mirror', 'arguments':{
'device': 'src', 'target': 'TEST_DIR/t.IMGFMT', 'format': 'IMGFMT',
'mode': 'existing', 'sync': 'full'}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "created", "id": "src"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "src"}}
{"return": {}}
@ -363,7 +395,9 @@ Images are identical.
Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=SIZE
{ 'execute': 'qmp_capabilities' }
{"return": {}}
{'execute':'drive-mirror', 'arguments':{ 'device': 'src', 'target': 'TEST_DIR/t.IMGFMT', 'mode': 'existing', 'sync': 'full'}}
{'execute':'drive-mirror', 'arguments':{
'device': 'src', 'target': 'TEST_DIR/t.IMGFMT',
'mode': 'existing', 'sync': 'full'}}
WARNING: Image format was not specified for 'TEST_DIR/t.raw' and probing guessed raw.
Automatically detecting the format is dangerous for raw images, write operations on block 0 will be restricted.
Specify the 'raw' format explicitly to remove the restrictions.
@ -384,7 +418,9 @@ read 512/512 bytes at offset 0
512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
{ 'execute': 'qmp_capabilities' }
{"return": {}}
{'execute':'drive-mirror', 'arguments':{ 'device': 'src', 'target': 'TEST_DIR/t.IMGFMT', 'format': 'IMGFMT', 'mode': 'existing', 'sync': 'full'}}
{'execute':'drive-mirror', 'arguments':{
'device': 'src', 'target': 'TEST_DIR/t.IMGFMT', 'format': 'IMGFMT',
'mode': 'existing', 'sync': 'full'}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "created", "id": "src"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "src"}}
{"return": {}}
@ -407,7 +443,9 @@ Images are identical.
Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=SIZE
{ 'execute': 'qmp_capabilities' }
{"return": {}}
{'execute':'drive-mirror', 'arguments':{ 'device': 'src', 'target': 'TEST_DIR/t.IMGFMT', 'mode': 'existing', 'sync': 'full'}}
{'execute':'drive-mirror', 'arguments':{
'device': 'src', 'target': 'TEST_DIR/t.IMGFMT',
'mode': 'existing', 'sync': 'full'}}
WARNING: Image format was not specified for 'TEST_DIR/t.raw' and probing guessed raw.
Automatically detecting the format is dangerous for raw images, write operations on block 0 will be restricted.
Specify the 'raw' format explicitly to remove the restrictions.
@ -428,7 +466,9 @@ read 512/512 bytes at offset 0
512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
{ 'execute': 'qmp_capabilities' }
{"return": {}}
{'execute':'drive-mirror', 'arguments':{ 'device': 'src', 'target': 'TEST_DIR/t.IMGFMT', 'format': 'IMGFMT', 'mode': 'existing', 'sync': 'full'}}
{'execute':'drive-mirror', 'arguments':{
'device': 'src', 'target': 'TEST_DIR/t.IMGFMT', 'format': 'IMGFMT',
'mode': 'existing', 'sync': 'full'}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "created", "id": "src"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "src"}}
{"return": {}}
@ -451,7 +491,9 @@ Images are identical.
Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=SIZE
{ 'execute': 'qmp_capabilities' }
{"return": {}}
{'execute':'drive-mirror', 'arguments':{ 'device': 'src', 'target': 'TEST_DIR/t.IMGFMT', 'mode': 'existing', 'sync': 'full'}}
{'execute':'drive-mirror', 'arguments':{
'device': 'src', 'target': 'TEST_DIR/t.IMGFMT',
'mode': 'existing', 'sync': 'full'}}
WARNING: Image format was not specified for 'TEST_DIR/t.raw' and probing guessed raw.
Automatically detecting the format is dangerous for raw images, write operations on block 0 will be restricted.
Specify the 'raw' format explicitly to remove the restrictions.
@ -473,7 +515,9 @@ WARNING: Image format was not specified for 'TEST_DIR/t.raw' and probing guessed
Images are identical.
{ 'execute': 'qmp_capabilities' }
{"return": {}}
{'execute':'drive-mirror', 'arguments':{ 'device': 'src', 'target': 'TEST_DIR/t.IMGFMT', 'format': 'IMGFMT', 'mode': 'existing', 'sync': 'full'}}
{'execute':'drive-mirror', 'arguments':{
'device': 'src', 'target': 'TEST_DIR/t.IMGFMT', 'format': 'IMGFMT',
'mode': 'existing', 'sync': 'full'}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "created", "id": "src"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "src"}}
{"return": {}}


@ -2,11 +2,18 @@ QA output created by 117
Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=65536
{ 'execute': 'qmp_capabilities' }
{"return": {}}
{ 'execute': 'blockdev-add', 'arguments': { 'node-name': 'protocol', 'driver': 'file', 'filename': 'TEST_DIR/t.IMGFMT' } }
{ 'execute': 'blockdev-add',
'arguments': { 'node-name': 'protocol',
'driver': 'file',
'filename': 'TEST_DIR/t.IMGFMT' } }
{"return": {}}
{ 'execute': 'blockdev-add', 'arguments': { 'node-name': 'format', 'driver': 'IMGFMT', 'file': 'protocol' } }
{ 'execute': 'blockdev-add',
'arguments': { 'node-name': 'format',
'driver': 'IMGFMT',
'file': 'protocol' } }
{"return": {}}
{ 'execute': 'human-monitor-command', 'arguments': { 'command-line': 'qemu-io format "write -P 42 0 64k"' } }
{ 'execute': 'human-monitor-command',
'arguments': { 'command-line': 'qemu-io format "write -P 42 0 64k"' } }
wrote 65536/65536 bytes at offset 0
64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
{"return": ""}


@ -6,13 +6,21 @@ wrote 42/42 bytes at offset 0
42 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
{ 'execute': 'qmp_capabilities' }
{"return": {}}
{ 'execute': 'drive-mirror', 'arguments': { 'job-id': 'mirror', 'device': 'source', 'target': 'TEST_DIR/t.IMGFMT.overlay1', 'mode': 'existing', 'sync': 'top' } }
{ 'execute': 'drive-mirror',
'arguments': {
'job-id': 'mirror',
'device': 'source',
'target': 'TEST_DIR/t.IMGFMT.overlay1',
'mode': 'existing',
'sync': 'top'
} }
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "created", "id": "mirror"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "mirror"}}
{"return": {}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "ready", "id": "mirror"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_READY", "data": {"device": "mirror", "len": 65536, "offset": 65536, "speed": 0, "type": "mirror"}}
{ 'execute': 'block-job-complete', 'arguments': { 'device': 'mirror' } }
{ 'execute': 'block-job-complete',
'arguments': { 'device': 'mirror' } }
{"return": {}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "waiting", "id": "mirror"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "pending", "id": "mirror"}}


@ -4,13 +4,17 @@ wrote 65536/65536 bytes at offset 0
64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
{ 'execute': 'qmp_capabilities' }
{"return": {}}
{ 'execute': 'nbd-server-start', 'arguments': { 'addr': { 'type': 'unix', 'data': { 'path': 'SOCK_DIR/nbd' }}}}
{ 'execute': 'nbd-server-start',
'arguments': { 'addr': { 'type': 'unix',
'data': { 'path': 'SOCK_DIR/nbd' }}}}
{"return": {}}
{ 'execute': 'nbd-server-add', 'arguments': { 'device': 'drv' }}
{ 'execute': 'nbd-server-add',
'arguments': { 'device': 'drv' }}
{"return": {}}
read 65536/65536 bytes at offset 0
64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
{ 'execute': 'eject', 'arguments': { 'device': 'drv' }}
{ 'execute': 'eject',
'arguments': { 'device': 'drv' }}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_EXPORT_DELETED", "data": {"id": "drv"}}
qemu-io: can't open device nbd+unix:///drv?socket=SOCK_DIR/nbd: Requested export not available
server reported: export 'drv' not present


@ -7,105 +7,173 @@ Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=1048576 backing_file=TEST_DIR/m.
=== Testing drive-backup ===
{'execute': 'blockdev-add', 'arguments': { 'node-name': 'drv0', 'driver': 'IMGFMT', 'file': { 'driver': 'file', 'filename': 'TEST_DIR/t.IMGFMT' }}}
{'execute': 'blockdev-add',
'arguments': {
'node-name': 'drv0',
'driver': 'IMGFMT',
'file': {
'driver': 'file',
'filename': 'TEST_DIR/t.IMGFMT'
}}}
{"return": {}}
{'execute': 'drive-backup', 'arguments': {'job-id': 'job0', 'device': 'drv0', 'target': 'TEST_DIR/o.IMGFMT', 'format': 'IMGFMT', 'sync': 'none'}}
{'execute': 'drive-backup',
'arguments': {'job-id': 'job0',
'device': 'drv0',
'target': 'TEST_DIR/o.IMGFMT',
'format': 'IMGFMT',
'sync': 'none'}}
Formatting 'TEST_DIR/o.IMGFMT', fmt=IMGFMT size=1048576 backing_file=TEST_DIR/t.IMGFMT backing_fmt=IMGFMT
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "created", "id": "job0"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "job0"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "paused", "id": "job0"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "job0"}}
{'execute': 'blockdev-del', 'arguments': {'node-name': 'drv0'}}
{'execute': 'blockdev-del',
'arguments': {'node-name': 'drv0'}}
{"error": {"class": "GenericError", "desc": "Node 'drv0' is busy: node is used as backing hd of 'NODE_NAME'"}}
{'execute': 'block-job-cancel', 'arguments': {'device': 'job0'}}
{'execute': 'block-job-cancel',
'arguments': {'device': 'job0'}}
{"return": {}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "aborting", "id": "job0"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_CANCELLED", "data": {"device": "job0", "len": 1048576, "offset": 0, "speed": 0, "type": "backup"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "concluded", "id": "job0"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "null", "id": "job0"}}
{'execute': 'blockdev-del', 'arguments': {'node-name': 'drv0'}}
{'execute': 'blockdev-del',
'arguments': {'node-name': 'drv0'}}
{"return": {}}
=== Testing drive-mirror ===
{'execute': 'blockdev-add', 'arguments': { 'node-name': 'drv0', 'driver': 'IMGFMT', 'file': { 'driver': 'file', 'filename': 'TEST_DIR/t.IMGFMT' }}}
{'execute': 'blockdev-add',
'arguments': {
'node-name': 'drv0',
'driver': 'IMGFMT',
'file': {
'driver': 'file',
'filename': 'TEST_DIR/t.IMGFMT'
}}}
{"return": {}}
{'execute': 'drive-mirror', 'arguments': {'job-id': 'job0', 'device': 'drv0', 'target': 'TEST_DIR/o.IMGFMT', 'format': 'IMGFMT', 'sync': 'none'}}
{'execute': 'drive-mirror',
'arguments': {'job-id': 'job0',
'device': 'drv0',
'target': 'TEST_DIR/o.IMGFMT',
'format': 'IMGFMT',
'sync': 'none'}}
Formatting 'TEST_DIR/o.IMGFMT', fmt=IMGFMT size=1048576 backing_file=TEST_DIR/t.IMGFMT backing_fmt=IMGFMT
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "created", "id": "job0"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "job0"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "ready", "id": "job0"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_READY", "data": {"device": "job0", "len": 0, "offset": 0, "speed": 0, "type": "mirror"}}
{'execute': 'blockdev-del', 'arguments': {'node-name': 'drv0'}}
{'execute': 'blockdev-del',
'arguments': {'node-name': 'drv0'}}
{"error": {"class": "GenericError", "desc": "Node 'drv0' is busy: block device is in use by block job: mirror"}}
{'execute': 'block-job-cancel', 'arguments': {'device': 'job0'}}
{'execute': 'block-job-cancel',
'arguments': {'device': 'job0'}}
{"return": {}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "waiting", "id": "job0"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "pending", "id": "job0"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "job0", "len": 0, "offset": 0, "speed": 0, "type": "mirror"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "concluded", "id": "job0"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "null", "id": "job0"}}
{'execute': 'blockdev-del', 'arguments': {'node-name': 'drv0'}}
{'execute': 'blockdev-del',
'arguments': {'node-name': 'drv0'}}
{"return": {}}
=== Testing active block-commit ===
{'execute': 'blockdev-add', 'arguments': { 'node-name': 'drv0', 'driver': 'IMGFMT', 'file': { 'driver': 'file', 'filename': 'TEST_DIR/t.IMGFMT' }}}
{'execute': 'blockdev-add',
'arguments': {
'node-name': 'drv0',
'driver': 'IMGFMT',
'file': {
'driver': 'file',
'filename': 'TEST_DIR/t.IMGFMT'
}}}
{"return": {}}
{'execute': 'block-commit', 'arguments': {'job-id': 'job0', 'device': 'drv0'}}
{'execute': 'block-commit',
'arguments': {'job-id': 'job0', 'device': 'drv0'}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "created", "id": "job0"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "job0"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "ready", "id": "job0"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_READY", "data": {"device": "job0", "len": 0, "offset": 0, "speed": 0, "type": "commit"}}
{'execute': 'blockdev-del', 'arguments': {'node-name': 'drv0'}}
{'execute': 'blockdev-del',
'arguments': {'node-name': 'drv0'}}
{"error": {"class": "GenericError", "desc": "Node 'drv0' is busy: block device is in use by block job: commit"}}
{'execute': 'block-job-cancel', 'arguments': {'device': 'job0'}}
{'execute': 'block-job-cancel',
'arguments': {'device': 'job0'}}
{"return": {}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "waiting", "id": "job0"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "pending", "id": "job0"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "job0", "len": 0, "offset": 0, "speed": 0, "type": "commit"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "concluded", "id": "job0"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "null", "id": "job0"}}
{'execute': 'blockdev-del', 'arguments': {'node-name': 'drv0'}}
{'execute': 'blockdev-del',
'arguments': {'node-name': 'drv0'}}
{"return": {}}
=== Testing non-active block-commit ===
wrote 1048576/1048576 bytes at offset 0
1 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
{'execute': 'blockdev-add', 'arguments': { 'node-name': 'drv0', 'driver': 'IMGFMT', 'file': { 'driver': 'file', 'filename': 'TEST_DIR/t.IMGFMT' }}}
{'execute': 'blockdev-add',
'arguments': {
'node-name': 'drv0',
'driver': 'IMGFMT',
'file': {
'driver': 'file',
'filename': 'TEST_DIR/t.IMGFMT'
}}}
{"return": {}}
{'execute': 'block-commit', 'arguments': {'job-id': 'job0', 'device': 'drv0', 'top': 'TEST_DIR/m.IMGFMT', 'speed': 1}}
{'execute': 'block-commit',
'arguments': {'job-id': 'job0',
'device': 'drv0',
'top': 'TEST_DIR/m.IMGFMT',
'speed': 1}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "created", "id": "job0"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "job0"}}
{'execute': 'blockdev-del', 'arguments': {'node-name': 'drv0'}}
{'execute': 'blockdev-del',
'arguments': {'node-name': 'drv0'}}
{"error": {"class": "GenericError", "desc": "Node drv0 is in use"}}
{'execute': 'block-job-cancel', 'arguments': {'device': 'job0'}}
{'execute': 'block-job-cancel',
'arguments': {'device': 'job0'}}
{"return": {}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "aborting", "id": "job0"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_CANCELLED", "data": {"device": "job0", "len": 1048576, "offset": 524288, "speed": 1, "type": "commit"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "concluded", "id": "job0"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "null", "id": "job0"}}
{'execute': 'blockdev-del', 'arguments': {'node-name': 'drv0'}}
{'execute': 'blockdev-del',
'arguments': {'node-name': 'drv0'}}
{"return": {}}
=== Testing block-stream ===
wrote 1048576/1048576 bytes at offset 0
1 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
{'execute': 'blockdev-add', 'arguments': { 'node-name': 'drv0', 'driver': 'IMGFMT', 'file': { 'driver': 'file', 'filename': 'TEST_DIR/t.IMGFMT' }}}
{'execute': 'blockdev-add',
'arguments': {
'node-name': 'drv0',
'driver': 'IMGFMT',
'file': {
'driver': 'file',
'filename': 'TEST_DIR/t.IMGFMT'
}}}
{"return": {}}
{'execute': 'block-stream', 'arguments': {'job-id': 'job0', 'device': 'drv0', 'speed': 1}}
{'execute': 'block-stream',
'arguments': {'job-id': 'job0',
'device': 'drv0',
'speed': 1}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "created", "id": "job0"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "job0"}}
{'execute': 'blockdev-del', 'arguments': {'node-name': 'drv0'}}
{'execute': 'blockdev-del',
'arguments': {'node-name': 'drv0'}}
{"error": {"class": "GenericError", "desc": "Node drv0 is in use"}}
{'execute': 'block-job-cancel', 'arguments': {'device': 'job0'}}
{'execute': 'block-job-cancel',
'arguments': {'device': 'job0'}}
{"return": {}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "aborting", "id": "job0"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_CANCELLED", "data": {"device": "job0", "len": 1048576, "offset": 524288, "speed": 1, "type": "stream"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "concluded", "id": "job0"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "null", "id": "job0"}}
{'execute': 'blockdev-del', 'arguments': {'node-name': 'drv0'}}
{'execute': 'blockdev-del',
'arguments': {'node-name': 'drv0'}}
{"return": {}}
*** done


@ -1,7 +1,9 @@
QA output created by 143
{ 'execute': 'qmp_capabilities' }
{"return": {}}
{ 'execute': 'nbd-server-start', 'arguments': { 'addr': { 'type': 'unix', 'data': { 'path': 'SOCK_DIR/nbd' }}}}
{ 'execute': 'nbd-server-start',
'arguments': { 'addr': { 'type': 'unix',
'data': { 'path': 'SOCK_DIR/nbd' }}}}
{"return": {}}
qemu-io: can't open device nbd+unix:///no_such_export?socket=SOCK_DIR/nbd: Requested export not available
server reported: export 'no_such_export' not present


@ -8,19 +8,33 @@ Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=536870912
{ 'execute': 'qmp_capabilities' }
{"return": {}}
{ 'execute': 'blockdev-snapshot-sync', 'arguments': { 'device': 'virtio0', 'snapshot-file':'TEST_DIR/tmp.IMGFMT', 'format': 'IMGFMT' } }
{ 'execute': 'blockdev-snapshot-sync',
'arguments': {
'device': 'virtio0',
'snapshot-file':'TEST_DIR/tmp.IMGFMT',
'format': 'IMGFMT'
}
}
Formatting 'TEST_DIR/tmp.qcow2', fmt=qcow2 cluster_size=65536 extended_l2=off compression_type=zlib size=536870912 backing_file=TEST_DIR/t.qcow2 backing_fmt=qcow2 lazy_refcounts=off refcount_bits=16
{"return": {}}
=== Performing block-commit on active layer ===
{ 'execute': 'block-commit', 'arguments': { 'device': 'virtio0' } }
{ 'execute': 'block-commit',
'arguments': {
'device': 'virtio0'
}
}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "created", "id": "virtio0"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "virtio0"}}
{"return": {}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "ready", "id": "virtio0"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_READY", "data": {"device": "virtio0", "len": 0, "offset": 0, "speed": 0, "type": "commit"}}
{ 'execute': 'block-job-complete', 'arguments': { 'device': 'virtio0' } }
{ 'execute': 'block-job-complete',
'arguments': {
'device': 'virtio0'
}
}
{"return": {}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "waiting", "id": "virtio0"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "pending", "id": "virtio0"}}
@ -30,7 +44,13 @@ Formatting 'TEST_DIR/tmp.qcow2', fmt=qcow2 cluster_size=65536 extended_l2=off co
=== Performing Live Snapshot 2 ===
{ 'execute': 'blockdev-snapshot-sync', 'arguments': { 'device': 'virtio0', 'snapshot-file':'TEST_DIR/tmp2.IMGFMT', 'format': 'IMGFMT' } }
{ 'execute': 'blockdev-snapshot-sync',
'arguments': {
'device': 'virtio0',
'snapshot-file':'TEST_DIR/tmp2.IMGFMT',
'format': 'IMGFMT'
}
}
Formatting 'TEST_DIR/tmp2.qcow2', fmt=qcow2 cluster_size=65536 extended_l2=off compression_type=zlib size=536870912 backing_file=TEST_DIR/t.qcow2 backing_fmt=qcow2 lazy_refcounts=off refcount_bits=16
{"return": {}}
*** done


@ -425,7 +425,8 @@ _qemu_img_wrapper commit -b TEST_DIR/t.qcow2.b TEST_DIR/t.qcow2.c
{ 'execute': 'qmp_capabilities' }
{"return": {}}
Adding drive
{ 'execute': 'human-monitor-command', 'arguments': { 'command-line': 'drive_add 0 if=none,id=d0,file=TEST_DIR/t.IMGFMT' } }
{ 'execute': 'human-monitor-command',
'arguments': { 'command-line': 'drive_add 0 if=none,id=d0,file=TEST_DIR/t.IMGFMT' } }
{"return": "OKrn"}
_qemu_io_wrapper TEST_DIR/t.qcow2 -c write 0 512
@ -435,25 +436,30 @@ Creating overlay with qemu-img when the guest is running should be allowed
_qemu_img_wrapper create -f qcow2 -b TEST_DIR/t.qcow2 -F qcow2 TEST_DIR/t.qcow2.overlay
== Closing an image should unlock it ==
{ 'execute': 'human-monitor-command', 'arguments': { 'command-line': 'drive_del d0' } }
{ 'execute': 'human-monitor-command',
'arguments': { 'command-line': 'drive_del d0' } }
{"return": ""}
_qemu_io_wrapper TEST_DIR/t.qcow2 -c write 0 512
Adding two and closing one
{ 'execute': 'human-monitor-command', 'arguments': { 'command-line': 'drive_add 0 if=none,id=d0,file=TEST_DIR/t.IMGFMT,readonly=on' } }
{ 'execute': 'human-monitor-command',
'arguments': { 'command-line': 'drive_add 0 if=none,id=d0,file=TEST_DIR/t.IMGFMT,readonly=on' } }
{"return": "OKrn"}
{ 'execute': 'human-monitor-command', 'arguments': { 'command-line': 'drive_add 0 if=none,id=d1,file=TEST_DIR/t.IMGFMT,readonly=on' } }
{ 'execute': 'human-monitor-command',
'arguments': { 'command-line': 'drive_add 0 if=none,id=d1,file=TEST_DIR/t.IMGFMT,readonly=on' } }
{"return": "OKrn"}
_qemu_img_wrapper info TEST_DIR/t.qcow2
{ 'execute': 'human-monitor-command', 'arguments': { 'command-line': 'drive_del d0' } }
{ 'execute': 'human-monitor-command',
'arguments': { 'command-line': 'drive_del d0' } }
{"return": ""}
_qemu_io_wrapper TEST_DIR/t.qcow2 -c write 0 512
qemu-io: can't open device TEST_DIR/t.qcow2: Failed to get "write" lock
Is another process using the image [TEST_DIR/t.qcow2]?
Closing the other
{ 'execute': 'human-monitor-command', 'arguments': { 'command-line': 'drive_del d1' } }
{ 'execute': 'human-monitor-command',
'arguments': { 'command-line': 'drive_del d1' } }
{"return": ""}
_qemu_io_wrapper TEST_DIR/t.qcow2 -c write 0 512


@ -8,24 +8,37 @@ wrote 196608/196608 bytes at offset 65536
{ 'execute': 'qmp_capabilities' }
{"return": {}}
Formatting 'TEST_DIR/t.IMGFMT.overlay', fmt=IMGFMT size=1048576 backing_file=TEST_DIR/t.IMGFMT backing_fmt=IMGFMT
{ 'execute': 'blockdev-snapshot-sync', 'arguments': { 'device': 'source', 'snapshot-file': 'TEST_DIR/t.IMGFMT.overlay', 'format': 'IMGFMT', 'mode': 'existing' } }
{ 'execute': 'blockdev-snapshot-sync',
'arguments': { 'device': 'source',
'snapshot-file': 'TEST_DIR/t.IMGFMT.overlay',
'format': 'IMGFMT',
'mode': 'existing' } }
{"return": {}}
{ 'execute': 'human-monitor-command', 'arguments': { 'command-line': 'qemu-io source "write -P 3 128k 128k"' } }
{ 'execute': 'human-monitor-command',
'arguments': { 'command-line':
'qemu-io source "write -P 3 128k 128k"' } }
wrote 131072/131072 bytes at offset 131072
128 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
{"return": ""}
Formatting 'TEST_DIR/t.IMGFMT.target.overlay', fmt=IMGFMT size=1048576 backing_file=TEST_DIR/t.IMGFMT.target backing_fmt=IMGFMT
{ 'execute': 'drive-mirror', 'arguments': { 'device': 'source', 'target': 'TEST_DIR/t.IMGFMT.target.overlay', 'mode': 'existing', 'sync': 'top' } }
{ 'execute': 'drive-mirror',
'arguments': { 'device': 'source',
'target': 'TEST_DIR/t.IMGFMT.target.overlay',
'mode': 'existing',
'sync': 'top' } }
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "created", "id": "source"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "source"}}
{"return": {}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "ready", "id": "source"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_READY", "data": {"device": "source", "len": 131072, "offset": 131072, "speed": 0, "type": "mirror"}}
{ 'execute': 'human-monitor-command', 'arguments': { 'command-line': 'qemu-io source "write -P 4 192k 64k"' } }
{ 'execute': 'human-monitor-command',
'arguments': { 'command-line':
'qemu-io source "write -P 4 192k 64k"' } }
wrote 65536/65536 bytes at offset 196608
64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
{"return": ""}
{ 'execute': 'block-job-complete', 'arguments': { 'device': 'source' } }
{ 'execute': 'block-job-complete',
'arguments': { 'device': 'source' } }
{"return": {}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "waiting", "id": "source"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "pending", "id": "source"}}
@ -33,19 +46,27 @@ wrote 65536/65536 bytes at offset 196608
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "concluded", "id": "source"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "null", "id": "source"}}
{ 'execute': 'human-monitor-command', 'arguments': { 'command-line': 'qemu-io source "read -P 1 0k 64k"' } }
{ 'execute': 'human-monitor-command',
'arguments': { 'command-line':
'qemu-io source "read -P 1 0k 64k"' } }
read 65536/65536 bytes at offset 0
64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
{"return": ""}
{ 'execute': 'human-monitor-command', 'arguments': { 'command-line': 'qemu-io source "read -P 2 64k 64k"' } }
{ 'execute': 'human-monitor-command',
'arguments': { 'command-line':
'qemu-io source "read -P 2 64k 64k"' } }
read 65536/65536 bytes at offset 65536
64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
{"return": ""}
{ 'execute': 'human-monitor-command', 'arguments': { 'command-line': 'qemu-io source "read -P 3 128k 64k"' } }
{ 'execute': 'human-monitor-command',
'arguments': { 'command-line':
'qemu-io source "read -P 3 128k 64k"' } }
read 65536/65536 bytes at offset 131072
64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
{"return": ""}
{ 'execute': 'human-monitor-command', 'arguments': { 'command-line': 'qemu-io source "read -P 4 192k 64k"' } }
{ 'execute': 'human-monitor-command',
'arguments': { 'command-line':
'qemu-io source "read -P 4 192k 64k"' } }
read 65536/65536 bytes at offset 196608
64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
{"return": ""}


@ -7,18 +7,23 @@ Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=1048576 backing_file=TEST_DIR/t.
{ 'execute': 'qmp_capabilities' }
{"return": {}}
{ 'execute': 'human-monitor-command', 'arguments': { 'command-line': 'qemu-io none0 "reopen -o backing.detect-zeroes=on"' } }
{ 'execute': 'human-monitor-command',
'arguments': { 'command-line':
'qemu-io none0 "reopen -o backing.detect-zeroes=on"' } }
{"return": ""}
*** Stream and then change an option on the backing file
{ 'execute': 'qmp_capabilities' }
{"return": {}}
{ 'execute': 'block-stream', 'arguments': { 'device': 'none0', 'base': 'TEST_DIR/t.IMGFMT.base' } }
{ 'execute': 'block-stream', 'arguments': { 'device': 'none0',
'base': 'TEST_DIR/t.IMGFMT.base' } }
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "created", "id": "none0"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "none0"}}
{"return": {}}
{ 'execute': 'human-monitor-command', 'arguments': { 'command-line': 'qemu-io none0 "reopen -o backing.detect-zeroes=on"' } }
{ 'execute': 'human-monitor-command',
'arguments': { 'command-line':
'qemu-io none0 "reopen -o backing.detect-zeroes=on"' } }
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "waiting", "id": "none0"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "pending", "id": "none0"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "none0", "len": 1048576, "offset": 1048576, "speed": 0, "type": "stream"}}
@ -33,11 +38,14 @@ Formatting 'TEST_DIR/t.IMGFMT.int', fmt=IMGFMT size=1048576 backing_file=TEST_DI
Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=1048576 backing_file=TEST_DIR/t.IMGFMT.int backing_fmt=IMGFMT
{ 'execute': 'qmp_capabilities' }
{"return": {}}
{ 'execute': 'block-commit', 'arguments': { 'device': 'none0', 'top': 'TEST_DIR/t.IMGFMT.int' } }
{ 'execute': 'block-commit', 'arguments': { 'device': 'none0',
'top': 'TEST_DIR/t.IMGFMT.int' } }
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "created", "id": "none0"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "none0"}}
{"return": {}}
{ 'execute': 'human-monitor-command', 'arguments': { 'command-line': 'qemu-io none0 "reopen -o backing.detect-zeroes=on"' } }
{ 'execute': 'human-monitor-command',
'arguments': { 'command-line':
'qemu-io none0 "reopen -o backing.detect-zeroes=on"' } }
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "waiting", "id": "none0"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "pending", "id": "none0"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "none0", "len": 1048576, "offset": 1048576, "speed": 0, "type": "commit"}}


@ -6,11 +6,30 @@ Formatting 'TEST_DIR/image.snp1', fmt=IMGFMT size=104857600
{ 'execute': 'qmp_capabilities' }
{"return": {}}
{ 'arguments': { 'device': 'disk2', 'format': 'IMGFMT', 'mode': 'existing', 'snapshot-file': 'TEST_DIR/image.snp1', 'snapshot-node-name': 'snp1' }, 'execute': 'blockdev-snapshot-sync' }
{ 'arguments': {
'device': 'disk2',
'format': 'IMGFMT',
'mode': 'existing',
'snapshot-file': 'TEST_DIR/image.snp1',
'snapshot-node-name': 'snp1'
},
'execute': 'blockdev-snapshot-sync'
}
{"return": {}}
{ 'arguments': { 'backing-file': 'image.base', 'device': 'disk2', 'image-node-name': 'snp1' }, 'execute': 'change-backing-file' }
{ 'arguments': {
'backing-file': 'image.base',
'device': 'disk2',
'image-node-name': 'snp1'
},
'execute': 'change-backing-file'
}
{"return": {}}
{ 'arguments': { 'base': 'TEST_DIR/image.base', 'device': 'disk2' }, 'execute': 'block-stream' }
{ 'arguments': {
'base': 'TEST_DIR/image.base',
'device': 'disk2'
},
'execute': 'block-stream'
}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "created", "id": "disk2"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "disk2"}}
{"return": {}}


@ -10,16 +10,42 @@ Is another process using the image [TEST_DIR/t.qcow2]?
{'execute': 'qmp_capabilities'}
{"return": {}}
{'execute': 'blockdev-add', 'arguments': { 'node-name': 'node0', 'driver': 'file', 'filename': 'TEST_DIR/t.IMGFMT', 'locking': 'on' } }
{'execute': 'blockdev-add',
'arguments': {
'node-name': 'node0',
'driver': 'file',
'filename': 'TEST_DIR/t.IMGFMT',
'locking': 'on'
} }
{"return": {}}
{'execute': 'blockdev-snapshot-sync', 'arguments': { 'node-name': 'node0', 'snapshot-file': 'TEST_DIR/t.IMGFMT.overlay', 'snapshot-node-name': 'node1' } }
{'execute': 'blockdev-snapshot-sync',
'arguments': {
'node-name': 'node0',
'snapshot-file': 'TEST_DIR/t.IMGFMT.overlay',
'snapshot-node-name': 'node1'
} }
Formatting 'TEST_DIR/t.qcow2.overlay', fmt=qcow2 cluster_size=65536 extended_l2=off compression_type=zlib size=197120 backing_file=TEST_DIR/t.qcow2 backing_fmt=file lazy_refcounts=off refcount_bits=16
{"return": {}}
{'execute': 'blockdev-add', 'arguments': { 'node-name': 'node1', 'driver': 'file', 'filename': 'TEST_DIR/t.IMGFMT', 'locking': 'on' } }
{'execute': 'blockdev-add',
'arguments': {
'node-name': 'node1',
'driver': 'file',
'filename': 'TEST_DIR/t.IMGFMT',
'locking': 'on'
} }
{"return": {}}
{'execute': 'nbd-server-start', 'arguments': { 'addr': { 'type': 'unix', 'data': { 'path': 'SOCK_DIR/nbd.socket' } } } }
{'execute': 'nbd-server-start',
'arguments': {
'addr': {
'type': 'unix',
'data': {
'path': 'SOCK_DIR/nbd.socket'
} } } }
{"return": {}}
{'execute': 'nbd-server-add', 'arguments': { 'device': 'node1' } }
{'execute': 'nbd-server-add',
'arguments': {
'device': 'node1'
} }
{"return": {}}
=== Testing failure to loosen restrictions ===


@ -11,18 +11,23 @@ Formatting 'TEST_DIR/t.IMGFMT.dest', fmt=IMGFMT size=67108864
=== Write something on the source ===
{ 'execute': 'human-monitor-command', 'arguments': { 'command-line': 'qemu-io disk "write -P 0x55 0 64k"' } }
{ 'execute': 'human-monitor-command',
'arguments': { 'command-line':
'qemu-io disk "write -P 0x55 0 64k"' } }
wrote 65536/65536 bytes at offset 0
64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
{"return": ""}
{ 'execute': 'human-monitor-command', 'arguments': { 'command-line': 'qemu-io disk "read -P 0x55 0 64k"' } }
{ 'execute': 'human-monitor-command',
'arguments': { 'command-line':
'qemu-io disk "read -P 0x55 0 64k"' } }
read 65536/65536 bytes at offset 0
64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
{"return": ""}
=== Do block migration to destination ===
{ 'execute': 'migrate', 'arguments': { 'uri': 'unix:SOCK_DIR/migrate', 'blk': true } }
{ 'execute': 'migrate',
'arguments': { 'uri': 'unix:SOCK_DIR/migrate', 'blk': true } }
{"return": {}}
{ 'execute': 'query-status' }
{"return": {"status": "postmigrate", "singlestep": false, "running": false}}
@ -32,11 +37,15 @@ read 65536/65536 bytes at offset 0
{ 'execute': 'query-status' }
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "RESUME"}
{"return": {"status": "running", "singlestep": false, "running": true}}
{ 'execute': 'human-monitor-command', 'arguments': { 'command-line': 'qemu-io disk "read -P 0x55 0 64k"' } }
{ 'execute': 'human-monitor-command',
'arguments': { 'command-line':
'qemu-io disk "read -P 0x55 0 64k"' } }
read 65536/65536 bytes at offset 0
64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
{"return": ""}
{ 'execute': 'human-monitor-command', 'arguments': { 'command-line': 'qemu-io disk "write -P 0x66 1M 64k"' } }
{ 'execute': 'human-monitor-command',
'arguments': { 'command-line':
'qemu-io disk "write -P 0x66 1M 64k"' } }
wrote 65536/65536 bytes at offset 1048576
64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
{"return": ""}


@ -8,20 +8,34 @@ Formatting 'TEST_DIR/t.IMGFMT.base', fmt=IMGFMT size=67108864
=== Creating backing chain ===
{ 'execute': 'blockdev-snapshot-sync', 'arguments': { 'device': 'disk', 'snapshot-file': 'TEST_DIR/t.IMGFMT.mid', 'format': 'IMGFMT', 'mode': 'absolute-paths' } }
{ 'execute': 'blockdev-snapshot-sync',
'arguments': { 'device': 'disk',
'snapshot-file': 'TEST_DIR/t.IMGFMT.mid',
'format': 'IMGFMT',
'mode': 'absolute-paths' } }
Formatting 'TEST_DIR/t.qcow2.mid', fmt=qcow2 cluster_size=65536 extended_l2=off compression_type=zlib size=67108864 backing_file=TEST_DIR/t.qcow2.base backing_fmt=qcow2 lazy_refcounts=off refcount_bits=16
{"return": {}}
{ 'execute': 'human-monitor-command', 'arguments': { 'command-line': 'qemu-io disk "write 0 4M"' } }
{ 'execute': 'human-monitor-command',
'arguments': { 'command-line':
'qemu-io disk "write 0 4M"' } }
wrote 4194304/4194304 bytes at offset 0
4 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
{"return": ""}
{ 'execute': 'blockdev-snapshot-sync', 'arguments': { 'device': 'disk', 'snapshot-file': 'TEST_DIR/t.IMGFMT', 'format': 'IMGFMT', 'mode': 'absolute-paths' } }
{ 'execute': 'blockdev-snapshot-sync',
'arguments': { 'device': 'disk',
'snapshot-file': 'TEST_DIR/t.IMGFMT',
'format': 'IMGFMT',
'mode': 'absolute-paths' } }
Formatting 'TEST_DIR/t.qcow2', fmt=qcow2 cluster_size=65536 extended_l2=off compression_type=zlib size=67108864 backing_file=TEST_DIR/t.qcow2.mid backing_fmt=qcow2 lazy_refcounts=off refcount_bits=16
{"return": {}}
=== Start commit job and exit qemu ===
{ 'execute': 'block-commit', 'arguments': { 'device': 'disk', 'base':'TEST_DIR/t.IMGFMT.base', 'top': 'TEST_DIR/t.IMGFMT.mid', 'speed': 65536 } }
{ 'execute': 'block-commit',
'arguments': { 'device': 'disk',
'base':'TEST_DIR/t.IMGFMT.base',
'top': 'TEST_DIR/t.IMGFMT.mid',
'speed': 65536 } }
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "created", "id": "disk"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "disk"}}
{"return": {}}
@ -34,7 +48,10 @@ Formatting 'TEST_DIR/t.qcow2', fmt=qcow2 cluster_size=65536 extended_l2=off comp
{ 'execute': 'qmp_capabilities' }
{"return": {}}
{ 'execute': 'block-commit', 'arguments': { 'device': 'disk', 'base':'TEST_DIR/t.IMGFMT.base', 'speed': 65536 } }
{ 'execute': 'block-commit',
'arguments': { 'device': 'disk',
'base':'TEST_DIR/t.IMGFMT.base',
'speed': 65536 } }
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "created", "id": "disk"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "disk"}}
{"return": {}}
@ -47,7 +64,12 @@ Formatting 'TEST_DIR/t.qcow2', fmt=qcow2 cluster_size=65536 extended_l2=off comp
{ 'execute': 'qmp_capabilities' }
{"return": {}}
{ 'execute': 'drive-mirror', 'arguments': { 'device': 'disk', 'target': 'TEST_DIR/t.IMGFMT.copy', 'format': 'IMGFMT', 'sync': 'full', 'speed': 65536 } }
{ 'execute': 'drive-mirror',
'arguments': { 'device': 'disk',
'target': 'TEST_DIR/t.IMGFMT.copy',
'format': 'IMGFMT',
'sync': 'full',
'speed': 65536 } }
Formatting 'TEST_DIR/t.qcow2.copy', fmt=qcow2 cluster_size=65536 extended_l2=off compression_type=zlib size=67108864 lazy_refcounts=off refcount_bits=16
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "created", "id": "disk"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "disk"}}
@ -61,7 +83,12 @@ Formatting 'TEST_DIR/t.qcow2.copy', fmt=qcow2 cluster_size=65536 extended_l2=off
{ 'execute': 'qmp_capabilities' }
{"return": {}}
{ 'execute': 'drive-backup', 'arguments': { 'device': 'disk', 'target': 'TEST_DIR/t.IMGFMT.copy', 'format': 'IMGFMT', 'sync': 'full', 'speed': 65536 } }
{ 'execute': 'drive-backup',
'arguments': { 'device': 'disk',
'target': 'TEST_DIR/t.IMGFMT.copy',
'format': 'IMGFMT',
'sync': 'full',
'speed': 65536 } }
Formatting 'TEST_DIR/t.qcow2.copy', fmt=qcow2 cluster_size=65536 extended_l2=off compression_type=zlib size=67108864 lazy_refcounts=off refcount_bits=16
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "created", "id": "disk"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "disk"}}
@ -77,7 +104,9 @@ Formatting 'TEST_DIR/t.qcow2.copy', fmt=qcow2 cluster_size=65536 extended_l2=off
{ 'execute': 'qmp_capabilities' }
{"return": {}}
{ 'execute': 'block-stream', 'arguments': { 'device': 'disk', 'speed': 65536 } }
{ 'execute': 'block-stream',
'arguments': { 'device': 'disk',
'speed': 65536 } }
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "created", "id": "disk"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "disk"}}
{"return": {}}


@ -16,7 +16,11 @@ wrote 65536/65536 bytes at offset 1048576
=== Perform commit job ===
{ 'execute': 'block-commit', 'arguments': { 'job-id': 'commit0', 'device': 'top', 'base':'TEST_DIR/t.IMGFMT.base', 'top': 'TEST_DIR/t.IMGFMT.mid' } }
{ 'execute': 'block-commit',
'arguments': { 'job-id': 'commit0',
'device': 'top',
'base':'TEST_DIR/t.IMGFMT.base',
'top': 'TEST_DIR/t.IMGFMT.mid' } }
{
"timestamp": {
"seconds": TIMESTAMP,
@ -427,7 +431,11 @@ wrote 65536/65536 bytes at offset 1048576
=== Perform commit job ===
{ 'execute': 'block-commit', 'arguments': { 'job-id': 'commit0', 'device': 'top', 'base':'TEST_DIR/t.IMGFMT.base', 'top': 'TEST_DIR/t.IMGFMT.mid' } }
{ 'execute': 'block-commit',
'arguments': { 'job-id': 'commit0',
'device': 'top',
'base':'TEST_DIR/t.IMGFMT.base',
'top': 'TEST_DIR/t.IMGFMT.mid' } }
{
"timestamp": {
"seconds": TIMESTAMP,


@ -26,31 +26,48 @@ wrote 2097152/2097152 bytes at offset 2097152
{"execute":"qmp_capabilities"}
{"return": {}}
{"execute":"blockdev-add", "arguments":{"driver":"IMGFMT", "node-name":"n", "file":{"driver":"file", "filename":"TEST_DIR/t.IMGFMT"}}}
{"execute":"blockdev-add",
"arguments":{"driver":"IMGFMT", "node-name":"n",
"file":{"driver":"file", "filename":"TEST_DIR/t.IMGFMT"}}}
{"return": {}}
{"execute":"block-dirty-bitmap-disable", "arguments":{"node":"n", "name":"b"}}
{"execute":"block-dirty-bitmap-disable",
"arguments":{"node":"n", "name":"b"}}
{"return": {}}
=== Set up NBD with normal access ===
{"execute":"nbd-server-add", "arguments":{"device":"n"}}
{"execute":"nbd-server-add",
"arguments":{"device":"n"}}
{"error": {"class": "GenericError", "desc": "NBD server not running"}}
{"execute":"nbd-server-start", "arguments":{"addr":{"type":"unix", "data":{"path":"SOCK_DIR/nbd"}}}}
{"execute":"nbd-server-start",
"arguments":{"addr":{"type":"unix",
"data":{"path":"SOCK_DIR/nbd"}}}}
{"return": {}}
{"execute":"nbd-server-start", "arguments":{"addr":{"type":"unix", "data":{"path":"SOCK_DIR/nbd1"}}}}
{"execute":"nbd-server-start",
"arguments":{"addr":{"type":"unix",
"data":{"path":"SOCK_DIR/nbd1"}}}}
{"error": {"class": "GenericError", "desc": "NBD server already running"}}
exports available: 0
{"execute":"nbd-server-add", "arguments":{"device":"n", "bitmap":"b"}}
{"execute":"nbd-server-add",
"arguments":{"device":"n", "bitmap":"b"}}
{"return": {}}
{"execute":"nbd-server-add", "arguments":{"device":"nosuch"}}
{"execute":"nbd-server-add",
"arguments":{"device":"nosuch"}}
{"error": {"class": "GenericError", "desc": "Cannot find device=nosuch nor node_name=nosuch"}}
{"execute":"nbd-server-add", "arguments":{"device":"n"}}
{"execute":"nbd-server-add",
"arguments":{"device":"n"}}
{"error": {"class": "GenericError", "desc": "Block export id 'n' is already in use"}}
{"execute":"nbd-server-add", "arguments":{"device":"n", "name":"n2", "bitmap":"b2"}}
{"execute":"nbd-server-add",
"arguments":{"device":"n", "name":"n2",
"bitmap":"b2"}}
{"error": {"class": "GenericError", "desc": "Enabled bitmap 'b2' incompatible with readonly export"}}
{"execute":"nbd-server-add", "arguments":{"device":"n", "name":"n2", "bitmap":"b3"}}
{"execute":"nbd-server-add",
"arguments":{"device":"n", "name":"n2",
"bitmap":"b3"}}
{"error": {"class": "GenericError", "desc": "Bitmap 'b3' is not found"}}
{"execute":"nbd-server-add", "arguments":{"device":"n", "name":"n2", "writable":true, "description":"some text", "bitmap":"b2"}}
{"execute":"nbd-server-add",
"arguments":{"device":"n", "name":"n2", "writable":true,
"description":"some text", "bitmap":"b2"}}
{"return": {}}
exports available: 2
export: 'n'
@ -99,12 +116,15 @@ read 2097152/2097152 bytes at offset 2097152
=== End qemu NBD server ===
{"execute":"nbd-server-remove", "arguments":{"name":"n"}}
{"execute":"nbd-server-remove",
"arguments":{"name":"n"}}
{"return": {}}
{"execute":"nbd-server-remove", "arguments":{"name":"n2"}}
{"execute":"nbd-server-remove",
"arguments":{"name":"n2"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_EXPORT_DELETED", "data": {"id": "n"}}
{"return": {}}
{"execute":"nbd-server-remove", "arguments":{"name":"n2"}}
{"execute":"nbd-server-remove",
"arguments":{"name":"n2"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_EXPORT_DELETED", "data": {"id": "n2"}}
{"error": {"class": "GenericError", "desc": "Export 'n2' is not found"}}
{"execute":"nbd-server-stop"}
@ -114,26 +134,41 @@ read 2097152/2097152 bytes at offset 2097152
=== Set up NBD with iothread access ===
{"execute":"x-blockdev-set-iothread", "arguments":{"node-name":"n", "iothread":"io0"}}
{"execute":"x-blockdev-set-iothread",
"arguments":{"node-name":"n", "iothread":"io0"}}
{"return": {}}
{"execute":"nbd-server-add", "arguments":{"device":"n"}}
{"execute":"nbd-server-add",
"arguments":{"device":"n"}}
{"error": {"class": "GenericError", "desc": "NBD server not running"}}
{"execute":"nbd-server-start", "arguments":{"addr":{"type":"unix", "data":{"path":"SOCK_DIR/nbd"}}}}
{"execute":"nbd-server-start",
"arguments":{"addr":{"type":"unix",
"data":{"path":"SOCK_DIR/nbd"}}}}
{"return": {}}
{"execute":"nbd-server-start", "arguments":{"addr":{"type":"unix", "data":{"path":"SOCK_DIR/nbd1"}}}}
{"execute":"nbd-server-start",
"arguments":{"addr":{"type":"unix",
"data":{"path":"SOCK_DIR/nbd1"}}}}
{"error": {"class": "GenericError", "desc": "NBD server already running"}}
exports available: 0
{"execute":"nbd-server-add", "arguments":{"device":"n", "bitmap":"b"}}
{"execute":"nbd-server-add",
"arguments":{"device":"n", "bitmap":"b"}}
{"return": {}}
{"execute":"nbd-server-add", "arguments":{"device":"nosuch"}}
{"execute":"nbd-server-add",
"arguments":{"device":"nosuch"}}
{"error": {"class": "GenericError", "desc": "Cannot find device=nosuch nor node_name=nosuch"}}
{"execute":"nbd-server-add", "arguments":{"device":"n"}}
{"execute":"nbd-server-add",
"arguments":{"device":"n"}}
{"error": {"class": "GenericError", "desc": "Block export id 'n' is already in use"}}
{"execute":"nbd-server-add", "arguments":{"device":"n", "name":"n2", "bitmap":"b2"}}
{"execute":"nbd-server-add",
"arguments":{"device":"n", "name":"n2",
"bitmap":"b2"}}
{"error": {"class": "GenericError", "desc": "Enabled bitmap 'b2' incompatible with readonly export"}}
{"execute":"nbd-server-add", "arguments":{"device":"n", "name":"n2", "bitmap":"b3"}}
{"execute":"nbd-server-add",
"arguments":{"device":"n", "name":"n2",
"bitmap":"b3"}}
{"error": {"class": "GenericError", "desc": "Bitmap 'b3' is not found"}}
{"execute":"nbd-server-add", "arguments":{"device":"n", "name":"n2", "writable":true, "description":"some text", "bitmap":"b2"}}
{"execute":"nbd-server-add",
"arguments":{"device":"n", "name":"n2", "writable":true,
"description":"some text", "bitmap":"b2"}}
{"return": {}}
exports available: 2
export: 'n'
@ -182,12 +217,15 @@ read 2097152/2097152 bytes at offset 2097152
=== End qemu NBD server ===
{"execute":"nbd-server-remove", "arguments":{"name":"n"}}
{"execute":"nbd-server-remove",
"arguments":{"name":"n"}}
{"return": {}}
{"execute":"nbd-server-remove", "arguments":{"name":"n2"}}
{"execute":"nbd-server-remove",
"arguments":{"name":"n2"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_EXPORT_DELETED", "data": {"id": "n"}}
{"return": {}}
{"execute":"nbd-server-remove", "arguments":{"name":"n2"}}
{"execute":"nbd-server-remove",
"arguments":{"name":"n2"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_EXPORT_DELETED", "data": {"id": "n2"}}
{"error": {"class": "GenericError", "desc": "Export 'n2' is not found"}}
{"execute":"nbd-server-stop"}


@ -8,7 +8,14 @@ wrote 2097152/2097152 bytes at offset 0
=== Starting drive-mirror, causing error & stop ===
{'execute': 'drive-mirror', 'arguments': {'device': 'testdisk', 'format': 'IMGFMT', 'target': 'blkdebug:TEST_DIR/blkdebug.conf:TEST_DIR/t.IMGFMT.dest', 'sync': 'full', 'mode': 'existing', 'on-source-error': 'stop', 'on-target-error': 'stop' }}
{'execute': 'drive-mirror',
'arguments': {'device': 'testdisk',
'format': 'IMGFMT',
'target': 'blkdebug:TEST_DIR/blkdebug.conf:TEST_DIR/t.IMGFMT.dest',
'sync': 'full',
'mode': 'existing',
'on-source-error': 'stop',
'on-target-error': 'stop' }}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "created", "id": "testdisk"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "testdisk"}}
{"return": {}}
@ -17,7 +24,9 @@ wrote 2097152/2097152 bytes at offset 0
=== Force cancel job paused in error state ===
{'execute': 'block-job-cancel', 'arguments': { 'device': 'testdisk', 'force': true}}
{'execute': 'block-job-cancel',
'arguments': { 'device': 'testdisk',
'force': true}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "testdisk"}}
{"return": {}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "aborting", "id": "testdisk"}}


@ -7,24 +7,29 @@ Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=1048576 backing_file=TEST_DIR/t.
=== Send a write command to a drive opened in read-only mode (1)
{ 'execute': 'human-monitor-command', 'arguments': {'command-line': 'qemu-io none0 "aio_write 0 2k"'}}
{ 'execute': 'human-monitor-command',
'arguments': {'command-line': 'qemu-io none0 "aio_write 0 2k"'}}
{"return": "Block node is read-onlyrn"}
=== Run block-commit on base using an invalid filter node name
{ 'execute': 'block-commit', 'arguments': {'job-id': 'job0', 'device': 'none1', 'top-node': 'int', 'filter-node-name': '1234'}}
{ 'execute': 'block-commit',
'arguments': {'job-id': 'job0', 'device': 'none1', 'top-node': 'int',
'filter-node-name': '1234'}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "created", "id": "job0"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "null", "id": "job0"}}
{"error": {"class": "GenericError", "desc": "Invalid node name"}}
=== Send a write command to a drive opened in read-only mode (2)
{ 'execute': 'human-monitor-command', 'arguments': {'command-line': 'qemu-io none0 "aio_write 0 2k"'}}
{ 'execute': 'human-monitor-command',
'arguments': {'command-line': 'qemu-io none0 "aio_write 0 2k"'}}
{"return": "Block node is read-onlyrn"}
=== Run block-commit on base using the default filter node name
{ 'execute': 'block-commit', 'arguments': {'job-id': 'job0', 'device': 'none1', 'top-node': 'int'}}
{ 'execute': 'block-commit',
'arguments': {'job-id': 'job0', 'device': 'none1', 'top-node': 'int'}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "created", "id": "job0"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "job0"}}
{"return": {}}
@ -36,6 +41,7 @@ Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=1048576 backing_file=TEST_DIR/t.
=== Send a write command to a drive opened in read-only mode (3)
{ 'execute': 'human-monitor-command', 'arguments': {'command-line': 'qemu-io none0 "aio_write 0 2k"'}}
{ 'execute': 'human-monitor-command',
'arguments': {'command-line': 'qemu-io none0 "aio_write 0 2k"'}}
{"return": "Block node is read-onlyrn"}
*** done

tests/qemu-iotests/298 Normal file

@ -0,0 +1,186 @@
#!/usr/bin/env python3
#
# Test for preallocate filter
#
# Copyright (c) 2020 Virtuozzo International GmbH.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import os
import iotests
MiB = 1024 * 1024
disk = os.path.join(iotests.test_dir, 'disk')
overlay = os.path.join(iotests.test_dir, 'overlay')
refdisk = os.path.join(iotests.test_dir, 'refdisk')
drive_opts = f'node-name=disk,driver={iotests.imgfmt},' \
f'file.node-name=filter,file.driver=preallocate,' \
f'file.file.node-name=file,file.file.filename={disk}'
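# (Roughly, drive_opts stacks three nodes: the format node 'disk'
# (iotests.imgfmt) on top of the 'preallocate' filter node 'filter',
# which in turn sits on top of the 'file' protocol node opening the
# image path held in the 'disk' variable.)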
class TestPreallocateBase(iotests.QMPTestCase):
def setUp(self):
iotests.qemu_img_create('-f', iotests.imgfmt, disk, str(10 * MiB))
def tearDown(self):
try:
self.check_small()
check = iotests.qemu_img_check(disk)
self.assertFalse('leaks' in check)
self.assertFalse('corruptions' in check)
self.assertEqual(check['check-errors'], 0)
finally:
os.remove(disk)
def check_big(self):
self.assertTrue(os.path.getsize(disk) > 100 * MiB)
def check_small(self):
self.assertTrue(os.path.getsize(disk) < 10 * MiB)
class TestQemuImg(TestPreallocateBase):
def test_qemu_img(self):
p = iotests.QemuIoInteractive('--image-opts', drive_opts)
p.cmd('write 0 1M')
p.cmd('flush')
self.check_big()
p.close()
class TestPreallocateFilter(TestPreallocateBase):
def setUp(self):
super().setUp()
self.vm = iotests.VM().add_drive(path=None, opts=drive_opts)
self.vm.launch()
def tearDown(self):
self.vm.shutdown()
super().tearDown()
def test_prealloc(self):
self.vm.hmp_qemu_io('drive0', 'write 0 1M')
self.check_big()
def test_external_snapshot(self):
self.test_prealloc()
result = self.vm.qmp('blockdev-snapshot-sync', node_name='disk',
snapshot_file=overlay,
snapshot_node_name='overlay')
self.assert_qmp(result, 'return', {})
        # on reopen of the base as read-only, preallocation should be dropped
self.check_small()
self.vm.hmp_qemu_io('drive0', 'write 1M 1M')
result = self.vm.qmp('block-commit', device='overlay')
self.assert_qmp(result, 'return', {})
self.complete_and_wait()
# commit of new megabyte should trigger preallocation
self.check_big()
def test_reopen_opts(self):
result = self.vm.qmp('x-blockdev-reopen', **{
'node-name': 'disk',
'driver': iotests.imgfmt,
'file': {
'node-name': 'filter',
'driver': 'preallocate',
'prealloc-size': 20 * MiB,
'prealloc-align': 5 * MiB,
'file': {
'node-name': 'file',
'driver': 'file',
'filename': disk
}
}
})
self.assert_qmp(result, 'return', {})
self.vm.hmp_qemu_io('drive0', 'write 0 1M')
self.assertTrue(os.path.getsize(disk) == 25 * MiB)
class TestTruncate(iotests.QMPTestCase):
def setUp(self):
iotests.qemu_img_create('-f', iotests.imgfmt, disk, str(10 * MiB))
iotests.qemu_img_create('-f', iotests.imgfmt, refdisk, str(10 * MiB))
def tearDown(self):
os.remove(disk)
os.remove(refdisk)
def do_test(self, prealloc_mode, new_size):
ret = iotests.qemu_io_silent('--image-opts', '-c', 'write 0 10M', '-c',
f'truncate -m {prealloc_mode} {new_size}',
drive_opts)
self.assertEqual(ret, 0)
ret = iotests.qemu_io_silent('-f', iotests.imgfmt, '-c', 'write 0 10M',
'-c',
f'truncate -m {prealloc_mode} {new_size}',
refdisk)
self.assertEqual(ret, 0)
stat = os.stat(disk)
refstat = os.stat(refdisk)
        # The preallocate filter will probably want to keep the preallocation
        # aligned to the cluster size when shrinking it, so ignore a small
        # difference.
self.assertLess(abs(stat.st_size - refstat.st_size), 64 * 1024)
        # The preallocate filter may leak some internal clusters (for example,
        # if the guest writes far beyond EOF, skipping some clusters, those
        # skipped clusters remain fallocated; the preallocate filter does not
        # care about such leaks and drops only the trailing preallocation).
self.assertLess(abs(stat.st_blocks - refstat.st_blocks) * 512,
1024 * 1024)
def test_real_shrink(self):
self.do_test('off', '5M')
def test_truncate_inside_preallocated_area__falloc(self):
self.do_test('falloc', '50M')
def test_truncate_inside_preallocated_area__metadata(self):
self.do_test('metadata', '50M')
def test_truncate_inside_preallocated_area__full(self):
self.do_test('full', '50M')
def test_truncate_inside_preallocated_area__off(self):
self.do_test('off', '50M')
def test_truncate_over_preallocated_area__falloc(self):
self.do_test('falloc', '150M')
def test_truncate_over_preallocated_area__metadata(self):
self.do_test('metadata', '150M')
def test_truncate_over_preallocated_area__full(self):
self.do_test('full', '150M')
def test_truncate_over_preallocated_area__off(self):
self.do_test('off', '150M')
if __name__ == '__main__':
iotests.main(supported_fmts=['qcow2'], required_fmts=['preallocate'])
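A minimal editorial sketch of attaching the same filter stack through QMP
blockdev-add instead of the drive_opts string (assuming the iotests helpers
used above; vm stands for the launched iotests.VM(), i.e. self.vm in the
classes above, and the node names and prealloc values simply mirror the test):

    result = vm.qmp('blockdev-add', **{
        'driver': iotests.imgfmt,         # qcow2 in this test
        'node-name': 'disk',
        'file': {
            'driver': 'preallocate',
            'node-name': 'filter',
            'prealloc-size': 20 * MiB,    # same knobs as test_reopen_opts
            'prealloc-align': 5 * MiB,
            'file': {
                'driver': 'file',
                'node-name': 'file',
                'filename': disk,
            },
        },
    })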


@ -0,0 +1,5 @@
.............
----------------------------------------------------------------------
Ran 13 tests
OK


@ -5,42 +5,91 @@ wrote 67108864/67108864 bytes at offset 0
64 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
{'execute': 'qmp_capabilities'}
{"return": {}}
{'execute': 'blockdev-add', 'arguments': { 'driver': 'file', 'node-name': 'node-protocol', 'filename': 'TEST_DIR/t.IMGFMT' } }
{'execute': 'blockdev-add',
'arguments': {
'driver': 'file',
'node-name': 'node-protocol',
'filename': 'TEST_DIR/t.IMGFMT'
} }
{"return": {}}
{'execute': 'blockdev-add', 'arguments': { 'driver': 'IMGFMT', 'node-name': 'node-format', 'file': 'node-protocol' } }
{'execute': 'blockdev-add',
'arguments': {
'driver': 'IMGFMT',
'node-name': 'node-format',
'file': 'node-protocol'
} }
{"return": {}}
=== Mountpoint not present ===
{'execute': 'block-export-add', 'arguments': { 'type': 'fuse', 'id': 'export-err', 'node-name': 'node-format', 'mountpoint': 'TEST_DIR/t.IMGFMT.fuse' } }
{'execute': 'block-export-add',
'arguments': {
'type': 'fuse',
'id': 'export-err',
'node-name': 'node-format',
'mountpoint': 'TEST_DIR/t.IMGFMT.fuse'
} }
{"error": {"class": "GenericError", "desc": "Failed to stat 'TEST_DIR/t.IMGFMT.fuse': No such file or directory"}}
=== Mountpoint is a directory ===
{'execute': 'block-export-add', 'arguments': { 'type': 'fuse', 'id': 'export-err', 'node-name': 'node-format', 'mountpoint': 'TEST_DIR/t.IMGFMT.fuse' } }
{'execute': 'block-export-add',
'arguments': {
'type': 'fuse',
'id': 'export-err',
'node-name': 'node-format',
'mountpoint': 'TEST_DIR/t.IMGFMT.fuse'
} }
{"error": {"class": "GenericError", "desc": "'TEST_DIR/t.IMGFMT.fuse' is not a regular file"}}
=== Mountpoint is a regular file ===
{'execute': 'block-export-add', 'arguments': { 'type': 'fuse', 'id': 'export-mp', 'node-name': 'node-format', 'mountpoint': 'TEST_DIR/t.IMGFMT.fuse' } }
{'execute': 'block-export-add',
'arguments': {
'type': 'fuse',
'id': 'export-mp',
'node-name': 'node-format',
'mountpoint': 'TEST_DIR/t.IMGFMT.fuse'
} }
{"return": {}}
Images are identical.
=== Mount over existing file ===
{'execute': 'block-export-add', 'arguments': { 'type': 'fuse', 'id': 'export-img', 'node-name': 'node-format', 'mountpoint': 'TEST_DIR/t.IMGFMT' } }
{'execute': 'block-export-add',
'arguments': {
'type': 'fuse',
'id': 'export-img',
'node-name': 'node-format',
'mountpoint': 'TEST_DIR/t.IMGFMT'
} }
{"return": {}}
Images are identical.
=== Double export ===
{'execute': 'block-export-add', 'arguments': { 'type': 'fuse', 'id': 'export-err', 'node-name': 'node-format', 'mountpoint': 'TEST_DIR/t.IMGFMT.fuse' } }
{'execute': 'block-export-add',
'arguments': {
'type': 'fuse',
'id': 'export-err',
'node-name': 'node-format',
'mountpoint': 'TEST_DIR/t.IMGFMT.fuse'
} }
{"error": {"class": "GenericError", "desc": "There already is a FUSE export on 'TEST_DIR/t.IMGFMT.fuse'"}}
=== Remove export ===
virtual size: 64 MiB (67108864 bytes)
{'execute': 'block-export-del', 'arguments': { 'id': 'export-mp' } }
{'execute': 'block-export-del',
'arguments': {
'id': 'export-mp'
} }
{"return": {}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_EXPORT_DELETED", "data": {"id": "export-mp"}}
virtual size: 0 B (0 bytes)
=== Writable export ===
{'execute': 'block-export-add', 'arguments': { 'type': 'fuse', 'id': 'export-mp', 'node-name': 'node-format', 'mountpoint': 'TEST_DIR/t.IMGFMT.fuse', 'writable': true } }
{'execute': 'block-export-add',
'arguments': {
'type': 'fuse',
'id': 'export-mp',
'node-name': 'node-format',
'mountpoint': 'TEST_DIR/t.IMGFMT.fuse', 'writable': true
} }
{"return": {}}
write failed: Permission denied
wrote 65536/65536 bytes at offset 1048576
@ -49,15 +98,30 @@ wrote 65536/65536 bytes at offset 1048576
64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
=== Resizing exports ===
{'execute': 'block-export-del', 'arguments': { 'id': 'export-mp' } }
{'execute': 'block-export-del',
'arguments': {
'id': 'export-mp'
} }
{"return": {}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_EXPORT_DELETED", "data": {"id": "export-mp"}}
{'execute': 'block-export-del', 'arguments': { 'id': 'export-img' } }
{'execute': 'block-export-del',
'arguments': {
'id': 'export-img'
} }
{"return": {}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_EXPORT_DELETED", "data": {"id": "export-img"}}
{'execute': 'blockdev-del', 'arguments': { 'node-name': 'node-format' } }
{'execute': 'blockdev-del',
'arguments': {
'node-name': 'node-format'
} }
{"return": {}}
{'execute': 'block-export-add', 'arguments': { 'type': 'fuse', 'id': 'export-mp', 'node-name': 'node-protocol', 'mountpoint': 'TEST_DIR/t.IMGFMT.fuse', 'writable': true } }
{'execute': 'block-export-add',
'arguments': {
'type': 'fuse',
'id': 'export-mp',
'node-name': 'node-protocol',
'mountpoint': 'TEST_DIR/t.IMGFMT.fuse', 'writable': true
} }
{"return": {}}
--- Try growing non-growable export ---
@ -72,10 +136,19 @@ OK: Post-truncate image size is as expected
OK: Disk usage grew with fallocate
--- Try growing growable export ---
{'execute': 'block-export-del', 'arguments': { 'id': 'export-mp' } }
{'execute': 'block-export-del',
'arguments': {
'id': 'export-mp'
} }
{"return": {}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_EXPORT_DELETED", "data": {"id": "export-mp"}}
{'execute': 'block-export-add', 'arguments': { 'type': 'fuse', 'id': 'export-mp', 'node-name': 'node-protocol', 'mountpoint': 'TEST_DIR/t.IMGFMT.fuse', 'writable': true, 'growable': true } }
{'execute': 'block-export-add',
'arguments': {
'type': 'fuse',
'id': 'export-mp',
'node-name': 'node-protocol',
'mountpoint': 'TEST_DIR/t.IMGFMT.fuse', 'writable': true, 'growable': true
} }
{"return": {}}
65536+0 records in
65536+0 records out

tests/qemu-iotests/312 Executable file

@ -0,0 +1,159 @@
#!/usr/bin/env bash
#
# Test drive-mirror with quorum
#
# The goal of this test is to check how the quorum driver reports
# regions that are known to read as zeroes (BDRV_BLOCK_ZERO). The idea
# is that drive-mirror will try the efficient representation of zeroes
# in the destination image instead of writing actual zeroes.
#
# Copyright (C) 2020 Igalia, S.L.
# Author: Alberto Garcia <berto@igalia.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# creator
owner=berto@igalia.com
seq=`basename $0`
echo "QA output created by $seq"
status=1 # failure is the default!
_cleanup()
{
_rm_test_img "$TEST_IMG.0"
_rm_test_img "$TEST_IMG.1"
_rm_test_img "$TEST_IMG.2"
_rm_test_img "$TEST_IMG.3"
_cleanup_qemu
}
trap "_cleanup; exit \$status" 0 1 2 3 15
# get standard environment, filters and checks
. ./common.rc
. ./common.filter
. ./common.qemu
_supported_fmt qcow2
_supported_proto file
_supported_os Linux
_unsupported_imgopts cluster_size data_file
echo
echo '### Create all images' # three source (quorum), one destination
echo
TEST_IMG="$TEST_IMG.0" _make_test_img -o cluster_size=64k 10M
TEST_IMG="$TEST_IMG.1" _make_test_img -o cluster_size=64k 10M
TEST_IMG="$TEST_IMG.2" _make_test_img -o cluster_size=64k 10M
TEST_IMG="$TEST_IMG.3" _make_test_img -o cluster_size=64k 10M
quorum="driver=raw,file.driver=quorum,file.vote-threshold=2"
quorum="$quorum,file.children.0.file.filename=$TEST_IMG.0"
quorum="$quorum,file.children.1.file.filename=$TEST_IMG.1"
quorum="$quorum,file.children.2.file.filename=$TEST_IMG.2"
quorum="$quorum,file.children.0.driver=$IMGFMT"
quorum="$quorum,file.children.1.driver=$IMGFMT"
quorum="$quorum,file.children.2.driver=$IMGFMT"
echo
echo '### Output of qemu-img map (empty quorum)'
echo
$QEMU_IMG map --image-opts $quorum | _filter_qemu_img_map
# Now we write data to the quorum. All three images will read as
# zeroes in all cases, but with different ways to represent them
# (unallocated clusters, zero clusters, data clusters with zeroes)
# that will have an effect on how the data will be mirrored and the
# output of qemu-img map on the resulting image.
echo
echo '### Write data to the quorum'
echo
# Test 1: data regions surrounded by unallocated clusters.
# Three data regions, the largest one (0x30000) will be picked, end result:
# offset 0x10000, length 0x30000 -> data
$QEMU_IO -c "write -P 0 $((0x10000)) $((0x10000))" "$TEST_IMG.0" | _filter_qemu_io
$QEMU_IO -c "write -P 0 $((0x10000)) $((0x30000))" "$TEST_IMG.1" | _filter_qemu_io
$QEMU_IO -c "write -P 0 $((0x10000)) $((0x20000))" "$TEST_IMG.2" | _filter_qemu_io
# Test 2: zero regions surrounded by data clusters.
# First we allocate the data clusters.
$QEMU_IO -c "open -o $quorum" -c "write -P 0 $((0x100000)) $((0x40000))" | _filter_qemu_io
# Three zero regions, the smallest one (0x10000) will be picked, end result:
# offset 0x100000, length 0x10000 -> data
# offset 0x110000, length 0x10000 -> zeroes
# offset 0x120000, length 0x20000 -> data
$QEMU_IO -c "write -z $((0x110000)) $((0x10000))" "$TEST_IMG.0" | _filter_qemu_io
$QEMU_IO -c "write -z $((0x110000)) $((0x30000))" "$TEST_IMG.1" | _filter_qemu_io
$QEMU_IO -c "write -z $((0x110000)) $((0x20000))" "$TEST_IMG.2" | _filter_qemu_io
# Test 3: zero clusters surrounded by unallocated clusters.
# Everything reads as zeroes, no effect on the end result.
$QEMU_IO -c "write -z $((0x150000)) $((0x10000))" "$TEST_IMG.0" | _filter_qemu_io
$QEMU_IO -c "write -z $((0x150000)) $((0x30000))" "$TEST_IMG.1" | _filter_qemu_io
$QEMU_IO -c "write -z $((0x150000)) $((0x20000))" "$TEST_IMG.2" | _filter_qemu_io
# Test 4: mix of data and zero clusters.
# The zero region will be ignored in favor of the largest data region
# (0x20000), end result:
# offset 0x200000, length 0x20000 -> data
$QEMU_IO -c "write -P 0 $((0x200000)) $((0x10000))" "$TEST_IMG.0" | _filter_qemu_io
$QEMU_IO -c "write -z $((0x200000)) $((0x30000))" "$TEST_IMG.1" | _filter_qemu_io
$QEMU_IO -c "write -P 0 $((0x200000)) $((0x20000))" "$TEST_IMG.2" | _filter_qemu_io
# Test 5: write data to a region and then zeroize it, doing it
# directly on the quorum device instead of the individual images.
# This has no effect on the end result but proves that the quorum driver
# supports 'write -z'.
$QEMU_IO -c "open -o $quorum" -c "write -P 1 $((0x250000)) $((0x10000))" | _filter_qemu_io
# Verify the data that we just wrote
$QEMU_IO -c "open -o $quorum" -c "read -P 1 $((0x250000)) $((0x10000))" | _filter_qemu_io
$QEMU_IO -c "open -o $quorum" -c "write -z $((0x250000)) $((0x10000))" | _filter_qemu_io
# Now it should read back as zeroes
$QEMU_IO -c "open -o $quorum" -c "read -P 0 $((0x250000)) $((0x10000))" | _filter_qemu_io
echo
echo '### Launch the drive-mirror job'
echo
qemu_comm_method="qmp" _launch_qemu -drive if=virtio,"$quorum"
h=$QEMU_HANDLE
_send_qemu_cmd $h "{ 'execute': 'qmp_capabilities' }" 'return'
_send_qemu_cmd $h \
"{'execute': 'drive-mirror',
'arguments': {'device': 'virtio0',
'format': '$IMGFMT',
'target': '$TEST_IMG.3',
'sync': 'full',
'mode': 'existing' }}" \
"BLOCK_JOB_READY.*virtio0"
_send_qemu_cmd $h \
"{ 'execute': 'block-job-complete',
'arguments': { 'device': 'virtio0' } }" \
'BLOCK_JOB_COMPLETED'
_send_qemu_cmd $h "{ 'execute': 'quit' }" ''
echo
echo '### Output of qemu-img map (destination image)'
echo
$QEMU_IMG map "$TEST_IMG.3" | _filter_qemu_img_map
# success, all done
echo "*** done"
rm -f $seq.full
status=0
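The per-test expectations in the comments above follow a single selection
rule. A small illustrative Python sketch of that rule (a simplification of
what the test relies on; function and variable names are invented), applied
to the (reads-as-zero, length) status each quorum child reports for the same
guest offset:

    def quorum_status(children):
        """children: one (reads_as_zero, length) pair per quorum child."""
        data_lengths = [n for zero, n in children if not zero]
        if data_lengths:
            # Any child holding data makes the region data, for as long as
            # the longest data run (test 1: 0x30000 wins).
            return 'data', max(data_lengths)
        # Only when every child reads as zero can the region stay zero, and
        # only for the shortest zero run (test 2: 0x10000 wins).
        return 'zero', min(n for _zero, n in children)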


@ -0,0 +1,81 @@
QA output created by 312
### Create all images
Formatting 'TEST_DIR/t.IMGFMT.0', fmt=IMGFMT size=10485760
Formatting 'TEST_DIR/t.IMGFMT.1', fmt=IMGFMT size=10485760
Formatting 'TEST_DIR/t.IMGFMT.2', fmt=IMGFMT size=10485760
Formatting 'TEST_DIR/t.IMGFMT.3', fmt=IMGFMT size=10485760
### Output of qemu-img map (empty quorum)
Offset Length File
### Write data to the quorum
wrote 65536/65536 bytes at offset 65536
64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
wrote 196608/196608 bytes at offset 65536
192 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
wrote 131072/131072 bytes at offset 65536
128 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
wrote 262144/262144 bytes at offset 1048576
256 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
wrote 65536/65536 bytes at offset 1114112
64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
wrote 196608/196608 bytes at offset 1114112
192 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
wrote 131072/131072 bytes at offset 1114112
128 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
wrote 65536/65536 bytes at offset 1376256
64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
wrote 196608/196608 bytes at offset 1376256
192 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
wrote 131072/131072 bytes at offset 1376256
128 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
wrote 65536/65536 bytes at offset 2097152
64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
wrote 196608/196608 bytes at offset 2097152
192 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
wrote 131072/131072 bytes at offset 2097152
128 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
wrote 65536/65536 bytes at offset 2424832
64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
read 65536/65536 bytes at offset 2424832
64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
wrote 65536/65536 bytes at offset 2424832
64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
read 65536/65536 bytes at offset 2424832
64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
### Launch the drive-mirror job
{ 'execute': 'qmp_capabilities' }
{"return": {}}
{'execute': 'drive-mirror',
'arguments': {'device': 'virtio0',
'format': 'IMGFMT',
'target': 'TEST_DIR/t.IMGFMT.3',
'sync': 'full',
'mode': 'existing' }}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "created", "id": "virtio0"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "virtio0"}}
{"return": {}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "ready", "id": "virtio0"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_READY", "data": {"device": "virtio0", "len": 10485760, "offset": 10485760, "speed": 0, "type": "mirror"}}
{ 'execute': 'block-job-complete',
'arguments': { 'device': 'virtio0' } }
{"return": {}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "waiting", "id": "virtio0"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "pending", "id": "virtio0"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "virtio0", "len": 10485760, "offset": 10485760, "speed": 0, "type": "mirror"}}
{ 'execute': 'quit' }
### Output of qemu-img map (destination image)
Offset Length File
0x10000 0x30000 TEST_DIR/t.IMGFMT.3
0x100000 0x10000 TEST_DIR/t.IMGFMT.3
0x120000 0x20000 TEST_DIR/t.IMGFMT.3
0x200000 0x20000 TEST_DIR/t.IMGFMT.3
*** done


@ -146,14 +146,9 @@ _send_qemu_cmd()
count=${qemu_cmd_repeat}
use_error="no"
fi
# This array element extraction is done to accommodate pathnames with spaces
if [ -z "${success_or_failure}" ]; then
cmd=${@: 1:${#@}-1}
shift $(($# - 1))
else
cmd=${@: 1:${#@}-2}
shift $(($# - 2))
fi
cmd=$1
shift
# Display QMP being sent, but not HMP (since HMP already echoes its
# input back to output); decide based on leading '{'


@ -307,6 +307,7 @@
295 rw
296 rw
297 meta
298
299 auto quick
300 migration
301 backing quick
@ -317,3 +318,4 @@
307 rw quick export
308 rw
309 rw auto quick
312 rw auto quick


@ -205,7 +205,12 @@ def qemu_io_log(*args):
def qemu_io_silent(*args):
'''Run qemu-io and return the exit code, suppressing stdout'''
args = qemu_io_args + list(args)
if '-f' in args or '--image-opts' in args:
default_args = qemu_io_args_no_fmt
else:
default_args = qemu_io_args
args = default_args + list(args)
exitcode = subprocess.call(args, stdout=open('/dev/null', 'w'))
if exitcode < 0:
sys.stderr.write('qemu-io received signal %i: %s\n' %
@ -1118,6 +1123,11 @@ def _verify_aio_mode(supported_aio_modes: Sequence[str] = ()) -> None:
if supported_aio_modes and (aiomode not in supported_aio_modes):
notrun('not suitable for this aio mode: %s' % aiomode)
def _verify_formats(required_formats: Sequence[str] = ()) -> None:
usf_list = list(set(required_formats) - set(supported_formats()))
if usf_list:
notrun(f'formats {usf_list} are not whitelisted')
def supports_quorum():
return 'quorum' in qemu_img_pipe('--help')
@ -1275,7 +1285,8 @@ def execute_setup_common(supported_fmts: Sequence[str] = (),
supported_aio_modes: Sequence[str] = (),
unsupported_fmts: Sequence[str] = (),
supported_protocols: Sequence[str] = (),
unsupported_protocols: Sequence[str] = ()) -> bool:
unsupported_protocols: Sequence[str] = (),
required_fmts: Sequence[str] = ()) -> bool:
"""
Perform necessary setup for either script-style or unittest-style tests.
@ -1301,6 +1312,7 @@ def execute_setup_common(supported_fmts: Sequence[str] = (),
_verify_platform(supported=supported_platforms)
_verify_cache_mode(supported_cache_modes)
_verify_aio_mode(supported_aio_modes)
_verify_formats(required_fmts)
return debug