Block patches (v2) for the block queue.

-----BEGIN PGP SIGNATURE-----
 Version: GnuPG v2
 
 iQEvBAABCAAZBQJXhikBEhxtcmVpdHpAcmVkaGF0LmNvbQAKCRA7sUIC6DisrT8r
 B/9RRIP1jCtgPIFd/xdlKKURbxZ91ffcfVGZ7atur2x6PZNpvH83dl0haK+KMNq4
 FjHfgvOmVmInWORNvambwaplXqgarp/Cyt0xqtj93bGN14YMw0ByJ8M844jIQZjr
 T4dnH4usU5pG8lQ+jusQXMtThn9zRoi7dbJ7zzxYWJI+ExP/HpIjwoq+tkq84KbR
 /WVJCBLGIVsNTD7Q/cl4lzSoz/HgjBf6wdUZTWhX0cjZBmCFgXvGH7+Blcne2/aX
 kr8CSIfiOHDI5ZwvM2awYzcttAecErh3kOJRsJ1N5TscGagsL0xdno7Ghl6iRT84
 JRxaZ1tIfEiIP/Ov+d9macc0
 =Lvm2
 -----END PGP SIGNATURE-----

Merge remote-tracking branch 'mreitz/tags/pull-block-for-kevin-2016-07-13' into queue-block

Block patches (v2) for the block queue.

# gpg: Signature made Wed Jul 13 13:41:53 2016 CEST
# gpg:                using RSA key 0x3BB14202E838ACAD
# gpg: Good signature from "Max Reitz <mreitz@redhat.com>"
# Primary key fingerprint: 91BE B60A 30DB 3E88 57D1  1829 F407 DB00 61D5 CF40
#      Subkey fingerprint: 58B3 81CE 2DC8 9CF9 9730  EE64 3BB1 4202 E838 ACAD

* mreitz/tags/pull-block-for-kevin-2016-07-13:
  iotests: Make 157 actually format-agnostic
  vvfat: Fix qcow write target driver specification
  hmp: show all of snapshot info on every block dev in output of 'info snapshots'
  hmp: use snapshot name to determine whether a snapshot is 'fully available'
  qemu-iotests: Test naming of throttling groups
  blockdev: Fix regression with the default naming of throttling groups
  vmdk: fix metadata write regression
  Improve block job rate limiting for small bandwidth values
  qcow2: Fix qcow2_get_cluster_offset()
  qemu-io: Use correct range limitations
  qcow2: Avoid making the L1 table too big
  qemu-img: Use strerror() for generic resize error

Signed-off-by: Kevin Wolf <kwolf@redhat.com>

commit 543d7a42ba
Author: Kevin Wolf <kwolf@redhat.com>
Date:   2016-07-13 13:45:55 +02:00

 15 files changed, 285 insertions(+), 69 deletions(-)

@@ -113,6 +113,7 @@ static void coroutine_fn commit_run(void *opaque)
CommitBlockJob *s = opaque;
CommitCompleteData *data;
int64_t sector_num, end;
uint64_t delay_ns = 0;
int ret = 0;
int n = 0;
void *buf = NULL;
@@ -142,10 +143,8 @@ static void coroutine_fn commit_run(void *opaque)
buf = blk_blockalign(s->top, COMMIT_BUFFER_SIZE);
for (sector_num = 0; sector_num < end; sector_num += n) {
uint64_t delay_ns = 0;
bool copy;
wait:
/* Note that even when no rate limit is applied we need to yield
* with no pending I/O here so that bdrv_drain_all() returns.
*/
@@ -161,12 +160,6 @@ wait:
copy = (ret == 1);
trace_commit_one_iteration(s, sector_num, n, ret);
if (copy) {
if (s->common.speed) {
delay_ns = ratelimit_calculate_delay(&s->limit, n);
if (delay_ns > 0) {
goto wait;
}
}
ret = commit_populate(s->top, s->base, sector_num, n, buf);
bytes_written += n * BDRV_SECTOR_SIZE;
}
@@ -182,6 +175,10 @@ wait:
}
/* Publish progress */
s->common.offset += n * BDRV_SECTOR_SIZE;
if (copy && s->common.speed) {
delay_ns = ratelimit_calculate_delay(&s->limit, n);
}
}
ret = 0;
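
The restructured loop is easier to follow if you keep in mind where the delay now takes effect: it is computed once per iteration, after the copy, and consumed by the job's sleep at the top of the next iteration instead of via the removed wait: label. A rough sketch of the resulting shape; the block_job_sleep_ns() and block_job_is_cancelled() calls are assumed from surrounding context that the hunk does not show, so this is not a verbatim excerpt:

/* Hedged sketch of the reworked commit_run() loop. */
for (sector_num = 0; sector_num < end; sector_num += n) {
    bool copy;

    /* Yield with the delay computed by the previous iteration
     * (initially 0) so that bdrv_drain_all() can return even when no
     * rate limit is applied. */
    block_job_sleep_ns(&s->common, QEMU_CLOCK_REALTIME, delay_ns);
    if (block_job_is_cancelled(&s->common)) {
        break;
    }

    /* ... query allocation status into ret/n ... */
    copy = (ret == 1);
    if (copy) {
        ret = commit_populate(s->top, s->base, sector_num, n, buf);
        bytes_written += n * BDRV_SECTOR_SIZE;
    }

    /* Publish progress and schedule the delay for the next iteration. */
    s->common.offset += n * BDRV_SECTOR_SIZE;
    if (copy && s->common.speed) {
        delay_ns = ratelimit_calculate_delay(&s->limit, n);
    }
}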

@@ -422,7 +422,9 @@ static uint64_t coroutine_fn mirror_iteration(MirrorBlockJob *s)
assert(io_sectors);
sector_num += io_sectors;
nb_chunks -= DIV_ROUND_UP(io_sectors, sectors_per_chunk);
delay_ns += ratelimit_calculate_delay(&s->limit, io_sectors);
if (s->common.speed) {
delay_ns = ratelimit_calculate_delay(&s->limit, io_sectors);
}
}
return delay_ns;
}

@@ -65,7 +65,8 @@ int qcow2_grow_l1_table(BlockDriverState *bs, uint64_t min_size,
}
}
if (new_l1_size > INT_MAX / sizeof(uint64_t)) {
QEMU_BUILD_BUG_ON(QCOW_MAX_L1_SIZE > INT_MAX);
if (new_l1_size > QCOW_MAX_L1_SIZE / sizeof(uint64_t)) {
return -EFBIG;
}
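
For a sense of scale behind this hunk: the old bound only guaranteed that the entry count fit in an int, while the new one caps the table itself. A rough calculation, assuming QCOW_MAX_L1_SIZE is the 32 MiB constant from block/qcow2.h (its definition is not part of this hunk):

/* Illustrative numbers, assuming QCOW_MAX_L1_SIZE == 0x2000000 (32 MiB):
 *
 *   max L1 entries   = 32 MiB / sizeof(uint64_t) = 4,194,304
 *   max L1 byte size = 32 MiB, hence QEMU_BUILD_BUG_ON(QCOW_MAX_L1_SIZE > INT_MAX)
 *
 * With the default 64 KiB clusters, one L2 table maps 512 MiB, so a
 * 32 MiB L1 table still covers roughly 2 PiB of virtual disk, while the
 * old INT_MAX / sizeof(uint64_t) bound would have allowed L1 tables of
 * almost 2 GiB. */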
@@ -482,8 +483,8 @@ int qcow2_get_cluster_offset(BlockDriverState *bs, uint64_t offset,
unsigned int l2_index;
uint64_t l1_index, l2_offset, *l2_table;
int l1_bits, c;
unsigned int offset_in_cluster, nb_clusters;
uint64_t bytes_available, bytes_needed;
unsigned int offset_in_cluster;
uint64_t bytes_available, bytes_needed, nb_clusters;
int ret;
offset_in_cluster = offset_into_cluster(s, offset);
@@ -499,7 +500,6 @@ int qcow2_get_cluster_offset(BlockDriverState *bs, uint64_t offset,
if (bytes_needed > bytes_available) {
bytes_needed = bytes_available;
}
assert(bytes_needed <= INT_MAX);
*cluster_offset = 0;
@@ -536,8 +536,11 @@ int qcow2_get_cluster_offset(BlockDriverState *bs, uint64_t offset,
l2_index = (offset >> s->cluster_bits) & (s->l2_size - 1);
*cluster_offset = be64_to_cpu(l2_table[l2_index]);
/* nb_needed <= INT_MAX, thus nb_clusters <= INT_MAX, too */
nb_clusters = size_to_clusters(s, bytes_needed);
/* bytes_needed <= *bytes + offset_in_cluster, both of which are unsigned
* integers; the minimum cluster size is 512, so this assertion is always
* true */
assert(nb_clusters <= INT_MAX);
ret = qcow2_get_cluster_type(*cluster_offset);
switch (ret) {
@@ -584,13 +587,17 @@ int qcow2_get_cluster_offset(BlockDriverState *bs, uint64_t offset,
qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
bytes_available = (c * s->cluster_size);
bytes_available = (int64_t)c * s->cluster_size;
out:
if (bytes_available > bytes_needed) {
bytes_available = bytes_needed;
}
/* bytes_available <= bytes_needed <= *bytes + offset_in_cluster;
* subtracting offset_in_cluster will therefore definitely yield something
* not exceeding UINT_MAX */
assert(bytes_available - offset_in_cluster <= UINT_MAX);
*bytes = bytes_available - offset_in_cluster;
return ret;

@@ -95,6 +95,7 @@ static void coroutine_fn stream_run(void *opaque)
BlockDriverState *base = s->base;
int64_t sector_num = 0;
int64_t end = -1;
uint64_t delay_ns = 0;
int error = 0;
int ret = 0;
int n = 0;
@@ -123,10 +124,8 @@ static void coroutine_fn stream_run(void *opaque)
}
for (sector_num = 0; sector_num < end; sector_num += n) {
uint64_t delay_ns = 0;
bool copy;
wait:
/* Note that even when no rate limit is applied we need to yield
* with no pending I/O here so that bdrv_drain_all() returns.
*/
@@ -156,12 +155,6 @@ wait:
}
trace_stream_one_iteration(s, sector_num, n, ret);
if (copy) {
if (s->common.speed) {
delay_ns = ratelimit_calculate_delay(&s->limit, n);
if (delay_ns > 0) {
goto wait;
}
}
ret = stream_populate(blk, sector_num, n, buf);
}
if (ret < 0) {
@@ -182,6 +175,9 @@ wait:
/* Publish progress */
s->common.offset += n * BDRV_SECTOR_SIZE;
if (copy && s->common.speed) {
delay_ns = ratelimit_calculate_delay(&s->limit, n);
}
}
if (!base) {

@@ -1202,13 +1202,6 @@ static int get_cluster_offset(BlockDriverState *bs,
l2_index = ((offset >> 9) / extent->cluster_sectors) % extent->l2_size;
cluster_sector = le32_to_cpu(l2_table[l2_index]);
if (m_data) {
m_data->valid = 1;
m_data->l1_index = l1_index;
m_data->l2_index = l2_index;
m_data->l2_offset = l2_offset;
m_data->l2_cache_entry = &l2_table[l2_index];
}
if (extent->has_zero_grain && cluster_sector == VMDK_GTE_ZEROED) {
zeroed = true;
}
@@ -1231,6 +1224,13 @@ static int get_cluster_offset(BlockDriverState *bs,
if (ret) {
return ret;
}
if (m_data) {
m_data->valid = 1;
m_data->l1_index = l1_index;
m_data->l2_index = l2_index;
m_data->l2_offset = l2_offset;
m_data->l2_cache_entry = &l2_table[l2_index];
}
}
*cluster_offset = cluster_sector << BDRV_SECTOR_BITS;
return VMDK_OK;

@@ -3018,9 +3018,10 @@ static int enable_write_target(BlockDriverState *bs, Error **errp)
}
options = qdict_new();
qdict_put(options, "driver", qstring_from_str("qcow"));
qdict_put(options, "write-target.driver", qstring_from_str("qcow"));
s->qcow = bdrv_open_child(s->qcow_filename, options, "write-target", bs,
&child_vvfat_qcow, false, errp);
QDECREF(options);
if (!s->qcow) {
ret = -EINVAL;
goto err;

@@ -512,6 +512,8 @@ static BlockBackend *blockdev_init(const char *file, QDict *bs_opts,
writethrough = !qemu_opt_get_bool(opts, BDRV_OPT_CACHE_WB, true);
id = qemu_opts_id(opts);
qdict_extract_subqdict(bs_opts, &interval_dict, "stats-intervals.");
qdict_array_split(interval_dict, &interval_list);
@@ -616,7 +618,7 @@ static BlockBackend *blockdev_init(const char *file, QDict *bs_opts,
/* disk I/O throttling */
if (throttle_enabled(&cfg)) {
if (!throttling_group) {
throttling_group = blk_name(blk);
throttling_group = id;
}
blk_io_limits_enable(blk, throttling_group);
blk_set_io_limits(blk, &cfg);
@@ -625,7 +627,7 @@ static BlockBackend *blockdev_init(const char *file, QDict *bs_opts,
blk_set_enable_write_cache(blk, !writethrough);
blk_set_on_error(blk, on_read_error, on_write_error);
if (!monitor_add_blk(blk, qemu_opts_id(opts), errp)) {
if (!monitor_add_blk(blk, id, errp)) {
blk_unref(blk);
blk = NULL;
goto err_no_bs_opts;

@@ -15,34 +15,59 @@
#define QEMU_RATELIMIT_H
typedef struct {
int64_t next_slice_time;
int64_t slice_start_time;
int64_t slice_end_time;
uint64_t slice_quota;
uint64_t slice_ns;
uint64_t dispatched;
} RateLimit;
/** Calculate and return delay for next request in ns
*
* Record that we sent @p n data units. If we may send more data units
* in the current time slice, return 0 (i.e. no delay). Otherwise
* return the amount of time (in ns) until the start of the next time
* slice that will permit sending the next chunk of data.
*
* Recording sent data units even after exceeding the quota is
* permitted; the time slice will be extended accordingly.
*/
static inline int64_t ratelimit_calculate_delay(RateLimit *limit, uint64_t n)
{
int64_t now = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
uint64_t delay_slices;
if (limit->next_slice_time < now) {
limit->next_slice_time = now + limit->slice_ns;
assert(limit->slice_quota && limit->slice_ns);
if (limit->slice_end_time < now) {
/* Previous, possibly extended, time slice finished; reset the
* accounting. */
limit->slice_start_time = now;
limit->slice_end_time = now + limit->slice_ns;
limit->dispatched = 0;
}
if (limit->dispatched == 0 || limit->dispatched + n <= limit->slice_quota) {
limit->dispatched += n;
limit->dispatched += n;
if (limit->dispatched < limit->slice_quota) {
/* We may send further data within the current time slice, no
* need to delay the next request. */
return 0;
} else {
limit->dispatched = n;
return limit->next_slice_time - now;
}
/* Quota exceeded. Calculate the next time slice we may start
* sending data again. */
delay_slices = (limit->dispatched + limit->slice_quota - 1) /
limit->slice_quota;
limit->slice_end_time = limit->slice_start_time +
delay_slices * limit->slice_ns;
return limit->slice_end_time - now;
}
static inline void ratelimit_set_speed(RateLimit *limit, uint64_t speed,
uint64_t slice_ns)
{
limit->slice_ns = slice_ns;
limit->slice_quota = ((double)speed * slice_ns)/1000000000ULL;
limit->slice_quota = MAX(((double)speed * slice_ns) / 1000000000ULL, 1);
}
#endif
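
The net effect of the rewritten helper: instead of resetting the accounting when a request overshoots the quota, the current slice is extended by ceil(dispatched / slice_quota) slices, and ratelimit_set_speed() now clamps the quota to at least 1 so very small speeds no longer truncate to a zero quota. A hedged usage sketch with illustrative numbers; the 100 ms slice length and the sector units mirror what the block jobs pass, but are not part of this hunk:

/* Illustrative only: 20 sectors/s with 100 ms slices gives
 * slice_quota = MAX(20 * 0.1, 1) = 2 sectors per slice. */
RateLimit limit = { 0 };
ratelimit_set_speed(&limit, 20, 100 * 1000 * 1000);

/* A single 10-sector chunk overshoots the quota. The old code let the
 * first chunk of a slice through for free and delayed at most one slice,
 * so small quotas barely throttled anything. The new code charges all
 * 10 sectors and extends the slice by ceil(10 / 2) = 5 slices, so this
 * call returns a delay of roughly 500 ms and the long-term rate stays
 * near 20 sectors/s. */
int64_t delay_ns = ratelimit_calculate_delay(&limit, 10);
if (delay_ns > 0) {
    /* a block job passes this delay on to its sleep call */
}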

@@ -2200,12 +2200,31 @@ void hmp_delvm(Monitor *mon, const QDict *qdict)
void hmp_info_snapshots(Monitor *mon, const QDict *qdict)
{
BlockDriverState *bs, *bs1;
BdrvNextIterator it1;
QEMUSnapshotInfo *sn_tab, *sn;
bool no_snapshot = true;
int nb_sns, i;
int total;
int *available_snapshots;
int *global_snapshots;
AioContext *aio_context;
typedef struct SnapshotEntry {
QEMUSnapshotInfo sn;
QTAILQ_ENTRY(SnapshotEntry) next;
} SnapshotEntry;
typedef struct ImageEntry {
const char *imagename;
QTAILQ_ENTRY(ImageEntry) next;
QTAILQ_HEAD(, SnapshotEntry) snapshots;
} ImageEntry;
QTAILQ_HEAD(, ImageEntry) image_list =
QTAILQ_HEAD_INITIALIZER(image_list);
ImageEntry *image_entry, *next_ie;
SnapshotEntry *snapshot_entry;
bs = bdrv_all_find_vmstate_bs();
if (!bs) {
monitor_printf(mon, "No available block device supports snapshots\n");
@@ -2222,34 +2241,102 @@ void hmp_info_snapshots(Monitor *mon, const QDict *qdict)
return;
}
if (nb_sns == 0) {
for (bs1 = bdrv_first(&it1); bs1; bs1 = bdrv_next(&it1)) {
int bs1_nb_sns = 0;
ImageEntry *ie;
SnapshotEntry *se;
AioContext *ctx = bdrv_get_aio_context(bs1);
aio_context_acquire(ctx);
if (bdrv_can_snapshot(bs1)) {
sn = NULL;
bs1_nb_sns = bdrv_snapshot_list(bs1, &sn);
if (bs1_nb_sns > 0) {
no_snapshot = false;
ie = g_new0(ImageEntry, 1);
ie->imagename = bdrv_get_device_name(bs1);
QTAILQ_INIT(&ie->snapshots);
QTAILQ_INSERT_TAIL(&image_list, ie, next);
for (i = 0; i < bs1_nb_sns; i++) {
se = g_new0(SnapshotEntry, 1);
se->sn = sn[i];
QTAILQ_INSERT_TAIL(&ie->snapshots, se, next);
}
}
g_free(sn);
}
aio_context_release(ctx);
}
if (no_snapshot) {
monitor_printf(mon, "There is no snapshot available.\n");
return;
}
available_snapshots = g_new0(int, nb_sns);
global_snapshots = g_new0(int, nb_sns);
total = 0;
for (i = 0; i < nb_sns; i++) {
if (bdrv_all_find_snapshot(sn_tab[i].id_str, &bs1) == 0) {
available_snapshots[total] = i;
SnapshotEntry *next_sn;
if (bdrv_all_find_snapshot(sn_tab[i].name, &bs1) == 0) {
global_snapshots[total] = i;
total++;
QTAILQ_FOREACH(image_entry, &image_list, next) {
QTAILQ_FOREACH_SAFE(snapshot_entry, &image_entry->snapshots,
next, next_sn) {
if (!strcmp(sn_tab[i].name, snapshot_entry->sn.name)) {
QTAILQ_REMOVE(&image_entry->snapshots, snapshot_entry,
next);
g_free(snapshot_entry);
}
}
}
}
}
monitor_printf(mon, "List of snapshots present on all disks:\n");
if (total > 0) {
bdrv_snapshot_dump((fprintf_function)monitor_printf, mon, NULL);
monitor_printf(mon, "\n");
for (i = 0; i < total; i++) {
sn = &sn_tab[available_snapshots[i]];
sn = &sn_tab[global_snapshots[i]];
/* The ID is not guaranteed to be the same on all images, so
* overwrite it.
*/
pstrcpy(sn->id_str, sizeof(sn->id_str), "--");
bdrv_snapshot_dump((fprintf_function)monitor_printf, mon, sn);
monitor_printf(mon, "\n");
}
} else {
monitor_printf(mon, "There is no suitable snapshot available\n");
monitor_printf(mon, "None\n");
}
QTAILQ_FOREACH(image_entry, &image_list, next) {
if (QTAILQ_EMPTY(&image_entry->snapshots)) {
continue;
}
monitor_printf(mon,
"\nList of partial (non-loadable) snapshots on '%s':\n",
image_entry->imagename);
bdrv_snapshot_dump((fprintf_function)monitor_printf, mon, NULL);
monitor_printf(mon, "\n");
QTAILQ_FOREACH(snapshot_entry, &image_entry->snapshots, next) {
bdrv_snapshot_dump((fprintf_function)monitor_printf, mon,
&snapshot_entry->sn);
monitor_printf(mon, "\n");
}
}
QTAILQ_FOREACH_SAFE(image_entry, &image_list, next, next_ie) {
SnapshotEntry *next_sn;
QTAILQ_FOREACH_SAFE(snapshot_entry, &image_entry->snapshots, next,
next_sn) {
g_free(snapshot_entry);
}
g_free(image_entry);
}
g_free(sn_tab);
g_free(available_snapshots);
g_free(global_snapshots);
}

@@ -3283,7 +3283,7 @@ static int img_resize(int argc, char **argv)
error_report("Image is read-only");
break;
default:
error_report("Error resizing image (%d)", -ret);
error_report("Error resizing image: %s", strerror(-ret));
break;
}
out:

@@ -389,9 +389,9 @@ create_iovec(BlockBackend *blk, QEMUIOVector *qiov, char **argv, int nr_iov,
goto fail;
}
/* should be SIZE_T_MAX, but that doesn't exist */
if (len > INT_MAX) {
printf("Argument '%s' exceeds maximum size %d\n", arg, INT_MAX);
if (len > SIZE_MAX) {
printf("Argument '%s' exceeds maximum size %llu\n", arg,
(unsigned long long)SIZE_MAX);
goto fail;
}
@@ -479,7 +479,7 @@ static int do_co_pwrite_zeroes(BlockBackend *blk, int64_t offset,
.done = false,
};
if (count >> BDRV_SECTOR_BITS > INT_MAX) {
if (count > INT_MAX) {
return -ERANGE;
}
@@ -500,7 +500,7 @@ static int do_write_compressed(BlockBackend *blk, char *buf, int64_t offset,
{
int ret;
if (count >> 9 > INT_MAX) {
if (count >> 9 > BDRV_REQUEST_MAX_SECTORS) {
return -ERANGE;
}
@@ -1688,9 +1688,9 @@ static int discard_f(BlockBackend *blk, int argc, char **argv)
if (count < 0) {
print_cvtnum_err(count, argv[optind]);
return 0;
} else if (count >> BDRV_SECTOR_BITS > INT_MAX) {
} else if (count >> BDRV_SECTOR_BITS > BDRV_REQUEST_MAX_SECTORS) {
printf("length cannot exceed %"PRIu64", given %s\n",
(uint64_t)INT_MAX << BDRV_SECTOR_BITS,
(uint64_t)BDRV_REQUEST_MAX_SECTORS << BDRV_SECTOR_BITS,
argv[optind]);
return 0;
}
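
The pattern in these replacements: where the value being checked is a sector count, the bound becomes BDRV_REQUEST_MAX_SECTORS; where it only sizes a host buffer, SIZE_MAX is the honest limit. Rough scales, assuming BDRV_REQUEST_MAX_SECTORS expresses the block layer's INT_MAX-byte request limit in 512-byte sectors (its definition is not shown in this hunk):

/* Illustrative bounds, assuming BDRV_REQUEST_MAX_SECTORS ~= INT_MAX >> 9:
 *
 *   INT_MAX sectors                  ~= 1 TiB   (old bound, far too lax
 *                                                for a single request)
 *   BDRV_REQUEST_MAX_SECTORS sectors ~= 2 GiB   (what one block-layer
 *                                                request can carry)
 */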

@@ -184,5 +184,103 @@ class ThrottleTestCase(iotests.QMPTestCase):
class ThrottleTestCoroutine(ThrottleTestCase):
test_img = "null-co://"
class ThrottleTestGroupNames(iotests.QMPTestCase):
test_img = "null-aio://"
max_drives = 3
def setUp(self):
self.vm = iotests.VM()
for i in range(0, self.max_drives):
self.vm.add_drive(self.test_img, "throttling.iops-total=100")
self.vm.launch()
def tearDown(self):
self.vm.shutdown()
def set_io_throttle(self, device, params):
params["device"] = device
result = self.vm.qmp("block_set_io_throttle", conv_keys=False, **params)
self.assert_qmp(result, 'return', {})
def verify_name(self, device, name):
result = self.vm.qmp("query-block")
for r in result["return"]:
if r["device"] == device:
info = r["inserted"]
if name:
self.assertEqual(info["group"], name)
else:
self.assertFalse(info.has_key('group'))
return
raise Exception("No group information found for '%s'" % device)
def test_group_naming(self):
params = {"bps": 0,
"bps_rd": 0,
"bps_wr": 0,
"iops": 0,
"iops_rd": 0,
"iops_wr": 0}
# Check the drives added using the command line.
# The default throttling group name is the device name.
for i in range(self.max_drives):
devname = "drive%d" % i
self.verify_name(devname, devname)
# Clear throttling settings => the group name is gone.
for i in range(self.max_drives):
devname = "drive%d" % i
self.set_io_throttle(devname, params)
self.verify_name(devname, None)
# Set throttling settings using block_set_io_throttle and
# check the default group names.
params["iops"] = 10
for i in range(self.max_drives):
devname = "drive%d" % i
self.set_io_throttle(devname, params)
self.verify_name(devname, devname)
# Set a custom group name for each device
for i in range(3):
devname = "drive%d" % i
groupname = "group%d" % i
params['group'] = groupname
self.set_io_throttle(devname, params)
self.verify_name(devname, groupname)
# Put drive0 in group1 and check that all other devices remain
# unchanged
params['group'] = 'group1'
self.set_io_throttle('drive0', params)
self.verify_name('drive0', 'group1')
for i in range(1, self.max_drives):
devname = "drive%d" % i
groupname = "group%d" % i
self.verify_name(devname, groupname)
# Put drive0 in group2 and check that all other devices remain
# unchanged
params['group'] = 'group2'
self.set_io_throttle('drive0', params)
self.verify_name('drive0', 'group2')
for i in range(1, self.max_drives):
devname = "drive%d" % i
groupname = "group%d" % i
self.verify_name(devname, groupname)
# Clear throttling settings from drive0 and check that all other
# devices remain unchanged
params["iops"] = 0
self.set_io_throttle('drive0', params)
self.verify_name('drive0', None)
for i in range(1, self.max_drives):
devname = "drive%d" % i
groupname = "group%d" % i
self.verify_name(devname, groupname)
if __name__ == '__main__':
iotests.main(supported_fmts=["raw"])

@@ -1,5 +1,5 @@
....
.....
----------------------------------------------------------------------
Ran 4 tests
Ran 5 tests
OK

@@ -57,7 +57,8 @@ function do_run_qemu()
function run_qemu()
{
do_run_qemu "$@" 2>&1 | _filter_testdir | _filter_qemu | _filter_generated_node_ids
do_run_qemu "$@" 2>&1 | _filter_testdir | _filter_imgfmt \
| _filter_qemu | _filter_generated_node_ids
}

@@ -3,20 +3,20 @@ Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=134217728
=== Setting WCE with qdev and with manually created BB ===
Testing: -drive if=none,file=TEST_DIR/t.qcow2,driver=qcow2,cache=writeback -device virtio-blk,drive=none0
Testing: -drive if=none,file=TEST_DIR/t.IMGFMT,driver=IMGFMT,cache=writeback -device virtio-blk,drive=none0
Cache mode: writeback
Testing: -drive if=none,file=TEST_DIR/t.qcow2,driver=qcow2,cache=writeback -device virtio-blk,drive=none0,write-cache=auto
Testing: -drive if=none,file=TEST_DIR/t.IMGFMT,driver=IMGFMT,cache=writeback -device virtio-blk,drive=none0,write-cache=auto
Cache mode: writeback
Testing: -drive if=none,file=TEST_DIR/t.qcow2,driver=qcow2,cache=writeback -device virtio-blk,drive=none0,write-cache=on
Testing: -drive if=none,file=TEST_DIR/t.IMGFMT,driver=IMGFMT,cache=writeback -device virtio-blk,drive=none0,write-cache=on
Cache mode: writeback
Testing: -drive if=none,file=TEST_DIR/t.qcow2,driver=qcow2,cache=writeback -device virtio-blk,drive=none0,write-cache=off
Testing: -drive if=none,file=TEST_DIR/t.IMGFMT,driver=IMGFMT,cache=writeback -device virtio-blk,drive=none0,write-cache=off
Cache mode: writethrough
Testing: -drive if=none,file=TEST_DIR/t.qcow2,driver=qcow2,cache=writethrough -device virtio-blk,drive=none0
Testing: -drive if=none,file=TEST_DIR/t.IMGFMT,driver=IMGFMT,cache=writethrough -device virtio-blk,drive=none0
Cache mode: writethrough
Testing: -drive if=none,file=TEST_DIR/t.qcow2,driver=qcow2,cache=writethrough -device virtio-blk,drive=none0,write-cache=auto
Testing: -drive if=none,file=TEST_DIR/t.IMGFMT,driver=IMGFMT,cache=writethrough -device virtio-blk,drive=none0,write-cache=auto
Cache mode: writethrough
Testing: -drive if=none,file=TEST_DIR/t.qcow2,driver=qcow2,cache=writethrough -device virtio-blk,drive=none0,write-cache=on
Testing: -drive if=none,file=TEST_DIR/t.IMGFMT,driver=IMGFMT,cache=writethrough -device virtio-blk,drive=none0,write-cache=on
Cache mode: writeback
Testing: -drive if=none,file=TEST_DIR/t.qcow2,driver=qcow2,cache=writethrough -device virtio-blk,drive=none0,write-cache=off
Testing: -drive if=none,file=TEST_DIR/t.IMGFMT,driver=IMGFMT,cache=writethrough -device virtio-blk,drive=none0,write-cache=off
Cache mode: writethrough
*** done