Merge remote-tracking branch 'remotes/maxreitz/tags/pull-block-2020-11-09-v2' into staging

Block patches for 5.2.0-rc1:
- Some nvme fixes (addressing problems spotted by Coverity)
- Fix NFS compilation on mingw (and enable it in Cirrus)
- Fix an error path in bdrv_co_invalidate_cache() (the permission update
  was initiated, but not aborted)
- Fix on-error rollback in bdrv_drop_intermediate(): instead of (wrongly)
  inlining bdrv_replace_node(), call that function
- Fix for iotest 240
- Fix error handling in bdrv_getlength()
- Be more explicit about how QCowL2Meta objects are handled
- Cleanups

# gpg: Signature made Mon 09 Nov 2020 17:45:06 GMT
# gpg:                using RSA key 91BEB60A30DB3E8857D11829F407DB0061D5CF40
# gpg:                issuer "mreitz@redhat.com"
# gpg: Good signature from "Max Reitz <mreitz@redhat.com>" [full]
# Primary key fingerprint: 91BE B60A 30DB 3E88 57D1  1829 F407 DB00 61D5 CF40

* remotes/maxreitz/tags/pull-block-2020-11-09-v2:
  block: make bdrv_drop_intermediate() less wrong
  block: add bdrv_replace_node_common()
  block: add forgotten bdrv_abort_perm_update() to bdrv_co_invalidate_cache()
  block: Fix some code style problems, "foo* bar" should be "foo *bar"
  block: Fix integer promotion error in bdrv_getlength()
  block: enable libnfs on msys2/mingw in cirrus.yml
  block: Fixes nfs compiling error on msys2/mingw
  iotests: rewrite iotest 240 in python
  iotests: add filter_qmp_virtio_scsi function
  hw/block/nvme: fix free of array-typed value
  hw/block/nvme: fix uint16_t use of uint32_t sgls member
  hw/block/nvme: fix null ns in register namespace
  qcow2: Document and enforce the QCowL2Meta invariants
  block: Move bdrv_drain_all_end_quiesce() to block_int.h
  block: Remove unused include

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Commit 2b030ce1ed (merged by Peter Maydell, 2020-11-09 19:33:07 +00:00)
16 changed files with 243 additions and 222 deletions

.cirrus.yml

@ -109,6 +109,7 @@ windows_msys2_task:
mingw-w64-x86_64-cyrus-sasl \
mingw-w64-x86_64-curl \
mingw-w64-x86_64-gnutls \
mingw-w64-x86_64-libnfs \
"
bitsadmin /transfer msys_download /dynamic /download /priority FOREGROUND `
https://repo.msys2.org/mingw/x86_64/mingw-w64-x86_64-python-sphinx-2.3.1-1-any.pkg.tar.xz `

block.c

@ -4563,8 +4563,16 @@ static bool should_update_child(BdrvChild *c, BlockDriverState *to)
return ret;
}
void bdrv_replace_node(BlockDriverState *from, BlockDriverState *to,
Error **errp)
/*
* With auto_skip=true bdrv_replace_node_common skips updating from parents
* if it creates a parent-child relation loop or if parent is block-job.
*
* With auto_skip=false the error is returned if from has a parent which should
* not be updated.
*/
static void bdrv_replace_node_common(BlockDriverState *from,
BlockDriverState *to,
bool auto_skip, Error **errp)
{
BdrvChild *c, *next;
GSList *list = NULL, *p;
@ -4583,7 +4591,12 @@ void bdrv_replace_node(BlockDriverState *from, BlockDriverState *to,
QLIST_FOREACH_SAFE(c, &from->parents, next_parent, next) {
assert(c->bs == from);
if (!should_update_child(c, to)) {
continue;
if (auto_skip) {
continue;
}
error_setg(errp, "Should not change '%s' link to '%s'",
c->name, from->node_name);
goto out;
}
if (c->frozen) {
error_setg(errp, "Cannot change '%s' link to '%s'",
@ -4623,6 +4636,12 @@ out:
bdrv_unref(from);
}
void bdrv_replace_node(BlockDriverState *from, BlockDriverState *to,
Error **errp)
{
return bdrv_replace_node_common(from, to, true, errp);
}
/*
* Add new bs contents at the top of an image chain while the chain is
* live, while keeping required fields on the top layer.
@ -4891,9 +4910,11 @@ int bdrv_drop_intermediate(BlockDriverState *top, BlockDriverState *base,
{
BlockDriverState *explicit_top = top;
bool update_inherits_from;
BdrvChild *c, *next;
BdrvChild *c;
Error *local_err = NULL;
int ret = -EIO;
g_autoptr(GSList) updated_children = NULL;
GSList *p;
bdrv_ref(top);
bdrv_subtree_drained_begin(top);
@ -4907,14 +4928,6 @@ int bdrv_drop_intermediate(BlockDriverState *top, BlockDriverState *base,
goto exit;
}
/* This function changes all links that point to top and makes
* them point to base. Check that none of them is frozen. */
QLIST_FOREACH(c, &top->parents, next_parent) {
if (c->frozen) {
goto exit;
}
}
/* If 'base' recursively inherits from 'top' then we should set
* base->inherits_from to top->inherits_from after 'top' and all
* other intermediate nodes have been dropped.
@ -4931,36 +4944,36 @@ int bdrv_drop_intermediate(BlockDriverState *top, BlockDriverState *base,
backing_file_str = base->filename;
}
QLIST_FOREACH_SAFE(c, &top->parents, next_parent, next) {
/* Check whether we are allowed to switch c from top to base */
GSList *ignore_children = g_slist_prepend(NULL, c);
ret = bdrv_check_update_perm(base, NULL, c->perm, c->shared_perm,
ignore_children, NULL, &local_err);
g_slist_free(ignore_children);
if (ret < 0) {
error_report_err(local_err);
goto exit;
}
QLIST_FOREACH(c, &top->parents, next_parent) {
updated_children = g_slist_prepend(updated_children, c);
}
bdrv_replace_node_common(top, base, false, &local_err);
if (local_err) {
error_report_err(local_err);
goto exit;
}
for (p = updated_children; p; p = p->next) {
c = p->data;
/* If so, update the backing file path in the image file */
if (c->klass->update_filename) {
ret = c->klass->update_filename(c, base, backing_file_str,
&local_err);
if (ret < 0) {
bdrv_abort_perm_update(base);
/*
* TODO: Actually, we want to rollback all previous iterations
* of this loop, and (which is almost impossible) previous
* bdrv_replace_node()...
*
* Note, that c->klass->update_filename may lead to permission
* update, so it's a bad idea to call it inside permission
* update transaction of bdrv_replace_node.
*/
error_report_err(local_err);
goto exit;
}
}
/*
* Do the actual switch in the in-memory graph.
* Completes bdrv_check_update_perm() transaction internally.
* c->frozen is false, we have checked that above.
*/
bdrv_ref(base);
bdrv_replace_child(c, base);
bdrv_unref(top);
}
if (update_inherits_from) {
@ -5091,8 +5104,13 @@ int64_t bdrv_getlength(BlockDriverState *bs)
{
int64_t ret = bdrv_nb_sectors(bs);
ret = ret > INT64_MAX / BDRV_SECTOR_SIZE ? -EFBIG : ret;
return ret < 0 ? ret : ret * BDRV_SECTOR_SIZE;
if (ret < 0) {
return ret;
}
if (ret > INT64_MAX / BDRV_SECTOR_SIZE) {
return -EFBIG;
}
return ret * BDRV_SECTOR_SIZE;
}
/* return 0 as number of sectors if no device present or error */
@ -5782,6 +5800,7 @@ int coroutine_fn bdrv_co_invalidate_cache(BlockDriverState *bs, Error **errp)
bdrv_get_cumulative_perm(bs, &perm, &shared_perm);
ret = bdrv_check_perm(bs, NULL, perm, shared_perm, NULL, NULL, errp);
if (ret < 0) {
bdrv_abort_perm_update(bs);
bs->open_flags |= BDRV_O_INACTIVE;
return ret;
}
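
A note on the bdrv_getlength() hunk above: BDRV_SECTOR_SIZE is an unsigned
64-bit constant, so the old one-liner compared a possibly negative int64_t
against an unsigned expression, promoting genuine error codes to huge
unsigned values and collapsing them all into -EFBIG. The following standalone
sketch (not QEMU code; SECTOR_SIZE and the helper names are placeholders)
shows the before/after behaviour:

#include <errno.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define SECTOR_SIZE (1ULL << 9)   /* stand-in for BDRV_SECTOR_SIZE (unsigned) */

/* Pre-fix logic: a negative error code is promoted to unsigned by the
 * comparison, so every error comes back as -EFBIG. */
static int64_t length_old(int64_t nb_sectors)
{
    int64_t ret = nb_sectors;
    ret = ret > INT64_MAX / SECTOR_SIZE ? -EFBIG : ret;
    return ret < 0 ? ret : ret * SECTOR_SIZE;
}

/* Post-fix logic: check for an error first, then for overflow. */
static int64_t length_new(int64_t nb_sectors)
{
    if (nb_sectors < 0) {
        return nb_sectors;
    }
    if (nb_sectors > INT64_MAX / SECTOR_SIZE) {
        return -EFBIG;
    }
    return nb_sectors * SECTOR_SIZE;
}

int main(void)
{
    /* A negative value such as -EIO used to come back as -EFBIG. */
    printf("old: %" PRId64 "  new: %" PRId64 "\n",
           length_old(-EIO), length_new(-EIO));
    return 0;
}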

block/blkdebug.c

@ -173,7 +173,7 @@ static int add_rule(void *opaque, QemuOpts *opts, Error **errp)
{
struct add_rule_data *d = opaque;
BDRVBlkdebugState *s = d->s;
const char* event_name;
const char *event_name;
int event;
struct BlkdebugRule *rule;
int64_t sector;

block/dmg-lzfse.c

@ -22,7 +22,6 @@
* THE SOFTWARE.
*/
#include "qemu/osdep.h"
#include "qemu-common.h"
#include "dmg.h"
#include <lzfse.h>

block/dmg.c

@ -559,7 +559,7 @@ static void dmg_refresh_limits(BlockDriverState *bs, Error **errp)
bs->bl.request_alignment = BDRV_SECTOR_SIZE; /* No sub-sector I/O */
}
static inline int is_sector_in_chunk(BDRVDMGState* s,
static inline int is_sector_in_chunk(BDRVDMGState *s,
uint32_t chunk_num, uint64_t sector_num)
{
if (chunk_num >= s->n_chunks || s->sectors[chunk_num] > sector_num ||

block/nfs.c

@ -24,7 +24,9 @@
#include "qemu/osdep.h"
#if !defined(_WIN32)
#include <poll.h>
#endif
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#include "qapi/error.h"
@ -58,7 +60,7 @@ typedef struct NFSClient {
bool has_zero_init;
AioContext *aio_context;
QemuMutex mutex;
blkcnt_t st_blocks;
uint64_t st_blocks;
bool cache_used;
NFSServer *server;
char *path;
@ -545,7 +547,9 @@ static int64_t nfs_client_open(NFSClient *client, BlockdevOptionsNfs *opts,
}
ret = DIV_ROUND_UP(st.st_size, BDRV_SECTOR_SIZE);
#if !defined(_WIN32)
client->st_blocks = st.st_blocks;
#endif
client->has_zero_init = S_ISREG(st.st_mode);
*strp = '/';
goto out;
@ -706,6 +710,7 @@ static int nfs_has_zero_init(BlockDriverState *bs)
return client->has_zero_init;
}
#if !defined(_WIN32)
/* Called (via nfs_service) with QemuMutex held. */
static void
nfs_get_allocated_file_size_cb(int ret, struct nfs_context *nfs, void *data,
@ -748,6 +753,7 @@ static int64_t nfs_get_allocated_file_size(BlockDriverState *bs)
return (task.ret < 0 ? task.ret : st.st_blocks * 512);
}
#endif
static int coroutine_fn
nfs_file_co_truncate(BlockDriverState *bs, int64_t offset, bool exact,
@ -800,7 +806,9 @@ static int nfs_reopen_prepare(BDRVReopenState *state,
nfs_get_error(client->context));
return ret;
}
#if !defined(_WIN32)
client->st_blocks = st.st_blocks;
#endif
}
return 0;
@ -869,7 +877,10 @@ static BlockDriver bdrv_nfs = {
.create_opts = &nfs_create_opts,
.bdrv_has_zero_init = nfs_has_zero_init,
/* libnfs does not provide the allocated filesize of a file on win32. */
#if !defined(_WIN32)
.bdrv_get_allocated_file_size = nfs_get_allocated_file_size,
#endif
.bdrv_co_truncate = nfs_file_co_truncate,
.bdrv_file_open = nfs_file_open,

block/qcow2-cluster.c

@ -1049,6 +1049,8 @@ int qcow2_alloc_cluster_link_l2(BlockDriverState *bs, QCowL2Meta *m)
qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_slice);
assert(l2_index + m->nb_clusters <= s->l2_slice_size);
assert(m->cow_end.offset + m->cow_end.nb_bytes <=
m->nb_clusters << s->cluster_bits);
for (i = 0; i < m->nb_clusters; i++) {
uint64_t offset = cluster_offset + ((uint64_t)i << s->cluster_bits);
/* if two concurrent writes happen to the same unallocated cluster
@ -1070,8 +1072,7 @@ int qcow2_alloc_cluster_link_l2(BlockDriverState *bs, QCowL2Meta *m)
if (has_subclusters(s) && !m->prealloc) {
uint64_t l2_bitmap = get_l2_bitmap(s, l2_slice, l2_index + i);
unsigned written_from = m->cow_start.offset;
unsigned written_to = m->cow_end.offset + m->cow_end.nb_bytes ?:
m->nb_clusters << s->cluster_bits;
unsigned written_to = m->cow_end.offset + m->cow_end.nb_bytes;
int first_sc, last_sc;
/* Narrow written_from and written_to down to the current cluster */
written_from = MAX(written_from, i << s->cluster_bits);

block/qcow2.c

@ -269,7 +269,7 @@ static int qcow2_read_extensions(BlockDriverState *bs, uint64_t start_offset,
case QCOW2_EXT_MAGIC_FEATURE_TABLE:
if (p_feature_table != NULL) {
void* feature_table = g_malloc0(ext.len + 2 * sizeof(Qcow2Feature));
void *feature_table = g_malloc0(ext.len + 2 * sizeof(Qcow2Feature));
ret = bdrv_pread(bs->file, offset , feature_table, ext.len);
if (ret < 0) {
error_setg_errno(errp, -ret, "ERROR: ext_feature_table: "
@ -2361,15 +2361,26 @@ static bool merge_cow(uint64_t offset, unsigned bytes,
continue;
}
/* The data (middle) region must be immediately after the
* start region */
/*
* The write request should start immediately after the first
* COW region. This does not always happen because the area
* touched by the request can be larger than the one defined
* by @m (a single request can span an area consisting of a
* mix of previously unallocated and allocated clusters, that
* is why @l2meta is a list).
*/
if (l2meta_cow_start(m) + m->cow_start.nb_bytes != offset) {
/* In this case the request starts before this region */
assert(offset < l2meta_cow_start(m));
assert(m->cow_start.nb_bytes == 0);
continue;
}
/* The end region must be immediately after the data (middle)
* region */
/* The write request should end immediately before the second
* COW region (see above for why it does not always happen) */
if (m->offset + m->cow_end.offset != offset + bytes) {
assert(offset + bytes > m->offset + m->cow_end.offset);
assert(m->cow_end.nb_bytes == 0);
continue;
}
@ -3377,7 +3388,7 @@ qcow2_co_create(BlockdevCreateOptions *create_options, Error **errp)
size_t cluster_size;
int version;
int refcount_order;
uint64_t* refcount_table;
uint64_t *refcount_table;
int ret;
uint8_t compression_type = QCOW2_COMPRESSION_TYPE_ZLIB;

block/qcow2.h

@ -343,8 +343,8 @@ typedef struct BDRVQcow2State {
uint64_t l1_table_offset;
uint64_t *l1_table;
Qcow2Cache* l2_table_cache;
Qcow2Cache* refcount_block_cache;
Qcow2Cache *l2_table_cache;
Qcow2Cache *refcount_block_cache;
QEMUTimer *cache_clean_timer;
unsigned cache_clean_interval;
@ -394,7 +394,7 @@ typedef struct BDRVQcow2State {
uint64_t autoclear_features;
size_t unknown_header_fields_size;
void* unknown_header_fields;
void *unknown_header_fields;
QLIST_HEAD(, Qcow2UnknownHeaderExtension) unknown_header_ext;
QTAILQ_HEAD (, Qcow2DiscardRegion) discards;
bool cache_discards;
@ -435,17 +435,18 @@ typedef struct Qcow2COWRegion {
/**
* Describes an in-flight (part of a) write request that writes to clusters
* that are not referenced in their L2 table yet.
* that need to have their L2 table entries updated (because they are
* newly allocated or need changes in their L2 bitmaps)
*/
typedef struct QCowL2Meta
{
/** Guest offset of the first newly allocated cluster */
/** Guest offset of the first updated cluster */
uint64_t offset;
/** Host offset of the first newly allocated cluster */
/** Host offset of the first updated cluster */
uint64_t alloc_offset;
/** Number of newly allocated clusters */
/** Number of updated clusters */
int nb_clusters;
/** Do not free the old clusters */
@ -458,14 +459,16 @@ typedef struct QCowL2Meta
CoQueue dependent_requests;
/**
* The COW Region between the start of the first allocated cluster and the
* area the guest actually writes to.
* The COW Region immediately before the area the guest actually
* writes to. This (part of the) write request starts at
* cow_start.offset + cow_start.nb_bytes.
*/
Qcow2COWRegion cow_start;
/**
* The COW Region between the area the guest actually writes to and the
* end of the last allocated cluster.
* The COW Region immediately after the area the guest actually
* writes to. This (part of the) write request ends at cow_end.offset
* (which must always be set even when cow_end.nb_bytes is 0).
*/
Qcow2COWRegion cow_end;
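
To make the documented layout concrete: the guest write handled by one
QCowL2Meta starts immediately after cow_start and ends exactly where cow_end
begins, and cow_end must not reach past the updated clusters (this is what
the new assertion in qcow2_alloc_cluster_link_l2() and the merge_cow() checks
above rely on). A small standalone sketch of those invariants, with invented
names and values, for illustration only:

#include <assert.h>
#include <stdint.h>

typedef struct CowRegion {
    uint64_t offset;      /* relative to the first updated cluster */
    unsigned nb_bytes;
} CowRegion;

typedef struct L2MetaSketch {
    uint64_t offset;      /* guest offset of the first updated cluster */
    int nb_clusters;
    unsigned cluster_bits;
    CowRegion cow_start;
    CowRegion cow_end;
} L2MetaSketch;

/* The (part of the) write request described by 'm' must start right after
 * cow_start and end right where cow_end begins. */
static void check_invariants(const L2MetaSketch *m,
                             uint64_t write_start, uint64_t write_end)
{
    assert(m->offset + m->cow_start.offset + m->cow_start.nb_bytes == write_start);
    assert(m->offset + m->cow_end.offset == write_end);
    /* cow_end may not extend past the updated clusters */
    assert(m->cow_end.offset + m->cow_end.nb_bytes <=
           (uint64_t)m->nb_clusters << m->cluster_bits);
}

int main(void)
{
    /* One 64 KiB cluster being allocated; the guest writes 4 KiB at +8 KiB. */
    L2MetaSketch m = {
        .offset       = 0x100000,
        .nb_clusters  = 1,
        .cluster_bits = 16,
        .cow_start    = { .offset = 0,     .nb_bytes = 8192 },
        .cow_end      = { .offset = 12288, .nb_bytes = 65536 - 12288 },
    };

    check_invariants(&m, 0x100000 + 8192, 0x100000 + 12288);
    return 0;
}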

block/vpc.c

@ -172,7 +172,7 @@ static QemuOptsList vpc_runtime_opts = {
static QemuOptsList vpc_create_opts;
static uint32_t vpc_checksum(uint8_t* buf, size_t size)
static uint32_t vpc_checksum(uint8_t *buf, size_t size)
{
uint32_t res = 0;
int i;
@ -528,7 +528,7 @@ static inline int64_t get_image_offset(BlockDriverState *bs, uint64_t offset,
*
* Returns 0 on success and < 0 on error
*/
static int rewrite_footer(BlockDriverState* bs)
static int rewrite_footer(BlockDriverState *bs)
{
int ret;
BDRVVPCState *s = bs->opaque;
@ -548,7 +548,7 @@ static int rewrite_footer(BlockDriverState* bs)
*
* Returns the sectors' offset in the image file on success and < 0 on error
*/
static int64_t alloc_block(BlockDriverState* bs, int64_t offset)
static int64_t alloc_block(BlockDriverState *bs, int64_t offset)
{
BDRVVPCState *s = bs->opaque;
int64_t bat_offset;
@ -781,8 +781,8 @@ static int coroutine_fn vpc_co_block_status(BlockDriverState *bs,
* the hardware EIDE and ATA-2 limit of 16 heads (max disk size of 127 GB)
* and instead allow up to 255 heads.
*/
static int calculate_geometry(int64_t total_sectors, uint16_t* cyls,
uint8_t* heads, uint8_t* secs_per_cyl)
static int calculate_geometry(int64_t total_sectors, uint16_t *cyls,
uint8_t *heads, uint8_t *secs_per_cyl)
{
uint32_t cyls_times_heads;

hw/block/nvme.c

@ -452,7 +452,7 @@ static uint16_t nvme_map_sgl_data(NvmeCtrl *n, QEMUSGList *qsg,
* segments and/or descriptors. The controller might accept
* ignoring the rest of the SGL.
*/
uint16_t sgls = le16_to_cpu(n->id_ctrl.sgls);
uint32_t sgls = le32_to_cpu(n->id_ctrl.sgls);
if (sgls & NVME_CTRL_SGLS_EXCESS_LENGTH) {
break;
}
@ -2562,8 +2562,7 @@ int nvme_register_namespace(NvmeCtrl *n, NvmeNamespace *ns, Error **errp)
if (!nsid) {
for (int i = 1; i <= n->num_namespaces; i++) {
NvmeNamespace *ns = nvme_ns(n, i);
if (!ns) {
if (!nvme_ns(n, i)) {
nsid = ns->params.nsid = i;
break;
}
@ -2800,7 +2799,6 @@ static void nvme_exit(PCIDevice *pci_dev)
NvmeCtrl *n = NVME(pci_dev);
nvme_clear_ctrl(n);
g_free(n->namespaces);
g_free(n->cq);
g_free(n->sq);
g_free(n->aer_reqs);

include/block/block.h

@ -781,12 +781,6 @@ void bdrv_drained_end(BlockDriverState *bs);
*/
void bdrv_drained_end_no_poll(BlockDriverState *bs, int *drained_end_counter);
/**
* End all quiescent sections started by bdrv_drain_all_begin(). This is
* only needed when deleting a BDS before bdrv_drain_all_end() is called.
*/
void bdrv_drain_all_end_quiesce(BlockDriverState *bs);
/**
* End a quiescent section started by bdrv_subtree_drained_begin().
*/

include/block/block_int.h

@ -1407,4 +1407,13 @@ static inline BlockDriverState *bdrv_primary_bs(BlockDriverState *bs)
return child_bs(bdrv_primary_child(bs));
}
/**
* End all quiescent sections started by bdrv_drain_all_begin(). This is
* needed when deleting a BDS before bdrv_drain_all_end() is called.
*
* NOTE: this is an internal helper for bdrv_close() *only*. No one else
* should call it.
*/
void bdrv_drain_all_end_quiesce(BlockDriverState *bs);
#endif /* BLOCK_INT_H */

tests/qemu-iotests/240

@ -1,5 +1,5 @@
#!/usr/bin/env bash
#
#!/usr/bin/env python3
# Test hot plugging and unplugging with iothreads
#
# Copyright (C) 2019 Igalia, S.L.
@ -17,133 +17,90 @@
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# creator
owner=berto@igalia.com
import iotests
import os
seq=`basename $0`
echo "QA output created by $seq"
nbd_sock = iotests.file_path('nbd.sock', base_dir=iotests.sock_dir)
status=1 # failure is the default!
class TestCase(iotests.QMPTestCase):
test_driver = "null-co"
_cleanup()
{
rm -f "$SOCK_DIR/nbd"
}
trap "_cleanup; exit \$status" 0 1 2 3 15
def required_drivers(self):
return [self.test_driver]
# get standard environment, filters and checks
. ./common.rc
. ./common.filter
@iotests.skip_if_unsupported(required_drivers)
def setUp(self):
self.vm = iotests.VM()
self.vm.launch()
_supported_fmt generic
_supported_proto generic
def tearDown(self):
self.vm.shutdown()
do_run_qemu()
{
echo Testing: "$@"
$QEMU -nographic -qmp stdio -serial none "$@"
echo
}
def test1(self):
iotests.log('==Unplug a SCSI disk and then plug it again==')
self.vm.qmp_log('blockdev-add', driver='null-co', read_zeroes=True, node_name='hd0')
self.vm.qmp_log('object-add', qom_type='iothread', id="iothread0")
self.vm.qmp_log('device_add', id='scsi0', driver=iotests.get_virtio_scsi_device(), iothread='iothread0', filters=[iotests.filter_qmp_virtio_scsi])
self.vm.qmp_log('device_add', id='scsi-hd0', driver='scsi-hd', drive='hd0')
self.vm.qmp_log('device_del', id='scsi-hd0')
self.vm.event_wait('DEVICE_DELETED')
self.vm.qmp_log('device_add', id='scsi-hd0', driver='scsi-hd', drive='hd0')
self.vm.qmp_log('device_del', id='scsi-hd0')
self.vm.event_wait('DEVICE_DELETED')
self.vm.qmp_log('blockdev-del', node_name='hd0')
# Remove QMP events from (pretty-printed) output. Doesn't handle
# nested dicts correctly, but we don't get any of those in this test.
_filter_qmp_events()
{
tr '\n' '\t' | sed -e \
's/{\s*"timestamp":\s*{[^}]*},\s*"event":[^,}]*\(,\s*"data":\s*{[^}]*}\)\?\s*}\s*//g' \
| tr '\t' '\n'
}
def test2(self):
iotests.log('==Attach two SCSI disks using the same block device and the same iothread==')
self.vm.qmp_log('blockdev-add', driver='null-co', read_zeroes=True, node_name='hd0', read_only=True)
self.vm.qmp_log('object-add', qom_type='iothread', id="iothread0")
self.vm.qmp_log('device_add', id='scsi0', driver=iotests.get_virtio_scsi_device(), iothread='iothread0', filters=[iotests.filter_qmp_virtio_scsi])
run_qemu()
{
do_run_qemu "$@" 2>&1 | _filter_qmp | _filter_qmp_events
}
self.vm.qmp_log('device_add', id='scsi-hd0', driver='scsi-hd', drive='hd0')
self.vm.qmp_log('device_add', id='scsi-hd1', driver='scsi-hd', drive='hd0')
self.vm.qmp_log('device_del', id='scsi-hd0')
self.vm.event_wait('DEVICE_DELETED')
self.vm.qmp_log('device_del', id='scsi-hd1')
self.vm.event_wait('DEVICE_DELETED')
self.vm.qmp_log('blockdev-del', node_name='hd0')
case "$QEMU_DEFAULT_MACHINE" in
s390-ccw-virtio)
virtio_scsi=virtio-scsi-ccw
;;
*)
virtio_scsi=virtio-scsi-pci
;;
esac
def test3(self):
iotests.log('==Attach two SCSI disks using the same block device but different iothreads==')
echo
echo === Unplug a SCSI disk and then plug it again ===
echo
self.vm.qmp_log('blockdev-add', driver='null-co', read_zeroes=True, node_name='hd0', read_only=True)
run_qemu <<EOF
{ "execute": "qmp_capabilities" }
{ "execute": "blockdev-add", "arguments": {"driver": "null-co", "read-zeroes": true, "node-name": "hd0"}}
{ "execute": "object-add", "arguments": {"qom-type": "iothread", "id": "iothread0"}}
{ "execute": "device_add", "arguments": {"id": "scsi0", "driver": "${virtio_scsi}", "iothread": "iothread0"}}
{ "execute": "device_add", "arguments": {"id": "scsi-hd0", "driver": "scsi-hd", "drive": "hd0"}}
{ "execute": "device_del", "arguments": {"id": "scsi-hd0"}}
{ "execute": "device_add", "arguments": {"id": "scsi-hd0", "driver": "scsi-hd", "drive": "hd0"}}
{ "execute": "device_del", "arguments": {"id": "scsi-hd0"}}
{ "execute": "device_del", "arguments": {"id": "scsi0"}}
{ "execute": "blockdev-del", "arguments": {"node-name": "hd0"}}
{ "execute": "quit"}
EOF
self.vm.qmp_log('object-add', qom_type='iothread', id="iothread0")
self.vm.qmp_log('object-add', qom_type='iothread', id="iothread1")
echo
echo === Attach two SCSI disks using the same block device and the same iothread ===
echo
self.vm.qmp_log('device_add', id='scsi0', driver=iotests.get_virtio_scsi_device(), iothread='iothread0', filters=[iotests.filter_qmp_virtio_scsi])
self.vm.qmp_log('device_add', id='scsi1', driver=iotests.get_virtio_scsi_device(), iothread='iothread1', filters=[iotests.filter_qmp_virtio_scsi])
run_qemu <<EOF
{ "execute": "qmp_capabilities" }
{ "execute": "blockdev-add", "arguments": {"driver": "null-co", "read-zeroes": true, "node-name": "hd0", "read-only": true}}
{ "execute": "object-add", "arguments": {"qom-type": "iothread", "id": "iothread0"}}
{ "execute": "device_add", "arguments": {"id": "scsi0", "driver": "${virtio_scsi}", "iothread": "iothread0"}}
{ "execute": "device_add", "arguments": {"id": "scsi-hd0", "driver": "scsi-hd", "drive": "hd0"}}
{ "execute": "device_add", "arguments": {"id": "scsi-hd1", "driver": "scsi-hd", "drive": "hd0"}}
{ "execute": "device_del", "arguments": {"id": "scsi-hd0"}}
{ "execute": "device_del", "arguments": {"id": "scsi-hd1"}}
{ "execute": "device_del", "arguments": {"id": "scsi0"}}
{ "execute": "blockdev-del", "arguments": {"node-name": "hd0"}}
{ "execute": "quit"}
EOF
self.vm.qmp_log('device_add', id='scsi-hd0', driver='scsi-hd', drive='hd0', bus="scsi0.0")
self.vm.qmp_log('device_add', id='scsi-hd1', driver='scsi-hd', drive='hd0', bus="scsi1.0")
echo
echo === Attach two SCSI disks using the same block device but different iothreads ===
echo
self.vm.qmp_log('device_del', id='scsi-hd0')
self.vm.event_wait('DEVICE_DELETED')
self.vm.qmp_log('device_add', id='scsi-hd1', driver='scsi-hd', drive='hd0', bus="scsi1.0")
run_qemu <<EOF
{ "execute": "qmp_capabilities" }
{ "execute": "blockdev-add", "arguments": {"driver": "null-co", "read-zeroes": true, "node-name": "hd0", "read-only": true}}
{ "execute": "object-add", "arguments": {"qom-type": "iothread", "id": "iothread0"}}
{ "execute": "object-add", "arguments": {"qom-type": "iothread", "id": "iothread1"}}
{ "execute": "device_add", "arguments": {"id": "scsi0", "driver": "${virtio_scsi}", "iothread": "iothread0"}}
{ "execute": "device_add", "arguments": {"id": "scsi1", "driver": "${virtio_scsi}", "iothread": "iothread1"}}
{ "execute": "device_add", "arguments": {"id": "scsi-hd0", "driver": "scsi-hd", "drive": "hd0", "bus": "scsi0.0"}}
{ "execute": "device_add", "arguments": {"id": "scsi-hd1", "driver": "scsi-hd", "drive": "hd0", "bus": "scsi1.0"}}
{ "execute": "device_del", "arguments": {"id": "scsi-hd0"}}
{ "execute": "device_add", "arguments": {"id": "scsi-hd1", "driver": "scsi-hd", "drive": "hd0", "bus": "scsi1.0"}}
{ "execute": "device_del", "arguments": {"id": "scsi-hd1"}}
{ "execute": "device_del", "arguments": {"id": "scsi0"}}
{ "execute": "device_del", "arguments": {"id": "scsi1"}}
{ "execute": "blockdev-del", "arguments": {"node-name": "hd0"}}
{ "execute": "quit"}
EOF
self.vm.qmp_log('device_del', id='scsi-hd1')
self.vm.event_wait('DEVICE_DELETED')
self.vm.qmp_log('blockdev-del', node_name='hd0')
echo
echo === Attach a SCSI disks using the same block device as a NBD server ===
echo
def test4(self):
iotests.log('==Attach a SCSI disks using the same block device as a NBD server==')
run_qemu <<EOF
{ "execute": "qmp_capabilities" }
{ "execute": "blockdev-add", "arguments": {"driver": "null-co", "read-zeroes": true, "node-name": "hd0", "read-only": true}}
{ "execute": "nbd-server-start", "arguments": {"addr":{"type":"unix","data":{"path":"$SOCK_DIR/nbd"}}}}
{ "execute": "nbd-server-add", "arguments": {"device":"hd0"}}
{ "execute": "object-add", "arguments": {"qom-type": "iothread", "id": "iothread0"}}
{ "execute": "device_add", "arguments": {"id": "scsi0", "driver": "${virtio_scsi}", "iothread": "iothread0"}}
{ "execute": "device_add", "arguments": {"id": "scsi-hd0", "driver": "scsi-hd", "drive": "hd0", "bus": "scsi0.0"}}
{ "execute": "quit"}
EOF
self.vm.qmp_log('blockdev-add', driver='null-co', read_zeroes=True, node_name='hd0', read_only=True)
# success, all done
echo "*** done"
rm -f $seq.full
status=0
self.vm.qmp_log('nbd-server-start',
filters=[iotests.filter_qmp_testfiles],
addr={'type':'unix', 'data':{'path':nbd_sock}})
self.vm.qmp_log('nbd-server-add', device='hd0')
self.vm.qmp_log('object-add', qom_type='iothread', id="iothread0")
self.vm.qmp_log('device_add', id='scsi0', driver=iotests.get_virtio_scsi_device(), iothread='iothread0', filters=[iotests.filter_qmp_virtio_scsi])
self.vm.qmp_log('device_add', id='scsi-hd0', driver='scsi-hd', drive='hd0')
if __name__ == '__main__':
iotests.activate_logging()
iotests.main()

tests/qemu-iotests/240.out

@ -1,67 +1,75 @@
QA output created by 240
=== Unplug a SCSI disk and then plug it again ===
Testing:
QMP_VERSION
==Unplug a SCSI disk and then plug it again==
{"execute": "blockdev-add", "arguments": {"driver": "null-co", "node-name": "hd0", "read-zeroes": true}}
{"return": {}}
{"execute": "object-add", "arguments": {"id": "iothread0", "qom-type": "iothread"}}
{"return": {}}
{"execute": "device_add", "arguments": {"driver": "virtio-scsi", "id": "scsi0", "iothread": "iothread0"}}
{"return": {}}
{"execute": "device_add", "arguments": {"drive": "hd0", "driver": "scsi-hd", "id": "scsi-hd0"}}
{"return": {}}
{"execute": "device_del", "arguments": {"id": "scsi-hd0"}}
{"return": {}}
{"execute": "device_add", "arguments": {"drive": "hd0", "driver": "scsi-hd", "id": "scsi-hd0"}}
{"return": {}}
{"execute": "device_del", "arguments": {"id": "scsi-hd0"}}
{"return": {}}
{"execute": "blockdev-del", "arguments": {"node-name": "hd0"}}
{"return": {}}
==Attach two SCSI disks using the same block device and the same iothread==
{"execute": "blockdev-add", "arguments": {"driver": "null-co", "node-name": "hd0", "read-only": true, "read-zeroes": true}}
{"return": {}}
{"execute": "object-add", "arguments": {"id": "iothread0", "qom-type": "iothread"}}
{"return": {}}
{"execute": "device_add", "arguments": {"driver": "virtio-scsi", "id": "scsi0", "iothread": "iothread0"}}
{"return": {}}
=== Attach two SCSI disks using the same block device and the same iothread ===
Testing:
QMP_VERSION
{"execute": "device_add", "arguments": {"drive": "hd0", "driver": "scsi-hd", "id": "scsi-hd0"}}
{"return": {}}
{"execute": "device_add", "arguments": {"drive": "hd0", "driver": "scsi-hd", "id": "scsi-hd1"}}
{"return": {}}
{"execute": "device_del", "arguments": {"id": "scsi-hd0"}}
{"return": {}}
{"execute": "device_del", "arguments": {"id": "scsi-hd1"}}
{"return": {}}
{"execute": "blockdev-del", "arguments": {"node-name": "hd0"}}
{"return": {}}
==Attach two SCSI disks using the same block device but different iothreads==
{"execute": "blockdev-add", "arguments": {"driver": "null-co", "node-name": "hd0", "read-only": true, "read-zeroes": true}}
{"return": {}}
{"execute": "object-add", "arguments": {"id": "iothread0", "qom-type": "iothread"}}
{"return": {}}
{"execute": "object-add", "arguments": {"id": "iothread1", "qom-type": "iothread"}}
{"return": {}}
{"execute": "device_add", "arguments": {"driver": "virtio-scsi", "id": "scsi0", "iothread": "iothread0"}}
{"return": {}}
{"execute": "device_add", "arguments": {"driver": "virtio-scsi", "id": "scsi1", "iothread": "iothread1"}}
{"return": {}}
{"execute": "device_add", "arguments": {"bus": "scsi0.0", "drive": "hd0", "driver": "scsi-hd", "id": "scsi-hd0"}}
{"return": {}}
=== Attach two SCSI disks using the same block device but different iothreads ===
Testing:
QMP_VERSION
{"return": {}}
{"return": {}}
{"return": {}}
{"return": {}}
{"return": {}}
{"return": {}}
{"return": {}}
{"execute": "device_add", "arguments": {"bus": "scsi1.0", "drive": "hd0", "driver": "scsi-hd", "id": "scsi-hd1"}}
{"error": {"class": "GenericError", "desc": "Cannot change iothread of active block backend"}}
{"execute": "device_del", "arguments": {"id": "scsi-hd0"}}
{"return": {}}
{"execute": "device_add", "arguments": {"bus": "scsi1.0", "drive": "hd0", "driver": "scsi-hd", "id": "scsi-hd1"}}
{"return": {}}
{"execute": "device_del", "arguments": {"id": "scsi-hd1"}}
{"return": {}}
{"execute": "blockdev-del", "arguments": {"node-name": "hd0"}}
{"return": {}}
==Attach a SCSI disks using the same block device as a NBD server==
{"execute": "blockdev-add", "arguments": {"driver": "null-co", "node-name": "hd0", "read-only": true, "read-zeroes": true}}
{"return": {}}
{"execute": "nbd-server-start", "arguments": {"addr": {"data": {"path": "SOCK_DIR/PID-nbd.sock"}, "type": "unix"}}}
{"return": {}}
{"execute": "nbd-server-add", "arguments": {"device": "hd0"}}
{"return": {}}
{"execute": "object-add", "arguments": {"id": "iothread0", "qom-type": "iothread"}}
{"return": {}}
{"execute": "device_add", "arguments": {"driver": "virtio-scsi", "id": "scsi0", "iothread": "iothread0"}}
{"return": {}}
{"execute": "device_add", "arguments": {"drive": "hd0", "driver": "scsi-hd", "id": "scsi-hd0"}}
{"return": {}}
....
----------------------------------------------------------------------
Ran 4 tests
=== Attach a SCSI disks using the same block device as a NBD server ===
Testing:
QMP_VERSION
{"return": {}}
{"return": {}}
{"return": {}}
{"return": {}}
{"return": {}}
{"return": {}}
{"return": {}}
{"return": {}}
*** done
OK

tests/qemu-iotests/iotests.py

@ -392,6 +392,16 @@ def filter_qmp_testfiles(qmsg):
return value
return filter_qmp(qmsg, _filter)
def filter_virtio_scsi(output: str) -> str:
return re.sub(r'(virtio-scsi)-(ccw|pci)', r'\1', output)
def filter_qmp_virtio_scsi(qmsg):
def _filter(_key, value):
if is_str(value):
return filter_virtio_scsi(value)
return value
return filter_qmp(qmsg, _filter)
def filter_generated_node_ids(msg):
return re.sub("#block[0-9]+", "NODE_NAME", msg)