Merge remote-tracking branch 'remotes/aperard/tags/pull-xen-20190404' into staging

Xen queue

xen-block fixes

# gpg: Signature made Thu 04 Apr 2019 18:04:38 BST
# gpg:                using RSA key F80C006308E22CFD8A92E7980CF5572FD7FB55AF
# gpg:                issuer "anthony.perard@citrix.com"
# gpg: Good signature from "Anthony PERARD <anthony.perard@gmail.com>" [marginal]
# gpg:                 aka "Anthony PERARD <anthony.perard@citrix.com>" [marginal]
# gpg: WARNING: This key is not certified with sufficiently trusted signatures!
# gpg:          It is not certain that the signature belongs to the owner.
# Primary key fingerprint: 5379 2F71 024C 600F 778A  7161 D8D5 7199 DF83 42C8
#      Subkey fingerprint: F80C 0063 08E2 2CFD 8A92  E798 0CF5 572F D7FB 55AF

* remotes/aperard/tags/pull-xen-20190404:
  xen-block: scale sector based quantities correctly
  xen-block: only advertize discard to the frontend when it is enabled...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Peter Maydell 2019-04-05 03:52:05 +01:00
commit bc939abe00
3 changed files with 32 additions and 20 deletions
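
For readers following the first patch ("xen-block: scale sector based quantities correctly"), a minimal standalone sketch of the rule it enforces: the blkif protocol fixes the sector size at 512 bytes, so byte offsets and lengths derived from frontend requests must scale by XEN_BLKIF_SECTOR_SIZE rather than by the backend's configured logical_block_size. The helper name and example values below are illustrative, not QEMU code.

/* Sketch only: protocol-defined 512-byte sectors, independent of the
 * backend block size. blkif_sector_to_bytes() is an illustrative helper. */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define XEN_BLKIF_SECTOR_SIZE 512

static int64_t blkif_sector_to_bytes(uint64_t sector_number)
{
    return (int64_t)sector_number * XEN_BLKIF_SECTOR_SIZE;
}

int main(void)
{
    uint64_t sector_number = 8;             /* example frontend request */
    unsigned int first_sect = 0, last_sect = 7;

    int64_t start = blkif_sector_to_bytes(sector_number);
    int64_t len = (int64_t)(last_sect - first_sect + 1) * XEN_BLKIF_SECTOR_SIZE;

    /* With a 4096-byte logical_block_size, scaling by the block size would
     * have produced start = 32768; the protocol-defined answer is 4096. */
    printf("start=%" PRId64 " len=%" PRId64 "\n", start, len);
    return 0;
}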

hw/block/dataplane/xen-block.c

@@ -49,7 +49,6 @@ struct XenBlockDataPlane {
     unsigned int *ring_ref;
     unsigned int nr_ring_ref;
     void *sring;
-    int64_t file_blk;
     int protocol;
     blkif_back_rings_t rings;
     int more_work;
@@ -168,7 +167,7 @@ static int xen_block_parse_request(XenBlockRequest *request)
         goto err;
     }
 
-    request->start = request->req.sector_number * dataplane->file_blk;
+    request->start = request->req.sector_number * XEN_BLKIF_SECTOR_SIZE;
     for (i = 0; i < request->req.nr_segments; i++) {
         if (i == BLKIF_MAX_SEGMENTS_PER_REQUEST) {
             error_report("error: nr_segments too big");
@@ -178,14 +177,14 @@ static int xen_block_parse_request(XenBlockRequest *request)
             error_report("error: first > last sector");
             goto err;
         }
-        if (request->req.seg[i].last_sect * dataplane->file_blk >=
+        if (request->req.seg[i].last_sect * XEN_BLKIF_SECTOR_SIZE >=
             XC_PAGE_SIZE) {
             error_report("error: page crossing");
             goto err;
         }
 
         len = (request->req.seg[i].last_sect -
-               request->req.seg[i].first_sect + 1) * dataplane->file_blk;
+               request->req.seg[i].first_sect + 1) * XEN_BLKIF_SECTOR_SIZE;
         request->size += len;
     }
     if (request->start + request->size > blk_getlength(dataplane->blk)) {
@@ -205,7 +204,6 @@ static int xen_block_copy_request(XenBlockRequest *request)
     XenDevice *xendev = dataplane->xendev;
     XenDeviceGrantCopySegment segs[BLKIF_MAX_SEGMENTS_PER_REQUEST];
     int i, count;
-    int64_t file_blk = dataplane->file_blk;
     bool to_domain = (request->req.operation == BLKIF_OP_READ);
     void *virt = request->buf;
     Error *local_err = NULL;
@@ -220,16 +218,17 @@ static int xen_block_copy_request(XenBlockRequest *request)
         if (to_domain) {
             segs[i].dest.foreign.ref = request->req.seg[i].gref;
             segs[i].dest.foreign.offset = request->req.seg[i].first_sect *
-                file_blk;
+                XEN_BLKIF_SECTOR_SIZE;
             segs[i].source.virt = virt;
         } else {
             segs[i].source.foreign.ref = request->req.seg[i].gref;
             segs[i].source.foreign.offset = request->req.seg[i].first_sect *
-                file_blk;
+                XEN_BLKIF_SECTOR_SIZE;
             segs[i].dest.virt = virt;
         }
         segs[i].len = (request->req.seg[i].last_sect -
-                       request->req.seg[i].first_sect + 1) * file_blk;
+                       request->req.seg[i].first_sect + 1) *
+                      XEN_BLKIF_SECTOR_SIZE;
         virt += segs[i].len;
     }
 
@@ -331,22 +330,22 @@ static bool xen_block_split_discard(XenBlockRequest *request,
     XenBlockDataPlane *dataplane = request->dataplane;
     int64_t byte_offset;
     int byte_chunk;
-    uint64_t byte_remaining, limit;
+    uint64_t byte_remaining;
     uint64_t sec_start = sector_number;
     uint64_t sec_count = nr_sectors;
 
     /* Wrap around, or overflowing byte limit? */
     if (sec_start + sec_count < sec_count ||
-        sec_start + sec_count > INT64_MAX / dataplane->file_blk) {
+        sec_start + sec_count > INT64_MAX / XEN_BLKIF_SECTOR_SIZE) {
         return false;
     }
 
-    limit = BDRV_REQUEST_MAX_SECTORS * dataplane->file_blk;
-    byte_offset = sec_start * dataplane->file_blk;
-    byte_remaining = sec_count * dataplane->file_blk;
+    byte_offset = sec_start * XEN_BLKIF_SECTOR_SIZE;
+    byte_remaining = sec_count * XEN_BLKIF_SECTOR_SIZE;
 
     do {
-        byte_chunk = byte_remaining > limit ? limit : byte_remaining;
+        byte_chunk = byte_remaining > BDRV_REQUEST_MAX_BYTES ?
+                     BDRV_REQUEST_MAX_BYTES : byte_remaining;
         request->aio_inflight++;
         blk_aio_pdiscard(dataplane->blk, byte_offset, byte_chunk,
                          xen_block_complete_aio, request);
@@ -632,7 +631,6 @@ XenBlockDataPlane *xen_block_dataplane_create(XenDevice *xendev,
     XenBlockDataPlane *dataplane = g_new0(XenBlockDataPlane, 1);
 
     dataplane->xendev = xendev;
-    dataplane->file_blk = conf->logical_block_size;
     dataplane->blk = conf->blk;
 
     QLIST_INIT(&dataplane->inflight);

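As a side note on the xen_block_split_discard() hunk above: the chunk size is now capped at BDRV_REQUEST_MAX_BYTES instead of a locally computed, sector-based limit. Below is a minimal standalone sketch of that chunking loop; the cap value and the issue_discard() stand-in (for blk_aio_pdiscard()) are assumptions made for the sketch, not QEMU code.

/* Sketch only: split one frontend discard (given in 512-byte sectors)
 * into chunks no larger than an assumed BDRV_REQUEST_MAX_BYTES. */
#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define XEN_BLKIF_SECTOR_SIZE 512
#define BDRV_REQUEST_MAX_BYTES (1 << 30)    /* assumed cap, for the sketch only */

/* Stand-in for blk_aio_pdiscard(): just records what would be issued. */
static void issue_discard(int64_t offset, int64_t bytes)
{
    printf("discard offset=%" PRId64 " bytes=%" PRId64 "\n", offset, bytes);
}

static bool split_discard(uint64_t sector_number, uint64_t nr_sectors)
{
    uint64_t sec_start = sector_number;
    uint64_t sec_count = nr_sectors;

    /* Reject wrap-around or anything that would overflow a byte count. */
    if (sec_start + sec_count < sec_count ||
        sec_start + sec_count > INT64_MAX / XEN_BLKIF_SECTOR_SIZE) {
        return false;
    }

    int64_t byte_offset = sec_start * XEN_BLKIF_SECTOR_SIZE;
    uint64_t byte_remaining = sec_count * XEN_BLKIF_SECTOR_SIZE;

    do {
        int64_t byte_chunk = byte_remaining > BDRV_REQUEST_MAX_BYTES ?
                             BDRV_REQUEST_MAX_BYTES : byte_remaining;

        issue_discard(byte_offset, byte_chunk);
        byte_offset += byte_chunk;
        byte_remaining -= byte_chunk;
    } while (byte_remaining != 0);

    return true;
}

int main(void)
{
    split_discard(0, 10 * 1024 * 1024);     /* 10M sectors, i.e. 5 GiB */
    return 0;
}
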
hw/block/xen-block.c

@@ -149,7 +149,7 @@ static void xen_block_set_size(XenBlockDevice *blockdev)
     const char *type = object_get_typename(OBJECT(blockdev));
     XenBlockVdev *vdev = &blockdev->props.vdev;
     BlockConf *conf = &blockdev->props.conf;
-    int64_t sectors = blk_getlength(conf->blk) / conf->logical_block_size;
+    int64_t sectors = blk_getlength(conf->blk) / XEN_BLKIF_SECTOR_SIZE;
     XenDevice *xendev = XEN_DEVICE(blockdev);
 
     trace_xen_block_size(type, vdev->disk, vdev->partition, sectors);
@@ -223,6 +223,12 @@ static void xen_block_realize(XenDevice *xendev, Error **errp)
 
     blkconf_blocksizes(conf);
 
+    if (conf->logical_block_size != XEN_BLKIF_SECTOR_SIZE) {
+        error_setg(errp, "logical_block_size != %u not supported",
+                   XEN_BLKIF_SECTOR_SIZE);
+        return;
+    }
+
     if (conf->logical_block_size > conf->physical_block_size) {
         error_setg(
             errp, "logical_block_size > physical_block_size not supported");
@@ -232,8 +238,14 @@ static void xen_block_realize(XenDevice *xendev, Error **errp)
     blk_set_dev_ops(conf->blk, &xen_block_dev_ops, blockdev);
     blk_set_guest_block_size(conf->blk, conf->logical_block_size);
 
-    if (conf->discard_granularity > 0) {
+    if (conf->discard_granularity == -1) {
+        conf->discard_granularity = conf->physical_block_size;
+    }
+
+    if (blk_get_flags(conf->blk) & BDRV_O_UNMAP) {
         xen_device_backend_printf(xendev, "feature-discard", "%u", 1);
+        xen_device_backend_printf(xendev, "discard-granularity", "%u",
+                                  conf->discard_granularity);
     }
 
     xen_device_backend_printf(xendev, "feature-flush-cache", "%u", 1);
@@ -247,7 +259,7 @@ static void xen_block_realize(XenDevice *xendev, Error **errp)
                               blockdev->device_type);
 
     xen_device_backend_printf(xendev, "sector-size", "%u",
-                              conf->logical_block_size);
+                              XEN_BLKIF_SECTOR_SIZE);
 
     xen_block_set_size(blockdev);
 
@@ -755,6 +767,7 @@ static XenBlockDrive *xen_block_drive_create(const char *id,
     drive->id = g_strdup(id);
 
     file_layer = qdict_new();
+    driver_layer = qdict_new();
 
     qdict_put_str(file_layer, "driver", "file");
     qdict_put_str(file_layer, "filename", filename);
@@ -782,6 +795,7 @@ static XenBlockDrive *xen_block_drive_create(const char *id,
 
         if (!qemu_strtoul(discard_enable, NULL, 2, &value) && !!value) {
             qdict_put_str(file_layer, "discard", "unmap");
+            qdict_put_str(driver_layer, "discard", "unmap");
         }
     }
 
@@ -791,8 +805,6 @@ static XenBlockDrive *xen_block_drive_create(const char *id,
      */
     qdict_put_str(file_layer, "locking", "off");
 
-    driver_layer = qdict_new();
-
     qdict_put_str(driver_layer, "driver", driver);
     g_free(driver);
 

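For the second patch ("xen-block: only advertize discard to the frontend when it is enabled"), the behavioural point in xen_block_realize() above is that feature-discard and discard-granularity are only written to the backend xenstore area when the underlying image was actually opened with unmap support (BDRV_O_UNMAP). A minimal standalone sketch of that decision follows; backend_printf() is an illustrative stand-in for xen_device_backend_printf(), not QEMU code.

/* Sketch only: advertise discard to the frontend only when unmap is
 * actually enabled on the underlying image. */
#include <stdbool.h>
#include <stdio.h>

/* Stand-in for xen_device_backend_printf(): prints the xenstore key/value. */
static void backend_printf(const char *key, unsigned int value)
{
    printf("backend: %s = %u\n", key, value);
}

static void advertise_discard(bool unmap_enabled,
                              int discard_granularity,
                              unsigned int physical_block_size)
{
    /* -1 means "unset": fall back to the physical block size. */
    if (discard_granularity == -1) {
        discard_granularity = physical_block_size;
    }

    if (unmap_enabled) {
        backend_printf("feature-discard", 1);
        backend_printf("discard-granularity", discard_granularity);
    }
}

int main(void)
{
    advertise_discard(true, -1, 4096);      /* advertised, granularity 4096 */
    advertise_discard(false, -1, 4096);     /* nothing advertised */
    return 0;
}
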
hw/block/xen_blkif.h

@@ -143,4 +143,6 @@ static inline void blkif_get_x86_64_req(blkif_request_t *dst,
     }
 }
 
+#define XEN_BLKIF_SECTOR_SIZE 512
+
 #endif /* XEN_BLKIF_H */