consolidate qemu_iovec_copy() and qemu_iovec_concat() and make them consistent

qemu_iovec_concat() is currently a thin wrapper around
qemu_iovec_copy().  Merge the two into a single
qemu_iovec_concat() that takes an explicit source offset,
and add the extra "0" argument at the few places where
the old three-argument form was used.

Change the skip argument of qemu_iovec_copy() from
uint64_t to size_t: the size of a qiov is itself a
size_t, so there is no way to skip a larger amount.
Rename it to soffset, to make it clear that the
offset applies to src.

Also change the only other use of uint64_t for such an
offset, in v9fs_init_qiov_from_pdu() in hw/9pfs/virtio-9p.c -
all of its callers actually pass size_t values,
not uint64_t.

One added restriction: as with all other iovec-related
functions, soffset must point inside src.
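Concretely, the new implementation asserts this on entry
(soffset == src->size is allowed and concatenates nothing):

 assert(src->size >= soffset);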

The argument order is already consistent:
 qemu_iovec_memset(QEMUIOVector *qiov, size_t offset,
                   int c, size_t bytes)
vs:
 qemu_iovec_concat(QEMUIOVector *dst,
                   QEMUIOVector *src,
                   size_t soffset, size_t sbytes)
(note that soffset comes after _src_, not dst, since it applies
to src; for memset the offset applies to qiov).
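Illustrative calls (hypothetical qiovs, for comparison only):

 qemu_iovec_memset(&qiov, 512, 0, 1024);    /* offset 512 is within qiov itself */
 qemu_iovec_concat(&dst, &src, 512, 1024);  /* offset 512 is within src         */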

Note that in many places where this function is used,
the preceding call is qemu_iovec_reset(), which means
those callers actually want a copy (replacing dst's
contents) rather than a concat.  So we may want to add
a wrapper, say qemu_iovec_copy(), with the same arguments,
which calls qemu_iovec_reset() before _concat().
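A minimal sketch of such a wrapper (hypothetical; not part of
this commit):

 static void qemu_iovec_copy(QEMUIOVector *dst, QEMUIOVector *src,
                             size_t soffset, size_t sbytes)
 {
     qemu_iovec_reset(dst);
     qemu_iovec_concat(dst, src, soffset, sbytes);
 }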

Signed-off-by: Michael Tokarev <mjt@tls.msk.ru>

commit 1b093c480a
parent 03396148bc
Author: Michael Tokarev <mjt@tls.msk.ru>
Date:   2012-03-12 21:28:06 +04:00

6 changed files with 33 additions and 48 deletions

--- a/block.c
+++ b/block.c

@@ -3101,13 +3101,13 @@ static int multiwrite_merge(BlockDriverState *bs, BlockRequest *reqs,
             // Add the first request to the merged one. If the requests are
             // overlapping, drop the last sectors of the first request.
             size = (reqs[i].sector - reqs[outidx].sector) << 9;
-            qemu_iovec_concat(qiov, reqs[outidx].qiov, size);
+            qemu_iovec_concat(qiov, reqs[outidx].qiov, 0, size);
 
             // We should need to add any zeros between the two requests
             assert (reqs[i].sector <= oldreq_last);
 
             // Add the second request
-            qemu_iovec_concat(qiov, reqs[i].qiov, reqs[i].qiov->size);
+            qemu_iovec_concat(qiov, reqs[i].qiov, 0, reqs[i].qiov->size);
 
             reqs[outidx].nb_sectors = qiov->size >> 9;
             reqs[outidx].qiov = qiov;

--- a/block/qcow2.c
+++ b/block/qcow2.c

@@ -549,7 +549,7 @@ static coroutine_fn int qcow2_co_readv(BlockDriverState *bs, int64_t sector_num,
         index_in_cluster = sector_num & (s->cluster_sectors - 1);
 
         qemu_iovec_reset(&hd_qiov);
-        qemu_iovec_copy(&hd_qiov, qiov, bytes_done,
+        qemu_iovec_concat(&hd_qiov, qiov, bytes_done,
             cur_nr_sectors * 512);
 
         switch (ret) {
@@ -720,7 +720,7 @@ static coroutine_fn int qcow2_co_writev(BlockDriverState *bs,
         assert((cluster_offset & 511) == 0);
 
         qemu_iovec_reset(&hd_qiov);
-        qemu_iovec_copy(&hd_qiov, qiov, bytes_done,
+        qemu_iovec_concat(&hd_qiov, qiov, bytes_done,
             cur_nr_sectors * 512);
 
         if (s->crypt_method) {

--- a/block/qed.c
+++ b/block/qed.c

@@ -1131,7 +1131,7 @@ static void qed_aio_write_alloc(QEDAIOCB *acb, size_t len)
     acb->cur_nclusters = qed_bytes_to_clusters(s,
             qed_offset_into_cluster(s, acb->cur_pos) + len);
-    qemu_iovec_copy(&acb->cur_qiov, acb->qiov, acb->qiov_offset, len);
+    qemu_iovec_concat(&acb->cur_qiov, acb->qiov, acb->qiov_offset, len);
 
     if (acb->flags & QED_AIOCB_ZERO) {
         /* Skip ahead if the clusters are already zero */
@@ -1177,7 +1177,7 @@ static void qed_aio_write_inplace(QEDAIOCB *acb, uint64_t offset, size_t len)
     /* Calculate the I/O vector */
     acb->cur_cluster = offset;
-    qemu_iovec_copy(&acb->cur_qiov, acb->qiov, acb->qiov_offset, len);
+    qemu_iovec_concat(&acb->cur_qiov, acb->qiov, acb->qiov_offset, len);
 
     /* Do the actual write */
     qed_aio_write_main(acb, 0);
@@ -1247,7 +1247,7 @@ static void qed_aio_read_data(void *opaque, int ret,
         goto err;
     }
 
-    qemu_iovec_copy(&acb->cur_qiov, acb->qiov, acb->qiov_offset, len);
+    qemu_iovec_concat(&acb->cur_qiov, acb->qiov, acb->qiov_offset, len);
 
     /* Handle zero cluster and backing file reads */
     if (ret == QED_CLUSTER_ZERO) {

--- a/cutils.c
+++ b/cutils.c

@@ -172,48 +172,34 @@ void qemu_iovec_add(QEMUIOVector *qiov, void *base, size_t len)
 }
 
 /*
- * Copies iovecs from src to the end of dst. It starts copying after skipping
- * the given number of bytes in src and copies until src is completely copied
- * or the total size of the copied iovec reaches size.The size of the last
- * copied iovec is changed in order to fit the specified total size if it isn't
- * a perfect fit already.
+ * Concatenates (partial) iovecs from src to the end of dst.
+ * It starts copying after skipping `soffset' bytes at the
+ * beginning of src and adds individual vectors from src to
+ * dst copies up to `sbytes' bytes total, or up to the end
+ * of src if it comes first.  This way, it is okay to specify
+ * very large value for `sbytes' to indicate "up to the end
+ * of src".
+ * Only vector pointers are processed, not the actual data buffers.
  */
-void qemu_iovec_copy(QEMUIOVector *dst, QEMUIOVector *src, uint64_t skip,
-    size_t size)
+void qemu_iovec_concat(QEMUIOVector *dst,
+                       QEMUIOVector *src, size_t soffset, size_t sbytes)
 {
     int i;
     size_t done;
-    void *iov_base;
-    uint64_t iov_len;
+    struct iovec *siov = src->iov;
 
     assert(dst->nalloc != -1);
-
-    done = 0;
-    for (i = 0; (i < src->niov) && (done != size); i++) {
-        if (skip >= src->iov[i].iov_len) {
-            /* Skip the whole iov */
-            skip -= src->iov[i].iov_len;
-            continue;
+    assert(src->size >= soffset);
+    for (i = 0, done = 0; done < sbytes && i < src->niov; i++) {
+        if (soffset < siov[i].iov_len) {
+            size_t len = MIN(siov[i].iov_len - soffset, sbytes - done);
+            qemu_iovec_add(dst, siov[i].iov_base + soffset, len);
+            done += len;
+            soffset = 0;
         } else {
-            /* Skip only part (or nothing) of the iov */
-            iov_base = (uint8_t*) src->iov[i].iov_base + skip;
-            iov_len = src->iov[i].iov_len - skip;
-            skip = 0;
-        }
-
-        if (done + iov_len > size) {
-            qemu_iovec_add(dst, iov_base, size - done);
-            break;
-        } else {
-            qemu_iovec_add(dst, iov_base, iov_len);
+            soffset -= siov[i].iov_len;
         }
-        done += iov_len;
     }
-}
-
-void qemu_iovec_concat(QEMUIOVector *dst, QEMUIOVector *src, size_t size)
-{
-    qemu_iovec_copy(dst, src, 0, size);
+    /* return done; */
 }
 
 void qemu_iovec_destroy(QEMUIOVector *qiov)
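
As a standalone illustration of the skip-then-copy traversal used
above (plain struct iovec, no QEMU types; the function and names
here are made up for the demo):

 #include <assert.h>
 #include <stdio.h>
 #include <sys/uio.h>
 
 #define MIN(a, b) ((a) < (b) ? (a) : (b))
 
 /* Reference up to `sbytes' bytes of src, skipping the first `soffset'
  * bytes, mirroring the loop in the new qemu_iovec_concat(). */
 static int concat_iov(struct iovec *dst, int dst_cap,
                       const struct iovec *src, int src_cnt,
                       size_t soffset, size_t sbytes)
 {
     int i, out = 0;
     size_t done = 0;
 
     for (i = 0; done < sbytes && i < src_cnt; i++) {
         if (soffset < src[i].iov_len) {
             size_t len = MIN(src[i].iov_len - soffset, sbytes - done);
             assert(out < dst_cap);
             dst[out].iov_base = (char *)src[i].iov_base + soffset;
             dst[out].iov_len = len;
             out++;
             done += len;
             soffset = 0;
         } else {
             soffset -= src[i].iov_len;  /* skip this vector entirely */
         }
     }
     return out;
 }
 
 int main(void)
 {
     char a[] = "hello", b[] = "world";
     struct iovec src[2] = { { a, 5 }, { b, 5 } };
     struct iovec dst[2];
     /* Skip 3 bytes, take up to 4: "lo" from a, then "wo" from b. */
     int n = concat_iov(dst, 2, src, 2, 3, 4);
     for (int i = 0; i < n; i++) {
         printf("%.*s\n", (int)dst[i].iov_len, (char *)dst[i].iov_base);
     }
     return 0;
 }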

--- a/hw/9pfs/virtio-9p.c
+++ b/hw/9pfs/virtio-9p.c

@@ -1648,7 +1648,7 @@ out:
  * with qemu_iovec_destroy().
  */
 static void v9fs_init_qiov_from_pdu(QEMUIOVector *qiov, V9fsPDU *pdu,
-                                    uint64_t skip, size_t size,
+                                    size_t skip, size_t size,
                                     bool is_write)
 {
     QEMUIOVector elem;
@@ -1665,7 +1665,7 @@ static void v9fs_init_qiov_from_pdu(QEMUIOVector *qiov, V9fsPDU *pdu,
 
     qemu_iovec_init_external(&elem, iov, niov);
     qemu_iovec_init(qiov, niov);
-    qemu_iovec_copy(qiov, &elem, skip, size);
+    qemu_iovec_concat(qiov, &elem, skip, size);
 }
 
 static void v9fs_read(void *opaque)
@@ -1715,7 +1715,7 @@ static void v9fs_read(void *opaque)
         qemu_iovec_init(&qiov, qiov_full.niov);
         do {
             qemu_iovec_reset(&qiov);
-            qemu_iovec_copy(&qiov, &qiov_full, count, qiov_full.size - count);
+            qemu_iovec_concat(&qiov, &qiov_full, count, qiov_full.size - count);
             if (0) {
                 print_sg(qiov.iov, qiov.niov);
             }
@@ -1970,7 +1970,7 @@ static void v9fs_write(void *opaque)
     qemu_iovec_init(&qiov, qiov_full.niov);
     do {
         qemu_iovec_reset(&qiov);
-        qemu_iovec_copy(&qiov, &qiov_full, total, qiov_full.size - total);
+        qemu_iovec_concat(&qiov, &qiov_full, total, qiov_full.size - total);
         if (0) {
             print_sg(qiov.iov, qiov.niov);
         }

--- a/qemu-common.h
+++ b/qemu-common.h

@@ -340,9 +340,8 @@ typedef struct QEMUIOVector {
 void qemu_iovec_init(QEMUIOVector *qiov, int alloc_hint);
 void qemu_iovec_init_external(QEMUIOVector *qiov, struct iovec *iov, int niov);
 void qemu_iovec_add(QEMUIOVector *qiov, void *base, size_t len);
-void qemu_iovec_copy(QEMUIOVector *dst, QEMUIOVector *src, uint64_t skip,
-    size_t size);
-void qemu_iovec_concat(QEMUIOVector *dst, QEMUIOVector *src, size_t size);
+void qemu_iovec_concat(QEMUIOVector *dst,
+                       QEMUIOVector *src, size_t soffset, size_t sbytes);
 void qemu_iovec_destroy(QEMUIOVector *qiov);
 void qemu_iovec_reset(QEMUIOVector *qiov);
 void qemu_iovec_to_buffer(QEMUIOVector *qiov, void *buf);