xen_disk: use bdrv_aio_flush instead of bdrv_flush

Use bdrv_aio_flush instead of bdrv_flush.

Make sure that bdrv_aio_writev/readv is only called after the presync
bdrv_aio_flush has fully completed, and that the postsync bdrv_aio_flush
is only issued after bdrv_aio_writev/readv has fully completed.

Signed-off-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com>
This commit is contained in:
Stefano Stabellini 2012-04-26 16:35:53 +00:00
parent ba1dffed63
commit c6961b7d38

View file

@@ -66,6 +66,7 @@ struct ioreq {
     QEMUIOVector    v;
     int             presync;
     int             postsync;
+    uint8_t         mapped;
 
     /* grant mapping */
     uint32_t        domids[BLKIF_MAX_SEGMENTS_PER_REQUEST];
@@ -242,7 +243,7 @@ static void ioreq_unmap(struct ioreq *ioreq)
     XenGnttab gnt = ioreq->blkdev->xendev.gnttabdev;
     int i;
 
-    if (ioreq->v.niov == 0) {
+    if (ioreq->v.niov == 0 || ioreq->mapped == 0) {
         return;
     }
     if (batch_maps) {
@@ -268,6 +269,7 @@ static void ioreq_unmap(struct ioreq *ioreq)
             ioreq->page[i] = NULL;
         }
     }
+    ioreq->mapped = 0;
 }
 
 static int ioreq_map(struct ioreq *ioreq)
@@ -275,7 +277,7 @@ static int ioreq_map(struct ioreq *ioreq)
     XenGnttab gnt = ioreq->blkdev->xendev.gnttabdev;
     int i;
 
-    if (ioreq->v.niov == 0) {
+    if (ioreq->v.niov == 0 || ioreq->mapped == 1) {
         return 0;
     }
     if (batch_maps) {
@@ -307,9 +309,12 @@ static int ioreq_map(struct ioreq *ioreq)
             ioreq->blkdev->cnt_map++;
         }
     }
+    ioreq->mapped = 1;
     return 0;
 }
 
+static int ioreq_runio_qemu_aio(struct ioreq *ioreq);
+
 static void qemu_aio_complete(void *opaque, int ret)
 {
     struct ioreq *ioreq = opaque;
@@ -321,11 +326,19 @@ static void qemu_aio_complete(void *opaque, int ret)
     }
 
     ioreq->aio_inflight--;
+    if (ioreq->presync) {
+        ioreq->presync = 0;
+        ioreq_runio_qemu_aio(ioreq);
+        return;
+    }
     if (ioreq->aio_inflight > 0) {
         return;
     }
     if (ioreq->postsync) {
-        bdrv_flush(ioreq->blkdev->bs);
+        ioreq->postsync = 0;
+        ioreq->aio_inflight++;
+        bdrv_aio_flush(ioreq->blkdev->bs, qemu_aio_complete, ioreq);
+        return;
     }
 
     ioreq->status = ioreq->aio_errors ? BLKIF_RSP_ERROR : BLKIF_RSP_OKAY;
@@ -345,7 +358,8 @@ static int ioreq_runio_qemu_aio(struct ioreq *ioreq)
 
     ioreq->aio_inflight++;
     if (ioreq->presync) {
-        bdrv_flush(blkdev->bs); /* FIXME: aio_flush() ??? */
+        bdrv_aio_flush(ioreq->blkdev->bs, qemu_aio_complete, ioreq);
+        return 0;
     }
 
     switch (ioreq->req.operation) {