Convert IDE to use new dma helpers (Avi Kivity)

Use the new dma block helpers to perform dma disk I/O.

Signed-off-by: Avi Kivity <avi@redhat.com>
Signed-off-by: Anthony Liguori <aliguori@us.ibm.com>


git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@6525 c046a42c-6fe2-441c-8c8c-71466251a162
This commit is contained in:
aliguori 2009-02-05 21:24:02 +00:00
parent 59a703ebaa
commit 1fb8648d4a

View file

@@ -33,6 +33,7 @@
 #include "ppc_mac.h"
 #include "mac_dbdma.h"
 #include "sh.h"
+#include "dma.h"
 
 /* debug IDE devices */
 //#define DEBUG_IDE
@@ -423,7 +424,7 @@ typedef struct IDEState {
     int atapi_dma; /* true if dma is requested for the packet cmd */
     /* ATA DMA state */
     int io_buffer_size;
-    QEMUIOVector iovec;
+    QEMUSGList sg;
     /* PIO transfer handling */
     int req_nb_sectors; /* number of sectors per interrupt */
     EndTransferFunc *end_transfer_func;
@@ -876,10 +877,8 @@ static int dma_buf_prepare(BMDMAState *bm, int is_write)
         uint32_t size;
     } prd;
     int l, len;
-    void *mem;
-    target_phys_addr_t l1;
 
-    qemu_iovec_init(&s->iovec, s->nsector / (TARGET_PAGE_SIZE/512) + 1);
+    qemu_sglist_init(&s->sg, s->nsector / (TARGET_PAGE_SIZE/512) + 1);
     s->io_buffer_size = 0;
     for(;;) {
         if (bm->cur_prd_len == 0) {
@@ -900,15 +899,10 @@ static int dma_buf_prepare(BMDMAState *bm, int is_write)
         }
         l = bm->cur_prd_len;
         if (l > 0) {
-            l1 = l;
-            mem = cpu_physical_memory_map(bm->cur_prd_addr, &l1, is_write);
-            if (!mem) {
-                break;
-            }
-            qemu_iovec_add(&s->iovec, mem, l1);
-            bm->cur_prd_addr += l1;
-            bm->cur_prd_len -= l1;
-            s->io_buffer_size += l1;
+            qemu_sglist_add(&s->sg, bm->cur_prd_addr, l);
+            bm->cur_prd_addr += l;
+            bm->cur_prd_len -= l;
+            s->io_buffer_size += l;
         }
     }
     return 1;
@@ -916,14 +910,7 @@ static int dma_buf_prepare(BMDMAState *bm, int is_write)
 
 static void dma_buf_commit(IDEState *s, int is_write)
 {
-    int i;
-
-    for (i = 0; i < s->iovec.niov; ++i) {
-        cpu_physical_memory_unmap(s->iovec.iov[i].iov_base,
-                                  s->iovec.iov[i].iov_len, is_write,
-                                  s->iovec.iov[i].iov_len);
-    }
-    qemu_iovec_destroy(&s->iovec);
+    qemu_sglist_destroy(&s->sg);
 }
 
 static void ide_dma_error(IDEState *s)
@@ -1006,39 +993,6 @@ static int dma_buf_rw(BMDMAState *bm, int is_write)
     return 1;
 }
 
-typedef struct {
-    BMDMAState *bm;
-    void (*cb)(void *opaque, int ret);
-    QEMUBH *bh;
-} MapFailureContinuation;
-
-static void reschedule_dma(void *opaque)
-{
-    MapFailureContinuation *cont = opaque;
-
-    cont->cb(cont->bm, 0);
-    qemu_bh_delete(cont->bh);
-    qemu_free(cont);
-}
-
-static void continue_after_map_failure(void *opaque)
-{
-    MapFailureContinuation *cont = opaque;
-
-    cont->bh = qemu_bh_new(reschedule_dma, opaque);
-    qemu_bh_schedule(cont->bh);
-}
-
-static void wait_for_bounce_buffer(BMDMAState *bmdma,
-                                   void (*cb)(void *opaque, int ret))
-{
-    MapFailureContinuation *cont = qemu_malloc(sizeof(*cont));
-
-    cont->bm = bmdma;
-    cont->cb = cb;
-    cpu_register_map_client(cont, continue_after_map_failure);
-}
-
 static void ide_read_dma_cb(void *opaque, int ret)
 {
     BMDMAState *bm = opaque;
@@ -1080,15 +1034,10 @@ static void ide_read_dma_cb(void *opaque, int ret)
     s->io_buffer_size = n * 512;
     if (dma_buf_prepare(bm, 1) == 0)
         goto eot;
-    if (!s->iovec.niov) {
-        wait_for_bounce_buffer(bm, ide_read_dma_cb);
-        return;
-    }
 #ifdef DEBUG_AIO
     printf("aio_read: sector_num=%" PRId64 " n=%d\n", sector_num, n);
 #endif
-    bm->aiocb = bdrv_aio_readv(s->bs, sector_num, &s->iovec, n,
-                               ide_read_dma_cb, bm);
+    bm->aiocb = dma_bdrv_read(s->bs, &s->sg, sector_num, ide_read_dma_cb, bm);
     ide_dma_submit_check(s, ide_read_dma_cb, bm);
 }
@@ -1209,15 +1158,10 @@ static void ide_write_dma_cb(void *opaque, int ret)
     /* launch next transfer */
     if (dma_buf_prepare(bm, 0) == 0)
         goto eot;
-    if (!s->iovec.niov) {
-        wait_for_bounce_buffer(bm, ide_write_dma_cb);
-        return;
-    }
 #ifdef DEBUG_AIO
     printf("aio_write: sector_num=%" PRId64 " n=%d\n", sector_num, n);
 #endif
-    bm->aiocb = bdrv_aio_writev(s->bs, sector_num, &s->iovec, n,
-                                ide_write_dma_cb, bm);
+    bm->aiocb = dma_bdrv_write(s->bs, &s->sg, sector_num, ide_write_dma_cb, bm);
     ide_dma_submit_check(s, ide_write_dma_cb, bm);
 }