dma: Let dma_memory_map() take MemTxAttrs argument

Let devices specify transaction attributes when calling
dma_memory_map().

Patch created mechanically using spatch with this script:

  @@
  expression E1, E2, E3, E4;
  @@
  - dma_memory_map(E1, E2, E3, E4)
  + dma_memory_map(E1, E2, E3, E4, MEMTXATTRS_UNSPECIFIED)
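For reference, a semantic patch like this is usually applied with an
invocation along these lines (the script file name here is hypothetical):

  spatch --sp-file dma_memory_map.cocci --in-place --dir .

where --in-place rewrites the matched call sites and --dir recurses over
the source tree.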

Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Li Qiang <liq3ea@gmail.com>
Reviewed-by: Edgar E. Iglesias <edgar.iglesias@xilinx.com>
Signed-off-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Acked-by: Stefan Hajnoczi <stefanha@redhat.com>
Message-Id: <20211223115554.3155328-7-philmd@redhat.com>
 8 files changed, 29 insertions(+), 17 deletions(-)

diff --git a/hw/display/virtio-gpu.c b/hw/display/virtio-gpu.c
--- a/hw/display/virtio-gpu.c
+++ b/hw/display/virtio-gpu.c

@@ -814,8 +814,9 @@ int virtio_gpu_create_mapping_iov(VirtIOGPU *g,
         do {
             len = l;
-            map = dma_memory_map(VIRTIO_DEVICE(g)->dma_as,
-                                 a, &len, DMA_DIRECTION_TO_DEVICE);
+            map = dma_memory_map(VIRTIO_DEVICE(g)->dma_as, a, &len,
+                                 DMA_DIRECTION_TO_DEVICE,
+                                 MEMTXATTRS_UNSPECIFIED);
             if (!map) {
                 qemu_log_mask(LOG_GUEST_ERROR, "%s: failed to map MMIO memory for"
                               " element %d\n", __func__, e);
@@ -1252,8 +1253,9 @@ static int virtio_gpu_load(QEMUFile *f, void *opaque, size_t size,
         for (i = 0; i < res->iov_cnt; i++) {
             hwaddr len = res->iov[i].iov_len;
             res->iov[i].iov_base =
-                dma_memory_map(VIRTIO_DEVICE(g)->dma_as,
-                               res->addrs[i], &len, DMA_DIRECTION_TO_DEVICE);
+                dma_memory_map(VIRTIO_DEVICE(g)->dma_as, res->addrs[i], &len,
+                               DMA_DIRECTION_TO_DEVICE,
+                               MEMTXATTRS_UNSPECIFIED);
             if (!res->iov[i].iov_base || len != res->iov[i].iov_len) {
                 /* Clean up the half-a-mapping we just created... */

diff --git a/hw/hyperv/vmbus.c b/hw/hyperv/vmbus.c
--- a/hw/hyperv/vmbus.c
+++ b/hw/hyperv/vmbus.c

@@ -373,7 +373,8 @@ static ssize_t gpadl_iter_io(GpadlIter *iter, void *buf, uint32_t len)
             maddr = (iter->gpadl->gfns[idx] << TARGET_PAGE_BITS) | off_in_page;
 
-            iter->map = dma_memory_map(iter->as, maddr, &mlen, iter->dir);
+            iter->map = dma_memory_map(iter->as, maddr, &mlen, iter->dir,
+                                       MEMTXATTRS_UNSPECIFIED);
             if (mlen != pgleft) {
                 dma_memory_unmap(iter->as, iter->map, mlen, iter->dir, 0);
                 iter->map = NULL;
@@ -490,7 +491,8 @@ int vmbus_map_sgl(VMBusChanReq *req, DMADirection dir, struct iovec *iov,
             goto err;
         }
 
-        iov[ret_cnt].iov_base = dma_memory_map(sgl->as, a, &l, dir);
+        iov[ret_cnt].iov_base = dma_memory_map(sgl->as, a, &l, dir,
+                                               MEMTXATTRS_UNSPECIFIED);
         if (!l) {
             ret = -EFAULT;
             goto err;
@@ -566,7 +568,7 @@ static vmbus_ring_buffer *ringbuf_map_hdr(VMBusRingBufCommon *ringbuf)
     dma_addr_t mlen = sizeof(*rb);
 
     rb = dma_memory_map(ringbuf->as, ringbuf->rb_addr, &mlen,
-                        DMA_DIRECTION_FROM_DEVICE);
+                        DMA_DIRECTION_FROM_DEVICE, MEMTXATTRS_UNSPECIFIED);
     if (mlen != sizeof(*rb)) {
         dma_memory_unmap(ringbuf->as, rb, mlen,
                          DMA_DIRECTION_FROM_DEVICE, 0);

diff --git a/hw/ide/ahci.c b/hw/ide/ahci.c
--- a/hw/ide/ahci.c
+++ b/hw/ide/ahci.c

@@ -249,7 +249,8 @@ static void map_page(AddressSpace *as, uint8_t **ptr, uint64_t addr,
         dma_memory_unmap(as, *ptr, len, DMA_DIRECTION_FROM_DEVICE, len);
     }
 
-    *ptr = dma_memory_map(as, addr, &len, DMA_DIRECTION_FROM_DEVICE);
+    *ptr = dma_memory_map(as, addr, &len, DMA_DIRECTION_FROM_DEVICE,
+                          MEMTXATTRS_UNSPECIFIED);
     if (len < wanted && *ptr) {
         dma_memory_unmap(as, *ptr, len, DMA_DIRECTION_FROM_DEVICE, len);
         *ptr = NULL;
@@ -939,7 +940,8 @@ static int ahci_populate_sglist(AHCIDevice *ad, QEMUSGList *sglist,
     /* map PRDT */
     if (!(prdt = dma_memory_map(ad->hba->as, prdt_addr, &prdt_len,
-                                DMA_DIRECTION_TO_DEVICE))){
+                                DMA_DIRECTION_TO_DEVICE,
+                                MEMTXATTRS_UNSPECIFIED))){
         trace_ahci_populate_sglist_no_map(ad->hba, ad->port_no);
         return -1;
     }
@@ -1301,7 +1303,7 @@ static int handle_cmd(AHCIState *s, int port, uint8_t slot)
     tbl_addr = le64_to_cpu(cmd->tbl_addr);
     cmd_len = 0x80;
     cmd_fis = dma_memory_map(s->as, tbl_addr, &cmd_len,
-                             DMA_DIRECTION_TO_DEVICE);
+                             DMA_DIRECTION_TO_DEVICE, MEMTXATTRS_UNSPECIFIED);
     if (!cmd_fis) {
         trace_handle_cmd_badfis(s, port);
         return -1;

diff --git a/hw/usb/libhw.c b/hw/usb/libhw.c
--- a/hw/usb/libhw.c
+++ b/hw/usb/libhw.c

@@ -36,7 +36,8 @@ int usb_packet_map(USBPacket *p, QEMUSGList *sgl)
         while (len) {
             dma_addr_t xlen = len;
-            mem = dma_memory_map(sgl->as, base, &xlen, dir);
+            mem = dma_memory_map(sgl->as, base, &xlen, dir,
+                                 MEMTXATTRS_UNSPECIFIED);
             if (!mem) {
                 goto err;
             }

diff --git a/hw/virtio/virtio.c b/hw/virtio/virtio.c
--- a/hw/virtio/virtio.c
+++ b/hw/virtio/virtio.c

@@ -1306,7 +1306,8 @@ static bool virtqueue_map_desc(VirtIODevice *vdev, unsigned int *p_num_sg,
         iov[num_sg].iov_base = dma_memory_map(vdev->dma_as, pa, &len,
                                               is_write ?
                                               DMA_DIRECTION_FROM_DEVICE :
-                                              DMA_DIRECTION_TO_DEVICE);
+                                              DMA_DIRECTION_TO_DEVICE,
+                                              MEMTXATTRS_UNSPECIFIED);
         if (!iov[num_sg].iov_base) {
             virtio_error(vdev, "virtio: bogus descriptor or out of resources");
             goto out;
@@ -1355,7 +1356,8 @@ static void virtqueue_map_iovec(VirtIODevice *vdev, struct iovec *sg,
         sg[i].iov_base = dma_memory_map(vdev->dma_as,
                                         addr[i], &len, is_write ?
                                         DMA_DIRECTION_FROM_DEVICE :
-                                        DMA_DIRECTION_TO_DEVICE);
+                                        DMA_DIRECTION_TO_DEVICE,
+                                        MEMTXATTRS_UNSPECIFIED);
         if (!sg[i].iov_base) {
             error_report("virtio: error trying to map MMIO memory");
             exit(1);

diff --git a/include/hw/pci/pci.h b/include/hw/pci/pci.h
--- a/include/hw/pci/pci.h
+++ b/include/hw/pci/pci.h

@@ -875,7 +875,8 @@ static inline void *pci_dma_map(PCIDevice *dev, dma_addr_t addr,
                                 dma_addr_t *plen, DMADirection dir)
 {
     void *buf;
 
-    buf = dma_memory_map(pci_get_address_space(dev), addr, plen, dir);
+    buf = dma_memory_map(pci_get_address_space(dev), addr, plen, dir,
+                         MEMTXATTRS_UNSPECIFIED);
     return buf;
 }
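Note that pci_dma_map() itself still passes MEMTXATTRS_UNSPECIFIED, so PCI
callers are unaffected by this patch. A device that wants its DMA tagged
with real attributes can now call dma_memory_map() directly; a minimal
sketch, not part of this patch (the helper name and the use of the PCI
requester ID are illustrative):

    /* Hypothetical helper: map guest memory for DMA, attaching the
     * device's PCI requester ID to the transaction attributes. */
    static void *pci_dma_map_with_rid(PCIDevice *dev, dma_addr_t addr,
                                      dma_addr_t *plen, DMADirection dir)
    {
        MemTxAttrs attrs = { .requester_id = pci_requester_id(dev) };

        return dma_memory_map(pci_get_address_space(dev), addr, plen,
                              dir, attrs);
    }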

diff --git a/include/sysemu/dma.h b/include/sysemu/dma.h
--- a/include/sysemu/dma.h
+++ b/include/sysemu/dma.h

@@ -202,16 +202,17 @@ MemTxResult dma_memory_set(AddressSpace *as, dma_addr_t addr,
  * @addr: address within that address space
  * @len: pointer to length of buffer; updated on return
  * @dir: indicates the transfer direction
+ * @attrs: memory attributes
  */
 static inline void *dma_memory_map(AddressSpace *as,
                                    dma_addr_t addr, dma_addr_t *len,
-                                   DMADirection dir)
+                                   DMADirection dir, MemTxAttrs attrs)
 {
     hwaddr xlen = *len;
     void *p;
 
     p = address_space_map(as, addr, &xlen, dir == DMA_DIRECTION_FROM_DEVICE,
-                          MEMTXATTRS_UNSPECIFIED);
+                          attrs);
     *len = xlen;
     return p;
 }
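With the attributes now a parameter, a converted call site follows the same
map/check/unmap pattern as before, merely spelling the attributes out. A
minimal sketch of the pattern the callers above use (as, addr and size are
placeholders assumed to be in scope):

    dma_addr_t len = size;
    void *p;

    p = dma_memory_map(as, addr, &len, DMA_DIRECTION_TO_DEVICE,
                       MEMTXATTRS_UNSPECIFIED);
    if (!p || len != size) {
        /* Short or failed mapping: undo and fail, as the hunks above do. */
        if (p) {
            dma_memory_unmap(as, p, len, DMA_DIRECTION_TO_DEVICE, 0);
        }
        return -1;
    }
    /* ... read the descriptor data through p ... */
    dma_memory_unmap(as, p, len, DMA_DIRECTION_TO_DEVICE, 0);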

diff --git a/softmmu/dma-helpers.c b/softmmu/dma-helpers.c
--- a/softmmu/dma-helpers.c
+++ b/softmmu/dma-helpers.c

@@ -143,7 +143,8 @@ static void dma_blk_cb(void *opaque, int ret)
     while (dbs->sg_cur_index < dbs->sg->nsg) {
         cur_addr = dbs->sg->sg[dbs->sg_cur_index].base + dbs->sg_cur_byte;
         cur_len = dbs->sg->sg[dbs->sg_cur_index].len - dbs->sg_cur_byte;
-        mem = dma_memory_map(dbs->sg->as, cur_addr, &cur_len, dbs->dir);
+        mem = dma_memory_map(dbs->sg->as, cur_addr, &cur_len, dbs->dir,
+                             MEMTXATTRS_UNSPECIFIED);
         /*
          * Make reads deterministic in icount mode. Windows sometimes issues
          * disk read requests with overlapping SGs. It leads