io_uring/zcrx: allocate sgtable for umem areas

Currently, DMA addresses for umem areas are stored directly in niovs.
It's memory efficient but inconvenient: I need a better format 1) to
share code with dmabuf areas, and 2) to disentangle page, folio and
niov sizes. dmabuf already provides an sg_table, so create one for
user memory as well.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Reviewed-by: David Wei <dw@davidwei.uk>
Link: https://lore.kernel.org/r/f3c15081827c1bf5427d3a2e693bc526476b87ee.1751466461.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Commit:    b84621d96e (parent: 54e89a93ef)
Author:    Pavel Begunkov, 2025-07-02 15:29:07 +01:00
Committer: Jens Axboe
2 changed files with 29 additions and 52 deletions
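The core of the change is visible in io_import_umem() below: the pinned user pages get covered by a single sg_table, so mapping can later be one dma_map_sgtable() call instead of a per-page loop. The following is a minimal, self-contained sketch of that flow, not the patch itself: the function name is hypothetical, and pin_user_pages_fast() stands in for io_uring's own pinning helper, which is not shown in this diff.

/*
 * Minimal sketch (hypothetical name): pin a user address range and
 * build one sg_table over the pinned pages, the way io_import_umem()
 * now does. Error handling is kept deliberately simple.
 */
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

static int sketch_pin_and_sgtable(unsigned long uaddr, int nr_pages,
                                  struct page ***pagesp, struct sg_table *sgt)
{
        struct page **pages;
        int pinned, ret;

        pages = kvmalloc_array(nr_pages, sizeof(*pages), GFP_KERNEL);
        if (!pages)
                return -ENOMEM;

        pinned = pin_user_pages_fast(uaddr, nr_pages,
                                     FOLL_WRITE | FOLL_LONGTERM, pages);
        if (pinned != nr_pages) {
                if (pinned > 0)
                        unpin_user_pages(pages, pinned);
                kvfree(pages);
                return pinned < 0 ? pinned : -EFAULT;
        }

        /* One table covers the whole range; physically adjacent pages
         * are coalesced into fewer scatterlist entries where possible. */
        ret = sg_alloc_table_from_pages(sgt, pages, nr_pages, 0,
                                        (unsigned long)nr_pages << PAGE_SHIFT,
                                        GFP_KERNEL_ACCOUNT);
        if (ret) {
                unpin_user_pages(pages, nr_pages);
                kvfree(pages);
                return ret;
        }

        *pagesp = pages;
        return 0;
}

The coalescing done by sg_alloc_table_from_pages() is also what lets the follow-up patches disentangle niov size from page and folio sizes.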

--- a/io_uring/zcrx.c
+++ b/io_uring/zcrx.c

@@ -161,7 +161,7 @@ static int io_import_umem(struct io_zcrx_ifq *ifq,
 			  struct io_uring_zcrx_area_reg *area_reg)
 {
 	struct page **pages;
-	int nr_pages;
+	int nr_pages, ret;
 
 	if (area_reg->dmabuf_fd)
 		return -EINVAL;
@@ -172,6 +172,12 @@ static int io_import_umem(struct io_zcrx_ifq *ifq,
 	if (IS_ERR(pages))
 		return PTR_ERR(pages);
 
+	ret = sg_alloc_table_from_pages(&mem->page_sg_table, pages, nr_pages,
+					0, nr_pages << PAGE_SHIFT,
+					GFP_KERNEL_ACCOUNT);
+	if (ret)
+		return ret;
+
 	mem->pages = pages;
 	mem->nr_folios = nr_pages;
 	mem->size = area_reg->len;
@@ -186,6 +192,7 @@ static void io_release_area_mem(struct io_zcrx_mem *mem)
 	}
 	if (mem->pages) {
 		unpin_user_pages(mem->pages, mem->nr_folios);
+		sg_free_table(&mem->page_sg_table);
 		kvfree(mem->pages);
 	}
 }
@@ -207,67 +214,36 @@ static int io_import_area(struct io_zcrx_ifq *ifq,
 	return io_import_umem(ifq, mem, area_reg);
 }
 
-static void io_zcrx_unmap_umem(struct io_zcrx_ifq *ifq,
-			       struct io_zcrx_area *area, int nr_mapped)
+static void io_zcrx_unmap_area(struct io_zcrx_ifq *ifq,
+			       struct io_zcrx_area *area)
 {
 	int i;
 
-	for (i = 0; i < nr_mapped; i++) {
-		netmem_ref netmem = net_iov_to_netmem(&area->nia.niovs[i]);
-		dma_addr_t dma = page_pool_get_dma_addr_netmem(netmem);
-
-		dma_unmap_page_attrs(ifq->dev, dma, PAGE_SIZE,
-				     DMA_FROM_DEVICE, IO_DMA_ATTR);
-	}
-}
-
-static void __io_zcrx_unmap_area(struct io_zcrx_ifq *ifq,
-				 struct io_zcrx_area *area, int nr_mapped)
-{
-	int i;
-
-	if (area->mem.is_dmabuf)
-		io_release_dmabuf(&area->mem);
-	else
-		io_zcrx_unmap_umem(ifq, area, nr_mapped);
+	guard(mutex)(&ifq->dma_lock);
+	if (!area->is_mapped)
+		return;
+	area->is_mapped = false;
 
 	for (i = 0; i < area->nia.num_niovs; i++)
 		net_mp_niov_set_dma_addr(&area->nia.niovs[i], 0);
-}
 
-static void io_zcrx_unmap_area(struct io_zcrx_ifq *ifq, struct io_zcrx_area *area)
-{
-	guard(mutex)(&ifq->dma_lock);
-
-	if (area->is_mapped)
-		__io_zcrx_unmap_area(ifq, area, area->nia.num_niovs);
-	area->is_mapped = false;
+	if (area->mem.is_dmabuf) {
+		io_release_dmabuf(&area->mem);
+	} else {
+		dma_unmap_sgtable(ifq->dev, &area->mem.page_sg_table,
+				  DMA_FROM_DEVICE, IO_DMA_ATTR);
+	}
 }
 
-static int io_zcrx_map_area_umem(struct io_zcrx_ifq *ifq, struct io_zcrx_area *area)
+static unsigned io_zcrx_map_area_umem(struct io_zcrx_ifq *ifq, struct io_zcrx_area *area)
 {
-	int i;
+	int ret;
 
-	for (i = 0; i < area->nia.num_niovs; i++) {
-		struct net_iov *niov = &area->nia.niovs[i];
-		dma_addr_t dma;
-
-		dma = dma_map_page_attrs(ifq->dev, area->mem.pages[i], 0,
-					 PAGE_SIZE, DMA_FROM_DEVICE, IO_DMA_ATTR);
-		if (dma_mapping_error(ifq->dev, dma))
-			break;
-		if (net_mp_niov_set_dma_addr(niov, dma)) {
-			dma_unmap_page_attrs(ifq->dev, dma, PAGE_SIZE,
-					     DMA_FROM_DEVICE, IO_DMA_ATTR);
-			break;
-		}
-	}
-
-	if (i != area->nia.num_niovs) {
-		__io_zcrx_unmap_area(ifq, area, i);
-		return -EINVAL;
-	}
-	return 0;
+	ret = dma_map_sgtable(ifq->dev, &area->mem.page_sg_table,
+			      DMA_FROM_DEVICE, IO_DMA_ATTR);
+	if (ret < 0)
+		return ret;
+	return io_populate_area_dma(ifq, area, &area->mem.page_sg_table, 0);
 }
 
 static int io_zcrx_map_area(struct io_zcrx_ifq *ifq, struct io_zcrx_area *area)
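io_populate_area_dma(), called at the end of the new io_zcrx_map_area_umem(), was introduced earlier in this series and is not part of this diff. Conceptually it walks the DMA-mapped sg_table and hands page-sized addresses to the area's niovs. The following is only a rough, hypothetical sketch of that walk, not the kernel's actual implementation; it assumes the zcrx types from io_uring/zcrx.h and page-aligned, page-sized niovs.

/*
 * Hypothetical sketch of an sgtable-to-niov fan-out; the real
 * io_populate_area_dma() may differ. The ifq parameter is kept only
 * to mirror the call signature used above.
 */
static int sketch_populate_area_dma(struct io_zcrx_ifq *ifq,
                                    struct io_zcrx_area *area,
                                    struct sg_table *sgt, unsigned long off)
{
        unsigned int niov_idx = 0;
        struct scatterlist *sg;
        int i;

        for_each_sgtable_dma_sg(sgt, sg, i) {
                dma_addr_t dma = sg_dma_address(sg);
                unsigned long len = sg_dma_len(sg);

                /* skip the leading offset into the mapping, if any */
                if (off >= len) {
                        off -= len;
                        continue;
                }
                dma += off;
                len -= off;
                off = 0;

                /* carve the DMA-contiguous chunk into page-sized niovs */
                while (len >= PAGE_SIZE && niov_idx < area->nia.num_niovs) {
                        if (net_mp_niov_set_dma_addr(&area->nia.niovs[niov_idx], dma))
                                return -EFAULT;
                        dma += PAGE_SIZE;
                        len -= PAGE_SIZE;
                        niov_idx++;
                }
        }
        return niov_idx == area->nia.num_niovs ? 0 : -EFAULT;
}

Whatever the helper's exact shape, the failure path becomes simple with this patch: the old partial-unmap bookkeeping (nr_mapped) disappears, since a mapped sgtable is torn down in one dma_unmap_sgtable() call.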

--- a/io_uring/zcrx.h
+++ b/io_uring/zcrx.h

@@ -14,6 +14,7 @@ struct io_zcrx_mem {
 
 	struct page			**pages;
 	unsigned long			nr_folios;
+	struct sg_table			page_sg_table;
 
 	struct dma_buf_attachment	*attach;
 	struct dma_buf			*dmabuf;