iommu/riscv: Update to use iommu_alloc_pages_node_lg2()

One part of the RISC-V driver already computes the allocation size; however,
the queue allocation must be aligned to 4k. The other objects are 4k by spec.

Reviewed-by: Tomasz Jeznach <tjeznach@rivosinc.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
Link: https://lore.kernel.org/r/18-v4-c8663abbb606+3f7-iommu_pages_jgg@nvidia.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
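
To make the interface change concrete, here is a minimal userspace sketch (not driver or kernel code; get_order(), MAX() and SZ_4K below are local stand-ins for the kernel definitions) of what callers pass before and after the conversion: a page order previously, a byte size clamped to the 4k minimum now. The real allocator may still round the byte size up internally.

#include <stdio.h>
#include <stddef.h>

#define SZ_4K		4096UL
#define MAX(a, b)	((a) > (b) ? (a) : (b))

/* Local stand-in for the kernel's get_order(): log2 of the page count, rounded up. */
static int get_order(size_t size)
{
	size_t pages = (size + SZ_4K - 1) / SZ_4K;
	int order = 0;

	while ((1UL << order) < pages)
		order++;
	return order;
}

int main(void)
{
	const size_t queue_sizes[] = { 2048, 4096, 12288, 65536 };

	for (size_t i = 0; i < sizeof(queue_sizes) / sizeof(queue_sizes[0]); i++) {
		const size_t qs = queue_sizes[i];

		/* Old interface: the caller converted bytes into a page order. */
		printf("queue %6zu B: order-based argument = %d\n", qs, get_order(qs));
		/* New interface: the caller passes bytes, clamped to the 4k minimum. */
		printf("queue %6zu B: size-based argument  = %zu\n", qs, (size_t)MAX(qs, SZ_4K));
	}
	return 0;
}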

@@ -65,13 +65,14 @@ static int riscv_iommu_devres_pages_match(struct device *dev, void *res, void *p
 	return devres->addr == target->addr;
 }
 
-static void *riscv_iommu_get_pages(struct riscv_iommu_device *iommu, int order)
+static void *riscv_iommu_get_pages(struct riscv_iommu_device *iommu,
+				   unsigned int size)
 {
 	struct riscv_iommu_devres *devres;
 	void *addr;
 
-	addr = iommu_alloc_pages_node(dev_to_node(iommu->dev),
-				      GFP_KERNEL_ACCOUNT, order);
+	addr = iommu_alloc_pages_node_sz(dev_to_node(iommu->dev),
+					 GFP_KERNEL_ACCOUNT, size);
 	if (unlikely(!addr))
 		return NULL;
 
@@ -161,9 +162,9 @@ static int riscv_iommu_queue_alloc(struct riscv_iommu_device *iommu,
 	} else {
 		do {
 			const size_t queue_size = entry_size << (logsz + 1);
-			const int order = get_order(queue_size);
 
-			queue->base = riscv_iommu_get_pages(iommu, order);
+			queue->base = riscv_iommu_get_pages(
+				iommu, max(queue_size, SZ_4K));
 			queue->phys = __pa(queue->base);
 		} while (!queue->base && logsz-- > 0);
 	}
@@ -618,7 +619,7 @@ static struct riscv_iommu_dc *riscv_iommu_get_dc(struct riscv_iommu_device *iomm
 				break;
 			}
 
-			ptr = riscv_iommu_get_pages(iommu, 0);
+			ptr = riscv_iommu_get_pages(iommu, SZ_4K);
 			if (!ptr)
 				return NULL;
 
@@ -698,7 +699,7 @@ static int riscv_iommu_iodir_alloc(struct riscv_iommu_device *iommu)
 	}
 
 	if (!iommu->ddt_root) {
-		iommu->ddt_root = riscv_iommu_get_pages(iommu, 0);
+		iommu->ddt_root = riscv_iommu_get_pages(iommu, SZ_4K);
 		iommu->ddt_phys = __pa(iommu->ddt_root);
 	}
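
For illustration only, a standalone sketch of the retry pattern from the queue-allocation hunk above, assuming a hypothetical mock_alloc() that fails above 16 KiB: the queue size is computed from logsz, clamped to 4 KiB, and logsz is shrunk until the allocation succeeds.

#include <stdio.h>
#include <stdlib.h>

#define SZ_4K		4096UL
#define MAX(a, b)	((a) > (b) ? (a) : (b))

/* Hypothetical allocator: pretend contiguous memory above 16 KiB is unavailable. */
static void *mock_alloc(size_t size)
{
	if (size > 16384)
		return NULL;
	return aligned_alloc(SZ_4K, size);
}

int main(void)
{
	const size_t entry_size = 16;	/* bytes per queue entry */
	int logsz = 12;			/* first try 2^(logsz + 1) entries */
	void *base = NULL;

	do {
		const size_t queue_size = entry_size << (logsz + 1);
		const size_t bytes = MAX(queue_size, SZ_4K);

		base = mock_alloc(bytes);
		printf("logsz=%2d -> request %6zu bytes: %s\n",
		       logsz, bytes, base ? "ok" : "failed, shrinking");
	} while (!base && logsz-- > 0);

	free(base);
	return base ? 0 : 1;
}

In the driver itself the loop is unchanged apart from dropping the get_order() step and passing max(queue_size, SZ_4K) in bytes, so the shrink-on-failure behaviour is preserved.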