iommu: Update various drivers to pass in lg2sz instead of order to iommu pages
Convert most of the places calling get_order() as an argument to the
iommu-pages allocator into order_base_2() or the _sz flavour instead.

These places already have an exact size; there is no particular reason
to use an order here.

Reviewed-by: Lu Baolu <baolu.lu@linux.intel.com>
Tested-by: Nicolin Chen <nicolinc@nvidia.com>
Tested-by: Alejandro Jimenez <alejandro.j.jimenez@oracle.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
Link: https://lore.kernel.org/r/19-v4-c8663abbb606+3f7-iommu_pages_jgg@nvidia.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
parent 9dda3f01dd
commit d50aaa4a9f

5 changed files with 32 additions and 31 deletions
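Editor's note on the conversion pattern: get_order() turns a byte count into a power-of-two page order, and the order-based allocator then expands that order back into a size, so any request that is not a power-of-two number of pages gets silently rounded up; the _sz flavour keeps the exact byte count. The sketch below is a plain userspace C approximation of the kernel's get_order() and order_base_2() helpers (the real iommu_alloc_pages*() APIs are not reproduced, and the 8192-byte figure is only a stand-in for a constant like CMD_BUFFER_SIZE):

/*
 * Userspace sketch only: get_order() and order_base_2() are modelled on
 * the kernel helpers; nothing here is the real iommu-pages API.
 */
#include <assert.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

static int fls_ul(unsigned long x)        /* index of last set bit, 1-based */
{
	int r = 0;

	while (x) {
		r++;
		x >>= 1;
	}
	return r;
}

static int get_order(unsigned long size)  /* smallest order covering size */
{
	return fls_ul((size - 1) >> PAGE_SHIFT);
}

static int order_base_2(unsigned long n)  /* ceil(log2(n)) */
{
	return n > 1 ? fls_ul(n - 1) : 0;
}

int main(void)
{
	unsigned long size = 8192;     /* stand-in for e.g. CMD_BUFFER_SIZE */
	int order = get_order(size);

	/* Old style: size -> order; the allocator re-derives the size. */
	printf("order %d re-derives %lu bytes\n", order, PAGE_SIZE << order);

	/* The round-trip is lossless only for power-of-two page counts:
	 * a 3-page request inflates to 4 pages through an order. */
	assert((PAGE_SIZE << get_order(3 * PAGE_SIZE)) == 4 * PAGE_SIZE);

	/* order_base_2() connects an exact page count back to an order. */
	assert(order_base_2(size / PAGE_SIZE) == order);
	return 0;
}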
drivers/iommu/amd/init.c
@@ -626,8 +626,8 @@ static int __init find_last_devid_acpi(struct acpi_table_header *table, u16 pci_
 
 /* Allocate per PCI segment device table */
 static inline int __init alloc_dev_table(struct amd_iommu_pci_seg *pci_seg)
 {
-	pci_seg->dev_table = iommu_alloc_pages(GFP_KERNEL | GFP_DMA32,
-					       get_order(pci_seg->dev_table_size));
+	pci_seg->dev_table = iommu_alloc_pages_sz(GFP_KERNEL | GFP_DMA32,
+						  pci_seg->dev_table_size);
 	if (!pci_seg->dev_table)
 		return -ENOMEM;
@@ -707,8 +707,7 @@ static void __init free_alias_table(struct amd_iommu_pci_seg *pci_seg)
  */
 static int __init alloc_command_buffer(struct amd_iommu *iommu)
 {
-	iommu->cmd_buf = iommu_alloc_pages(GFP_KERNEL,
-					   get_order(CMD_BUFFER_SIZE));
+	iommu->cmd_buf = iommu_alloc_pages_sz(GFP_KERNEL, CMD_BUFFER_SIZE);
 
 	return iommu->cmd_buf ? 0 : -ENOMEM;
 }
@@ -811,14 +810,16 @@ static void __init free_command_buffer(struct amd_iommu *iommu)
 void *__init iommu_alloc_4k_pages(struct amd_iommu *iommu, gfp_t gfp,
 				  size_t size)
 {
-	int order = get_order(size);
-	void *buf = iommu_alloc_pages(gfp, order);
+	void *buf;
 
-	if (buf &&
-	    check_feature(FEATURE_SNP) &&
-	    set_memory_4k((unsigned long)buf, (1 << order))) {
+	size = PAGE_ALIGN(size);
+	buf = iommu_alloc_pages_sz(gfp, size);
+	if (!buf)
+		return NULL;
+	if (check_feature(FEATURE_SNP) &&
+	    set_memory_4k((unsigned long)buf, size / PAGE_SIZE)) {
 		iommu_free_pages(buf);
-		buf = NULL;
+		return NULL;
 	}
 
 	return buf;
@@ -913,11 +914,11 @@ static int iommu_init_ga_log(struct amd_iommu *iommu)
 	if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))
 		return 0;
 
-	iommu->ga_log = iommu_alloc_pages(GFP_KERNEL, get_order(GA_LOG_SIZE));
+	iommu->ga_log = iommu_alloc_pages_sz(GFP_KERNEL, GA_LOG_SIZE);
 	if (!iommu->ga_log)
 		goto err_out;
 
-	iommu->ga_log_tail = iommu_alloc_pages(GFP_KERNEL, get_order(8));
+	iommu->ga_log_tail = iommu_alloc_pages_sz(GFP_KERNEL, 8);
 	if (!iommu->ga_log_tail)
 		goto err_out;
 
@@ -1012,8 +1013,8 @@ static bool __copy_device_table(struct amd_iommu *iommu)
 	if (!old_devtb)
 		return false;
 
-	pci_seg->old_dev_tbl_cpy = iommu_alloc_pages(GFP_KERNEL | GFP_DMA32,
-						     get_order(pci_seg->dev_table_size));
+	pci_seg->old_dev_tbl_cpy = iommu_alloc_pages_sz(
+		GFP_KERNEL | GFP_DMA32, pci_seg->dev_table_size);
 	if (pci_seg->old_dev_tbl_cpy == NULL) {
 		pr_err("Failed to allocate memory for copying old device table!\n");
 		memunmap(old_devtb);
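One behavioural detail in the iommu_alloc_4k_pages() hunk above: the old code handed set_memory_4k() the rounded-up allocation, 1 << order pages, whereas the new code page-aligns the request and passes exactly size / PAGE_SIZE pages. A worked check of the difference for a hypothetical three-page request, again as a userspace sketch assuming 4 KiB pages:

#include <assert.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

static int get_order(unsigned long size)   /* modelled on the kernel helper */
{
	int order = 0;

	size = (size - 1) >> PAGE_SHIFT;
	while (size) {
		order++;
		size >>= 1;
	}
	return order;
}

int main(void)
{
	unsigned long size = 3 * PAGE_SIZE;   /* hypothetical 3-page request */

	assert((1UL << get_order(size)) == 4);       /* old page count */
	assert(PAGE_ALIGN(size) / PAGE_SIZE == 3);   /* new page count */
	return 0;
}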
drivers/iommu/intel/dmar.c
@@ -1681,7 +1681,6 @@ int dmar_enable_qi(struct intel_iommu *iommu)
 {
 	struct q_inval *qi;
 	void *desc;
-	int order;
 
 	if (!ecap_qis(iommu->ecap))
 		return -ENOENT;
@@ -1702,8 +1701,9 @@ int dmar_enable_qi(struct intel_iommu *iommu)
 	 * Need two pages to accommodate 256 descriptors of 256 bits each
 	 * if the remapping hardware supports scalable mode translation.
 	 */
-	order = ecap_smts(iommu->ecap) ? 1 : 0;
-	desc = iommu_alloc_pages_node(iommu->node, GFP_ATOMIC, order);
+	desc = iommu_alloc_pages_node_sz(iommu->node, GFP_ATOMIC,
+					 ecap_smts(iommu->ecap) ? SZ_8K :
+								  SZ_4K);
 	if (!desc) {
 		kfree(qi);
 		iommu->qi = NULL;
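For the dmar_enable_qi() conversion just above, the byte sizes are exact stand-ins for the old orders on a 4 KiB-page system: order 0 is one page (SZ_4K) and order 1 the two pages (SZ_8K) that the scalable-mode comment calls for. A compile-time sanity check of that equivalence (userspace sketch; SZ_4K/SZ_8K mirror the constants in include/linux/sizes.h):

#define PAGE_SIZE 4096UL	/* assumption: 4 KiB pages */
#define SZ_4K     0x1000UL	/* as in include/linux/sizes.h */
#define SZ_8K     0x2000UL

_Static_assert((PAGE_SIZE << 0) == SZ_4K, "order 0 == one page");
_Static_assert((PAGE_SIZE << 1) == SZ_8K, "order 1 == two pages");

int main(void) { return 0; }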
drivers/iommu/io-pgtable-arm.c
@@ -263,14 +263,20 @@ static void *__arm_lpae_alloc_pages(size_t size, gfp_t gfp,
 				    void *cookie)
 {
 	struct device *dev = cfg->iommu_dev;
-	int order = get_order(size);
+	size_t alloc_size;
 	dma_addr_t dma;
 	void *pages;
 
+	/*
+	 * For very small starting-level translation tables the HW requires a
+	 * minimum alignment of at least 64 to cover all cases.
+	 */
+	alloc_size = max(size, 64);
 	if (cfg->alloc)
-		pages = cfg->alloc(cookie, size, gfp);
+		pages = cfg->alloc(cookie, alloc_size, gfp);
 	else
-		pages = iommu_alloc_pages_node(dev_to_node(dev), gfp, order);
+		pages = iommu_alloc_pages_node_sz(dev_to_node(dev), gfp,
+						  alloc_size);
 
 	if (!pages)
 		return NULL;
drivers/iommu/io-pgtable-dart.c
@@ -107,13 +107,6 @@ static phys_addr_t iopte_to_paddr(dart_iopte pte,
 	return paddr;
 }
 
-static void *__dart_alloc_pages(size_t size, gfp_t gfp)
-{
-	int order = get_order(size);
-
-	return iommu_alloc_pages(gfp, order);
-}
-
 static int dart_init_pte(struct dart_io_pgtable *data,
 			 unsigned long iova, phys_addr_t paddr,
 			 dart_iopte prot, int num_entries,
@@ -255,7 +248,7 @@ static int dart_map_pages(struct io_pgtable_ops *ops, unsigned long iova,
 
 	/* no L2 table present */
 	if (!pte) {
-		cptep = __dart_alloc_pages(tblsz, gfp);
+		cptep = iommu_alloc_pages_sz(gfp, tblsz);
 		if (!cptep)
 			return -ENOMEM;
 
@@ -412,7 +405,8 @@ apple_dart_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie)
 	cfg->apple_dart_cfg.n_ttbrs = 1 << data->tbl_bits;
 
 	for (i = 0; i < cfg->apple_dart_cfg.n_ttbrs; ++i) {
-		data->pgd[i] = __dart_alloc_pages(DART_GRANULE(data), GFP_KERNEL);
+		data->pgd[i] =
+			iommu_alloc_pages_sz(GFP_KERNEL, DART_GRANULE(data));
 		if (!data->pgd[i])
 			goto out_free_data;
 		cfg->apple_dart_cfg.ttbr[i] = virt_to_phys(data->pgd[i]);
drivers/iommu/sun50i-iommu.c
@@ -690,8 +690,8 @@ sun50i_iommu_domain_alloc_paging(struct device *dev)
 	if (!sun50i_domain)
 		return NULL;
 
-	sun50i_domain->dt = iommu_alloc_pages(GFP_KERNEL | GFP_DMA32,
-					      get_order(DT_SIZE));
+	sun50i_domain->dt =
+		iommu_alloc_pages_sz(GFP_KERNEL | GFP_DMA32, DT_SIZE);
 	if (!sun50i_domain->dt)
 		goto err_free_domain;
 