// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2024, Google LLC.
 * Pasha Tatashin <pasha.tatashin@soleen.com>
 */
#include "iommu-pages.h"

#include <linux/gfp.h>
#include <linux/mm.h>

/*
 * struct ioptdesc is overlaid on struct page. Verify at compile time that
 * every ioptdesc field sits at exactly the same offset as the struct page
 * field it aliases, so the two views of the memory stay interchangeable.
 */
#define IOPTDESC_MATCH(pg_elm, elm) \
	static_assert(offsetof(struct page, pg_elm) == \
		      offsetof(struct ioptdesc, elm))
IOPTDESC_MATCH(flags, __page_flags);
IOPTDESC_MATCH(lru, iopt_freelist_elm); /* Ensure bit 0 is clear */
IOPTDESC_MATCH(mapping, __page_mapping);
IOPTDESC_MATCH(private, _private);
IOPTDESC_MATCH(page_type, __page_type);
IOPTDESC_MATCH(_refcount, __page_refcount);
#ifdef CONFIG_MEMCG
IOPTDESC_MATCH(memcg_data, memcg_data);
#endif
#undef IOPTDESC_MATCH
/* The overlay must never be larger than the struct page it lives inside. */
static_assert(sizeof(struct ioptdesc) <= sizeof(struct page));

/**
|
2025-04-08 13:54:03 -03:00
|
|
|
* iommu_alloc_pages_node_sz - Allocate a zeroed page of a given size from
|
|
|
|
* specific NUMA node
|
2025-04-08 13:53:55 -03:00
|
|
|
* @nid: memory NUMA node id
|
|
|
|
* @gfp: buddy allocator flags
|
2025-04-08 13:54:03 -03:00
|
|
|
* @size: Memory size to allocate, rounded up to a power of 2
|
2025-04-08 13:53:55 -03:00
|
|
|
*
|
2025-04-08 13:54:03 -03:00
|
|
|
* Returns the virtual address of the allocated page. The page must be freed
|
|
|
|
* either by calling iommu_free_pages() or via iommu_put_pages_list(). The
|
|
|
|
* returned allocation is round_up_pow_two(size) big, and is physically aligned
|
|
|
|
* to its size.
|
2025-04-08 13:53:55 -03:00
|
|
|
*/
|
2025-04-08 13:54:03 -03:00
|
|
|
void *iommu_alloc_pages_node_sz(int nid, gfp_t gfp, size_t size)
|
2025-04-08 13:53:55 -03:00
|
|
|
{
|
2025-04-08 13:54:03 -03:00
|
|
|
unsigned long pgcnt;
|
2025-04-08 13:54:01 -03:00
|
|
|
struct folio *folio;
|
2025-04-08 13:54:03 -03:00
|
|
|
unsigned int order;
|
2025-04-08 13:53:55 -03:00
|
|
|
|
2025-04-08 13:54:02 -03:00
|
|
|
/* This uses page_address() on the memory. */
|
|
|
|
if (WARN_ON(gfp & __GFP_HIGHMEM))
|
|
|
|
return NULL;
|
|
|
|
|
2025-04-08 13:54:03 -03:00
|
|
|
/*
|
|
|
|
* Currently sub page allocations result in a full page being returned.
|
|
|
|
*/
|
|
|
|
order = get_order(size);
|
|
|
|
|
2025-04-08 13:54:01 -03:00
|
|
|
/*
|
|
|
|
* __folio_alloc_node() does not handle NUMA_NO_NODE like
|
|
|
|
* alloc_pages_node() did.
|
|
|
|
*/
|
|
|
|
if (nid == NUMA_NO_NODE)
|
|
|
|
nid = numa_mem_id();
|
|
|
|
|
|
|
|
folio = __folio_alloc_node(gfp | __GFP_ZERO, order, nid);
|
|
|
|
if (unlikely(!folio))
|
2025-04-08 13:53:55 -03:00
|
|
|
return NULL;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* All page allocations that should be reported to as "iommu-pagetables"
|
|
|
|
* to userspace must use one of the functions below. This includes
|
|
|
|
* allocations of page-tables and other per-iommu_domain configuration
|
|
|
|
* structures.
|
|
|
|
*
|
|
|
|
* This is necessary for the proper accounting as IOMMU state can be
|
|
|
|
* rather large, i.e. multiple gigabytes in size.
|
|
|
|
*/
|
2025-04-08 13:54:03 -03:00
|
|
|
pgcnt = 1UL << order;
|
2025-04-08 13:54:01 -03:00
|
|
|
mod_node_page_state(folio_pgdat(folio), NR_IOMMU_PAGES, pgcnt);
|
|
|
|
lruvec_stat_mod_folio(folio, NR_SECONDARY_PAGETABLE, pgcnt);
|
2025-04-08 13:53:55 -03:00
|
|
|
|
2025-04-08 13:54:01 -03:00
|
|
|
return folio_address(folio);
|
2025-04-08 13:53:55 -03:00
|
|
|
}
|
2025-04-08 13:54:03 -03:00
|
|
|
EXPORT_SYMBOL_GPL(iommu_alloc_pages_node_sz);
|
2025-04-08 13:53:55 -03:00
|
|
|
|
2025-04-08 13:54:01 -03:00
|
|
|
static void __iommu_free_desc(struct ioptdesc *iopt)
|
2025-04-08 13:53:55 -03:00
|
|
|
{
|
2025-04-08 13:54:01 -03:00
|
|
|
struct folio *folio = ioptdesc_folio(iopt);
|
|
|
|
const unsigned long pgcnt = 1UL << folio_order(folio);
|
2025-04-08 13:53:55 -03:00
|
|
|
|
2025-04-08 13:54:01 -03:00
|
|
|
mod_node_page_state(folio_pgdat(folio), NR_IOMMU_PAGES, -pgcnt);
|
|
|
|
lruvec_stat_mod_folio(folio, NR_SECONDARY_PAGETABLE, -pgcnt);
|
|
|
|
folio_put(folio);
|
2025-04-08 13:53:55 -03:00
|
|
|
}
|
|
|
|
|
|
|
|
/**
 * iommu_free_pages - free pages
 * @virt: virtual address of the page to be freed.
 *
 * The memory must have been allocated by iommu_alloc_pages_node_sz().
 * NULL is accepted and ignored.
 */
void iommu_free_pages(void *virt)
{
	if (virt)
		__iommu_free_desc(virt_to_ioptdesc(virt));
}
EXPORT_SYMBOL_GPL(iommu_free_pages);

/**
|
2025-04-08 13:54:00 -03:00
|
|
|
* iommu_put_pages_list - free a list of pages.
|
2025-04-08 13:53:56 -03:00
|
|
|
* @list: The list of pages to be freed
|
2025-04-08 13:53:55 -03:00
|
|
|
*
|
2025-05-14 09:48:55 -03:00
|
|
|
* Frees a list of pages allocated by iommu_alloc_pages_node_sz(). On return the
|
|
|
|
* passed list is invalid, the caller must use IOMMU_PAGES_LIST_INIT to reinit
|
|
|
|
* the list if it expects to use it again.
|
2025-04-08 13:53:55 -03:00
|
|
|
*/
|
2025-04-08 13:54:00 -03:00
|
|
|
void iommu_put_pages_list(struct iommu_pages_list *list)
|
2025-04-08 13:53:55 -03:00
|
|
|
{
|
2025-04-08 13:54:01 -03:00
|
|
|
struct ioptdesc *iopt, *tmp;
|
2025-04-08 13:53:55 -03:00
|
|
|
|
2025-04-08 13:54:01 -03:00
|
|
|
list_for_each_entry_safe(iopt, tmp, &list->pages, iopt_freelist_elm)
|
|
|
|
__iommu_free_desc(iopt);
|
2025-04-08 13:53:55 -03:00
|
|
|
}
|
2025-04-08 13:54:00 -03:00
|
|
|
EXPORT_SYMBOL_GPL(iommu_put_pages_list);
|