slab: allocate frozen pages

Since slab does not use the page refcount, it can allocate and free frozen
pages, saving one atomic operation per free.
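To illustrate what is being saved, here is a minimal userspace sketch (plain C, not kernel code) of the difference between freeing a refcounted page and freeing a frozen one. The *_model() helpers and struct fake_page are made-up stand-ins for this example; only the frozen-page idea and the alloc_frozen_pages()/free_frozen_pages() names in the diff below come from the patch itself.

/*
 * Userspace model only: "pages" are malloc()ed and the refcount is a C11
 * atomic standing in for page->_refcount.
 */
#include <stdatomic.h>
#include <stdlib.h>

struct fake_page {
	atomic_int refcount;
};

/* Refcounted allocation: the page comes back with refcount == 1. */
static struct fake_page *alloc_page_model(void)
{
	struct fake_page *page = malloc(sizeof(*page));

	if (page)
		atomic_store(&page->refcount, 1);
	return page;
}

/* Refcounted free: one atomic RMW per free, even when slab is the sole owner. */
static void free_page_model(struct fake_page *page)
{
	if (!page)
		return;
	if (atomic_fetch_sub(&page->refcount, 1) == 1)
		free(page);
}

/* Frozen allocation: the refcount is left at 0 and never consulted. */
static struct fake_page *alloc_frozen_page_model(void)
{
	struct fake_page *page = malloc(sizeof(*page));

	if (page)
		atomic_store(&page->refcount, 0);
	return page;
}

/* Frozen free: no atomic operation at all - this is the saving. */
static void free_frozen_page_model(struct fake_page *page)
{
	free(page);
}

int main(void)
{
	free_page_model(alloc_page_model());			/* costs an atomic dec-and-test */
	free_frozen_page_model(alloc_frozen_page_model());	/* does not */
	return 0;
}

In the diff below the refcounted path (__free_pages() dropping the last reference) is replaced by free_frozen_pages(), which can skip that step because slab never takes a reference on its pages.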

Link: https://lkml.kernel.org/r/20241125210149.2976098-16-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: William Kucharski <william.kucharski@oracle.com>
Cc: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: David Hildenbrand <david@redhat.com>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Miaohe Lin <linmiaohe@huawei.com>
Cc: Muchun Song <songmuchun@bytedance.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
commit 9aec2fb0fd
parent 642975242e
Author:    Matthew Wilcox (Oracle) <willy@infradead.org>
Date:      2024-11-25 21:01:47 +00:00
Committer: Andrew Morton <akpm@linux-foundation.org>

--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2420,9 +2420,9 @@ static inline struct slab *alloc_slab_page(gfp_t flags, int node,
 	unsigned int order = oo_order(oo);
 
 	if (node == NUMA_NO_NODE)
-		folio = (struct folio *)alloc_pages(flags, order);
+		folio = (struct folio *)alloc_frozen_pages(flags, order);
 	else
-		folio = (struct folio *)__alloc_pages_node(node, flags, order);
+		folio = (struct folio *)__alloc_frozen_pages(flags, order, node, NULL);
 
 	if (!folio)
 		return NULL;
@@ -2656,7 +2656,7 @@ static void __free_slab(struct kmem_cache *s, struct slab *slab)
 	__folio_clear_slab(folio);
 	mm_account_reclaimed_pages(pages);
 	unaccount_slab(slab, order, s);
-	__free_pages(&folio->page, order);
+	free_frozen_pages(&folio->page, order);
 }
 
 static void rcu_free_slab(struct rcu_head *h)