mm/page_alloc: export free_frozen_pages() instead of free_unref_page()

We already have the concept of "frozen pages" (e.g. page_ref_freeze()), so
let's not complicate things by also having the concept of "unref pages".
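
To illustrate the convention the new name encodes (the helper below is a
hypothetical example, not part of this patch): a page whose refcount has
dropped to zero is "frozen", and it is such frozen pages that get handed
to free_frozen_pages():

	/*
	 * Hypothetical caller, for illustration only: drop the last
	 * reference, which leaves the page "frozen" (refcount zero),
	 * then return the frozen pages to the allocator -- mirroring
	 * what __free_pages() and __folio_put() do in the diff below.
	 */
	static void example_put_pages(struct page *page, unsigned int order)
	{
		if (put_page_testzero(page))
			free_frozen_pages(page, order);
	}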

Link: https://lkml.kernel.org/r/20241125210149.2976098-4-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: David Hildenbrand <david@redhat.com>
Reviewed-by: William Kucharski <william.kucharski@oracle.com>
Reviewed-by: Miaohe Lin <linmiaohe@huawei.com>
Reviewed-by: Muchun Song <songmuchun@bytedance.com>
Reviewed-by: Zi Yan <ziy@nvidia.com>
Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Cc: Mel Gorman <mgorman@techsingularity.net>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
commit 520128a1d1 (parent 38558b2460)
 4 files changed, 14 insertions(+), 14 deletions(-)

diff --git a/mm/internal.h b/mm/internal.h
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -741,7 +741,7 @@ extern bool free_pages_prepare(struct page *page, unsigned int order);
 extern int user_min_free_kbytes;
 
-void free_unref_page(struct page *page, unsigned int order);
+void free_frozen_pages(struct page *page, unsigned int order);
 void free_unref_folios(struct folio_batch *fbatch);
 
 extern void zone_pcp_reset(struct zone *zone);

diff --git a/mm/page_alloc.c b/mm/page_alloc.c
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2592,9 +2592,9 @@ static int nr_pcp_high(struct per_cpu_pages *pcp, struct zone *zone,
 	return high;
 }
 
-static void free_unref_page_commit(struct zone *zone, struct per_cpu_pages *pcp,
-				   struct page *page, int migratetype,
-				   unsigned int order)
+static void free_frozen_page_commit(struct zone *zone,
+		struct per_cpu_pages *pcp, struct page *page, int migratetype,
+		unsigned int order)
 {
 	int high, batch;
 	int pindex;
@@ -2643,7 +2643,7 @@ static void free_unref_page_commit(struct zone *zone, struct per_cpu_pages *pcp,
 /*
  * Free a pcp page
  */
-void free_unref_page(struct page *page, unsigned int order)
+void free_frozen_pages(struct page *page, unsigned int order)
 {
 	unsigned long __maybe_unused UP_flags;
 	struct per_cpu_pages *pcp;
@@ -2679,7 +2679,7 @@ void free_unref_page(struct page *page, unsigned int order)
 	pcp_trylock_prepare(UP_flags);
 	pcp = pcp_spin_trylock(zone->per_cpu_pageset);
 	if (pcp) {
-		free_unref_page_commit(zone, pcp, page, migratetype, order);
+		free_frozen_page_commit(zone, pcp, page, migratetype, order);
 		pcp_spin_unlock(pcp);
 	} else {
 		free_one_page(zone, page, pfn, order, FPI_NONE);
@@ -2743,7 +2743,7 @@ void free_unref_folios(struct folio_batch *folios)
 
 		/*
 		 * Free isolated pages directly to the
-		 * allocator, see comment in free_unref_page.
+		 * allocator, see comment in free_frozen_pages.
 		 */
 		if (is_migrate_isolate(migratetype)) {
 			free_one_page(zone, &folio->page, pfn,
@@ -2774,7 +2774,7 @@ void free_unref_folios(struct folio_batch *folios)
 			migratetype = MIGRATE_MOVABLE;
 
 		trace_mm_page_free_batched(&folio->page);
-		free_unref_page_commit(zone, pcp, &folio->page, migratetype,
+		free_frozen_page_commit(zone, pcp, &folio->page, migratetype,
 				order);
 	}
@@ -4837,11 +4837,11 @@ void __free_pages(struct page *page, unsigned int order)
 	struct alloc_tag *tag = pgalloc_tag_get(page);
 
 	if (put_page_testzero(page))
-		free_unref_page(page, order);
+		free_frozen_pages(page, order);
 	else if (!head) {
 		pgalloc_tag_sub_pages(tag, (1 << order) - 1);
 		while (order-- > 0)
-			free_unref_page(page + (1 << order), order);
+			free_frozen_pages(page + (1 << order), order);
 	}
 }
 EXPORT_SYMBOL(__free_pages);

diff --git a/mm/page_frag_cache.c b/mm/page_frag_cache.c
--- a/mm/page_frag_cache.c
+++ b/mm/page_frag_cache.c
@@ -86,7 +86,7 @@ void __page_frag_cache_drain(struct page *page, unsigned int count)
 	VM_BUG_ON_PAGE(page_ref_count(page) == 0, page);
 
 	if (page_ref_sub_and_test(page, count))
-		free_unref_page(page, compound_order(page));
+		free_frozen_pages(page, compound_order(page));
 }
 EXPORT_SYMBOL(__page_frag_cache_drain);
@@ -138,7 +138,7 @@ refill:
 			goto refill;
 
 		if (unlikely(encoded_page_decode_pfmemalloc(encoded_page))) {
-			free_unref_page(page,
+			free_frozen_pages(page,
 					encoded_page_decode_order(encoded_page));
 			goto refill;
 		}
@@ -166,6 +166,6 @@ void page_frag_free(void *addr)
 	struct page *page = virt_to_head_page(addr);
 
 	if (unlikely(put_page_testzero(page)))
-		free_unref_page(page, compound_order(page));
+		free_frozen_pages(page, compound_order(page));
 }
 EXPORT_SYMBOL(page_frag_free);

diff --git a/mm/swap.c b/mm/swap.c
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -109,7 +109,7 @@ void __folio_put(struct folio *folio)
 	page_cache_release(folio);
 	folio_unqueue_deferred_split(folio);
 	mem_cgroup_uncharge(folio);
-	free_unref_page(&folio->page, folio_order(folio));
+	free_frozen_pages(&folio->page, folio_order(folio));
 }
 EXPORT_SYMBOL(__folio_put);