mirror of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
mm: use folio more widely in __split_huge_page
We already have a folio; use it instead of the head page where reasonable.
Saves a couple of calls to compound_head() and eliminates a few references
to page->mapping.

Link: https://lkml.kernel.org/r/20240228164326.1355045-1-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Sidhartha Kumar <sidhartha.kumar@oracle.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
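As a rough illustration of the saving the commit message describes (this sketch is not from the patch; the struct layouts and helper bodies below are simplified userspace stand-ins for the real kernel definitions): page-based predicates such as PageAnon() have to resolve the head page via compound_head() on every call, whereas the folio helpers assume the caller already holds the head, so each test is a plain flag check.

/*
 * Simplified userspace sketch, not kernel code: layouts and helper
 * bodies are hypothetical stand-ins.  PageAnon() below chases a
 * compound_head pointer on every call; folio_test_anon() does not,
 * because a folio is by definition a head page.
 */
#include <stdbool.h>
#include <stdio.h>

struct page {
        unsigned long flags;
        struct page *compound_head;     /* NULL for a head page */
        int refcount;
};

struct folio {
        struct page page;               /* a folio is a guaranteed head page */
};

#define PG_anon 0x1UL

static struct page *compound_head(struct page *page)
{
        return page->compound_head ? page->compound_head : page;
}

/* Old-style page API: one head lookup per call. */
static bool PageAnon(struct page *page)
{
        return compound_head(page)->flags & PG_anon;
}

/* Folio API: the head is resolved once, up front. */
static struct folio *page_folio(struct page *page)
{
        return (struct folio *)compound_head(page);
}

static bool folio_test_anon(struct folio *folio)
{
        return folio->page.flags & PG_anon;
}

static void folio_ref_add(struct folio *folio, int nr)
{
        folio->page.refcount += nr;
}

int main(void)
{
        struct page head = { .flags = PG_anon, .refcount = 1 };
        struct page tail = { .compound_head = &head };

        /* Old style: every predicate re-resolves the head. */
        if (PageAnon(&tail))
                head.refcount++;

        /* New style: resolve the head once, then reuse the folio. */
        struct folio *folio = page_folio(&tail);
        if (folio_test_anon(folio))
                folio_ref_add(folio, 1);

        printf("refcount = %d\n", head.refcount);
        return 0;
}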
parent d3246b6ee4
commit 435a755481

1 changed file with 11 additions and 10 deletions
@@ -2919,7 +2919,7 @@ static void __split_huge_page(struct page *page, struct list_head *list,
                 if (head[i].index >= end) {
                         struct folio *tail = page_folio(head + i);
 
-                        if (shmem_mapping(head->mapping))
+                        if (shmem_mapping(folio->mapping))
                                 nr_dropped++;
                         else if (folio_test_clear_dirty(tail))
                                 folio_account_cleaned(tail,
@@ -2927,7 +2927,7 @@ static void __split_huge_page(struct page *page, struct list_head *list,
                         __filemap_remove_folio(tail, NULL);
                         folio_put(tail);
                 } else if (!PageAnon(page)) {
-                        __xa_store(&head->mapping->i_pages, head[i].index,
+                        __xa_store(&folio->mapping->i_pages, head[i].index,
                                         head + i, 0);
                 } else if (swap_cache) {
                         __xa_store(&swap_cache->i_pages, offset + i,
@@ -2948,23 +2948,23 @@ static void __split_huge_page(struct page *page, struct list_head *list,
         split_page_owner(head, order, new_order);
 
         /* See comment in __split_huge_page_tail() */
-        if (PageAnon(head)) {
+        if (folio_test_anon(folio)) {
                 /* Additional pin to swap cache */
-                if (PageSwapCache(head)) {
-                        page_ref_add(head, 1 + new_nr);
+                if (folio_test_swapcache(folio)) {
+                        folio_ref_add(folio, 1 + new_nr);
                         xa_unlock(&swap_cache->i_pages);
                 } else {
-                        page_ref_inc(head);
+                        folio_ref_inc(folio);
                 }
         } else {
                 /* Additional pin to page cache */
-                page_ref_add(head, 1 + new_nr);
-                xa_unlock(&head->mapping->i_pages);
+                folio_ref_add(folio, 1 + new_nr);
+                xa_unlock(&folio->mapping->i_pages);
         }
         local_irq_enable();
 
         if (nr_dropped)
-                shmem_uncharge(head->mapping->host, nr_dropped);
+                shmem_uncharge(folio->mapping->host, nr_dropped);
         remap_page(folio, nr);
 
         if (folio_test_swapcache(folio))
@@ -2980,9 +2980,10 @@ static void __split_huge_page(struct page *page, struct list_head *list,
 
         for (i = 0; i < nr; i += new_nr) {
                 struct page *subpage = head + i;
+                struct folio *new_folio = page_folio(subpage);
                 if (subpage == page)
                         continue;
-                unlock_page(subpage);
+                folio_unlock(new_folio);
 
                 /*
                  * Subpages may be freed if there wasn't any mapping