Mirror of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git (synced 2025-09-18 22:14:16 +00:00)
mm: rename page->index to page->__folio_index
All users of page->index have been converted to not refer to it any more.
Update a few pieces of documentation that were missed and prevent new
users from appearing (or at least make them easy to grep for).

Link: https://lkml.kernel.org/r/20250514181508.3019795-1-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Acked-by: David Hildenbrand <david@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent e94715982c
commit acc53a0b4c
9 changed files with 19 additions and 19 deletions
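For context, a sketch of what the rename means for callers: the offset now
lives behind the folio. page_folio() and folio->index are existing kernel
API; the wrapper function below is made up for illustration.

/*
 * Illustrative only: with page->index gone, a caller that still holds a
 * bare struct page gets the mapping offset via its folio.
 */
static pgoff_t example_mapping_offset(struct page *page)
{
	return page_folio(page)->index;
}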
@@ -227,9 +227,9 @@ void __flush_dcache_folio(struct address_space *mapping, struct folio *folio)
 	}
 
 	/*
-	 * If this is a page cache page, and we have an aliasing VIPT cache,
+	 * If this is a page cache folio, and we have an aliasing VIPT cache,
 	 * we only need to do one flush - which would be at the relevant
-	 * userspace colour, which is congruent with page->index.
+	 * userspace colour, which is congruent with folio->index.
 	 */
 	if (mapping && cache_is_vipt_aliasing())
 		flush_pfn_alias(folio_pfn(folio), folio_pos(folio));
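The "colour" in that comment is determined by the low bits of the file
offset. A hedged sketch, assuming the usual SHMLBA-based alias granularity
on ARM; this helper is illustrative, not the arch code:

/*
 * Sketch: on an aliasing VIPT cache, user mappings of the same file
 * offset share a cache colour, so one flush at that colour suffices.
 * folio_pos(folio) is folio->index in bytes.  Illustrative only.
 */
static unsigned long example_cache_colour(struct folio *folio)
{
	return (unsigned long)(folio_pos(folio) & (SHMLBA - 1));
}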
@@ -1276,9 +1276,9 @@ vm_fault_t finish_fault(struct vm_fault *vmf);
  * the page's disk buffers. PG_private must be set to tell the VM to call
  * into the filesystem to release these pages.
  *
- * A page may belong to an inode's memory mapping. In this case, page->mapping
- * is the pointer to the inode, and page->index is the file offset of the page,
- * in units of PAGE_SIZE.
+ * A folio may belong to an inode's memory mapping. In this case,
+ * folio->mapping points to the inode, and folio->index is the file
+ * offset of the folio, in units of PAGE_SIZE.
  *
  * If pagecache pages are not associated with an inode, they are said to be
  * anonymous pages. These may become associated with the swapcache, and in that
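Since folio->index is in units of PAGE_SIZE, converting it to a byte offset
is a single multiply; the kernel's folio_pos() helper encapsulates this.
A minimal sketch of the relationship documented above:

/* Sketch of the index-to-bytes relationship; the function name is
 * illustrative, the calculation is what folio_pos() performs. */
static loff_t example_folio_byte_offset(const struct folio *folio)
{
	return (loff_t)folio->index * PAGE_SIZE;
}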
@@ -107,7 +107,7 @@ struct page {
 			/* See page-flags.h for PAGE_MAPPING_FLAGS */
 			struct address_space *mapping;
 			union {
-				pgoff_t index;		/* Our offset within mapping. */
+				pgoff_t __folio_index;	/* Our offset within mapping. */
 				unsigned long share;	/* share count for fsdax */
 			};
 			/**
@@ -488,7 +488,7 @@ FOLIO_MATCH(flags, flags);
 FOLIO_MATCH(lru, lru);
 FOLIO_MATCH(mapping, mapping);
 FOLIO_MATCH(compound_head, lru);
-FOLIO_MATCH(index, index);
+FOLIO_MATCH(__folio_index, index);
 FOLIO_MATCH(private, private);
 FOLIO_MATCH(_mapcount, _mapcount);
 FOLIO_MATCH(_refcount, _refcount);
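FOLIO_MATCH (like TABLE_MATCH and ZPDESC_MATCH further down) is an offset
assertion: it fails the build if the named struct page and struct folio
fields stop overlaying each other. Roughly, as a sketch of the pattern
rather than the exact kernel text:

/* Sketch of the offset-assertion pattern behind FOLIO_MATCH: the folio's
 * friendly 'index' must sit exactly on top of page->__folio_index. */
#define FOLIO_MATCH(pg, fl) \
	static_assert(offsetof(struct page, pg) == offsetof(struct folio, fl))

FOLIO_MATCH(__folio_index, index);	/* the line this commit changes */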
@@ -589,7 +589,7 @@ TABLE_MATCH(flags, __page_flags);
 TABLE_MATCH(compound_head, pt_list);
 TABLE_MATCH(compound_head, _pt_pad_1);
 TABLE_MATCH(mapping, __page_mapping);
-TABLE_MATCH(index, pt_index);
+TABLE_MATCH(__folio_index, pt_index);
 TABLE_MATCH(rcu_head, pt_rcu_head);
 TABLE_MATCH(page_type, __page_type);
 TABLE_MATCH(_refcount, __page_refcount);
@@ -206,7 +206,7 @@ static u64 get_inode_sequence_number(struct inode *inode)
  *
  * For shared mappings (when @fshared), the key is:
  *
- *   ( inode->i_sequence, page->index, offset_within_page )
+ *   ( inode->i_sequence, page offset within mapping, offset_within_page )
  *
  * [ also see get_inode_sequence_number() ]
  *
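A hedged sketch of the three-part shared key described in that comment; the
field names mirror the comment, not necessarily the kernel's exact
futex_key layout:

/* Illustrative layout of the shared-mapping futex key components. */
struct example_shared_futex_key {
	u64		i_seq;	/* inode->i_sequence */
	unsigned long	pgoff;	/* folio offset within the mapping */
	unsigned int	offset;	/* offset of the futex word within the page */
};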
@@ -142,7 +142,7 @@ static void page_cache_delete(struct address_space *mapping,
 	xas_init_marks(&xas);
 
 	folio->mapping = NULL;
-	/* Leave page->index set: truncation lookup relies upon it */
+	/* Leave folio->index set: truncation lookup relies upon it */
 	mapping->nrpages -= nr;
 }
 
@@ -949,7 +949,7 @@ unlock:
 	return 0;
 error:
 	folio->mapping = NULL;
-	/* Leave page->index set: truncation relies upon it */
+	/* Leave folio->index set: truncation relies upon it */
 	folio_put_refs(folio, nr);
 	return xas_error(&xas);
 }
@@ -4668,8 +4668,8 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
 
 		/*
 		 * KSM sometimes has to copy on read faults, for example, if
-		 * page->index of !PageKSM() pages would be nonlinear inside the
-		 * anon VMA -- PageKSM() is lost on actual swapout.
+		 * folio->index of non-ksm folios would be nonlinear inside the
+		 * anon VMA -- the ksm flag is lost on actual swapout.
 		 */
 		folio = ksm_might_need_to_copy(folio, vma, vmf->address);
 		if (unlikely(!folio)) {
@@ -2565,11 +2565,11 @@ struct folio *writeback_iter(struct address_space *mapping,
 	if (!folio) {
 		/*
 		 * To avoid deadlocks between range_cyclic writeback and callers
-		 * that hold pages in PageWriteback to aggregate I/O until
+		 * that hold folios in writeback to aggregate I/O until
 		 * the writeback iteration finishes, we do not loop back to the
-		 * start of the file.  Doing so causes a page lock/page
+		 * start of the file.  Doing so causes a folio lock/folio
 		 * writeback access order inversion - we should only ever lock
-		 * multiple pages in ascending page->index order, and looping
+		 * multiple folios in ascending folio->index order, and looping
 		 * back to the start of the file violates that rule and causes
 		 * deadlocks.
 		 */
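The rule in that comment can be stated as code: when more than one folio
must be locked, take the locks in ascending folio->index order. A minimal
sketch; folio_lock() and swap() are existing kernel helpers, the function
itself is hypothetical:

/* Sketch of the ordering rule: lower index locks first, so two tasks
 * locking the same pair can never deadlock ABBA-style. */
static void example_lock_two_folios(struct folio *a, struct folio *b)
{
	if (a->index > b->index)
		swap(a, b);
	folio_lock(a);
	folio_lock(b);
}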
@@ -421,7 +421,7 @@ void truncate_inode_pages_range(struct address_space *mapping,
 	for (i = 0; i < folio_batch_count(&fbatch); i++) {
 		struct folio *folio = fbatch.folios[i];
 
-		/* We rely upon deletion not changing page->index */
+		/* We rely upon deletion not changing folio->index */
 
 		if (xa_is_value(folio))
 			continue;
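This is why page_cache_delete() above leaves folio->index intact: after
deletion, the index is the only thing that still says where the folio
lived. A hypothetical check in that spirit:

/* Illustrative: even after folio->mapping is cleared, folio->index can
 * still be tested against the range being truncated. */
static bool example_folio_in_range(struct folio *folio,
				   pgoff_t start, pgoff_t end)
{
	return folio->index >= start && folio->index <= end;
}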
@@ -54,8 +54,8 @@ struct zpdesc {
 ZPDESC_MATCH(flags, flags);
 ZPDESC_MATCH(lru, lru);
 ZPDESC_MATCH(mapping, movable_ops);
-ZPDESC_MATCH(index, next);
-ZPDESC_MATCH(index, handle);
+ZPDESC_MATCH(__folio_index, next);
+ZPDESC_MATCH(__folio_index, handle);
 ZPDESC_MATCH(private, zspage);
 ZPDESC_MATCH(page_type, first_obj_offset);
 ZPDESC_MATCH(_refcount, _refcount);