mirror of
git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
synced 2025-09-18 22:14:16 +00:00
mm: fixup documentation regarding pte_numa() and PROT_NUMA
pte_numa() no longer exists -- replaced by pte_protnone() -- and PROT_NUMA probably never existed: MM_CP_PROT_NUMA also ends up using PROT_NONE. Let's fixup the doc. Link: https://lkml.kernel.org/r/20220825164659.89824-4-david@redhat.com Signed-off-by: David Hildenbrand <david@redhat.com> Cc: Andrea Arcangeli <aarcange@redhat.com> Cc: Hugh Dickins <hughd@google.com> Cc: Jason Gunthorpe <jgg@nvidia.com> Cc: John Hubbard <jhubbard@nvidia.com> Cc: Matthew Wilcox <willy@infradead.org> Cc: Mel Gorman <mgorman@suse.de> Cc: Peter Xu <peterx@redhat.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
This commit is contained in:
parent
0cf459866a
commit
7014887a01
1 changed file with 6 additions and 6 deletions
|
@@ -614,22 +614,22 @@ struct mm_struct {
|
||||||
#endif
|
#endif
|
||||||
#ifdef CONFIG_NUMA_BALANCING
|
#ifdef CONFIG_NUMA_BALANCING
|
||||||
/*
|
/*
|
||||||
* numa_next_scan is the next time that the PTEs will be marked
|
* numa_next_scan is the next time that PTEs will be remapped
|
||||||
* pte_numa. NUMA hinting faults will gather statistics and
|
* PROT_NONE to trigger NUMA hinting faults; such faults gather
|
||||||
* migrate pages to new nodes if necessary.
|
* statistics and migrate pages to new nodes if necessary.
|
||||||
*/
|
*/
|
||||||
unsigned long numa_next_scan;
|
unsigned long numa_next_scan;
|
||||||
|
|
||||||
/* Restart point for scanning and setting pte_numa */
|
/* Restart point for scanning and remapping PTEs. */
|
||||||
unsigned long numa_scan_offset;
|
unsigned long numa_scan_offset;
|
||||||
|
|
||||||
/* numa_scan_seq prevents two threads setting pte_numa */
|
/* numa_scan_seq prevents two threads remapping PTEs. */
|
||||||
int numa_scan_seq;
|
int numa_scan_seq;
|
||||||
#endif
|
#endif
|
||||||
/*
|
/*
|
||||||
* An operation with batched TLB flushing is going on. Anything
|
* An operation with batched TLB flushing is going on. Anything
|
||||||
* that can move process memory needs to flush the TLB when
|
* that can move process memory needs to flush the TLB when
|
||||||
* moving a PROT_NONE or PROT_NUMA mapped page.
|
* moving a PROT_NONE mapped page.
|
||||||
*/
|
*/
|
||||||
atomic_t tlb_flush_pending;
|
atomic_t tlb_flush_pending;
|
||||||
#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
|
#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
|
||||||
|
|
Loading…
Add table
Reference in a new issue