mm: do_zap_pte_range: return any_skipped information to the caller

Let the caller of do_zap_pte_range() know whether we skipped zapping any
PTEs or reinstalled any uffd-wp PTE markers, via the new any_skipped
parameter, so that subsequent commits can use this information in
zap_pte_range() to detect whether the PTE page can be reclaimed.
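The calling convention is the usual out-parameter idiom: the helpers keep
their existing return values (the number of PTEs processed) and report
"something was skipped or a uffd-wp marker was reinstalled" through a bool
pointer supplied by the caller. A minimal userspace sketch of that shape
(names are illustrative only, not the kernel API):

#include <stdbool.h>
#include <stdio.h>

/*
 * Illustrative stand-in for do_zap_pte_range(): clear up to max_nr
 * entries, return how many were examined, and report through
 * *any_skipped whether at least one entry was left in place.
 */
static int zap_entries(int *entries, int max_nr, bool *any_skipped)
{
	int i;

	for (i = 0; i < max_nr; i++) {
		if (entries[i] < 0) {	/* pretend this one must stay */
			*any_skipped = true;
			continue;
		}
		entries[i] = 0;		/* "zap" the entry */
	}
	return i;
}

int main(void)
{
	int entries[] = { 1, -2, 3 };
	bool any_skipped = false;

	zap_entries(entries, 3, &any_skipped);
	/* A fully zapped table with nothing skipped could be freed. */
	printf("any_skipped = %d\n", any_skipped);
	return 0;
}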

Link: https://lkml.kernel.org/r/59f33ec9f74e9f058ed319b0bfadd76b0f7adf9b.1733305182.git.zhengqi.arch@bytedance.com
Signed-off-by: Qi Zheng <zhengqi.arch@bytedance.com>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Jann Horn <jannh@google.com>
Cc: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: Peter Xu <peterx@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Will Deacon <will@kernel.org>
Cc: Zach O'Keefe <zokeefe@google.com>
Cc: Dan Carpenter <dan.carpenter@linaro.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>

diff --git a/mm/memory.c b/mm/memory.c
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1501,7 +1501,7 @@ static __always_inline void zap_present_folio_ptes(struct mmu_gather *tlb,
 		struct vm_area_struct *vma, struct folio *folio,
 		struct page *page, pte_t *pte, pte_t ptent, unsigned int nr,
 		unsigned long addr, struct zap_details *details, int *rss,
-		bool *force_flush, bool *force_break)
+		bool *force_flush, bool *force_break, bool *any_skipped)
 {
 	struct mm_struct *mm = tlb->mm;
 	bool delay_rmap = false;
@@ -1527,8 +1527,8 @@ static __always_inline void zap_present_folio_ptes(struct mmu_gather *tlb,
 	arch_check_zapped_pte(vma, ptent);
 	tlb_remove_tlb_entries(tlb, pte, nr, addr);
 	if (unlikely(userfaultfd_pte_wp(vma, ptent)))
-		zap_install_uffd_wp_if_needed(vma, addr, pte, nr, details,
-					      ptent);
+		*any_skipped = zap_install_uffd_wp_if_needed(vma, addr, pte,
+							     nr, details, ptent);
 
 	if (!delay_rmap) {
 		folio_remove_rmap_ptes(folio, page, nr, vma);
@@ -1552,7 +1552,7 @@ static inline int zap_present_ptes(struct mmu_gather *tlb,
 		struct vm_area_struct *vma, pte_t *pte, pte_t ptent,
 		unsigned int max_nr, unsigned long addr,
 		struct zap_details *details, int *rss, bool *force_flush,
-		bool *force_break)
+		bool *force_break, bool *any_skipped)
 {
 	const fpb_t fpb_flags = FPB_IGNORE_DIRTY | FPB_IGNORE_SOFT_DIRTY;
 	struct mm_struct *mm = tlb->mm;
@@ -1567,15 +1567,17 @@ static inline int zap_present_ptes(struct mmu_gather *tlb,
 		arch_check_zapped_pte(vma, ptent);
 		tlb_remove_tlb_entry(tlb, pte, addr);
 		if (userfaultfd_pte_wp(vma, ptent))
-			zap_install_uffd_wp_if_needed(vma, addr, pte, 1,
-						      details, ptent);
+			*any_skipped = zap_install_uffd_wp_if_needed(vma, addr,
+						pte, 1, details, ptent);
 		ksm_might_unmap_zero_page(mm, ptent);
 		return 1;
 	}
 
 	folio = page_folio(page);
-	if (unlikely(!should_zap_folio(details, folio)))
+	if (unlikely(!should_zap_folio(details, folio))) {
+		*any_skipped = true;
 		return 1;
+	}
 
 	/*
 	 * Make sure that the common "small folio" case is as fast as possible
@@ -1587,22 +1589,23 @@ static inline int zap_present_ptes(struct mmu_gather *tlb,
 		zap_present_folio_ptes(tlb, vma, folio, page, pte, ptent, nr,
 				       addr, details, rss, force_flush,
-				       force_break);
+				       force_break, any_skipped);
 		return nr;
 	}
 
 	zap_present_folio_ptes(tlb, vma, folio, page, pte, ptent, 1, addr,
-			       details, rss, force_flush, force_break);
+			       details, rss, force_flush, force_break, any_skipped);
 	return 1;
 }
 
 static inline int zap_nonpresent_ptes(struct mmu_gather *tlb,
 		struct vm_area_struct *vma, pte_t *pte, pte_t ptent,
 		unsigned int max_nr, unsigned long addr,
-		struct zap_details *details, int *rss)
+		struct zap_details *details, int *rss, bool *any_skipped)
 {
 	swp_entry_t entry;
 	int nr = 1;
 
+	*any_skipped = true;
 	entry = pte_to_swp_entry(ptent);
 	if (is_device_private_entry(entry) ||
 	    is_device_exclusive_entry(entry)) {
@@ -1660,7 +1663,7 @@ static inline int zap_nonpresent_ptes(struct mmu_gather *tlb,
 		WARN_ON_ONCE(1);
 	}
 	clear_not_present_full_ptes(vma->vm_mm, addr, pte, nr, tlb->fullmm);
-	zap_install_uffd_wp_if_needed(vma, addr, pte, nr, details, ptent);
+	*any_skipped = zap_install_uffd_wp_if_needed(vma, addr, pte, nr, details, ptent);
 
 	return nr;
 }
@@ -1669,7 +1672,8 @@ static inline int do_zap_pte_range(struct mmu_gather *tlb,
 		struct vm_area_struct *vma, pte_t *pte,
 		unsigned long addr, unsigned long end,
 		struct zap_details *details, int *rss,
-		bool *force_flush, bool *force_break)
+		bool *force_flush, bool *force_break,
+		bool *any_skipped)
 {
 	pte_t ptent = ptep_get(pte);
 	int max_nr = (end - addr) / PAGE_SIZE;
@@ -1691,10 +1695,11 @@ static inline int do_zap_pte_range(struct mmu_gather *tlb,
 
 	if (pte_present(ptent))
 		nr += zap_present_ptes(tlb, vma, pte, ptent, max_nr, addr,
-				       details, rss, force_flush, force_break);
+				       details, rss, force_flush, force_break,
+				       any_skipped);
 	else
 		nr += zap_nonpresent_ptes(tlb, vma, pte, ptent, max_nr, addr,
-					  details, rss);
+					  details, rss, any_skipped);
 
 	return nr;
 }
@@ -1705,6 +1710,7 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
 		struct zap_details *details)
 {
 	bool force_flush = false, force_break = false;
+	bool any_skipped = false;
 	struct mm_struct *mm = tlb->mm;
 	int rss[NR_MM_COUNTERS];
 	spinlock_t *ptl;
@@ -1725,7 +1731,7 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
 			break;
 
 		nr = do_zap_pte_range(tlb, vma, pte, addr, end, details, rss,
-				      &force_flush, &force_break);
+				      &force_flush, &force_break, &any_skipped);
 		if (unlikely(force_break)) {
 			addr += nr * PAGE_SIZE;
 			break;