mm: use for_each_valid_pfn() in memory_hotplug

Link: https://lkml.kernel.org/r/20250423133821.789413-7-dwmw2@infradead.org
Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
Acked-by: Mike Rapoport (Microsoft) <rppt@kernel.org>
Acked-by: David Hildenbrand <david@redhat.com>
Cc: Anshuman Khandual <anshuman.khandual@arm.com>
Cc: Ard Biesheuvel <ardb@kernel.org>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Marc Zyngier <maz@kernel.org>
Cc: Ruihan Li <lrh2000@pku.edu.cn>
Cc: Will Deacon <will@kernel.org>
Cc: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>

--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -1756,12 +1756,10 @@ static int scan_movable_pages(unsigned long start, unsigned long end,
 {
 	unsigned long pfn;
 
-	for (pfn = start; pfn < end; pfn++) {
+	for_each_valid_pfn(pfn, start, end) {
 		struct page *page;
 		struct folio *folio;
 
-		if (!pfn_valid(pfn))
-			continue;
 		page = pfn_to_page(pfn);
 		if (PageLRU(page))
 			goto found;
@@ -1805,11 +1803,9 @@ static void do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
 	static DEFINE_RATELIMIT_STATE(migrate_rs, DEFAULT_RATELIMIT_INTERVAL,
 				      DEFAULT_RATELIMIT_BURST);
 
-	for (pfn = start_pfn; pfn < end_pfn; pfn++) {
+	for_each_valid_pfn(pfn, start_pfn, end_pfn) {
 		struct page *page;
 
-		if (!pfn_valid(pfn))
-			continue;
 		page = pfn_to_page(pfn);
 		folio = page_folio(page);
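
For readers unfamiliar with the helper, the sketch below is a stand-alone, user-space model of the loop shape this patch switches to: for_each_valid_pfn(pfn, start, end) visits only the PFNs in [start, end) for which pfn_valid() returns true, so callers no longer open-code the pfn_valid() check. The pfn_valid() stub, the fake memory hole and the macro body here are assumptions for illustration, not the kernel's implementation, which is introduced earlier in this series and, on configurations that support it, can step over whole invalid ranges rather than testing every PFN.

/*
 * User-space model of the conversion above; NOT kernel code. pfn_valid(),
 * the hole layout and the macro body are illustrative stand-ins only.
 */
#include <stdbool.h>
#include <stdio.h>

/* Pretend PFNs 8..15 are a hole with no memmap; everything else is valid. */
static bool pfn_valid(unsigned long pfn)
{
	return pfn < 8 || pfn >= 16;
}

/*
 * Naive filtering helper: visit only valid PFNs in [start, end).
 * The real for_each_valid_pfn() can do better than testing each PFN.
 */
#define for_each_valid_pfn(pfn, start, end)		\
	for ((pfn) = (start); (pfn) < (end); (pfn)++)	\
		if (!pfn_valid(pfn)) {} else

int main(void)
{
	unsigned long pfn;

	/* Old shape, as removed by the diff: open-coded skip. */
	for (pfn = 0; pfn < 20; pfn++) {
		if (!pfn_valid(pfn))
			continue;
		printf("old loop visits pfn %lu\n", pfn);
	}

	/* New shape: the helper owns the validity check. */
	for_each_valid_pfn(pfn, 0, 20)
		printf("new loop visits pfn %lu\n", pfn);

	return 0;
}

Both loops in the model visit the same PFNs (0..7 and 16..19), which mirrors why the conversion in scan_movable_pages() and do_migrate_range() is behaviour-preserving while letting the helper centralise the validity test.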