mirror of
git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
synced 2025-08-05 16:54:27 +00:00

In the FLATMEM case, the default pfn_valid() just checks that the PFN is within the range [ ARCH_PFN_OFFSET .. ARCH_PFN_OFFSET + max_mapnr ). The for_each_valid_pfn() function can therefore be a simple for() loop using those as min/max respectively. Link: https://lkml.kernel.org/r/20250423133821.789413-3-dwmw2@infradead.org Signed-off-by: David Woodhouse <dwmw@amazon.co.uk> Reviewed-by: Mike Rapoport (Microsoft) <rppt@kernel.org> Acked-by: David Hildenbrand <david@redhat.com> Cc: Anshuman Khandual <anshuman.khandual@arm.com> Cc: Ard Biesheuvel <ardb@kernel.org> Cc: Catalin Marinas <catalin.marinas@arm.com> Cc: Mark Rutland <mark.rutland@arm.com> Cc: Marc Zyngier <maz@kernel.org> Cc: Ruihan Li <lrh2000@pku.edu.cn> Cc: Will Deacon <will@kernel.org> Cc: Lorenzo Stoakes <lorenzo.stoakes@oracle.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
91 lines
2.3 KiB
C
/* SPDX-License-Identifier: GPL-2.0 */
|
|
#ifndef __ASM_MEMORY_MODEL_H
|
|
#define __ASM_MEMORY_MODEL_H
|
|
|
|
#include <linux/pfn.h>
|
|
|
|
#ifndef __ASSEMBLY__
|
|
|
|
/*
 * Supports the three memory models: FLATMEM, SPARSEMEM_VMEMMAP
 * and SPARSEMEM.
 */
|
|
#if defined(CONFIG_FLATMEM)

#ifndef ARCH_PFN_OFFSET
#define ARCH_PFN_OFFSET		(0UL)
#endif

/* mem_map is one flat array covering PFNs starting at ARCH_PFN_OFFSET. */
#define __pfn_to_page(pfn)	(mem_map + ((pfn) - ARCH_PFN_OFFSET))
#define __page_to_pfn(page)	((unsigned long)((page) - mem_map) + \
				 ARCH_PFN_OFFSET)

/* avoid <linux/mm.h> include hell */
extern unsigned long max_mapnr;

#ifndef pfn_valid
/*
 * A PFN is valid iff it lies within the single flat memmap, i.e. in
 * [ ARCH_PFN_OFFSET .. ARCH_PFN_OFFSET + max_mapnr ).
 */
static inline int pfn_valid(unsigned long pfn)
{
	unsigned long pfn_offset = ARCH_PFN_OFFSET;

	return pfn >= pfn_offset && (pfn - pfn_offset) < max_mapnr;
}
#define pfn_valid pfn_valid

#ifndef for_each_valid_pfn
/*
 * Since every PFN in the range above is valid, the walk is a plain
 * for() loop clamped to that range.
 */
#define for_each_valid_pfn(pfn, start_pfn, end_pfn)			 \
	for ((pfn) = max_t(unsigned long, (start_pfn), ARCH_PFN_OFFSET); \
	     (pfn) < min_t(unsigned long, (end_pfn),			 \
			   ARCH_PFN_OFFSET + max_mapnr);		 \
	     (pfn)++)
#endif /* for_each_valid_pfn */
#endif /* pfn_valid */
|
|
|
|
#elif defined(CONFIG_SPARSEMEM_VMEMMAP)

/*
 * The struct page array is mapped virtually contiguous at 'vmemmap',
 * so PFN <-> page conversion is plain pointer arithmetic.
 */
#define __page_to_pfn(pg)	(unsigned long)((pg) - vmemmap)
#define __pfn_to_page(nr)	(vmemmap + (nr))
|
|
|
|
#elif defined(CONFIG_SPARSEMEM)
/*
 * Each section's mem_map pointer is stored pre-biased by the section's
 * start_pfn (section[i].section_mem_map == mem_map's address - start_pfn),
 * so the absolute PFN can be added to, or subtracted from, it directly.
 */
#define __page_to_pfn(pg)						\
({	const struct page *__p = (pg);					\
	int __s = page_to_section(__p);					\
	(unsigned long)(__p - __section_mem_map_addr(__nr_to_section(__s))); \
})

#define __pfn_to_page(pfn)						\
({	unsigned long __nr = (pfn);					\
	struct mem_section *__ms = __pfn_to_section(__nr);		\
	__section_mem_map_addr(__ms) + __nr;				\
})
#endif /* CONFIG_FLATMEM/SPARSEMEM */
|
|
|
|
/*
 * Physical address <-> Page Frame Number, thin wrappers around the
 * <linux/pfn.h> helpers.
 */
#define __phys_to_pfn(addr)	PHYS_PFN(addr)
#define __pfn_to_phys(nr)	PFN_PHYS(nr)

/* Expose the model-specific converters under the generic names. */
#define page_to_pfn __page_to_pfn
#define pfn_to_page __pfn_to_page
|
|
|
|
#ifdef CONFIG_DEBUG_VIRTUAL
/* Debug variant: warn (once) if the page's PFN fails pfn_valid(). */
#define page_to_phys(page)						\
({									\
	unsigned long __nr = page_to_pfn(page);				\
									\
	WARN_ON_ONCE(!pfn_valid(__nr));					\
	PFN_PHYS(__nr);							\
})
#else
#define page_to_phys(page)	PFN_PHYS(page_to_pfn(page))
#endif /* CONFIG_DEBUG_VIRTUAL */
#define phys_to_page(phys)	pfn_to_page(PHYS_PFN(phys))
|
|
|
|
#endif /* __ASSEMBLY__ */
|
|
|
|
#endif
|