arch: remove mk_pmd()
There are now no callers of mk_huge_pmd() and mk_pmd().  Remove them.

Link: https://lkml.kernel.org/r/20250402181709.2386022-12-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Zi Yan <ziy@nvidia.com>
Cc: Alexander Gordeev <agordeev@linux.ibm.com>
Cc: Andreas Larsson <andreas@gaisler.com>
Cc: Anton Ivanov <anton.ivanov@cambridgegreys.com>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Geert Uytterhoeven <geert@linux-m68k.org>
Cc: Johannes Berg <johannes@sipsolutions.net>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: Richard Weinberger <richard@nod.at>
Cc: <x86@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
This commit is contained in:
parent e3981db444
commit 5071ea3d7b

16 changed files with 0 additions and 51 deletions
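Every per-architecture mk_pmd() removed below reduces to the same operation: put the page frame number in place and OR in the protection bits. For orientation, here is a minimal sketch of a folio-based equivalent composed only from pfn_pmd() and pmd_mkhuge() where those helpers are available; the name folio_mk_pmd() and its placement are assumptions, not something shown on this page.

/*
 * Sketch only, not part of this commit: a folio-based replacement for
 * the removed mk_pmd()/mk_huge_pmd().  The helper name folio_mk_pmd()
 * is an assumption.
 */
#include <linux/mm.h>
#include <linux/pgtable.h>

static inline pmd_t folio_mk_pmd(struct folio *folio, pgprot_t pgprot)
{
	/* pfn_pmd() encodes pfn and protection; pmd_mkhuge() marks it huge. */
	return pmd_mkhuge(pfn_pmd(folio_pfn(folio), pgprot));
}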
@@ -40,8 +40,6 @@ static inline pmd_t pte_pmd(pte_t pte)
 #define pmd_young(pmd)	pte_young(pmd_pte(pmd))
 #define pmd_dirty(pmd)	pte_dirty(pmd_pte(pmd))
 
-#define mk_pmd(page, prot)	pte_pmd(mk_pte(page, prot))
-
 #define pmd_trans_huge(pmd)	(pmd_val(pmd) & _PAGE_HW_SZ)
 
 #define pfn_pmd(pfn, prot)	(__pmd(((pfn) << PAGE_SHIFT) | pgprot_val(prot)))
@@ -142,7 +142,6 @@
 
 #define pmd_pfn(pmd)		((pmd_val(pmd) & PMD_MASK) >> PAGE_SHIFT)
 #define pfn_pmd(pfn,prot)	__pmd(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
-#define mk_pmd(page,prot)	pfn_pmd(page_to_pfn(page),prot)
 
 #endif
 
@@ -209,7 +209,6 @@ PMD_BIT_FUNC(mkyoung, |= PMD_SECT_AF);
 
 #define pmd_pfn(pmd)		(((pmd_val(pmd) & PMD_MASK) & PHYS_MASK) >> PAGE_SHIFT)
 #define pfn_pmd(pfn,prot)	(__pmd(((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot)))
-#define mk_pmd(page,prot)	pfn_pmd(page_to_pfn(page),prot)
 
 /* No hardware dirty/accessed bits -- generic_pmdp_establish() fits */
 #define pmdp_establish generic_pmdp_establish
@@ -609,7 +609,6 @@ static inline pmd_t pmd_mkspecial(pmd_t pmd)
 #define __phys_to_pmd_val(phys)	__phys_to_pte_val(phys)
 #define pmd_pfn(pmd)		((__pmd_to_phys(pmd) & PMD_MASK) >> PAGE_SHIFT)
 #define pfn_pmd(pfn,prot)	__pmd(__phys_to_pmd_val((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot))
-#define mk_pmd(page,prot)	pfn_pmd(page_to_pfn(page),prot)
 
 #define pud_young(pud)		pte_young(pud_pte(pud))
 #define pud_mkyoung(pud)	pte_pud(pte_mkyoung(pud_pte(pud)))
@@ -255,7 +255,6 @@ static inline void pmd_clear(pmd_t *pmdp)
 
 #define pmd_page_vaddr(pmd)	pmd_val(pmd)
 
-extern pmd_t mk_pmd(struct page *page, pgprot_t prot);
 extern void set_pmd_at(struct mm_struct *mm, unsigned long addr, pmd_t *pmdp, pmd_t pmd);
 
 #define pte_page(x)		pfn_to_page(pte_pfn(x))
@@ -135,15 +135,6 @@ void kernel_pte_init(void *addr)
 	} while (p != end);
 }
 
-pmd_t mk_pmd(struct page *page, pgprot_t prot)
-{
-	pmd_t pmd;
-
-	pmd_val(pmd) = (page_to_pfn(page) << PFN_PTE_SHIFT) | pgprot_val(prot);
-
-	return pmd;
-}
-
 void set_pmd_at(struct mm_struct *mm, unsigned long addr,
 		pmd_t *pmdp, pmd_t pmd)
 {
@@ -713,9 +713,6 @@ static inline pmd_t pmd_clear_soft_dirty(pmd_t pmd)
 
 #endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */
 
-/* Extern to avoid header file madness */
-extern pmd_t mk_pmd(struct page *page, pgprot_t prot);
-
 static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
 {
 	pmd_val(pmd) = (pmd_val(pmd) & (_PAGE_CHG_MASK | _PAGE_HUGE)) |
@@ -31,16 +31,6 @@ void pgd_init(void *addr)
 }
 
 #if defined(CONFIG_TRANSPARENT_HUGEPAGE)
-pmd_t mk_pmd(struct page *page, pgprot_t prot)
-{
-	pmd_t pmd;
-
-	pmd_val(pmd) = (page_to_pfn(page) << PFN_PTE_SHIFT) | pgprot_val(prot);
-
-	return pmd;
-}
-
-
 void set_pmd_at(struct mm_struct *mm, unsigned long addr,
 		pmd_t *pmdp, pmd_t pmd)
 {
@@ -90,15 +90,6 @@ void pud_init(void *addr)
 #endif
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
-pmd_t mk_pmd(struct page *page, pgprot_t prot)
-{
-	pmd_t pmd;
-
-	pmd_val(pmd) = (page_to_pfn(page) << PFN_PTE_SHIFT) | pgprot_val(prot);
-
-	return pmd;
-}
-
 void set_pmd_at(struct mm_struct *mm, unsigned long addr,
 		pmd_t *pmdp, pmd_t pmd)
 {
@@ -1096,7 +1096,6 @@ static inline bool pmd_access_permitted(pmd_t pmd, bool write)
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 extern pmd_t pfn_pmd(unsigned long pfn, pgprot_t pgprot);
 extern pud_t pfn_pud(unsigned long pfn, pgprot_t pgprot);
-extern pmd_t mk_pmd(struct page *page, pgprot_t pgprot);
 extern pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot);
 extern pud_t pud_modify(pud_t pud, pgprot_t newprot);
 extern void set_pmd_at(struct mm_struct *mm, unsigned long addr,
@@ -269,11 +269,6 @@ pud_t pfn_pud(unsigned long pfn, pgprot_t pgprot)
 	return __pud_mkhuge(pud_set_protbits(__pud(pudv), pgprot));
 }
 
-pmd_t mk_pmd(struct page *page, pgprot_t pgprot)
-{
-	return pfn_pmd(page_to_pfn(page), pgprot);
-}
-
 pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
 {
 	unsigned long pmdv;
@@ -262,8 +262,6 @@ static inline unsigned long _pmd_pfn(pmd_t pmd)
 	return __page_val_to_pfn(pmd_val(pmd));
 }
 
-#define mk_pmd(page, prot)	pfn_pmd(page_to_pfn(page), prot)
-
 #define pmd_ERROR(e) \
 	pr_err("%s:%d: bad pmd %016lx.\n", __FILE__, __LINE__, pmd_val(e))
 
@@ -1869,7 +1869,6 @@ static inline pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
 #define pmdp_collapse_flush pmdp_collapse_flush
 
 #define pfn_pmd(pfn, pgprot)	mk_pmd_phys(((pfn) << PAGE_SHIFT), (pgprot))
-#define mk_pmd(page, pgprot)	pfn_pmd(page_to_pfn(page), (pgprot))
 
 static inline int pmd_trans_huge(pmd_t pmd)
 {
@@ -233,7 +233,6 @@ static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
 
 	return __pmd(pte_val(pte));
 }
-#define mk_pmd(page, pgprot)	pfn_pmd(page_to_pfn(page), (pgprot))
 #endif
 
 /* This one can be done with two shifts. */
@@ -1347,8 +1347,6 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm,
 
 #define flush_tlb_fix_spurious_fault(vma, address, ptep) do { } while (0)
 
-#define mk_pmd(page, pgprot)	pfn_pmd(page_to_pfn(page), (pgprot))
-
 #define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
 extern int pmdp_set_access_flags(struct vm_area_struct *vma,
 				 unsigned long address, pmd_t *pmdp,
@@ -495,8 +495,6 @@ static inline bool is_huge_zero_pmd(pmd_t pmd)
 struct folio *mm_get_huge_zero_folio(struct mm_struct *mm);
 void mm_put_huge_zero_folio(struct mm_struct *mm);
 
-#define mk_huge_pmd(page, prot) pmd_mkhuge(mk_pmd(page, prot))
-
 static inline bool thp_migration_supported(void)
 {
 	return IS_ENABLED(CONFIG_ARCH_ENABLE_THP_MIGRATION);
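The final hunk drops the last generic wrapper, mk_huge_pmd(). A hedged caller-side sketch of the before/after shape follows; the callers themselves are not part of this page, and folio_mk_pmd() is the same assumed helper as in the sketch above.

/* Illustrative caller only, not taken from this diff. */
#include <linux/mm.h>
#include <linux/pgtable.h>

static void example_map_huge(struct vm_area_struct *vma, struct folio *folio,
			     pmd_t *pmdp, unsigned long addr)
{
	pmd_t entry;

	/* Old shape, no longer possible: the page-based macro is gone. */
	/* entry = mk_huge_pmd(&folio->page, vma->vm_page_prot); */

	/* New shape: build the huge PMD from the folio directly. */
	entry = folio_mk_pmd(folio, vma->vm_page_prot);
	set_pmd_at(vma->vm_mm, addr, pmdp, entry);
}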