Mirror of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git (synced 2025-08-05 16:54:27 +00:00)
mm/treewide: rename CONFIG_HAVE_FAST_GUP to CONFIG_HAVE_GUP_FAST
Nowadays, we call it "GUP-fast", the external interface includes functions like
"get_user_pages_fast()", and we renamed all internal functions to reflect that
as well.

Let's make the config option reflect that.

Link: https://lkml.kernel.org/r/20240402125516.223131-3-david@redhat.com
Signed-off-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Mike Rapoport (IBM) <rppt@kernel.org>
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Reviewed-by: John Hubbard <jhubbard@nvidia.com>
Cc: Peter Xu <peterx@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Parent: 23babe1934
Commit: 25176ad09c
14 changed files with 22 additions and 22 deletions
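For context, the external interface mentioned in the commit message is the get_user_pages_fast() family. Below is a minimal, hypothetical sketch (not taken from this commit; the helper name and the buffer handling are illustrative only) of how kernel code pins user pages through that interface. On architectures that do not select CONFIG_HAVE_GUP_FAST, the call simply takes the slower, mmap_lock-based GUP path internally.

/*
 * Hypothetical helper (illustrative only, not from this commit): pin a
 * writable user buffer via the GUP-fast external interface, then drop
 * the page references again.
 */
#include <linux/mm.h>

static int example_pin_user_buffer(unsigned long uaddr, int nr_pages,
				   struct page **pages)
{
	int pinned;

	/* Lockless attempt first; falls back internally if needed. */
	pinned = get_user_pages_fast(uaddr, nr_pages, FOLL_WRITE, pages);
	if (pinned < 0)
		return pinned;

	/* ... access the pinned pages here ... */

	while (pinned--)
		put_page(pages[pinned]);

	return 0;
}

The rename therefore only changes which architectures advertise the lockless fast path; callers of the external interface are unaffected.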
@@ -99,7 +99,7 @@ config ARM
 	select HAVE_DYNAMIC_FTRACE_WITH_REGS if HAVE_DYNAMIC_FTRACE
 	select HAVE_EFFICIENT_UNALIGNED_ACCESS if (CPU_V6 || CPU_V6K || CPU_V7) && MMU
 	select HAVE_EXIT_THREAD
-	select HAVE_FAST_GUP if ARM_LPAE
+	select HAVE_GUP_FAST if ARM_LPAE
 	select HAVE_FTRACE_MCOUNT_RECORD if !XIP_KERNEL
 	select HAVE_FUNCTION_ERROR_INJECTION
 	select HAVE_FUNCTION_GRAPH_TRACER

@@ -205,7 +205,7 @@ config ARM64
 	select HAVE_SAMPLE_FTRACE_DIRECT
 	select HAVE_SAMPLE_FTRACE_DIRECT_MULTI
 	select HAVE_EFFICIENT_UNALIGNED_ACCESS
-	select HAVE_FAST_GUP
+	select HAVE_GUP_FAST
 	select HAVE_FTRACE_MCOUNT_RECORD
 	select HAVE_FUNCTION_TRACER
 	select HAVE_FUNCTION_ERROR_INJECTION

@@ -119,7 +119,7 @@ config LOONGARCH
 	select HAVE_EBPF_JIT
 	select HAVE_EFFICIENT_UNALIGNED_ACCESS if !ARCH_STRICT_ALIGN
 	select HAVE_EXIT_THREAD
-	select HAVE_FAST_GUP
+	select HAVE_GUP_FAST
 	select HAVE_FTRACE_MCOUNT_RECORD
 	select HAVE_FUNCTION_ARG_ACCESS_API
 	select HAVE_FUNCTION_ERROR_INJECTION

@@ -68,7 +68,7 @@ config MIPS
 	select HAVE_DYNAMIC_FTRACE
 	select HAVE_EBPF_JIT if !CPU_MICROMIPS
 	select HAVE_EXIT_THREAD
-	select HAVE_FAST_GUP
+	select HAVE_GUP_FAST
 	select HAVE_FTRACE_MCOUNT_RECORD
 	select HAVE_FUNCTION_GRAPH_TRACER
 	select HAVE_FUNCTION_TRACER

@@ -236,7 +236,7 @@ config PPC
 	select HAVE_DYNAMIC_FTRACE_WITH_REGS if ARCH_USING_PATCHABLE_FUNCTION_ENTRY || MPROFILE_KERNEL || PPC32
 	select HAVE_EBPF_JIT
 	select HAVE_EFFICIENT_UNALIGNED_ACCESS
-	select HAVE_FAST_GUP
+	select HAVE_GUP_FAST
 	select HAVE_FTRACE_MCOUNT_RECORD
 	select HAVE_FUNCTION_ARG_ACCESS_API
 	select HAVE_FUNCTION_DESCRIPTORS if PPC64_ELF_ABI_V1

@@ -132,7 +132,7 @@ config RISCV
 	select HAVE_FUNCTION_GRAPH_RETVAL if HAVE_FUNCTION_GRAPH_TRACER
 	select HAVE_FUNCTION_TRACER if !XIP_KERNEL && !PREEMPTION
 	select HAVE_EBPF_JIT if MMU
-	select HAVE_FAST_GUP if MMU
+	select HAVE_GUP_FAST if MMU
 	select HAVE_FUNCTION_ARG_ACCESS_API
 	select HAVE_FUNCTION_ERROR_INJECTION
 	select HAVE_GCC_PLUGINS

@@ -174,7 +174,7 @@ config S390
 	select HAVE_DYNAMIC_FTRACE_WITH_REGS
 	select HAVE_EBPF_JIT if HAVE_MARCH_Z196_FEATURES
 	select HAVE_EFFICIENT_UNALIGNED_ACCESS
-	select HAVE_FAST_GUP
+	select HAVE_GUP_FAST
 	select HAVE_FENTRY
 	select HAVE_FTRACE_MCOUNT_RECORD
 	select HAVE_FUNCTION_ARG_ACCESS_API

@@ -38,7 +38,7 @@ config SUPERH
 	select HAVE_DEBUG_BUGVERBOSE
 	select HAVE_DEBUG_KMEMLEAK
 	select HAVE_DYNAMIC_FTRACE
-	select HAVE_FAST_GUP if MMU
+	select HAVE_GUP_FAST if MMU
 	select HAVE_FUNCTION_GRAPH_TRACER
 	select HAVE_FUNCTION_TRACER
 	select HAVE_FTRACE_MCOUNT_RECORD

@@ -221,7 +221,7 @@ config X86
 	select HAVE_EFFICIENT_UNALIGNED_ACCESS
 	select HAVE_EISA
 	select HAVE_EXIT_THREAD
-	select HAVE_FAST_GUP
+	select HAVE_GUP_FAST
 	select HAVE_FENTRY if X86_64 || DYNAMIC_FTRACE
 	select HAVE_FTRACE_MCOUNT_RECORD
 	select HAVE_FUNCTION_GRAPH_RETVAL if HAVE_FUNCTION_GRAPH_TRACER

@@ -284,7 +284,7 @@ static inline int hugetlb_try_share_anon_rmap(struct folio *folio)
 	VM_WARN_ON_FOLIO(!PageAnonExclusive(&folio->page), folio);

 	/* Paired with the memory barrier in try_grab_folio(). */
-	if (IS_ENABLED(CONFIG_HAVE_FAST_GUP))
+	if (IS_ENABLED(CONFIG_HAVE_GUP_FAST))
 		smp_mb();

 	if (unlikely(folio_maybe_dma_pinned(folio)))

@@ -295,7 +295,7 @@ static inline int hugetlb_try_share_anon_rmap(struct folio *folio)
 	 * This is conceptually a smp_wmb() paired with the smp_rmb() in
 	 * gup_must_unshare().
 	 */
-	if (IS_ENABLED(CONFIG_HAVE_FAST_GUP))
+	if (IS_ENABLED(CONFIG_HAVE_GUP_FAST))
 		smp_mb__after_atomic();
 	return 0;
 }

@@ -541,7 +541,7 @@ static __always_inline int __folio_try_share_anon_rmap(struct folio *folio,
 	 */

 	/* Paired with the memory barrier in try_grab_folio(). */
-	if (IS_ENABLED(CONFIG_HAVE_FAST_GUP))
+	if (IS_ENABLED(CONFIG_HAVE_GUP_FAST))
 		smp_mb();

 	if (unlikely(folio_maybe_dma_pinned(folio)))

@@ -552,7 +552,7 @@ static __always_inline int __folio_try_share_anon_rmap(struct folio *folio,
 	 * This is conceptually a smp_wmb() paired with the smp_rmb() in
 	 * gup_must_unshare().
 	 */
-	if (IS_ENABLED(CONFIG_HAVE_FAST_GUP))
+	if (IS_ENABLED(CONFIG_HAVE_GUP_FAST))
 		smp_mb__after_atomic();
 	return 0;
 }

@@ -7539,7 +7539,7 @@ static u64 perf_get_pgtable_size(struct mm_struct *mm, unsigned long addr)
 {
 	u64 size = 0;

-#ifdef CONFIG_HAVE_FAST_GUP
+#ifdef CONFIG_HAVE_GUP_FAST
 	pgd_t *pgdp, pgd;
 	p4d_t *p4dp, p4d;
 	pud_t *pudp, pud;

@@ -7587,7 +7587,7 @@ again:
 	if (pte_present(pte))
 		size = pte_leaf_size(pte);
 	pte_unmap(ptep);
-#endif /* CONFIG_HAVE_FAST_GUP */
+#endif /* CONFIG_HAVE_GUP_FAST */

 	return size;
 }

@@ -473,7 +473,7 @@ config ARCH_WANT_OPTIMIZE_HUGETLB_VMEMMAP
 config HAVE_MEMBLOCK_PHYS_MAP
 	bool

-config HAVE_FAST_GUP
+config HAVE_GUP_FAST
 	depends on MMU
 	bool

mm/gup.c (10 changed lines)
@@ -501,7 +501,7 @@ static inline void mm_set_has_pinned_flag(unsigned long *mm_flags)

 #ifdef CONFIG_MMU

-#if defined(CONFIG_ARCH_HAS_HUGEPD) || defined(CONFIG_HAVE_FAST_GUP)
+#if defined(CONFIG_ARCH_HAS_HUGEPD) || defined(CONFIG_HAVE_GUP_FAST)
 static int record_subpages(struct page *page, unsigned long sz,
 			   unsigned long addr, unsigned long end,
 			   struct page **pages)

@@ -515,7 +515,7 @@ static int record_subpages(struct page *page, unsigned long sz,

 	return nr;
 }
-#endif /* CONFIG_ARCH_HAS_HUGEPD || CONFIG_HAVE_FAST_GUP */
+#endif /* CONFIG_ARCH_HAS_HUGEPD || CONFIG_HAVE_GUP_FAST */

 #ifdef CONFIG_ARCH_HAS_HUGEPD
 static unsigned long hugepte_addr_end(unsigned long addr, unsigned long end,

@@ -2782,7 +2782,7 @@ EXPORT_SYMBOL(get_user_pages_unlocked);
  *
  * This code is based heavily on the PowerPC implementation by Nick Piggin.
  */
-#ifdef CONFIG_HAVE_FAST_GUP
+#ifdef CONFIG_HAVE_GUP_FAST

 /*
  * Used in the GUP-fast path to determine whether GUP is permitted to work on

@@ -3361,7 +3361,7 @@ static inline void gup_fast_pgd_range(unsigned long addr, unsigned long end,
 		unsigned int flags, struct page **pages, int *nr)
 {
 }
-#endif /* CONFIG_HAVE_FAST_GUP */
+#endif /* CONFIG_HAVE_GUP_FAST */

 #ifndef gup_fast_permitted
 /*

@@ -3381,7 +3381,7 @@ static unsigned long gup_fast(unsigned long start, unsigned long end,
 	int nr_pinned = 0;
 	unsigned seq;

-	if (!IS_ENABLED(CONFIG_HAVE_FAST_GUP) ||
+	if (!IS_ENABLED(CONFIG_HAVE_GUP_FAST) ||
 	    !gup_fast_permitted(start, end))
 		return 0;

@@ -1265,7 +1265,7 @@ static inline bool gup_must_unshare(struct vm_area_struct *vma,
 	}

 	/* Paired with a memory barrier in folio_try_share_anon_rmap_*(). */
-	if (IS_ENABLED(CONFIG_HAVE_FAST_GUP))
+	if (IS_ENABLED(CONFIG_HAVE_GUP_FAST))
 		smp_rmb();

 	/*