mirror of
git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
synced 2025-04-13 09:59:31 +00:00

There are two main use cases for mmu notifiers. One is by KVM which uses mmu_notifier_invalidate_range_start()/end() to manage a software TLB. The other is to manage hardware TLBs which need to use the invalidate_range() callback because HW can establish new TLB entries at any time. Hence using start/end() can lead to memory corruption as these callbacks happen too soon/late during page unmap. mmu notifier users should therefore either use the start()/end() callbacks or the invalidate_range() callbacks. To make this usage clearer rename the invalidate_range() callback to arch_invalidate_secondary_tlbs() and update documentation. Link: https://lkml.kernel.org/r/6f77248cd25545c8020a54b4e567e8b72be4dca1.1690292440.git-series.apopple@nvidia.com Signed-off-by: Alistair Popple <apopple@nvidia.com> Suggested-by: Jason Gunthorpe <jgg@nvidia.com> Acked-by: Catalin Marinas <catalin.marinas@arm.com> Reviewed-by: Jason Gunthorpe <jgg@nvidia.com> Cc: Andrew Donnellan <ajd@linux.ibm.com> Cc: Chaitanya Kumar Borah <chaitanya.kumar.borah@intel.com> Cc: Frederic Barrat <fbarrat@linux.ibm.com> Cc: Jason Gunthorpe <jgg@ziepe.ca> Cc: John Hubbard <jhubbard@nvidia.com> Cc: Kevin Tian <kevin.tian@intel.com> Cc: Michael Ellerman <mpe@ellerman.id.au> Cc: Nicholas Piggin <npiggin@gmail.com> Cc: Nicolin Chen <nicolinc@nvidia.com> Cc: Robin Murphy <robin.murphy@arm.com> Cc: Sean Christopherson <seanjc@google.com> Cc: SeongJae Park <sj@kernel.org> Cc: Tvrtko Ursulin <tvrtko.ursulin@linux.intel.com> Cc: Will Deacon <will@kernel.org> Cc: Zhi Wang <zhi.wang.linux@gmail.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
62 lines
1.8 KiB
C
62 lines
1.8 KiB
C
// SPDX-License-Identifier: GPL-2.0
|
|
#include <linux/mm.h>
|
|
#include <linux/hugetlb.h>
|
|
#include <linux/security.h>
|
|
#include <asm/cacheflush.h>
|
|
#include <asm/machdep.h>
|
|
#include <asm/mman.h>
|
|
#include <asm/tlb.h>
|
|
|
|
void radix__flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
|
|
{
|
|
int psize;
|
|
struct hstate *hstate = hstate_file(vma->vm_file);
|
|
|
|
psize = hstate_get_psize(hstate);
|
|
radix__flush_tlb_page_psize(vma->vm_mm, vmaddr, psize);
|
|
}
|
|
|
|
void radix__local_flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
|
|
{
|
|
int psize;
|
|
struct hstate *hstate = hstate_file(vma->vm_file);
|
|
|
|
psize = hstate_get_psize(hstate);
|
|
radix__local_flush_tlb_page_psize(vma->vm_mm, vmaddr, psize);
|
|
}
|
|
|
|
void radix__flush_hugetlb_tlb_range(struct vm_area_struct *vma, unsigned long start,
|
|
unsigned long end)
|
|
{
|
|
int psize;
|
|
struct hstate *hstate = hstate_file(vma->vm_file);
|
|
|
|
psize = hstate_get_psize(hstate);
|
|
/*
|
|
* Flush PWC even if we get PUD_SIZE hugetlb invalidate to keep this simpler.
|
|
*/
|
|
if (end - start >= PUD_SIZE)
|
|
radix__flush_tlb_pwc_range_psize(vma->vm_mm, start, end, psize);
|
|
else
|
|
radix__flush_tlb_range_psize(vma->vm_mm, start, end, psize);
|
|
mmu_notifier_arch_invalidate_secondary_tlbs(vma->vm_mm, start, end);
|
|
}
|
|
|
|
/*
 * Commit a new hugetlb PTE after a modify_prot transaction.
 *
 * Fix: the function caches vma->vm_mm in @mm but the original then
 * re-read vma->vm_mm for set_huge_pte_at(); use the local consistently.
 */
void radix__huge_ptep_modify_prot_commit(struct vm_area_struct *vma,
					 unsigned long addr, pte_t *ptep,
					 pte_t old_pte, pte_t pte)
{
	struct mm_struct *mm = vma->vm_mm;

	/*
	 * POWER9 NMMU must flush the TLB after clearing the PTE before
	 * installing a PTE with more relaxed access permissions, see
	 * radix__ptep_set_access_flags.
	 */
	if (!cpu_has_feature(CPU_FTR_ARCH_31) &&
	    is_pte_rw_upgrade(pte_val(old_pte), pte_val(pte)) &&
	    atomic_read(&mm->context.copros) > 0)
		radix__flush_hugetlb_page(vma, addr);

	set_huge_pte_at(mm, addr, ptep, pte);
}
|