mm: move FAULT_FLAG_VMA_LOCK check down from do_fault()

Perform the check at the start of do_read_fault(), do_cow_fault() and
do_shared_fault() instead.  There should be no performance change from the
previous commit.
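
As a minimal sketch of the pattern (not code from this patch; the helper
name bail_if_vma_locked and its comment are illustrative only), each of the
three handlers now opens with the same bail-out, dropping the per-VMA read
lock and asking the caller to retry the fault under the mmap_lock:

	static vm_fault_t bail_if_vma_locked(struct vm_fault *vmf)
	{
		/*
		 * File-backed faults are not yet handled under the
		 * per-VMA lock; drop the lock and let the fault be
		 * retried under the mmap_lock instead.
		 */
		if (vmf->flags & FAULT_FLAG_VMA_LOCK) {
			vma_end_read(vmf->vma);
			return VM_FAULT_RETRY;
		}
		return 0;
	}

Pushing the check down into the leaf handlers keeps do_fault() free of
locking policy, presumably so that later patches can relax the check for
individual handlers.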

Link: https://lkml.kernel.org/r/20230724185410.1124082-8-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Suren Baghdasaryan <surenb@google.com>
Cc: Arjun Roy <arjunroy@google.com>
Cc: Eric Dumazet <edumazet@google.com>
Cc: Punit Agrawal <punit.agrawal@bytedance.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>

@@ -4533,6 +4533,11 @@ static vm_fault_t do_read_fault(struct vm_fault *vmf)
 	vm_fault_t ret = 0;
 	struct folio *folio;
 
+	if (vmf->flags & FAULT_FLAG_VMA_LOCK) {
+		vma_end_read(vmf->vma);
+		return VM_FAULT_RETRY;
+	}
+
 	/*
 	 * Let's call ->map_pages() first and use ->fault() as fallback
 	 * if page by the offset is not ready to be mapped (cold cache or
@@ -4561,6 +4566,11 @@ static vm_fault_t do_cow_fault(struct vm_fault *vmf)
 	struct vm_area_struct *vma = vmf->vma;
 	vm_fault_t ret;
 
+	if (vmf->flags & FAULT_FLAG_VMA_LOCK) {
+		vma_end_read(vma);
+		return VM_FAULT_RETRY;
+	}
+
 	if (unlikely(anon_vma_prepare(vma)))
 		return VM_FAULT_OOM;
 
@@ -4601,6 +4611,11 @@ static vm_fault_t do_shared_fault(struct vm_fault *vmf)
 	vm_fault_t ret, tmp;
 	struct folio *folio;
 
+	if (vmf->flags & FAULT_FLAG_VMA_LOCK) {
+		vma_end_read(vma);
+		return VM_FAULT_RETRY;
+	}
+
 	ret = __do_fault(vmf);
 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
 		return ret;
@@ -4647,11 +4662,6 @@ static vm_fault_t do_fault(struct vm_fault *vmf)
 	struct mm_struct *vm_mm = vma->vm_mm;
 	vm_fault_t ret;
 
-	if (vmf->flags & FAULT_FLAG_VMA_LOCK) {
-		vma_end_read(vma);
-		return VM_FAULT_RETRY;
-	}
-
 	/*
 	 * The VMA was not fully populated on mmap() or missing VM_DONTEXPAND
 	 */