mm/khugepaged: write-lock VMA while collapsing a huge page
Protect VMA from concurrent page fault handler while collapsing a huge page.

Page fault handler needs a stable PMD to use PTL and relies on per-VMA lock to prevent concurrent PMD changes. pmdp_collapse_flush(), set_huge_pmd() and collapse_and_free_pmd() can modify a PMD, which will not be detected by a page fault handler without proper locking.

Before this patch, page tables can be walked under any one of the mmap_lock, the mapping lock, and the anon_vma lock; so when khugepaged unlinks and frees page tables, it must ensure that all of those either are locked or don't exist. This patch adds a fourth lock under which page tables can be traversed, and so khugepaged must also lock out that one.

[surenb@google.com: vm_lock/i_mmap_rwsem inversion in retract_page_tables]
  Link: https://lkml.kernel.org/r/20230303213250.3555716-1-surenb@google.com
[surenb@google.com: build fix]
  Link: https://lkml.kernel.org/r/CAJuCfpFjWhtzRE1X=J+_JjgJzNKhq-=JT8yTBSTHthwp0pqWZw@mail.gmail.com
Link: https://lkml.kernel.org/r/20230227173632.3292573-16-surenb@google.com
Signed-off-by: Suren Baghdasaryan <surenb@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent ccf1d78d8b
commit 55fd6fccad

3 changed files with 54 additions and 26 deletions
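Not part of the commit, but useful context for reading the include/linux/mm.h hunks below: the writer copies mm->mm_lock_seq into vma->vm_lock_seq under the VMA's rw_semaphore, and the page fault path refuses any VMA whose sequence number matches the mm's. The following is a minimal user-space sketch of that handshake, assuming a pthread rwlock stands in for the per-VMA lock; the names mirror the kernel's, but this is a simplified, single-threaded illustration (no READ_ONCE(), no RCU, no real mmap_lock), not the kernel implementation.

/*
 * NOT part of the commit: a stand-alone user-space model of the per-VMA
 * lock handshake, with a pthread rwlock standing in for vma->lock and a
 * plain int for mm->mm_lock_seq.  Single-threaded and simplified.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct mm {
        int mm_lock_seq;        /* advanced when the mmap write lock is dropped */
};

struct vma {
        struct mm *mm;
        pthread_rwlock_t lock;  /* stands in for the per-VMA rw_semaphore */
        int vm_lock_seq;        /* matches mm_lock_seq while write-locked */
};

/* Caller models a task that holds mmap_write_lock, e.g. khugepaged. */
static void vma_start_write(struct vma *vma)
{
        if (vma->vm_lock_seq == vma->mm->mm_lock_seq)
                return;                 /* already write-locked this cycle */
        pthread_rwlock_wrlock(&vma->lock);
        vma->vm_lock_seq = vma->mm->mm_lock_seq;
        pthread_rwlock_unlock(&vma->lock);
}

/* Models vma_end_write_all(): releasing mmap_write_lock bumps the sequence. */
static void mmap_write_unlock(struct mm *mm)
{
        mm->mm_lock_seq++;              /* invalidates every VMA's write mark */
}

/* Models the page fault path: lock the VMA without mmap_lock, or give up. */
static bool vma_start_read(struct vma *vma)
{
        if (vma->vm_lock_seq == vma->mm->mm_lock_seq)
                return false;           /* a writer owns this VMA */
        if (pthread_rwlock_tryrdlock(&vma->lock) != 0)
                return false;           /* writer currently publishing */
        if (vma->vm_lock_seq == vma->mm->mm_lock_seq) {
                pthread_rwlock_unlock(&vma->lock);
                return false;           /* lost the race to a writer */
        }
        return true;
}

static void vma_end_read(struct vma *vma)
{
        pthread_rwlock_unlock(&vma->lock);
}

int main(void)
{
        struct mm mm = { .mm_lock_seq = 1 };
        struct vma vma = { .mm = &mm, .vm_lock_seq = 0 };

        pthread_rwlock_init(&vma.lock, NULL);

        printf("fault before collapse: %d\n", vma_start_read(&vma));   /* 1 */
        vma_end_read(&vma);

        vma_start_write(&vma);          /* what khugepaged now does */
        printf("fault during collapse: %d\n", vma_start_read(&vma));   /* 0 */

        mmap_write_unlock(&mm);
        printf("fault after collapse:  %d\n", vma_start_read(&vma));   /* 1 */
        vma_end_read(&vma);

        pthread_rwlock_destroy(&vma.lock);
        return 0;
}

Built with cc -pthread, it prints 1, 0, 1: the simulated fault succeeds before the collapse, is refused while the VMA carries the write mark, and succeeds again once the write cycle ends and the sequence moves on.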
41  include/linux/mm.h

@@ -665,18 +665,23 @@ static inline void vma_end_read(struct vm_area_struct *vma)
 	rcu_read_unlock();
 }
 
-static inline void vma_start_write(struct vm_area_struct *vma)
+static bool __is_vma_write_locked(struct vm_area_struct *vma, int *mm_lock_seq)
 {
-	int mm_lock_seq;
-
 	mmap_assert_write_locked(vma->vm_mm);
 
 	/*
 	 * current task is holding mmap_write_lock, both vma->vm_lock_seq and
 	 * mm->mm_lock_seq can't be concurrently modified.
 	 */
-	mm_lock_seq = READ_ONCE(vma->vm_mm->mm_lock_seq);
-	if (vma->vm_lock_seq == mm_lock_seq)
+	*mm_lock_seq = READ_ONCE(vma->vm_mm->mm_lock_seq);
+	return (vma->vm_lock_seq == *mm_lock_seq);
+}
+
+static inline void vma_start_write(struct vm_area_struct *vma)
+{
+	int mm_lock_seq;
+
+	if (__is_vma_write_locked(vma, &mm_lock_seq))
 		return;
 
 	down_write(&vma->lock);
@@ -684,14 +689,26 @@ static inline void vma_start_write(struct vm_area_struct *vma)
 	up_write(&vma->lock);
 }
 
+static inline bool vma_try_start_write(struct vm_area_struct *vma)
+{
+	int mm_lock_seq;
+
+	if (__is_vma_write_locked(vma, &mm_lock_seq))
+		return true;
+
+	if (!down_write_trylock(&vma->vm_lock->lock))
+		return false;
+
+	vma->vm_lock_seq = mm_lock_seq;
+	up_write(&vma->vm_lock->lock);
+	return true;
+}
+
 static inline void vma_assert_write_locked(struct vm_area_struct *vma)
 {
-	mmap_assert_write_locked(vma->vm_mm);
-	/*
-	 * current task is holding mmap_write_lock, both vma->vm_lock_seq and
-	 * mm->mm_lock_seq can't be concurrently modified.
-	 */
-	VM_BUG_ON_VMA(vma->vm_lock_seq != READ_ONCE(vma->vm_mm->mm_lock_seq), vma);
+	int mm_lock_seq;
+
+	VM_BUG_ON_VMA(!__is_vma_write_locked(vma, &mm_lock_seq), vma);
 }
 
 #else /* CONFIG_PER_VMA_LOCK */
@@ -701,6 +718,8 @@ static inline bool vma_start_read(struct vm_area_struct *vma)
 		{ return false; }
 static inline void vma_end_read(struct vm_area_struct *vma) {}
 static inline void vma_start_write(struct vm_area_struct *vma) {}
+static inline bool vma_try_start_write(struct vm_area_struct *vma)
+		{ return true; }
 static inline void vma_assert_write_locked(struct vm_area_struct *vma) {}
 
 #endif /* CONFIG_PER_VMA_LOCK */
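A detail worth noting in the helpers above (an observation, not part of the commit): vma_try_start_write(), like vma_start_write(), only holds the VMA rw_semaphore long enough to publish vma->vm_lock_seq = mm_lock_seq and then drops it. The write "lock" is really that sequence-number match, which the page fault path checks in vma_start_read() and which stays in effect until the mmap write-lock cycle ends and mm->mm_lock_seq is advanced; the brief lock/unlock pair only serializes against a reader that has already acquired the rw_semaphore.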
8  mm/khugepaged.c

@@ -1056,6 +1056,7 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
 	if (result != SCAN_SUCCEED)
 		goto out_up_write;
 
+	vma_start_write(vma);
 	anon_vma_lock_write(vma->anon_vma);
 
 	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, address,
@@ -1517,6 +1518,9 @@ int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr,
 		goto drop_hpage;
 	}
 
+	/* Lock the vma before taking i_mmap and page table locks */
+	vma_start_write(vma);
+
 	/*
 	 * We need to lock the mapping so that from here on, only GUP-fast and
 	 * hardware page walks can access the parts of the page tables that
@@ -1694,6 +1698,10 @@ static int retract_page_tables(struct address_space *mapping, pgoff_t pgoff,
 		result = SCAN_PTE_MAPPED_HUGEPAGE;
 		if ((cc->is_khugepaged || is_target) &&
 		    mmap_write_trylock(mm)) {
+			/* trylock for the same lock inversion as above */
+			if (!vma_try_start_write(vma))
+				goto unlock_next;
+
 			/*
 			 * Re-check whether we have an ->anon_vma, because
 			 * collapse_and_free_pmd() requires that either no
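The trylock in retract_page_tables() deserves a word: this path walks the mapping's VMAs with mapping->i_mmap_rwsem already held, the reverse of the vma_start_write -> mapping->i_mmap_rwsem ordering being documented in mm/rmap.c below, so blocking on the per-VMA lock there could deadlock. vma_try_start_write() therefore follows the same pattern as the existing mmap_write_trylock(): if the lock cannot be taken immediately, the VMA is skipped via unlock_next. This is the vm_lock/i_mmap_rwsem inversion fix referenced in the commit message.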
31  mm/rmap.c

@@ -25,21 +25,22 @@
  *   mapping->invalidate_lock (in filemap_fault)
  *     page->flags PG_locked (lock_page)
  *       hugetlbfs_i_mmap_rwsem_key (in huge_pmd_share, see hugetlbfs below)
- *         mapping->i_mmap_rwsem
- *           anon_vma->rwsem
- *             mm->page_table_lock or pte_lock
- *               swap_lock (in swap_duplicate, swap_info_get)
- *                 mmlist_lock (in mmput, drain_mmlist and others)
- *                 mapping->private_lock (in block_dirty_folio)
- *                   folio_lock_memcg move_lock (in block_dirty_folio)
- *                     i_pages lock (widely used)
- *                       lruvec->lru_lock (in folio_lruvec_lock_irq)
- *                 inode->i_lock (in set_page_dirty's __mark_inode_dirty)
- *                 bdi.wb->list_lock (in set_page_dirty's __mark_inode_dirty)
- *                   sb_lock (within inode_lock in fs/fs-writeback.c)
- *                   i_pages lock (widely used, in set_page_dirty,
- *                             in arch-dependent flush_dcache_mmap_lock,
- *                             within bdi.wb->list_lock in __sync_single_inode)
+ *         vma_start_write
+ *           mapping->i_mmap_rwsem
+ *             anon_vma->rwsem
+ *               mm->page_table_lock or pte_lock
+ *                 swap_lock (in swap_duplicate, swap_info_get)
+ *                   mmlist_lock (in mmput, drain_mmlist and others)
+ *                   mapping->private_lock (in block_dirty_folio)
+ *                     folio_lock_memcg move_lock (in block_dirty_folio)
+ *                       i_pages lock (widely used)
+ *                         lruvec->lru_lock (in folio_lruvec_lock_irq)
+ *                   inode->i_lock (in set_page_dirty's __mark_inode_dirty)
+ *                   bdi.wb->list_lock (in set_page_dirty's __mark_inode_dirty)
+ *                     sb_lock (within inode_lock in fs/fs-writeback.c)
+ *                     i_pages lock (widely used, in set_page_dirty,
+ *                               in arch-dependent flush_dcache_mmap_lock,
+ *                               within bdi.wb->list_lock in __sync_single_inode)
  *
  * anon_vma->rwsem,mapping->i_mmap_rwsem   (memory_failure, collect_procs_anon)
  *   ->tasklist_lock