Mirror of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git (synced 2025-08-05 16:54:27 +00:00)
x86/mm/pat: clear VM_PAT if copy_p4d_range failed
Syzbot reported a warning in untrack_pfn(). Digging into the root cause, we found that it is triggered by a memory allocation failure in pmd_alloc_one(), injected via failslab. In copy_page_range(), the allocation of a pmd fails. During the subsequent error handling, mmput() is called to tear down all vmas; when untrack_pfn() is then invoked on this vma, whose page tables were never populated, the warning fires. Here is a simplified flow:

dup_mm
  dup_mmap
    copy_page_range
      copy_p4d_range
        copy_pud_range
          copy_pmd_range
            pmd_alloc
              __pmd_alloc
                pmd_alloc_one
                  page = alloc_pages(gfp, 0);
                  if (!page)
                    return NULL;
mmput
  exit_mmap
    unmap_vmas
      unmap_single_vma
        untrack_pfn
          follow_phys
            WARN_ON_ONCE(1);

Since this vma was never set up successfully, we can clear the VM_PAT flag; untrack_pfn() is then skipped while cleaning up the vma. Function untrack_pfn_moved() has also been renamed to untrack_pfn_clear() to fit the new logic.

Link: https://lkml.kernel.org/r/20230217025615.1595558-1-mawupeng1@huawei.com
Signed-off-by: Ma Wupeng <mawupeng1@huawei.com>
Reported-by: <syzbot+5f488e922d047d8f00cc@syzkaller.appspotmail.com>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Borislav Petkov <bp@suse.de>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Suresh Siddha <suresh.b.siddha@intel.com>
Cc: Toshi Kani <toshi.kani@hp.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
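To make the failure mode concrete, here is a small userspace toy model of the fix. The struct, flag values, and helper bodies below are simplified stand-ins invented for illustration, not the kernel's definitions; only the shape of the logic follows the patch.

	/* Toy userspace model of the fix -- simplified stand-ins, not kernel code. */
	#include <stdbool.h>
	#include <stdio.h>

	#define VM_PFNMAP 0x1UL
	#define VM_PAT    0x2UL		/* x86 PAT bookkeeping flag */

	struct vma { unsigned long vm_flags; };

	/* Models vm_flags_clear(vma, VM_PAT): teardown will now skip untracking. */
	static void untrack_pfn_clear(struct vma *vma)
	{
		vma->vm_flags &= ~VM_PAT;
	}

	/*
	 * Models unmap_single_vma()/untrack_pfn(): untracking only applies to
	 * VM_PAT vmas, and warns when the vma's page tables were never
	 * populated (the syzbot case, reached via mmput() after
	 * copy_page_range() failed).
	 */
	static void unmap_single_vma(struct vma *vma, bool pgtable_populated)
	{
		if (!(vma->vm_flags & VM_PAT))
			return;
		if (!pgtable_populated)
			fprintf(stderr, "WARN: untrack_pfn() on an empty pfnmap\n");
	}

	int main(void)
	{
		struct vma dst = { .vm_flags = VM_PFNMAP | VM_PAT };
		bool copy_failed = true;	/* pmd_alloc_one() returned NULL */

		if (copy_failed)
			untrack_pfn_clear(&dst);	/* the fix */

		/* Teardown now returns early instead of warning. */
		unmap_single_vma(&dst, !copy_failed);
		return 0;
	}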
This commit is contained in:
parent a1b92a3f14
commit d155df53f3

4 changed files with 14 additions and 8 deletions
arch/x86/mm/pat/memtype.c
@@ -1073,11 +1073,15 @@ void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn,
 }
 
 /*
- * untrack_pfn_moved is called, while mremapping a pfnmap for a new region,
- * with the old vma after its pfnmap page table has been removed. The new
- * vma has a new pfnmap to the same pfn & cache type with VM_PAT set.
+ * untrack_pfn_clear is called if the following situation fits:
+ *
+ * 1) while mremapping a pfnmap for a new region, with the old vma after
+ * its pfnmap page table has been removed. The new vma has a new pfnmap
+ * to the same pfn & cache type with VM_PAT set.
+ * 2) while duplicating vm area, the new vma fails to copy the pgtable from
+ * old vma.
  */
-void untrack_pfn_moved(struct vm_area_struct *vma)
+void untrack_pfn_clear(struct vm_area_struct *vma)
 {
 	vm_flags_clear(vma, VM_PAT);
 }
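Why clearing VM_PAT is sufficient: untrack_pfn(), defined just above this hunk, bails out early for vmas without VM_PAT, so a vma that never got its pfnmap page tables copied is simply skipped on teardown. A sketch of that guard follows; the actual body in memtype.c does more, and only the early-return shape is shown here.

	void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn,
			 unsigned long size, bool mm_wr_locked)
	{
		/* Nothing was tracked for this vma; skip the follow_phys()
		 * lookup whose WARN_ON_ONCE() syzbot hit. */
		if (vma && !(vma->vm_flags & VM_PAT))
			return;
		/* ... resolve the physical address and free the memtype ... */
	}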
include/linux/pgtable.h
@@ -1191,9 +1191,10 @@ static inline void untrack_pfn(struct vm_area_struct *vma,
 }
 
 /*
- * untrack_pfn_moved is called while mremapping a pfnmap for a new region.
+ * untrack_pfn_clear is called while mremapping a pfnmap for a new region
+ * or fails to copy pgtable during duplicate vm area.
  */
-static inline void untrack_pfn_moved(struct vm_area_struct *vma)
+static inline void untrack_pfn_clear(struct vm_area_struct *vma)
 {
 }
 #else
@@ -1205,7 +1206,7 @@ extern void track_pfn_insert(struct vm_area_struct *vma, pgprot_t *prot,
 extern int track_pfn_copy(struct vm_area_struct *vma);
 extern void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn,
 			unsigned long size, bool mm_wr_locked);
-extern void untrack_pfn_moved(struct vm_area_struct *vma);
+extern void untrack_pfn_clear(struct vm_area_struct *vma);
 #endif
 
 #ifdef CONFIG_MMU
mm/memory.c
@@ -1290,6 +1290,7 @@ copy_page_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma)
 			continue;
 		if (unlikely(copy_p4d_range(dst_vma, src_vma, dst_pgd, src_pgd,
 					    addr, next))) {
+			untrack_pfn_clear(dst_vma);
 			ret = -ENOMEM;
 			break;
 		}
mm/mremap.c
@@ -683,7 +683,7 @@ static unsigned long move_vma(struct vm_area_struct *vma,
 
 	/* Tell pfnmap has moved from this vma */
 	if (unlikely(vma->vm_flags & VM_PFNMAP))
-		untrack_pfn_moved(vma);
+		untrack_pfn_clear(vma);
 
 	if (unlikely(!err && (flags & MREMAP_DONTUNMAP))) {
 		/* We always clear VM_LOCKED[ONFAULT] on the old vma */