linux/arch/arm64/mm/copypage.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Based on arch/arm/mm/copypage.c
 *
 * Copyright (C) 2002 Deep Blue Solutions Ltd, All Rights Reserved.
 * Copyright (C) 2012 ARM Ltd.
 */
#include <linux/bitops.h>
#include <linux/mm.h>

#include <asm/page.h>
#include <asm/cacheflush.h>
#include <asm/cpufeature.h>
#include <asm/mte.h>
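
/*
 * Copy both the data and, when MTE is supported, the allocation tags of
 * a page. Hugetlb folios and ordinary pages track their "tagged" state
 * differently, hence the two branches below.
 */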
void copy_highpage(struct page *to, struct page *from)
{
	void *kto = page_address(to);
	void *kfrom = page_address(from);
	struct folio *src = page_folio(from);
	struct folio *dst = page_folio(to);
	unsigned int i, nr_pages;

	copy_page(kto, kfrom);
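
	/*
	 * The tag copy below may give the destination new memory tags, so
	 * any KASAN tag previously recorded for this page is stale; reset
	 * it to avoid spurious tag-mismatch reports under HW tag-based
	 * KASAN.
	 */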
	if (kasan_hw_tags_enabled())
		page_kasan_tag_reset(to);
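
	/* No MTE means no allocation tags to copy. */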
	if (!system_supports_mte())
		return;
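
	/*
	 * Hugetlb folios track their tagged state for the whole folio, so
	 * only copy tags when called for the head page of a tagged source
	 * folio, and then do so for every subpage at once.
	 */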
	if (folio_test_hugetlb(src)) {
		if (!folio_test_hugetlb_mte_tagged(src) ||
		    from != folio_page(src, 0))
			return;

		WARN_ON_ONCE(!folio_try_hugetlb_mte_tagging(dst));

		/*
		 * Populate tags for all subpages.
		 *
		 * Don't assume the first page is head page since
		 * huge page copy may start from any subpage.
		 */
		nr_pages = folio_nr_pages(src);
		for (i = 0; i < nr_pages; i++) {
			kfrom = page_address(folio_page(src, i));
			kto = page_address(folio_page(dst, i));
			mte_copy_page_tags(kto, kfrom);
		}
		folio_set_hugetlb_mte_tagged(dst);
	} else if (page_mte_tagged(from)) {
		/* It's a new page, shouldn't have been tagged yet */
		WARN_ON_ONCE(!try_page_mte_tagging(to));
		mte_copy_page_tags(kto, kfrom);
		set_page_mte_tagged(to);
	}
}
EXPORT_SYMBOL(copy_highpage);
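
/*
 * As copy_highpage(), for copies destined for a user mapping: also make
 * sure the new contents are coherent with the D-cache before userspace
 * can see them.
 */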
void copy_user_highpage(struct page *to, struct page *from,
			unsigned long vaddr, struct vm_area_struct *vma)
{
	copy_highpage(to, from);
	flush_dcache_page(to);
}
EXPORT_SYMBOL_GPL(copy_user_highpage);