linux/arch/arm64/kernel/mte.c

// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2020 ARM Ltd.
*/
#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/prctl.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/string.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/thread_info.h>
#include <linux/types.h>
#include <linux/uio.h>
#include <asm/barrier.h>
#include <asm/cpufeature.h>
#include <asm/mte.h>
#include <asm/mte-kasan.h>
#include <asm/ptrace.h>
#include <asm/sysreg.h>
u64 gcr_kernel_excl __ro_after_init;
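
/*
 * Initialise the tags for a page about to gain a PROT_MTE mapping:
 * restore them from swap if a saved copy exists, otherwise reset the
 * KASAN tag in page->flags and zero the tags in memory.
 */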
static void mte_sync_page_tags(struct page *page, pte_t *ptep, bool check_swap)
{
pte_t old_pte = READ_ONCE(*ptep);
if (check_swap && is_swap_pte(old_pte)) {
swp_entry_t entry = pte_to_swp_entry(old_pte);
if (!non_swap_entry(entry) && mte_restore_tags(entry, page))
return;
}
page_kasan_tag_reset(page);
/*
* We need smp_wmb() in between setting the flags and clearing the
* tags because if another thread reads page->flags and builds a
* tagged address out of it, there is an actual dependency on the
* memory access, but on the current thread we do not guarantee that
* the new page->flags are visible before the tags were updated.
*/
smp_wmb();
mte_clear_page_tags(page_address(page));
}
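
/*
 * Called from set_pte_at() when mapping tagged (PROT_MTE) memory.
 * PG_mte_tagged is set atomically so that each page of a compound
 * page has its tags initialised exactly once.
 */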
void mte_sync_tags(pte_t *ptep, pte_t pte)
{
struct page *page = pte_page(pte);
long i, nr_pages = compound_nr(page);
bool check_swap = nr_pages == 1;
/* if PG_mte_tagged is set, tags have already been initialised */
for (i = 0; i < nr_pages; i++, page++) {
if (!test_and_set_bit(PG_mte_tagged, &page->flags))
mte_sync_page_tags(page, ptep, check_swap);
}
}
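
/*
 * Page comparison helper used to decide whether pages may be merged
 * (e.g. by KSM). A non-zero return prevents merging; see the comment
 * below for why tagged pages must never compare equal.
 */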
int memcmp_pages(struct page *page1, struct page *page2)
{
char *addr1, *addr2;
int ret;
addr1 = page_address(page1);
addr2 = page_address(page2);
ret = memcmp(addr1, addr2, PAGE_SIZE);
if (!system_supports_mte() || ret)
return ret;
/*
* If the page content is identical but at least one of the pages is
* tagged, return non-zero to avoid KSM merging. If only one of the
* pages is tagged, set_pte_at() may zero or change the tags of the
* other page via mte_sync_tags().
*/
if (test_bit(PG_mte_tagged, &page1->flags) ||
test_bit(PG_mte_tagged, &page2->flags))
return addr1 != addr2;
return ret;
}
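
/*
 * Read the allocation tag of the MTE granule containing @addr with the
 * LDG instruction. The match-all tag 0xFF is returned when MTE is not
 * supported.
 */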
u8 mte_get_mem_tag(void *addr)
{
if (!system_supports_mte())
return 0xFF;
asm(__MTE_PREAMBLE "ldg %0, [%0]"
: "+r" (addr));
return mte_get_ptr_tag(addr);
}
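
/*
 * Generate a random allocation tag with the IRG instruction, which
 * honours the GCR_EL1 exclude mask. Only the tag bits of the result
 * are used, so the initial value of addr is irrelevant.
 */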
u8 mte_get_random_tag(void)
{
void *addr;
if (!system_supports_mte())
return 0xFF;
asm(__MTE_PREAMBLE "irg %0, %0"
: "+r" (addr));
return mte_get_ptr_tag(addr);
}
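
/*
 * Assign @tag to every MTE granule in [addr, addr + size) and return
 * the correspondingly tagged pointer. 0xF0 | tag widens the 4-bit MTE
 * tag into the full top byte passed to __tag_set().
 */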
void *mte_set_mem_tag_range(void *addr, size_t size, u8 tag)
{
void *ptr = addr;
if (!system_supports_mte() || size == 0)
return addr;
/* Make sure that size is MTE granule aligned. */
WARN_ON(size & (MTE_GRANULE_SIZE - 1));
/* Make sure that the address is MTE granule aligned. */
WARN_ON((u64)addr & (MTE_GRANULE_SIZE - 1));
tag = 0xF0 | tag;
ptr = (void *)__tag_set(ptr, tag);
mte_assign_mem_tag_range(ptr, size);
return ptr;
}
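
/*
 * Compute the kernel exclude mask for GCR_EL1 from the highest tag the
 * in-kernel MTE user (KASAN) may generate. Worked example: max_tag ==
 * 0xFD gives an MTE tag of 0xD, so incl == GENMASK(13, 0) and
 * gcr_kernel_excl == 0xc000, i.e. IRG never produces tags 0xE or 0xF.
 */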
void mte_init_tags(u64 max_tag)
{
static bool gcr_kernel_excl_initialized;
if (!gcr_kernel_excl_initialized) {
/*
* KASAN tags occupy the whole top byte (0xFF) while MTE tags are only
* 4 bits wide (0xF). This conversion extracts the MTE tag from a KASAN
* tag.
*/
u64 incl = GENMASK(FIELD_GET(MTE_TAG_MASK >> MTE_TAG_SHIFT,
max_tag), 0);
gcr_kernel_excl = ~incl & SYS_GCR_EL1_EXCL_MASK;
gcr_kernel_excl_initialized = true;
}
/* Enable the kernel exclude mask for random tags generation. */
write_sysreg_s(SYS_GCR_EL1_RRND | gcr_kernel_excl, SYS_GCR_EL1);
}
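
/*
 * Enable MTE for the kernel: with SCTLR_EL1.TCF set to Sync, a tag
 * check fault on a kernel access causes a synchronous data abort.
 * Called by the hardware tag-based KASAN runtime at initialisation.
 */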
void mte_enable_kernel(void)
{
/* Enable MTE Sync Mode for EL1. */
sysreg_clear_set(sctlr_el1, SCTLR_ELx_TCF_MASK, SCTLR_ELx_TCF_SYNC);
isb();
}
static void update_sctlr_el1_tcf0(u64 tcf0)
{
/* ISB required for the kernel uaccess routines */
sysreg_clear_set(sctlr_el1, SCTLR_EL1_TCF0_MASK, tcf0);
isb();
}
static void set_sctlr_el1_tcf0(u64 tcf0)
{
/*
* mte_thread_switch() checks current->thread.sctlr_tcf0 as an
* optimisation. Disable preemption so that it does not see
* the variable update before the SCTLR_EL1.TCF0 one.
*/
preempt_disable();
current->thread.sctlr_tcf0 = tcf0;
update_sctlr_el1_tcf0(tcf0);
preempt_enable();
}
static void update_gcr_el1_excl(u64 excl)
{
/*
* Note that the mask controlled by the user via prctl() is an
* include while GCR_EL1 accepts an exclude mask.
* No need for ISB since this only affects EL0 currently, implicit
* with ERET.
*/
sysreg_clear_set_s(SYS_GCR_EL1, SYS_GCR_EL1_EXCL_MASK, excl);
}
static void set_gcr_el1_excl(u64 excl)
{
current->thread.gcr_user_excl = excl;
/*
* SYS_GCR_EL1 will be set to the current->thread.gcr_user_excl value
* by mte_set_user_gcr() in kernel_exit.
*/
}
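
/*
 * Reset the per-thread MTE state on exec(): discard any pending
 * asynchronous tag fault, disable EL0 tag checking and restore the
 * default tag generation mask.
 */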
void flush_mte_state(void)
{
if (!system_supports_mte())
return;
/* clear any pending asynchronous tag fault */
dsb(ish);
write_sysreg_s(0, SYS_TFSRE0_EL1);
clear_thread_flag(TIF_MTE_ASYNC_FAULT);
/* disable tag checking */
set_sctlr_el1_tcf0(SCTLR_EL1_TCF0_NONE);
/* reset tag generation mask */
set_gcr_el1_excl(SYS_GCR_EL1_EXCL_MASK);
}
void mte_thread_switch(struct task_struct *next)
{
if (!system_supports_mte())
return;
/* avoid expensive SCTLR_EL1 accesses if no change */
if (current->thread.sctlr_tcf0 != next->thread.sctlr_tcf0)
update_sctlr_el1_tcf0(next->thread.sctlr_tcf0);
}
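
/*
 * GCR_EL1 is not preserved across a CPU suspend/resume cycle, so the
 * kernel exclude mask has to be reinstated on the way out.
 */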
void mte_suspend_exit(void)
{
if (!system_supports_mte())
return;
update_gcr_el1_excl(gcr_kernel_excl);
}
long set_mte_ctrl(struct task_struct *task, unsigned long arg)
{
u64 tcf0;
u64 gcr_excl = ~((arg & PR_MTE_TAG_MASK) >> PR_MTE_TAG_SHIFT) &
SYS_GCR_EL1_EXCL_MASK;
if (!system_supports_mte())
return 0;
switch (arg & PR_MTE_TCF_MASK) {
case PR_MTE_TCF_NONE:
tcf0 = SCTLR_EL1_TCF0_NONE;
break;
case PR_MTE_TCF_SYNC:
tcf0 = SCTLR_EL1_TCF0_SYNC;
break;
case PR_MTE_TCF_ASYNC:
tcf0 = SCTLR_EL1_TCF0_ASYNC;
break;
default:
return -EINVAL;
}
if (task != current) {
task->thread.sctlr_tcf0 = tcf0;
task->thread.gcr_user_excl = gcr_excl;
} else {
set_sctlr_el1_tcf0(tcf0);
set_gcr_el1_excl(gcr_excl);
}
return 0;
}
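
/*
 * set_mte_ctrl()/get_mte_ctrl() back the PR_SET_TAGGED_ADDR_CTRL and
 * PR_GET_TAGGED_ADDR_CTRL prctl()s. An illustrative userspace call (a
 * sketch; constants from <linux/prctl.h>) enabling synchronous tag
 * checking with tags 0-3 included in random tag generation:
 *
 *	prctl(PR_SET_TAGGED_ADDR_CTRL,
 *	      PR_TAGGED_ADDR_ENABLE | PR_MTE_TCF_SYNC |
 *	      (0xf << PR_MTE_TAG_SHIFT),
 *	      0, 0, 0);
 */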
long get_mte_ctrl(struct task_struct *task)
{
unsigned long ret;
u64 incl = ~task->thread.gcr_user_excl & SYS_GCR_EL1_EXCL_MASK;
if (!system_supports_mte())
return 0;
ret = incl << PR_MTE_TAG_SHIFT;
switch (task->thread.sctlr_tcf0) {
case SCTLR_EL1_TCF0_NONE:
ret |= PR_MTE_TCF_NONE;
break;
case SCTLR_EL1_TCF0_SYNC:
ret |= PR_MTE_TCF_SYNC;
break;
case SCTLR_EL1_TCF0_ASYNC:
ret |= PR_MTE_TCF_ASYNC;
break;
}
return ret;
}
/*
* Access MTE tags in another process' address space as given in mm. Update
* the number of tags copied. Return 0 if any tags copied, error otherwise.
* Inspired by __access_remote_vm().
*/
static int __access_remote_tags(struct mm_struct *mm, unsigned long addr,
struct iovec *kiov, unsigned int gup_flags)
{
struct vm_area_struct *vma;
void __user *buf = kiov->iov_base;
size_t len = kiov->iov_len;
int ret;
int write = gup_flags & FOLL_WRITE;
if (!access_ok(buf, len))
return -EFAULT;
if (mmap_read_lock_killable(mm))
return -EIO;
while (len) {
unsigned long tags, offset;
void *maddr;
struct page *page = NULL;
ret = get_user_pages_remote(mm, addr, 1, gup_flags, &page,
&vma, NULL);
if (ret <= 0)
break;
/*
* Only copy tags if the page has been mapped as PROT_MTE
* (PG_mte_tagged set). Otherwise the tags are not valid and
* not accessible to user. Moreover, an mprotect(PROT_MTE)
* would cause the existing tags to be cleared if the page
* was never mapped with PROT_MTE.
*/
if (!test_bit(PG_mte_tagged, &page->flags)) {
ret = -EOPNOTSUPP;
put_page(page);
break;
}
/* limit access to the end of the page */
offset = offset_in_page(addr);
tags = min(len, (PAGE_SIZE - offset) / MTE_GRANULE_SIZE);
maddr = page_address(page);
if (write) {
tags = mte_copy_tags_from_user(maddr + offset, buf, tags);
set_page_dirty_lock(page);
} else {
tags = mte_copy_tags_to_user(buf, maddr + offset, tags);
}
put_page(page);
/* error accessing the tracer's buffer */
if (!tags)
break;
len -= tags;
buf += tags;
addr += tags * MTE_GRANULE_SIZE;
}
mmap_read_unlock(mm);
/* return an error if no tags copied */
kiov->iov_len = buf - kiov->iov_base;
if (!kiov->iov_len) {
/* check for error accessing the tracee's address space */
if (ret <= 0)
return -EIO;
else
return -EFAULT;
}
return 0;
}
/*
* Copy MTE tags in another process' address space at 'addr' to/from tracer's
* iovec buffer. Return 0 on success. Inspired by ptrace_access_vm().
*/
static int access_remote_tags(struct task_struct *tsk, unsigned long addr,
struct iovec *kiov, unsigned int gup_flags)
{
struct mm_struct *mm;
int ret;
mm = get_task_mm(tsk);
if (!mm)
return -EPERM;
if (!tsk->ptrace || (current != tsk->parent) ||
((get_dumpable(mm) != SUID_DUMP_USER) &&
!ptracer_capable(tsk, mm->user_ns))) {
mmput(mm);
return -EPERM;
}
ret = __access_remote_tags(mm, addr, kiov, gup_flags);
mmput(mm);
return ret;
}
int mte_ptrace_copy_tags(struct task_struct *child, long request,
unsigned long addr, unsigned long data)
{
int ret;
struct iovec kiov;
struct iovec __user *uiov = (void __user *)data;
unsigned int gup_flags = FOLL_FORCE;
if (!system_supports_mte())
return -EIO;
if (get_user(kiov.iov_base, &uiov->iov_base) ||
get_user(kiov.iov_len, &uiov->iov_len))
return -EFAULT;
if (request == PTRACE_POKEMTETAGS)
gup_flags |= FOLL_WRITE;
/* align addr to the MTE tag granule */
addr &= MTE_GRANULE_MASK;
ret = access_remote_tags(child, addr, &kiov, gup_flags);
if (!ret)
ret = put_user(kiov.iov_len, &uiov->iov_len);
return ret;
}
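
/*
 * mte_ptrace_copy_tags() implements the PTRACE_PEEKMTETAGS and
 * PTRACE_POKEMTETAGS requests. Illustrative tracer usage (a sketch;
 * the request constants are defined in asm/ptrace.h, and one tag byte
 * is transferred per MTE_GRANULE_SIZE bytes of tracee memory):
 *
 *	char tags[16];
 *	struct iovec iov = { .iov_base = tags, .iov_len = sizeof(tags) };
 *	ptrace(PTRACE_PEEKMTETAGS, pid, (void *)addr, &iov);
 *	// on return, iov.iov_len holds the number of tags copied
 */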