/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_MMAN_H__
#define __ASM_MMAN_H__

#include <uapi/asm/mman.h>

#ifndef BUILD_VDSO
#include <linux/compiler.h>
#include <linux/fs.h>
#include <linux/hugetlb.h>
#include <linux/shmem_fs.h>
#include <linux/types.h>
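
/*
 * Translate the PROT_* bits of an mmap()/mprotect() request into the
 * arm64-specific vm_flags: PROT_BTI selects VM_ARM64_BTI, PROT_MTE
 * selects VM_MTE and, when POE is supported, the pkey value maps onto
 * VM_PKEY_BIT0..2 (e.g. pkey 5 == 0b101 yields VM_PKEY_BIT0 | VM_PKEY_BIT2).
 */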
static inline vm_flags_t arch_calc_vm_prot_bits(unsigned long prot,
						unsigned long pkey)
{
	vm_flags_t ret = 0;

	if (system_supports_bti() && (prot & PROT_BTI))
		ret |= VM_ARM64_BTI;

	if (system_supports_mte() && (prot & PROT_MTE))
		ret |= VM_MTE;

#ifdef CONFIG_ARCH_HAS_PKEYS
	if (system_supports_poe()) {
		ret |= pkey & BIT(0) ? VM_PKEY_BIT0 : 0;
		ret |= pkey & BIT(1) ? VM_PKEY_BIT1 : 0;
		ret |= pkey & BIT(2) ? VM_PKEY_BIT2 : 0;
	}
#endif

	return ret;
}
#define arch_calc_vm_prot_bits(prot, pkey) arch_calc_vm_prot_bits(prot, pkey)
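
/*
 * Each hook below is #defined to its own name so that the generic code
 * in <linux/mman.h>, which wraps every fallback in #ifndef, picks up
 * this architecture's implementation instead of the default.
 */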

static inline vm_flags_t arch_calc_vm_flag_bits(struct file *file,
						unsigned long flags)
{
	/*
	 * Only allow MTE on anonymous mappings as these are guaranteed to be
	 * backed by tags-capable memory. The vm_flags may be overridden by a
	 * filesystem supporting MTE (RAM-based).
	 */
	if (system_supports_mte()) {
		if (flags & (MAP_ANONYMOUS | MAP_HUGETLB))
			return VM_MTE_ALLOWED;
		if (shmem_file(file) || is_file_hugepages(file))
			return VM_MTE_ALLOWED;
	}

	return 0;
}
#define arch_calc_vm_flag_bits(file, flags) arch_calc_vm_flag_bits(file, flags)
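
/*
 * Reject any PROT_* bits the CPU cannot honour; PROT_BTI and PROT_MTE
 * are accepted only when the corresponding feature is present. Reached
 * on the mprotect() path via the generic arch_validate_prot() hook.
 */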
static inline bool arch_validate_prot(unsigned long prot,
	unsigned long addr __always_unused)
{
	unsigned long supported = PROT_READ | PROT_WRITE | PROT_EXEC | PROT_SEM;

	if (system_supports_bti())
		supported |= PROT_BTI;

	if (system_supports_mte())
		supported |= PROT_MTE;

	return (prot & ~supported) == 0;
}
#define arch_validate_prot(prot, addr) arch_validate_prot(prot, addr)
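
/*
 * Last-line validation of the fully computed vm_flags: VM_MTE is legal
 * only if VM_MTE_ALLOWED was granted (see arch_calc_vm_flag_bits()
 * above), and a GCS shadow-stack mapping must never be executable.
 */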
static inline bool arch_validate_flags(vm_flags_t vm_flags)
{
	if (system_supports_mte()) {
		/*
		 * only allow VM_MTE if VM_MTE_ALLOWED has been set
		 * previously
		 */
		if ((vm_flags & VM_MTE) && !(vm_flags & VM_MTE_ALLOWED))
			return false;
	}

	if (system_supports_gcs() && (vm_flags & VM_SHADOW_STACK)) {
		/* An executable GCS isn't a good idea. */
		if (vm_flags & VM_EXEC)
			return false;

		/* The memory management core should prevent this */
		VM_WARN_ON(vm_flags & VM_SHARED);
	}

	return true;
}
#define arch_validate_flags(vm_flags) arch_validate_flags(vm_flags)

#endif /* !BUILD_VDSO */

#endif /* ! __ASM_MMAN_H__ */