linux/arch/s390/include/asm/page.h
Alexander Gordeev a3d0b7a13b Merge branch 'uaccess-key' into features
Heiko Carstens says:

===================
A rather large series which is supposed to fix the crash below [1], which
was seen when running the memop kvm selftest.

The problem is that cmpxchg_user_key() executes code with a non-default
key. If a system is IPL'ed with "LOAD NORMAL", the previous system used
storage keys, and the fetch-protection bit is set for some of those pages,
then a protection exception will happen as soon as code of
cmpxchg_user_key() located within such a page is executed.

The idea of this series is to register, at compile time, all code locations
which run with a non-default key. All functions which run with a
non-default key must then explicitly call an init function, which sets the
storage key of all pages containing such code locations to the default key
and thereby prevents such protection exceptions.
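
As a rough illustration (editor's sketch; the names below are made up and
not necessarily the actual API introduced by this series), such a
compile-time registration could collect begin/end markers in a dedicated
section which the init function walks at boot:

	struct skey_region {
		void *start;
		void *end;
	};

	/*
	 * Illustrative only: emit the boundaries of a code region which
	 * runs with a non-default access key into a ".skey_region"
	 * section. An init function like __skey_regions_initialize()
	 * can then apply PAGE_DEFAULT_KEY, via page_set_storage_key(),
	 * to every page containing such a region.
	 */
	#define SKEY_REGION(_start, _end)				\
		static const struct skey_region			\
		__section(".skey_region") __used			\
		__UNIQUE_ID(skey_region) = { (_start), (_end) }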

Furthermore, all functions containing code which may be executed with a
non-default access key must be marked with __kprobes, to prevent
out-of-line execution of any instruction of such functions, which would
result in the same problem.

By default the kernel will, as before, not issue any storage key changing
instructions, which preserves the keyless-subset mode optimizations in
hosts.

Other possible implementations which I discarded:

- Moving the code to a section of its own. This would require an s390
  specific change to modpost.c, which complains about section mismatches
  (EX_TABLE entries in a non-default text section). No other architecture
  has something similar, so let's keep this architecture specific hack
  local.

- Just apply the default storage key to the whole kprobes text section.
  However, this would add special s390 semantics to the kprobes text
  section which no other architecture has. History has shown that such
  hacks fire back sooner or later.

Furthermore, to keep all of this quite simple, this only works for code
locations in core kernel code, not within modules. After this series no
module code with such code locations is left, and as of now I don't see
any new kernel code coming which would run with a non-default access key.

Note: the original crash can be reproduced by replacing

page_set_storage_key(real, PAGE_DEFAULT_KEY, 1);

with

page_set_storage_key(real, 8, 1);

in arch/s390/kernel/skey.c:__skey_regions_initialize()

And then run tools/testing/selftests/kvm/s390/memop from the kernel selftests.

[1]:

Unable to handle kernel pointer dereference in virtual kernel address space
Failing address: 0000000000000000 TEID: 000000000000080b
Fault in home space mode while using kernel ASCE.
AS:0000000002528007 R3:00000001ffffc007 S:00000001ffffb801 P:000000000000013d
Oops: 0004 ilc:1 [#1] SMP
Modules linked in:
CPU: 3 UID: 0 PID: 791 Comm: memop Not tainted 6.16.0-rc1-00006-g3b568201d0a6-dirty #11 NONE
Hardware name: IBM 3931 A01 704 (z/VM 7.4.0)
Krnl PSW : 0794f00180000000 000003ffe0f4d91e (__cmpxchg_user_key1+0xbe/0x190)
           R:0 T:1 IO:1 EX:1 Key:9 M:1 W:0 P:0 AS:3 CC:3 PM:0 RI:0 EA:3
Krnl GPRS: 070003ffdfbf6af0 0000000000070000 0000000095b5a300 0000000000000000
           00000000f1000000 0000000000000000 0000000000000090 0000000000000000
           0000000000000040 0000000000000018 000003ff9b23d000 0000037fe0ef7bd8
           000003ffdfbf7500 00000000962e4000 0000037f00ffffff 0000037fe0ef7aa0
Krnl Code: 000003ffe0f4d912: ad03f0a0            stosm   160(%r15),3
           000003ffe0f4d916: a7780000            lhi     %r7,0
          #000003ffe0f4d91a: b20a6000            spka    0(%r6)
          >000003ffe0f4d91e: b2790100            sacf    256
           000003ffe0f4d922: a56f0080            llill   %r6,128
           000003ffe0f4d926: 5810a000            l       %r1,0(%r10)
           000003ffe0f4d92a: 141e                nr      %r1,%r14
           000003ffe0f4d92c: c0e7ffffffff        xilf    %r14,4294967295
Call Trace:
 [<000003ffe0f4d91e>] __cmpxchg_user_key1+0xbe/0x190
 [<000003ffe0189c6e>] cmpxchg_guest_abs_with_key+0x2fe/0x370
 [<000003ffe016d28e>] kvm_s390_vm_mem_op_cmpxchg+0x17e/0x350
 [<000003ffe0173284>] kvm_arch_vm_ioctl+0x354/0x6f0
 [<000003ffe015fedc>] kvm_vm_ioctl+0x2cc/0x6e0
 [<000003ffe05348ae>] vfs_ioctl+0x2e/0x70
 [<000003ffe0535e70>] __s390x_sys_ioctl+0xe0/0x100
 [<000003ffe0f40f06>] __do_syscall+0x136/0x340
 [<000003ffe0f4cb2e>] system_call+0x6e/0x90
Last Breaking-Event-Address:
 [<000003ffe0f4d896>] __cmpxchg_user_key1+0x36/0x190
===================

Signed-off-by: Alexander Gordeev <agordeev@linux.ibm.com>
2025-06-29 13:21:16 +02:00

/* SPDX-License-Identifier: GPL-2.0 */
/*
 * S390 version
 * Copyright IBM Corp. 1999, 2000
 * Author(s): Hartmut Penner (hp@de.ibm.com)
 */
#ifndef _S390_PAGE_H
#define _S390_PAGE_H
#include <linux/const.h>
#include <asm/types.h>
#include <asm/asm.h>
#include <vdso/page.h>
#define PAGE_DEFAULT_ACC _AC(0, UL)
/* storage-protection override */
#define PAGE_SPO_ACC 9
#define PAGE_DEFAULT_KEY (PAGE_DEFAULT_ACC << 4)
#define HPAGE_SHIFT 20
#define HPAGE_SIZE (1UL << HPAGE_SHIFT)
#define HPAGE_MASK (~(HPAGE_SIZE - 1))
#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
#define HUGE_MAX_HSTATE 2
#define ARCH_HAS_SETCLEAR_HUGE_PTE
#define ARCH_HAS_HUGE_PTE_TYPE
#define ARCH_HAS_PREPARE_HUGEPAGE
#define ARCH_HAS_HUGEPAGE_CLEAR_FLUSH
#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
#include <asm/setup.h>
#ifndef __ASSEMBLER__
void __storage_key_init_range(unsigned long start, unsigned long end);
static inline void storage_key_init_range(unsigned long start, unsigned long end)
{
	/*
	 * PAGE_DEFAULT_KEY is a compile-time constant; with its default
	 * value of zero the call below is optimized away entirely.
	 */
	if (PAGE_DEFAULT_KEY != 0)
		__storage_key_init_range(start, end);
}
#define clear_page(page) memset((page), 0, PAGE_SIZE)
/*
 * copy_page uses the mvcl instruction with 0xb0 padding byte in order to
 * bypass caches when copying a page. Especially when copying huge pages
 * this keeps L1 and L2 data caches alive.
 */
static inline void copy_page(void *to, void *from)
{
	union register_pair dst, src;

	dst.even = (unsigned long) to;
	dst.odd  = 0x1000;	/* destination length: one 4K page */
	src.even = (unsigned long) from;
	src.odd  = 0xb0001000;	/* pad byte 0xb0, source length 0x1000 */
	asm volatile(
		"	mvcl	%[dst],%[src]"
		: [dst] "+&d" (dst.pair), [src] "+&d" (src.pair)
		: : "memory", "cc");
}
#define clear_user_page(page, vaddr, pg) clear_page(page)
#define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
#define vma_alloc_zeroed_movable_folio(vma, vaddr) \
	vma_alloc_folio(GFP_HIGHUSER_MOVABLE | __GFP_ZERO, 0, vma, vaddr)
#ifdef CONFIG_STRICT_MM_TYPECHECKS
#define STRICT_MM_TYPECHECKS
#endif
#ifdef STRICT_MM_TYPECHECKS
typedef struct { unsigned long pgprot; } pgprot_t;
typedef struct { unsigned long pgste; } pgste_t;
typedef struct { unsigned long pte; } pte_t;
typedef struct { unsigned long pmd; } pmd_t;
typedef struct { unsigned long pud; } pud_t;
typedef struct { unsigned long p4d; } p4d_t;
typedef struct { unsigned long pgd; } pgd_t;
#define DEFINE_PGVAL_FUNC(name)						\
static __always_inline unsigned long name ## _val(name ## _t name)	\
{									\
	return name.name;						\
}
#else /* STRICT_MM_TYPECHECKS */
typedef unsigned long pgprot_t;
typedef unsigned long pgste_t;
typedef unsigned long pte_t;
typedef unsigned long pmd_t;
typedef unsigned long pud_t;
typedef unsigned long p4d_t;
typedef unsigned long pgd_t;
#define DEFINE_PGVAL_FUNC(name)						\
static __always_inline unsigned long name ## _val(name ## _t name)	\
{									\
	return name;							\
}
#endif /* STRICT_MM_TYPECHECKS */
DEFINE_PGVAL_FUNC(pgprot)
DEFINE_PGVAL_FUNC(pgste)
DEFINE_PGVAL_FUNC(pte)
DEFINE_PGVAL_FUNC(pmd)
DEFINE_PGVAL_FUNC(pud)
DEFINE_PGVAL_FUNC(p4d)
DEFINE_PGVAL_FUNC(pgd)
typedef pte_t *pgtable_t;
#define __pgprot(x) ((pgprot_t) { (x) } )
#define __pgste(x) ((pgste_t) { (x) } )
#define __pte(x) ((pte_t) { (x) } )
#define __pmd(x) ((pmd_t) { (x) } )
#define __pud(x) ((pud_t) { (x) } )
#define __p4d(x) ((p4d_t) { (x) } )
#define __pgd(x) ((pgd_t) { (x) } )
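/*
 * Editor's note: with or without STRICT_MM_TYPECHECKS the accessors
 * round-trip, e.g. pte_val(__pte(0x2f)) == 0x2f. The struct-wrapped
 * variant only turns accidental mixing of the different table types
 * (pte_t vs pmd_t and so on) into a compile error.
 */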
static inline void page_set_storage_key(unsigned long addr,
					unsigned char skey, int mapped)
{
	if (!mapped) {
		/* SSKE with the nonquiescing (NQ) control, encoded via .insn */
		asm volatile(
			"	.insn	rrf,0xb22b0000,%[skey],%[addr],8,0"
			:
			: [skey] "d" (skey), [addr] "a" (addr)
			: "memory");
	} else {
		asm volatile(
			"	sske	%[skey],%[addr]"
			:
			: [skey] "d" (skey), [addr] "a" (addr)
			: "memory");
	}
}
static inline unsigned char page_get_storage_key(unsigned long addr)
{
	unsigned char skey;

	asm volatile("iske %0,%1" : "=d" (skey) : "a" (addr));
	return skey;
}
static inline int page_reset_referenced(unsigned long addr)
{
	int cc;

	asm volatile(
		"	rrbe	0,%[addr]\n"
		CC_IPM(cc)
		: CC_OUT(cc, cc)
		: [addr] "a" (addr)
		: CC_CLOBBER);
	return CC_TRANSFORM(cc);
}
/* Bits in the storage key */
#define _PAGE_CHANGED 0x02 /* HW changed bit */
#define _PAGE_REFERENCED 0x04 /* HW referenced bit */
#define _PAGE_FP_BIT 0x08 /* HW fetch protection bit */
#define _PAGE_ACC_BITS 0xf0 /* HW access control bits */
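/*
 * Example (editor's sketch): the referenced state of a page can be read
 * non-destructively with page_get_storage_key() and cleared with
 * page_reset_referenced(); the returned condition code encodes the
 * previous referenced and changed bits:
 *
 *	unsigned char skey = page_get_storage_key(addr);
 *	bool young = skey & _PAGE_REFERENCED;
 *	bool dirty = skey & _PAGE_CHANGED;
 *	int cc = page_reset_referenced(addr);
 */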
struct page;
struct folio;
void arch_free_page(struct page *page, int order);
void arch_alloc_page(struct page *page, int order);
static inline int devmem_is_allowed(unsigned long pfn)
{
	return 0;
}
#define HAVE_ARCH_FREE_PAGE
#define HAVE_ARCH_ALLOC_PAGE
int arch_make_folio_accessible(struct folio *folio);
#define HAVE_ARCH_MAKE_FOLIO_ACCESSIBLE
struct vm_layout {
	unsigned long kaslr_offset;
	unsigned long kaslr_offset_phys;
	unsigned long identity_base;
	unsigned long identity_size;
};
extern struct vm_layout vm_layout;
#define __kaslr_offset vm_layout.kaslr_offset
#define __kaslr_offset_phys vm_layout.kaslr_offset_phys
#ifdef CONFIG_RANDOMIZE_IDENTITY_BASE
#define __identity_base vm_layout.identity_base
#else
#define __identity_base 0UL
#endif
#define ident_map_size vm_layout.identity_size
static inline unsigned long kaslr_offset(void)
{
	return __kaslr_offset;
}
extern int __kaslr_enabled;
static inline int kaslr_enabled(void)
{
	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE))
		return __kaslr_enabled;
	return 0;
}
#define __PAGE_OFFSET __identity_base
#define PAGE_OFFSET __PAGE_OFFSET
#ifdef __DECOMPRESSOR
#define __pa_nodebug(x) ((unsigned long)(x))
#define __pa(x) __pa_nodebug(x)
#define __pa32(x) __pa(x)
#define __va(x) ((void *)(unsigned long)(x))
#else /* __DECOMPRESSOR */
static inline unsigned long __pa_nodebug(unsigned long x)
{
	if (x < __kaslr_offset)
		return x - __identity_base;
	return x - __kaslr_offset + __kaslr_offset_phys;
}
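/*
 * Worked example (editor's note, illustrative values only): with
 * __identity_base = 0x100000000, __kaslr_offset = 0x3ffe0000000 and
 * __kaslr_offset_phys = 0x40000000, an identity-map pointer such as
 * 0x100002000 (below __kaslr_offset) translates to physical 0x2000,
 * while the kernel image address 0x3ffe0100000 translates to 0x40100000.
 */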
#ifdef CONFIG_DEBUG_VIRTUAL
unsigned long __phys_addr(unsigned long x, bool is_31bit);
#else /* CONFIG_DEBUG_VIRTUAL */
static inline unsigned long __phys_addr(unsigned long x, bool is_31bit)
{
	return __pa_nodebug(x);
}
#endif /* CONFIG_DEBUG_VIRTUAL */
#define __pa(x) __phys_addr((unsigned long)(x), false)
#define __pa32(x) __phys_addr((unsigned long)(x), true)
#define __va(x) ((void *)((unsigned long)(x) + __identity_base))
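/*
 * Note (editor's comment): __va() only maps a physical address back into
 * the identity mapping; a physical address inside the kernel image yields
 * its identity-map alias, not the __kaslr_offset based image address.
 */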
#endif /* __DECOMPRESSOR */
#define phys_to_pfn(phys) ((phys) >> PAGE_SHIFT)
#define pfn_to_phys(pfn) ((pfn) << PAGE_SHIFT)
#define phys_to_folio(phys) page_folio(phys_to_page(phys))
#define folio_to_phys(folio) pfn_to_phys(folio_pfn(folio))
static inline void *pfn_to_virt(unsigned long pfn)
{
	return __va(pfn_to_phys(pfn));
}
static inline unsigned long virt_to_pfn(const void *kaddr)
{
	return phys_to_pfn(__pa(kaddr));
}
#define pfn_to_kaddr(pfn) pfn_to_virt(pfn)
#define virt_to_page(kaddr) pfn_to_page(virt_to_pfn(kaddr))
#define page_to_virt(page) pfn_to_virt(page_to_pfn(page))
#define virt_addr_valid(kaddr) pfn_valid(phys_to_pfn(__pa_nodebug((unsigned long)(kaddr))))
#define VM_DATA_DEFAULT_FLAGS VM_DATA_FLAGS_NON_EXEC
#endif /* !__ASSEMBLER__ */
#include <asm-generic/memory_model.h>
#include <asm-generic/getorder.h>
#define AMODE31_SIZE (3 * PAGE_SIZE)
#define KERNEL_IMAGE_SIZE (512 * 1024 * 1024)
#define __NO_KASLR_START_KERNEL CONFIG_KERNEL_IMAGE_BASE
#define __NO_KASLR_END_KERNEL (__NO_KASLR_START_KERNEL + KERNEL_IMAGE_SIZE)
#define TEXT_OFFSET 0x100000
#endif /* _S390_PAGE_H */