Merge tag 'parisc-for-6.17-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/deller/parisc-linux

Pull parisc updates from Helge Deller:

 - The parisc kernel wrongly allows reading from read-protected
   userspace memory without faulting, e.g. when userspace uses
   mprotect() to read-protect a memory area and then uses a pointer to
   this memory in a write(2, addr, 1) syscall (see the userspace
   sketch after this list).

   To fix this issue, Dave Anglin developed a set of patches which use
   the proberi assembler instruction to additionally check read access
   permissions at runtime.

 - Randy Dunlap contributed two patches to fix a minor typo and to
   explain why a 32-bit compiler is needed although a 64-bit kernel is
   built.
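
A minimal userspace sketch of the scenario in the first item above
(the mapping setup is illustrative, not taken from the patches): once
the page is read-protected, write(2, addr, 1) must fail with EFAULT
instead of letting the kernel read the page.

	#include <errno.h>
	#include <stdio.h>
	#include <sys/mman.h>
	#include <unistd.h>

	int main(void)
	{
		size_t pgsz = (size_t)sysconf(_SC_PAGESIZE);
		char *buf = mmap(NULL, pgsz, PROT_READ | PROT_WRITE,
				 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

		if (buf == MAP_FAILED)
			return 1;
		buf[0] = 'x';

		/* Read-protect the page; it stays mapped but unreadable. */
		if (mprotect(buf, pgsz, PROT_NONE))
			return 1;

		/* The kernel must fault while reading buf on our behalf. */
		if (write(2, buf, 1) < 0 && errno == EFAULT)
			puts("EFAULT as expected");
		else
			puts("kernel read the read-protected page");
		return 0;
	}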

* tag 'parisc-for-6.17-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/deller/parisc-linux:
  parisc: Revise __get_user() to probe user read access
  parisc: Revise gateway LWS calls to probe user read access
  parisc: Drop WARN_ON_ONCE() from flush_cache_vmap
  parisc: Try to fixup kernel exception in bad_area_nosemaphore path of do_page_fault()
  parisc: Define and use set_pte_at()
  parisc: Rename pte_needs_flush() to pte_needs_cache_flush() in cache.c
  parisc: Check region is readable by user in raw_copy_from_user()
  parisc: Update comments in make_insert_tlb
  parisc: Makefile: explain that 64BIT requires both 32-bit and 64-bit compilers
  parisc: Makefile: fix a typo in palo.conf
commit 0905809b38
Linus Torvalds, 2025-08-01 16:15:53 -07:00
9 changed files with 112 additions and 26 deletions

@@ -39,7 +39,9 @@ endif
export LD_BFD
# Set default 32 bits cross compilers for vdso
# Set default 32 bits cross compilers for vdso.
# This means that for 64BIT, both the 64-bit tools and the 32-bit tools
# need to be in the path.
CC_ARCHES_32 = hppa hppa2.0 hppa1.1
CC_SUFFIXES = linux linux-gnu unknown-linux-gnu suse-linux
CROSS32_COMPILE := $(call cc-cross-prefix, \
@@ -139,7 +141,7 @@ palo lifimage: vmlinuz
fi
@if test ! -f "$(PALOCONF)"; then \
cp $(srctree)/arch/parisc/defpalo.conf $(objtree)/palo.conf; \
echo 'A generic palo config file ($(objree)/palo.conf) has been created for you.'; \
echo 'A generic palo config file ($(objtree)/palo.conf) has been created for you.'; \
echo 'You should check it and re-run "make palo".'; \
echo 'WARNING: the "lifimage" file is now placed in this directory by default!'; \
false; \

@@ -276,7 +276,7 @@ extern unsigned long *empty_zero_page;
#define pte_none(x) (pte_val(x) == 0)
#define pte_present(x) (pte_val(x) & _PAGE_PRESENT)
#define pte_user(x) (pte_val(x) & _PAGE_USER)
#define pte_clear(mm, addr, xp) set_pte(xp, __pte(0))
#define pte_clear(mm, addr, xp) set_pte_at((mm), (addr), (xp), __pte(0))
#define pmd_flag(x) (pmd_val(x) & PxD_FLAG_MASK)
#define pmd_address(x) ((unsigned long)(pmd_val(x) &~ PxD_FLAG_MASK) << PxD_VALUE_SHIFT)
@@ -392,6 +392,7 @@ static inline void set_ptes(struct mm_struct *mm, unsigned long addr,
}
}
#define set_ptes set_ptes
#define set_pte_at(mm, addr, ptep, pte) set_ptes(mm, addr, ptep, pte, 1)
/* Used for deferring calls to flush_dcache_page() */
@@ -456,7 +457,7 @@ static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned
if (!pte_young(pte)) {
return 0;
}
set_pte(ptep, pte_mkold(pte));
set_pte_at(vma->vm_mm, addr, ptep, pte_mkold(pte));
return 1;
}
@@ -466,7 +467,7 @@ pte_t ptep_clear_flush(struct vm_area_struct *vma, unsigned long addr, pte_t *pt
struct mm_struct;
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
set_pte(ptep, pte_wrprotect(*ptep));
set_pte_at(mm, addr, ptep, pte_wrprotect(*ptep));
}
#define pte_same(A,B) (pte_val(A) == pte_val(B))

@@ -32,6 +32,34 @@
pa; \
})
/**
* prober_user() - Probe user read access
* @sr: Space register.
* @va: Virtual address.
*
* Return: Non-zero if address is accessible.
*
* Due to the way _PAGE_READ is handled in TLB entries, we need
* a special check to determine whether a user address is accessible.
* The ldb instruction does the initial access check. If it is
* successful, the probe instruction checks user access rights.
*/
#define prober_user(sr, va) ({ \
unsigned long read_allowed; \
__asm__ __volatile__( \
"copy %%r0,%0\n" \
"8:\tldb 0(%%sr%1,%2),%%r0\n" \
"\tproberi (%%sr%1,%2),%3,%0\n" \
"9:\n" \
ASM_EXCEPTIONTABLE_ENTRY(8b, 9b, \
"or %%r0,%%r0,%%r0") \
: "=&r" (read_allowed) \
: "i" (sr), "r" (va), "i" (PRIV_USER) \
: "memory" \
); \
read_allowed; \
})
#define CR_EIEM 15 /* External Interrupt Enable Mask */
#define CR_CR16 16 /* CR16 Interval Timer */
#define CR_EIRR 23 /* External Interrupt Request Register */
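
As a short, hedged usage sketch (not part of the patches): from C code
the helper would be called after loading the user space register, the
same way the raw_copy_from_user() change later in this series does it
with mtsp() and SR_TEMP1.

	/* Probe a single user byte for read access before the kernel
	 * touches it; false means the address is not readable by user
	 * code (unmapped or read-protected). */
	static bool user_byte_readable(const void __user *uaddr)
	{
		mtsp(get_user_space(), SR_TEMP1);
		return prober_user(SR_TEMP1, (unsigned long)uaddr) != 0;
	}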

@@ -42,9 +42,24 @@
__gu_err; \
})
#define __get_user(val, ptr) \
({ \
__get_user_internal(SR_USER, val, ptr); \
#define __probe_user_internal(sr, error, ptr) \
({ \
__asm__("\tproberi (%%sr%1,%2),%3,%0\n" \
"\tcmpiclr,= 1,%0,%0\n" \
"\tldi %4,%0\n" \
: "=r"(error) \
: "i"(sr), "r"(ptr), "i"(PRIV_USER), \
"i"(-EFAULT)); \
})
#define __get_user(val, ptr) \
({ \
register long __gu_err; \
\
__gu_err = __get_user_internal(SR_USER, val, ptr); \
if (likely(!__gu_err)) \
__probe_user_internal(SR_USER, __gu_err, ptr); \
__gu_err; \
})
#define __get_user_asm(sr, val, ldx, ptr) \
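
For context, __get_user() is the uaccess variant that assumes the
caller has already range-checked the pointer, so the added proberi
probe supplies the read-permission check on this path.  A hedged,
hypothetical caller (not from the patches) could look like:

	#include <linux/uaccess.h>

	/* Copy one byte from userspace; returns 0 or -EFAULT. */
	static int read_user_byte(const char __user *uptr, char *out)
	{
		char c;

		if (!access_ok(uptr, 1))
			return -EFAULT;
		if (__get_user(c, uptr))
			return -EFAULT;
		*out = c;
		return 0;
	}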

@@ -429,7 +429,7 @@ static inline pte_t *get_ptep(struct mm_struct *mm, unsigned long addr)
return ptep;
}
static inline bool pte_needs_flush(pte_t pte)
static inline bool pte_needs_cache_flush(pte_t pte)
{
return (pte_val(pte) & (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_NO_CACHE))
== (_PAGE_PRESENT | _PAGE_ACCESSED);
@@ -630,7 +630,7 @@ static void flush_cache_page_if_present(struct vm_area_struct *vma,
ptep = get_ptep(vma->vm_mm, vmaddr);
if (ptep) {
pte = ptep_get(ptep);
needs_flush = pte_needs_flush(pte);
needs_flush = pte_needs_cache_flush(pte);
pte_unmap(ptep);
}
if (needs_flush)
@@ -841,7 +841,7 @@ void flush_cache_vmap(unsigned long start, unsigned long end)
}
vm = find_vm_area((void *)start);
if (WARN_ON_ONCE(!vm)) {
if (!vm) {
flush_cache_all();
return;
}

@@ -499,6 +499,12 @@
* this happens is quite subtle, read below */
.macro make_insert_tlb spc,pte,prot,tmp
space_to_prot \spc \prot /* create prot id from space */
#if _PAGE_SPECIAL_BIT == _PAGE_DMB_BIT
/* need to drop DMB bit, as it's used as SPECIAL flag */
depi 0,_PAGE_SPECIAL_BIT,1,\pte
#endif
/* The following is the real subtlety. This is depositing
* T <-> _PAGE_REFTRAP
* D <-> _PAGE_DIRTY
@@ -511,17 +517,18 @@
* Finally, _PAGE_READ goes in the top bit of PL1 (so we
* trigger an access rights trap in user space if the user
* tries to read an unreadable page */
#if _PAGE_SPECIAL_BIT == _PAGE_DMB_BIT
/* need to drop DMB bit, as it's used as SPECIAL flag */
depi 0,_PAGE_SPECIAL_BIT,1,\pte
#endif
depd \pte,8,7,\prot
/* PAGE_USER indicates the page can be read with user privileges,
* so deposit X1|11 to PL1|PL2 (remember the upper bit of PL1
* contains _PAGE_READ) */
* contains _PAGE_READ). While the kernel can't directly write
* user pages which have _PAGE_WRITE zero, it can read pages
* which have _PAGE_READ zero (PL <= PL1). Thus, the kernel
* exception fault handler doesn't trigger when reading pages
* that aren't user read accessible */
extrd,u,*= \pte,_PAGE_USER_BIT+32,1,%r0
depdi 7,11,3,\prot
/* If we're a gateway page, drop PL2 back to zero for promotion
* to kernel privilege (so we can execute the page as kernel).
* Any privilege promotion page always denies read and write */

@@ -613,6 +613,9 @@ lws_compare_and_swap32:
lws_compare_and_swap:
/* Trigger memory reference interruptions without writing to memory */
1: ldw 0(%r26), %r28
proberi (%r26), PRIV_USER, %r28
comb,=,n %r28, %r0, lws_fault /* backwards, likely not taken */
nop
2: stbys,e %r0, 0(%r26)
/* Calculate 8-bit hash index from virtual address */
@@ -767,6 +770,9 @@ cas2_lock_start:
copy %r26, %r28
depi_safe 0, 31, 2, %r28
10: ldw 0(%r28), %r1
proberi (%r28), PRIV_USER, %r1
comb,=,n %r1, %r0, lws_fault /* backwards, likely not taken */
nop
11: stbys,e %r0, 0(%r28)
/* Calculate 8-bit hash index from virtual address */
@@ -951,41 +957,47 @@ atomic_xchg_begin:
/* 8-bit exchange */
1: ldb 0(%r24), %r20
proberi (%r24), PRIV_USER, %r20
comb,=,n %r20, %r0, lws_fault /* backwards, likely not taken */
nop
copy %r23, %r20
depi_safe 0, 31, 2, %r20
b atomic_xchg_start
2: stbys,e %r0, 0(%r20)
nop
nop
nop
/* 16-bit exchange */
3: ldh 0(%r24), %r20
proberi (%r24), PRIV_USER, %r20
comb,=,n %r20, %r0, lws_fault /* backwards, likely not taken */
nop
copy %r23, %r20
depi_safe 0, 31, 2, %r20
b atomic_xchg_start
4: stbys,e %r0, 0(%r20)
nop
nop
nop
/* 32-bit exchange */
5: ldw 0(%r24), %r20
proberi (%r24), PRIV_USER, %r20
comb,=,n %r20, %r0, lws_fault /* backwards, likely not taken */
nop
b atomic_xchg_start
6: stbys,e %r0, 0(%r23)
nop
nop
nop
nop
nop
/* 64-bit exchange */
#ifdef CONFIG_64BIT
7: ldd 0(%r24), %r20
proberi (%r24), PRIV_USER, %r20
comb,=,n %r20, %r0, lws_fault /* backwards, likely not taken */
nop
8: stdby,e %r0, 0(%r23)
#else
7: ldw 0(%r24), %r20
8: ldw 4(%r24), %r20
proberi (%r24), PRIV_USER, %r20
comb,=,n %r20, %r0, lws_fault /* backwards, likely not taken */
nop
copy %r23, %r20
depi_safe 0, 31, 2, %r20
9: stbys,e %r0, 0(%r20)

@@ -12,6 +12,7 @@
#include <linux/module.h>
#include <linux/compiler.h>
#include <linux/uaccess.h>
#include <linux/mm.h>
#define get_user_space() mfsp(SR_USER)
#define get_kernel_space() SR_KERNEL
@@ -32,9 +32,25 @@ EXPORT_SYMBOL(raw_copy_to_user);
unsigned long raw_copy_from_user(void *dst, const void __user *src,
unsigned long len)
{
unsigned long start = (unsigned long) src;
unsigned long end = start + len;
unsigned long newlen = len;
mtsp(get_user_space(), SR_TEMP1);
mtsp(get_kernel_space(), SR_TEMP2);
return pa_memcpy(dst, (void __force *)src, len);
/* Check region is user accessible */
if (start)
while (start < end) {
if (!prober_user(SR_TEMP1, start)) {
newlen = (start - (unsigned long) src);
break;
}
start += PAGE_SIZE;
/* align to page boundary which may have different permissions */
start = PAGE_ALIGN_DOWN(start);
}
return len - newlen + pa_memcpy(dst, (void __force *)src, newlen);
}
EXPORT_SYMBOL(raw_copy_from_user);
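
Like copy_from_user(), the routine reports the number of bytes that
could not be copied, so a partially unreadable region now shows up as
a non-zero return value.  A hedged caller sketch (buffer names are
illustrative):

	#include <linux/uaccess.h>

	/* Returns 0 on success, -EFAULT if any part of the user buffer
	 * could not be read (copy_from_user() returns the number of
	 * bytes left uncopied). */
	static int fetch_user_buf(void *kbuf, const void __user *ubuf, size_t len)
	{
		if (copy_from_user(kbuf, ubuf, len))
			return -EFAULT;
		return 0;
	}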

@@ -363,6 +363,10 @@ bad_area:
mmap_read_unlock(mm);
bad_area_nosemaphore:
if (!user_mode(regs) && fixup_exception(regs)) {
return;
}
if (user_mode(regs)) {
int signo, si_code;