linux/arch/um/include/asm/processor-generic.h
Johannes Berg d1d7f01f7c um: mark rodata read-only and implement _nofault accesses
Mark read-only data as actually read-only (a simple mprotect), and,
to be able to test it, also implement _nofault accesses. This works
by setting up a new "segv_continue" pointer in current; when we then
hit a segfault, we change the signal return context so that execution
continues at that address. The code using this sets it up so that it
jumps to a label and aborts the access that way, returning -EFAULT.
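
The access side of this can be sketched outside the kernel. The
following is a minimal user-space illustration, not the UML
implementation: sigsetjmp()/siglongjmp() stand in for rewriting the
signal return context, and the names segv_env, segv_armed and
nofault_read() are placeholders for the idea, not real kernel API.

#include <setjmp.h>
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static sigjmp_buf segv_env;	/* stands in for current->thread.segv_continue */
static volatile sig_atomic_t segv_armed;

static void segv_handler(int sig)
{
	if (segv_armed) {
		segv_armed = 0;
		/* "change the signal return context": resume at the
		 * recovery point instead of re-running the access */
		siglongjmp(segv_env, 1);
	}
	_exit(1);	/* a real, unexpected fault */
}

/* Read *src; return 0 on success or -14 (-EFAULT) on a fault. */
static int nofault_read(const int *src, int *dst)
{
	if (sigsetjmp(segv_env, 1))
		return -14;	/* reached via the fault handler */

	segv_armed = 1;
	*dst = *(const volatile int *)src;	/* the access that may fault */
	segv_armed = 0;
	return 0;
}

int main(void)
{
	struct sigaction sa = { 0 };
	int val = 42;

	sa.sa_handler = segv_handler;
	sigemptyset(&sa.sa_mask);
	sigaction(SIGSEGV, &sa, NULL);

	printf("%d\n", nofault_read(&val, &val));	/* prints 0 */
	printf("%d\n", nofault_read((int *)8, &val));	/* prints -14 */
	return 0;
}

The kernel achieves the same effect without setjmp: the SIGSEGV
handler patches the instruction pointer in the saved signal context,
so the interrupted code "returns" at the recovery label directly.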

It's possible to optimize the ___backtrack_faulted() helper by using
asm goto (compiler version dependent) and/or gcc's &&label extension
(not sure if clang has it), but in at least one attempt I made, the
&& variant caused the compiler to not load -EFAULT into the register
when jumping to the &&label from the fault handler. So leave it like
this for now.
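
For reference, the &&label construct is GCC's "labels as values"
extension (clang implements it too). Below is a hypothetical
reconstruction of that attempted variant, not merged code, written to
show the hazard: the only control flow the compiler can see reaching
the label is the fall-through path where ret is already 0, so it may
assume ret == 0 there and never materialize -EFAULT.

static void *segv_continue;	/* stands in for current->thread.segv_continue */

static int nofault_read_labels(const int *src, int *dst)
{
	int ret = -14;			/* -EFAULT */

	segv_continue = &&fault;	/* arm the recovery point */
	*dst = *(const volatile int *)src;
	segv_continue = NULL;
	ret = 0;
fault:
	/* the fault handler would jump here with ret still -14, but
	 * the compiler only sees the fall-through where ret is 0 */
	return ret;
}

One workaround is to force ret through something the compiler cannot
reason away (a volatile, or an asm barrier); the patch instead keeps
the ___backtrack_faulted() helper as-is for now.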

Signed-off-by: Johannes Berg <johannes.berg@intel.com>
Co-developed-by: Benjamin Berg <benjamin.berg@intel.com>
Signed-off-by: Benjamin Berg <benjamin.berg@intel.com>
Link: https://patch.msgid.link/20250210160926.420133-2-benjamin@sipsolutions.net
Signed-off-by: Johannes Berg <johannes.berg@intel.com>
2025-03-18 11:03:14 +01:00

/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 */

#ifndef __UM_PROCESSOR_GENERIC_H
#define __UM_PROCESSOR_GENERIC_H

struct pt_regs;
struct task_struct;

#include <asm/ptrace.h>
#include <sysdep/archsetjmp.h>
#include <linux/prefetch.h>
#include <asm/cpufeatures.h>

struct mm_struct;

struct thread_struct {
	struct pt_regs *segv_regs;
	struct task_struct *prev_sched;
	struct arch_thread arch;
	jmp_buf switch_buf;
	struct {
		struct {
			int (*proc)(void *);
			void *arg;
		} thread;
	} request;

	void *segv_continue;

	/* Contains variable sized FP registers */
	struct pt_regs regs;
};

#define INIT_THREAD \
{ \
	.regs		= EMPTY_REGS,	\
	.prev_sched	= NULL,		\
	.arch		= INIT_ARCH_THREAD, \
	.request	= { } \
}

/*
 * User space process size: 3GB (default).
 */
extern unsigned long task_size;

#define TASK_SIZE	(task_size)

#undef STACK_TOP
#undef STACK_TOP_MAX

extern unsigned long stacksizelim;

#define STACK_ROOM	(stacksizelim)
#define STACK_TOP	(TASK_SIZE - 2 * PAGE_SIZE)
#define STACK_TOP_MAX	STACK_TOP

/* This decides where the kernel will search for a free chunk of vm
 * space during mmap's.
 */
#define TASK_UNMAPPED_BASE	(0x40000000)

extern void start_thread(struct pt_regs *regs, unsigned long entry,
			 unsigned long stack);

struct cpuinfo_um {
	unsigned long loops_per_jiffy;
	int ipi_pipe[2];
	int cache_alignment;
	union {
		__u32 x86_capability[NCAPINTS + NBUGINTS];
		unsigned long x86_capability_alignment;
	};
};

extern struct cpuinfo_um boot_cpu_data;

#define cpu_data(cpu)		boot_cpu_data
#define current_cpu_data	boot_cpu_data
#define cache_line_size()	(boot_cpu_data.cache_alignment)

#define KSTK_REG(tsk, reg) get_thread_reg(reg, &tsk->thread.switch_buf)

extern unsigned long __get_wchan(struct task_struct *p);

#endif