// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2021 Benjamin Berg <benjamin@sipsolutions.net>
 * Copyright (C) 2015 Thomas Meyer (thomas@m3y3r.de)
 * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 */

#include <stdlib.h>
#include <stdbool.h>
#include <unistd.h>
#include <sched.h>
#include <errno.h>
#include <string.h>
#include <fcntl.h>
#include <mem_user.h>
#include <sys/mman.h>
#include <sys/wait.h>
#include <sys/stat.h>
#include <sys/socket.h>
#include <asm/unistd.h>
#include <as-layout.h>
#include <init.h>
#include <kern_util.h>
#include <mem.h>
#include <os.h>
#include <ptrace_user.h>
#include <registers.h>
#include <skas.h>
#include <sysdep/stub.h>
#include <sysdep/mcontext.h>
#include <linux/futex.h>
#include <linux/threads.h>
#include <timetravel.h>
#include <asm-generic/rwonce.h>
#include "../internal.h"

int is_skas_winch(int pid, int fd, void *data)
{
	return pid == getpgrp();
}

static const char *ptrace_reg_name(int idx)
{
#define R(n) case HOST_##n: return #n

	switch (idx) {
#ifdef __x86_64__
	R(BX);
	R(CX);
	R(DI);
	R(SI);
	R(DX);
	R(BP);
	R(AX);
	R(R8);
	R(R9);
	R(R10);
	R(R11);
	R(R12);
	R(R13);
	R(R14);
	R(R15);
	R(ORIG_AX);
	R(CS);
	R(SS);
	R(EFLAGS);
#elif defined(__i386__)
	R(IP);
	R(SP);
	R(EFLAGS);
	R(AX);
	R(BX);
	R(CX);
	R(DX);
	R(SI);
	R(DI);
	R(BP);
	R(CS);
	R(SS);
	R(DS);
	R(FS);
	R(ES);
	R(GS);
	R(ORIG_AX);
#endif
	}
	return "";
}

static int ptrace_dump_regs(int pid)
{
	unsigned long regs[MAX_REG_NR];
	int i;

	if (ptrace(PTRACE_GETREGS, pid, 0, regs) < 0)
		return -errno;

	printk(UM_KERN_ERR "Stub registers -\n");
	for (i = 0; i < ARRAY_SIZE(regs); i++) {
		const char *regname = ptrace_reg_name(i);

		printk(UM_KERN_ERR "\t%s\t(%2d): %lx\n", regname, i, regs[i]);
	}

	return 0;
}

/*
 * Signals that are OK to receive in the stub - we'll just continue it.
 * SIGWINCH will happen when UML is inside a detached screen.
 */
#define STUB_SIG_MASK ((1 << SIGALRM) | (1 << SIGWINCH))

/* Signals that the stub will finish with - anything else is an error */
#define STUB_DONE_MASK (1 << SIGTRAP)

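/*
 * Wait for the ptrace stub to stop with SIGTRAP, continuing it through
 * the benign signals in STUB_SIG_MASK; any other stop is a fatal error.
 */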
void wait_stub_done(int pid)
{
	int n, status, err;

	while (1) {
		CATCH_EINTR(n = waitpid(pid, &status, WUNTRACED | __WALL));
		if ((n < 0) || !WIFSTOPPED(status))
			goto bad_wait;

		if (((1 << WSTOPSIG(status)) & STUB_SIG_MASK) == 0)
			break;

		err = ptrace(PTRACE_CONT, pid, 0, 0);
		if (err) {
			printk(UM_KERN_ERR "%s : continue failed, errno = %d\n",
			       __func__, errno);
			fatal_sigsegv();
		}
	}

	if (((1 << WSTOPSIG(status)) & STUB_DONE_MASK) != 0)
		return;

bad_wait:
	err = ptrace_dump_regs(pid);
	if (err)
		printk(UM_KERN_ERR "Failed to get registers from stub, errno = %d\n",
		       -err);
	printk(UM_KERN_ERR "%s : failed to wait for SIGTRAP, pid = %d, n = %d, errno = %d, status = 0x%x\n",
	       __func__, pid, n, errno, status);
	fatal_sigsegv();
}

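/*
 * SECCOMP-mode counterpart of wait_stub_done(). Kernel and stub hand
 * control back and forth through the futex in the shared stub_data
 * page: if the stub is not @running, any FDs needed by the queued
 * syscalls are first passed over the socket (SCM_RIGHTS), then the
 * futex is set to FUTEX_IN_CHILD and the stub is woken; afterwards we
 * sleep until the stub flips the futex back. With @wait_sigsys,
 * spurious SIGALRM wakeups are retried until the stub reports SIGSYS.
 */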
void wait_stub_done_seccomp(struct mm_id *mm_idp, int running, int wait_sigsys)
{
	struct stub_data *data = (void *)mm_idp->stack;
	int ret;

	do {
		const char byte = 0;
		struct iovec iov = {
			.iov_base = (void *)&byte,
			.iov_len = sizeof(byte),
		};
		union {
			char data[CMSG_SPACE(sizeof(mm_idp->syscall_fd_map))];
			struct cmsghdr align;
		} ctrl;
		struct msghdr msgh = {
			.msg_iov = &iov,
			.msg_iovlen = 1,
		};

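		/*
		 * The stub is currently parked: pass along any FDs the
		 * queued syscalls will need, then hand the shared data
		 * page over to the child and wake it.
		 */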
		if (!running) {
			if (mm_idp->syscall_fd_num) {
				unsigned int fds_size =
					sizeof(int) * mm_idp->syscall_fd_num;
				struct cmsghdr *cmsg;

				msgh.msg_control = ctrl.data;
				msgh.msg_controllen = CMSG_SPACE(fds_size);
				cmsg = CMSG_FIRSTHDR(&msgh);
				cmsg->cmsg_level = SOL_SOCKET;
				cmsg->cmsg_type = SCM_RIGHTS;
				cmsg->cmsg_len = CMSG_LEN(fds_size);
				memcpy(CMSG_DATA(cmsg), mm_idp->syscall_fd_map,
				       fds_size);

				CATCH_EINTR(syscall(__NR_sendmsg, mm_idp->sock,
						    &msgh, 0));
			}

			data->signal = 0;
			data->futex = FUTEX_IN_CHILD;
			CATCH_EINTR(syscall(__NR_futex, &data->futex,
					    FUTEX_WAKE, 1, NULL, NULL, 0));
		}

		do {
			/*
			 * We need to check whether the child is still alive
			 * before and after the FUTEX_WAIT call. Before, in
			 * case it just died but we still updated data->futex
			 * to FUTEX_IN_CHILD. And after, in case it died while
			 * we were waiting (and SIGCHLD woke us up, see the
			 * IRQ handler in mmu.c).
			 *
			 * Either way, if PID is negative, then we have no
			 * choice but to kill the task.
			 */
			if (__READ_ONCE(mm_idp->pid) < 0)
				goto out_kill;

			ret = syscall(__NR_futex, &data->futex,
				      FUTEX_WAIT, FUTEX_IN_CHILD,
				      NULL, NULL, 0);
			if (ret < 0 && errno != EINTR && errno != EAGAIN) {
				printk(UM_KERN_ERR "%s : FUTEX_WAIT failed, errno = %d\n",
				       __func__, errno);
				goto out_kill;
			}
		} while (data->futex == FUTEX_IN_CHILD);

		if (__READ_ONCE(mm_idp->pid) < 0)
			goto out_kill;

		running = 0;

		/* We may receive a SIGALRM before SIGSYS, iterate again. */
	} while (wait_sigsys && data->signal == SIGALRM);

	if (data->mctx_offset > sizeof(data->sigstack) - sizeof(mcontext_t)) {
		printk(UM_KERN_ERR "%s : invalid mcontext offset\n", __func__);
		goto out_kill;
	}

	if (wait_sigsys && data->signal != SIGSYS) {
		printk(UM_KERN_ERR "%s : expected SIGSYS but got %d\n",
		       __func__, data->signal);
		goto out_kill;
	}

	return;

out_kill:
	printk(UM_KERN_ERR "%s : failed to wait for stub, pid = %d, errno = %d\n",
	       __func__, mm_idp->pid, errno);
	/* This is not true inside start_userspace */
	if (current_mm_id() == mm_idp)
		fatal_sigsegv();
}

extern unsigned long current_stub_stack(void);

static void get_skas_faultinfo(int pid, struct faultinfo *fi)
{
	int err;

	err = ptrace(PTRACE_CONT, pid, 0, SIGSEGV);
	if (err) {
		printk(UM_KERN_ERR "Failed to continue stub, pid = %d, "
		       "errno = %d\n", pid, errno);
		fatal_sigsegv();
	}
	wait_stub_done(pid);

	/*
	 * faultinfo is prepared by the stub_segv_handler at start of
	 * the stub stack page. We just have to copy it.
	 */
	memcpy(fi, (void *)current_stub_stack(), sizeof(*fi));
}

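/* Handle a syscall trap; the IP must never point into the stub itself. */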
static void handle_trap(struct uml_pt_regs *regs)
{
	if ((UPT_IP(regs) >= STUB_START) && (UPT_IP(regs) < STUB_END))
		fatal_sigsegv();

	handle_syscall(regs);
}

extern char __syscall_stub_start[];

static int stub_exe_fd;

struct tramp_data {
	struct stub_data *stub_data;
	/* 0 is inherited, 1 is the kernel side */
	int sockpair[2];
};

#ifndef CLOSE_RANGE_CLOEXEC
#define CLOSE_RANGE_CLOEXEC (1U << 2)
#endif

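/*
 * Child side of start_userspace(): runs on the temporary stack right
 * after clone(). It marks all inherited FDs CLOEXEC (keeping only the
 * memory FDs the stub may map), moves its socket end to stdin, sends
 * stub_init_data through the kernel-side end so the exec'd stub can
 * read it from stdin, and finally execveat()s the stub binary.
 */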
static int userspace_tramp(void *data)
{
	struct tramp_data *tramp_data = data;
	char *const argv[] = { "uml-userspace", NULL };
	unsigned long long offset;
	struct stub_init_data init_data = {
		.seccomp = using_seccomp,
		.stub_start = STUB_START,
	};
	struct iomem_region *iomem;
	int ret;

	if (using_seccomp) {
		init_data.signal_handler = STUB_CODE +
					   (unsigned long) stub_signal_interrupt -
					   (unsigned long) __syscall_stub_start;
		init_data.signal_restorer = STUB_CODE +
					    (unsigned long) stub_signal_restorer -
					    (unsigned long) __syscall_stub_start;
	} else {
		init_data.signal_handler = STUB_CODE +
					   (unsigned long) stub_segv_handler -
					   (unsigned long) __syscall_stub_start;
		init_data.signal_restorer = 0;
	}

	init_data.stub_code_fd = phys_mapping(uml_to_phys(__syscall_stub_start),
					      &offset);
	init_data.stub_code_offset = MMAP_OFFSET(offset);

	init_data.stub_data_fd = phys_mapping(uml_to_phys(tramp_data->stub_data),
					      &offset);
	init_data.stub_data_offset = MMAP_OFFSET(offset);

	/*
	 * Avoid leaking unneeded FDs to the stub by setting CLOEXEC on all FDs
	 * and then unsetting it on all memory related FDs.
	 * This is not strictly necessary from a safety perspective.
	 */
	syscall(__NR_close_range, 0, ~0U, CLOSE_RANGE_CLOEXEC);

	fcntl(init_data.stub_data_fd, F_SETFD, 0);

	/* In SECCOMP mode, these FDs are passed when needed */
	if (!using_seccomp) {
		for (iomem = iomem_regions; iomem; iomem = iomem->next)
			fcntl(iomem->fd, F_SETFD, 0);
	}

	/* dup2 signaling FD/socket to STDIN */
	if (dup2(tramp_data->sockpair[0], 0) < 0)
		exit(3);
	close(tramp_data->sockpair[0]);

	/* Write init_data and close write side */
	ret = write(tramp_data->sockpair[1], &init_data, sizeof(init_data));
	close(tramp_data->sockpair[1]);

	if (ret != sizeof(init_data))
		exit(4);

	/* Raw execveat for compatibility with older libc versions */
	syscall(__NR_execveat, stub_exe_fd, (unsigned long)"",
		(unsigned long)argv, NULL, AT_EMPTY_PATH);

	exit(5);
}

extern char stub_exe_start[];
extern char stub_exe_end[];

extern char *tempdir;

#define STUB_EXE_NAME_TEMPLATE "/uml-userspace-XXXXXX"

#ifndef MFD_EXEC
#define MFD_EXEC 0x0010U
#endif

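/*
 * Write the embedded stub binary out to an executable FD: preferably a
 * sealed memfd, falling back to an unlinked temporary file in tempdir
 * when memfd_create() is not available.
 */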
static int __init init_stub_exe_fd(void)
{
	size_t written = 0;
	char *tmpfile = NULL;

	stub_exe_fd = memfd_create("uml-userspace",
				   MFD_EXEC | MFD_CLOEXEC | MFD_ALLOW_SEALING);

	if (stub_exe_fd < 0) {
		printk(UM_KERN_INFO "Could not create executable memfd, using temporary file!\n");

		tmpfile = malloc(strlen(tempdir) +
				 strlen(STUB_EXE_NAME_TEMPLATE) + 1);
		if (tmpfile == NULL)
			panic("Failed to allocate memory for stub binary name");

		strcpy(tmpfile, tempdir);
		strcat(tmpfile, STUB_EXE_NAME_TEMPLATE);

		stub_exe_fd = mkstemp(tmpfile);
		if (stub_exe_fd < 0)
			panic("Could not create temporary file for stub binary: %d",
			      -errno);
	}

	while (written < stub_exe_end - stub_exe_start) {
		ssize_t res = write(stub_exe_fd, stub_exe_start + written,
				    stub_exe_end - stub_exe_start - written);
		if (res < 0) {
			if (errno == EINTR)
				continue;

			if (tmpfile)
				unlink(tmpfile);
			panic("Failed to write stub binary: %d", -errno);
		}

		written += res;
	}

	if (!tmpfile) {
		fcntl(stub_exe_fd, F_ADD_SEALS,
		      F_SEAL_WRITE | F_SEAL_SHRINK | F_SEAL_GROW | F_SEAL_SEAL);
	} else {
		if (fchmod(stub_exe_fd, 00500) < 0) {
			unlink(tmpfile);
			panic("Could not make stub binary executable: %d",
			      -errno);
		}

		close(stub_exe_fd);
		stub_exe_fd = open(tmpfile, O_RDONLY | O_CLOEXEC | O_NOFOLLOW);
		if (stub_exe_fd < 0) {
			unlink(tmpfile);
			panic("Could not reopen stub binary: %d", -errno);
		}

		unlink(tmpfile);
		free(tmpfile);
	}

	return 0;
}
__initcall(init_stub_exe_fd);

int using_seccomp;

/**
 * start_userspace() - prepare a new userspace process
 * @mm_id: The corresponding struct mm_id
 *
 * Sets up a new temporary stack page that is used while userspace_tramp() runs.
 * Clones the kernel process into a new userspace process, with FDs only.
 *
 * Return: When positive: the process id of the new userspace process,
 *         when negative: an error number.
 * FIXME: can PIDs become negative?!
 */
int start_userspace(struct mm_id *mm_id)
{
	struct stub_data *proc_data = (void *)mm_id->stack;
	struct tramp_data tramp_data = {
		.stub_data = proc_data,
	};
	void *stack;
	unsigned long sp;
	int status, n, err;

	/* setup a temporary stack page */
	stack = mmap(NULL, UM_KERN_PAGE_SIZE,
		     PROT_READ | PROT_WRITE | PROT_EXEC,
		     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (stack == MAP_FAILED) {
		err = -errno;
		printk(UM_KERN_ERR "%s : mmap failed, errno = %d\n",
		       __func__, errno);
		return err;
	}

	/* set stack pointer to the end of the stack page, so it can grow downwards */
	sp = (unsigned long)stack + UM_KERN_PAGE_SIZE;

	/* socket pair for init data and SECCOMP FD passing (no CLOEXEC here) */
	if (socketpair(AF_UNIX, SOCK_STREAM, 0, tramp_data.sockpair)) {
		err = -errno;
		printk(UM_KERN_ERR "%s : socketpair failed, errno = %d\n",
		       __func__, errno);
		return err;
	}

	if (using_seccomp)
		proc_data->futex = FUTEX_IN_CHILD;

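	/*
	 * CLONE_VM lets the child start on the temporary stack page
	 * mapped above; CLONE_VFORK suspends us until userspace_tramp()
	 * either exec()s the stub or exits.
	 */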
	mm_id->pid = clone(userspace_tramp, (void *) sp,
			   CLONE_VFORK | CLONE_VM | SIGCHLD,
			   (void *)&tramp_data);
	if (mm_id->pid < 0) {
		err = -errno;
		printk(UM_KERN_ERR "%s : clone failed, errno = %d\n",
		       __func__, errno);
		goto out_close;
	}

	if (using_seccomp) {
		wait_stub_done_seccomp(mm_id, 1, 1);
	} else {
		do {
			CATCH_EINTR(n = waitpid(mm_id->pid, &status,
						WUNTRACED | __WALL));
			if (n < 0) {
				err = -errno;
				printk(UM_KERN_ERR "%s : wait failed, errno = %d\n",
				       __func__, errno);
				goto out_kill;
			}
		} while (WIFSTOPPED(status) && (WSTOPSIG(status) == SIGALRM));

		if (!WIFSTOPPED(status) || (WSTOPSIG(status) != SIGSTOP)) {
			err = -EINVAL;
			printk(UM_KERN_ERR "%s : expected SIGSTOP, got status = %d\n",
			       __func__, status);
			goto out_kill;
		}

		if (ptrace(PTRACE_SETOPTIONS, mm_id->pid, NULL,
			   (void *) PTRACE_O_TRACESYSGOOD) < 0) {
			err = -errno;
			printk(UM_KERN_ERR "%s : PTRACE_SETOPTIONS failed, errno = %d\n",
			       __func__, errno);
			goto out_kill;
		}
	}

	if (munmap(stack, UM_KERN_PAGE_SIZE) < 0) {
		err = -errno;
		printk(UM_KERN_ERR "%s : munmap failed, errno = %d\n",
		       __func__, errno);
		goto out_kill;
	}

	close(tramp_data.sockpair[0]);
	if (using_seccomp)
		mm_id->sock = tramp_data.sockpair[1];
	else
		close(tramp_data.sockpair[1]);

	return 0;

out_kill:
	os_kill_ptraced_process(mm_id->pid, 1);
out_close:
	close(tramp_data.sockpair[0]);
	close(tramp_data.sockpair[1]);

	mm_id->pid = -1;

	return err;
}

static int unscheduled_userspace_iterations;
extern unsigned long tt_extra_sched_jiffies;

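/*
 * Main loop for running a userspace process: sync the address space,
 * let the process run until it reports a signal or syscall, and feed
 * the result into the kernel's fault and syscall handling.
 */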
void userspace(struct uml_pt_regs *regs)
{
	int err, status, op;
	siginfo_t si_ptrace;
	siginfo_t *si;
	int sig;

	/* Handle any immediate reschedules or signals */
	interrupt_end();

	while (1) {
		struct mm_id *mm_id = current_mm_id();

		/*
		 * When we are in time-travel mode, userspace can theoretically
		 * do a *lot* of work without being scheduled. The problem with
		 * this is that it will prevent kernel bookkeeping (primarily
		 * the RCU) from running and this can for example cause OOM
		 * situations.
		 *
		 * This code accounts a jiffy against the scheduling clock
		 * after the defined userspace iterations in the same thread.
		 * By doing so the situation is effectively prevented.
		 */
		if (time_travel_mode == TT_MODE_INFCPU ||
		    time_travel_mode == TT_MODE_EXTERNAL) {
#ifdef CONFIG_UML_MAX_USERSPACE_ITERATIONS
			if (CONFIG_UML_MAX_USERSPACE_ITERATIONS &&
			    unscheduled_userspace_iterations++ >
			    CONFIG_UML_MAX_USERSPACE_ITERATIONS) {
				tt_extra_sched_jiffies += 1;
				unscheduled_userspace_iterations = 0;
			}
#endif
		}

		time_travel_print_bc_msg();

		current_mm_sync();

		if (using_seccomp) {
			struct stub_data *proc_data = (void *) mm_id->stack;

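			/*
			 * SECCOMP round trip: copy the registers into the
			 * shared stub_data, let the stub run until its
			 * filter traps the next syscall, then pull the
			 * registers and siginfo back out of the shared page.
			 */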
err = set_stub_state(regs, proc_data, singlestepping());
|
|
|
|
if (err) {
|
2025-06-02 15:00:50 +02:00
|
|
|
printk(UM_KERN_ERR "%s - failed to set regs: %d",
|
2025-07-11 14:50:19 +08:00
|
|
|
__func__, err);
|
2025-06-02 15:00:50 +02:00
|
|
|
fatal_sigsegv();
|
|
|
|
}
|
2015-11-02 16:16:37 +00:00
|
|
|
|
2025-06-02 15:00:50 +02:00
|
|
|
/* Must have been reset by the syscall caller */
|
|
|
|
if (proc_data->restart_wait != 0)
|
|
|
|
panic("Programming error: Flag to only run syscalls in child was not cleared!");
|
|
|
|
|
|
|
|
/* Mark pending syscalls for flushing */
|
|
|
|
proc_data->syscall_data_len = mm_id->syscall_data_len;
|
|
|
|
|
um: pass FD for memory operations when needed
Instead of always sharing the FDs with the userspace process, only hand
over the FDs needed for mmap when required. The idea is that userspace
might be able to force the stub into executing an mmap syscall, however,
it will not be able to manipulate the control flow sufficiently to have
access to an FD that would allow mapping arbitrary memory.
Security wise, we need to be sure that only the expected syscalls are
executed after the kernel sends FDs through the socket. This is
currently not the case, as userspace can trivially jump to the
rt_sigreturn syscall instruction to execute any syscall that the stub is
permitted to do. With this, it can trick the kernel to send the FD,
which in turn allows userspace to freely map any physical memory.
As such, this is currently *not* secure. However, in principle the
approach should be fine with a more strict SECCOMP filter and a careful
review of the stub control flow (as userspace can prepare a stack). With
some care, it is likely possible to extend the security model to SMP if
desired.
Signed-off-by: Benjamin Berg <benjamin.berg@intel.com>
Link: https://patch.msgid.link/20250602130052.545733-8-benjamin@sipsolutions.net
Signed-off-by: Johannes Berg <johannes.berg@intel.com>
2025-06-02 15:00:52 +02:00
|
|
|
wait_stub_done_seccomp(mm_id, 0, 0);
|
2025-06-02 15:00:50 +02:00
|
|
|
|
|
|
|
sig = proc_data->signal;
|
|
|
|
|
|
|
|
if (sig == SIGTRAP && proc_data->err != 0) {
|
|
|
|
printk(UM_KERN_ERR "%s - Error flushing stub syscalls",
|
|
|
|
__func__);
|
|
|
|
syscall_stub_dump_error(mm_id);
|
				mm_id->syscall_data_len = proc_data->err;
				fatal_sigsegv();
			}

			mm_id->syscall_data_len = 0;
			mm_id->syscall_fd_num = 0;

			err = get_stub_state(regs, proc_data, NULL);
			if (err) {
				printk(UM_KERN_ERR "%s - failed to get regs: %d\n",
				       __func__, err);
				fatal_sigsegv();
			}

			if (proc_data->si_offset > sizeof(proc_data->sigstack) - sizeof(*si))
				panic("%s - Invalid siginfo offset from child",
				      __func__);
			si = (void *)&proc_data->sigstack[proc_data->si_offset];

			regs->is_user = 1;

			/* Fill in ORIG_RAX and extract fault information */
			PT_SYSCALL_NR(regs->gp) = si->si_syscall;
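			/*
			 * On a fault the stub saved its mcontext on the
			 * signal stack; the fault address and error code are
			 * recovered from it here, as the SECCOMP-mode
			 * counterpart of get_skas_faultinfo() on the ptrace
			 * path below.
			 */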
			if (sig == SIGSEGV) {
				mcontext_t *mcontext = (void *)&proc_data->sigstack[proc_data->mctx_offset];

				GET_FAULTINFO_FROM_MC(regs->faultinfo, mcontext);
			}
		} else {
			int pid = mm_id->pid;

			/* Flush out any pending syscalls */
			err = syscall_stub_flush(mm_id);
			if (err) {
				if (err == -ENOMEM)
					report_enomem();

				printk(UM_KERN_ERR "%s - Error flushing stub syscalls: %d\n",
				       __func__, -err);
				fatal_sigsegv();
			}

			/*
			 * This can legitimately fail if the process loads a
			 * bogus value into a segment register.  It will
			 * segfault and PTRACE_GETREGS will read that value
			 * out of the process.  However, PTRACE_SETREGS will
			 * fail.  In this case, there is nothing to do but
			 * just kill the process.
			 */
			if (ptrace(PTRACE_SETREGS, pid, 0, regs->gp)) {
				printk(UM_KERN_ERR "%s - ptrace set regs failed, errno = %d\n",
				       __func__, errno);
				fatal_sigsegv();
			}

			if (put_fp_registers(pid, regs->fp)) {
				printk(UM_KERN_ERR "%s - ptrace set fp regs failed, errno = %d\n",
				       __func__, errno);
				fatal_sigsegv();
			}
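
			/*
			 * PTRACE_SYSEMU resumes the child but stops it at
			 * syscall entry without letting the host execute the
			 * syscall, so UML can emulate it. The _SINGLESTEP
			 * variant additionally stops after one instruction,
			 * for debugger support.
			 */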
			if (singlestepping())
				op = PTRACE_SYSEMU_SINGLESTEP;
			else
				op = PTRACE_SYSEMU;

			if (ptrace(op, pid, 0, 0)) {
				printk(UM_KERN_ERR "%s - ptrace continue failed, op = %d, errno = %d\n",
				       __func__, op, errno);
				fatal_sigsegv();
			}
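
			/*
			 * Wait for the child to stop again. CATCH_EINTR
			 * retries the wait if it is interrupted by a signal;
			 * __WALL waits for clone and fork children alike.
			 */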
			CATCH_EINTR(err = waitpid(pid, &status, WUNTRACED | __WALL));
			if (err < 0) {
				printk(UM_KERN_ERR "%s - wait failed, errno = %d\n",
				       __func__, errno);
				fatal_sigsegv();
			}

			regs->is_user = 1;
			if (ptrace(PTRACE_GETREGS, pid, 0, regs->gp)) {
				printk(UM_KERN_ERR "%s - PTRACE_GETREGS failed, errno = %d\n",
				       __func__, errno);
				fatal_sigsegv();
			}

			if (get_fp_registers(pid, regs->fp)) {
				printk(UM_KERN_ERR "%s - get_fp_registers failed, errno = %d\n",
				       __func__, errno);
				fatal_sigsegv();
			}

			if (WIFSTOPPED(status)) {
				sig = WSTOPSIG(status);

				/*
				 * These signal handlers need the si argument
				 * and SIGSEGV needs the faultinfo.
				 * The SIGIO and SIGALRM handlers, which make
				 * up the majority of invocations, do not use
				 * it.
				 */
				switch (sig) {
				case SIGSEGV:
					get_skas_faultinfo(pid,
							   &regs->faultinfo);
					fallthrough;
				case SIGTRAP:
				case SIGILL:
				case SIGBUS:
				case SIGFPE:
				case SIGWINCH:
					ptrace(PTRACE_GETSIGINFO, pid, 0,
					       (struct siginfo *)&si_ptrace);
					si = &si_ptrace;
					break;
				default:
					si = NULL;
					break;
				}
			} else {
				sig = 0;
			}
		}
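
		/*
		 * Both paths converge here with regs refreshed, sig holding
		 * the stop signal (or 0), and si pointing at the matching
		 * siginfo when the handler below needs one.
		 */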

		UPT_SYSCALL_NR(regs) = -1; /* Assume: It's not a syscall */

		if (sig) {
			switch (sig) {
			case SIGSEGV:
				if (using_seccomp || PTRACE_FULL_FAULTINFO)
					(*sig_info[SIGSEGV])(SIGSEGV,
							     (struct siginfo *)si,
							     regs, NULL);
				else
					segv(regs->faultinfo, 0, 1, NULL, NULL);

				break;
			case SIGSYS:
				handle_syscall(regs);
				break;
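			/*
			 * With PTRACE_O_TRACESYSGOOD set, syscall stops are
			 * reported as SIGTRAP | 0x80, which keeps them
			 * distinguishable from a genuine SIGTRAP in the
			 * child.
			 */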
			case SIGTRAP + 0x80:
				handle_trap(regs);
				break;
			case SIGTRAP:
				relay_signal(SIGTRAP, (struct siginfo *)si, regs, NULL);
				break;
			case SIGALRM:
				break;
			case SIGIO:
			case SIGILL:
			case SIGBUS:
			case SIGFPE:
			case SIGWINCH:
				block_signals_trace();
				(*sig_info[sig])(sig, (struct siginfo *)si, regs, NULL);
				unblock_signals_trace();
				break;
			default:
				printk(UM_KERN_ERR "%s - child stopped with signal %d\n",
				       __func__, sig);
				fatal_sigsegv();
			}

			interrupt_end();

			/* Avoid -ERESTARTSYS handling in host */
			if (PT_SYSCALL_NR_OFFSET != PT_SYSCALL_RET_OFFSET)
				PT_SYSCALL_NR(regs->gp) = -1;
		}
	}
}
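
/*
 * Since UML provides its own setjmp/longjmp implementation, the layout of
 * a jmp_buf is known here, so a new thread's context can simply be seeded
 * by hand: point the saved IP at the handler and the saved SP at the top
 * of the new stack. This replaces the old trick of initializing the stack
 * by taking a signal on an alternate signal stack.
 */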
void new_thread(void *stack, jmp_buf *buf, void (*handler)(void))
{
	(*buf)[0].JB_IP = (unsigned long) handler;
	(*buf)[0].JB_SP = (unsigned long) stack + UM_THREAD_SIZE -
			  sizeof(void *);
}

#define INIT_JMP_NEW_THREAD 0
#define INIT_JMP_CALLBACK 1
#define INIT_JMP_HALT 2
#define INIT_JMP_REBOOT 3
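
/*
 * These codes are passed via longjmp() into initial_jmpbuf below. Note
 * that INIT_JMP_NEW_THREAD is 0, the value a direct setjmp() call
 * returns, so the first pass through start_idle_thread() takes that case.
 */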
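
/*
 * Switch context: save the current thread's state in *me and longjmp
 * into *you; control returns here, with UML_SETJMP returning non-zero,
 * when another thread later switches back via our buffer.
 */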
void switch_threads(jmp_buf *me, jmp_buf *you)
{
	unscheduled_userspace_iterations = 0;

	if (UML_SETJMP(me) == 0)
		UML_LONGJMP(you, 1);
}

static jmp_buf initial_jmpbuf;

/* XXX Make these percpu */
static void (*cb_proc)(void *arg);
static void *cb_arg;
static jmp_buf *cb_back;

int start_idle_thread(void *stack, jmp_buf *switch_buf)
{
	int n;

	set_handler(SIGWINCH);

	/*
	 * Can't use UML_SETJMP or UML_LONGJMP here because they save
	 * and restore signals, with the possible side-effect of
	 * trying to handle any signals which came when they were
	 * blocked, which can't be done on this stack.
	 * Signals must be blocked when jumping back here and restored
	 * after returning to the jumper.
	 */
	n = setjmp(initial_jmpbuf);
	switch (n) {
	case INIT_JMP_NEW_THREAD:
		(*switch_buf)[0].JB_IP = (unsigned long) uml_finishsetup;
		(*switch_buf)[0].JB_SP = (unsigned long) stack +
			UM_THREAD_SIZE - sizeof(void *);
		break;
	case INIT_JMP_CALLBACK:
		(*cb_proc)(cb_arg);
		longjmp(*cb_back, 1);
		break;
	case INIT_JMP_HALT:
		kmalloc_ok = 0;
		return 0;
	case INIT_JMP_REBOOT:
		kmalloc_ok = 0;
		return 1;
	default:
		printk(UM_KERN_ERR "Bad setjmp return in %s - %d\n",
		       __func__, n);
		fatal_sigsegv();
	}
	longjmp(*switch_buf, 1);

	/* unreachable */
	printk(UM_KERN_ERR "impossible long jump!\n");
	fatal_sigsegv();
	return 0;
}
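
/*
 * Run a callback on the initial thread's stack: stash the function and a
 * jmp_buf to return to, then longjmp over to start_idle_thread()'s
 * setjmp with INIT_JMP_CALLBACK; it invokes the callback and bounces
 * straight back here.
 */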
void initial_thread_cb_skas(void (*proc)(void *), void *arg)
{
	jmp_buf here;

	cb_proc = proc;
	cb_arg = arg;
	cb_back = &here;

	block_signals_trace();
	if (UML_SETJMP(&here) == 0)
		UML_LONGJMP(&initial_jmpbuf, INIT_JMP_CALLBACK);
	unblock_signals_trace();

	cb_proc = NULL;
	cb_arg = NULL;
	cb_back = NULL;
}

void halt_skas(void)
{
	block_signals_trace();
	UML_LONGJMP(&initial_jmpbuf, INIT_JMP_HALT);
}

static bool noreboot;

static int __init noreboot_cmd_param(char *str, int *add)
{
	*add = 0;
	noreboot = true;
	return 0;
}

__uml_setup("noreboot", noreboot_cmd_param,
"noreboot\n"
"    Rather than rebooting, exit always, akin to QEMU's -no-reboot option.\n"
"    This is useful if you're using CONFIG_PANIC_TIMEOUT in order to catch\n"
"    crashes in CI\n");

void reboot_skas(void)
{
	block_signals_trace();
	UML_LONGJMP(&initial_jmpbuf, noreboot ? INIT_JMP_HALT : INIT_JMP_REBOOT);
}