2019-08-25 10:49:18 +01:00
|
|
|
// SPDX-License-Identifier: GPL-2.0
|
2006-01-18 17:42:46 -08:00
|
|
|
/*
|
2015-11-02 16:16:37 +00:00
|
|
|
* Copyright (C) 2015 Thomas Meyer (thomas@m3y3r.de)
|
2007-10-16 01:27:00 -07:00
|
|
|
* Copyright (C) 2002- 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
|
2006-01-18 17:42:46 -08:00
|
|
|
*/
|
|
|
|
|
|
|
|
#include <stdlib.h>
|
2022-07-13 13:56:17 +02:00
|
|
|
#include <stdbool.h>
|
2006-01-18 17:42:46 -08:00
|
|
|
#include <unistd.h>
|
|
|
|
#include <sched.h>
|
2007-10-16 01:27:00 -07:00
|
|
|
#include <errno.h>
|
|
|
|
#include <string.h>
|
2024-09-19 14:45:03 +02:00
|
|
|
#include <fcntl.h>
|
|
|
|
#include <mem_user.h>
|
2006-01-18 17:42:46 -08:00
|
|
|
#include <sys/mman.h>
|
2007-10-16 01:27:00 -07:00
|
|
|
#include <sys/wait.h>
|
2024-09-19 14:45:03 +02:00
|
|
|
#include <sys/stat.h>
|
2007-10-16 01:27:00 -07:00
|
|
|
#include <asm/unistd.h>
|
2012-10-08 03:27:32 +01:00
|
|
|
#include <as-layout.h>
|
|
|
|
#include <init.h>
|
|
|
|
#include <kern_util.h>
|
|
|
|
#include <mem.h>
|
|
|
|
#include <os.h>
|
|
|
|
#include <ptrace_user.h>
|
|
|
|
#include <registers.h>
|
|
|
|
#include <skas.h>
|
|
|
|
#include <sysdep/stub.h>
|
2017-05-04 08:15:10 +02:00
|
|
|
#include <linux/threads.h>
|
2024-07-02 19:21:18 +02:00
|
|
|
#include <timetravel.h>
|
2024-04-23 20:58:56 +08:00
|
|
|
#include "../internal.h"
|
2006-01-18 17:42:46 -08:00
|
|
|
|
|
|
|
int is_skas_winch(int pid, int fd, void *data)
|
|
|
|
{
|
2011-08-18 20:08:19 +01:00
|
|
|
return pid == getpgrp();
|
2006-01-18 17:42:46 -08:00
|
|
|
}
|
|
|
|
|
2021-01-13 22:07:41 +01:00
|
|
|
static const char *ptrace_reg_name(int idx)
|
|
|
|
{
|
|
|
|
#define R(n) case HOST_##n: return #n
|
|
|
|
|
|
|
|
switch (idx) {
|
|
|
|
#ifdef __x86_64__
|
|
|
|
R(BX);
|
|
|
|
R(CX);
|
|
|
|
R(DI);
|
|
|
|
R(SI);
|
|
|
|
R(DX);
|
|
|
|
R(BP);
|
|
|
|
R(AX);
|
|
|
|
R(R8);
|
|
|
|
R(R9);
|
|
|
|
R(R10);
|
|
|
|
R(R11);
|
|
|
|
R(R12);
|
|
|
|
R(R13);
|
|
|
|
R(R14);
|
|
|
|
R(R15);
|
|
|
|
R(ORIG_AX);
|
|
|
|
R(CS);
|
|
|
|
R(SS);
|
|
|
|
R(EFLAGS);
|
|
|
|
#elif defined(__i386__)
|
|
|
|
R(IP);
|
|
|
|
R(SP);
|
|
|
|
R(EFLAGS);
|
|
|
|
R(AX);
|
|
|
|
R(BX);
|
|
|
|
R(CX);
|
|
|
|
R(DX);
|
|
|
|
R(SI);
|
|
|
|
R(DI);
|
|
|
|
R(BP);
|
|
|
|
R(CS);
|
|
|
|
R(SS);
|
|
|
|
R(DS);
|
|
|
|
R(FS);
|
|
|
|
R(ES);
|
|
|
|
R(GS);
|
|
|
|
R(ORIG_AX);
|
|
|
|
#endif
|
|
|
|
}
|
|
|
|
return "";
|
|
|
|
}
|
|
|
|
|
2007-05-06 14:51:29 -07:00
|
|
|
static int ptrace_dump_regs(int pid)
|
|
|
|
{
|
2008-02-04 22:30:58 -08:00
|
|
|
unsigned long regs[MAX_REG_NR];
|
|
|
|
int i;
|
2007-05-06 14:51:29 -07:00
|
|
|
|
2008-02-04 22:30:58 -08:00
|
|
|
if (ptrace(PTRACE_GETREGS, pid, 0, regs) < 0)
|
|
|
|
return -errno;
|
2007-10-16 01:27:00 -07:00
|
|
|
|
|
|
|
printk(UM_KERN_ERR "Stub registers -\n");
|
2021-01-13 22:07:41 +01:00
|
|
|
for (i = 0; i < ARRAY_SIZE(regs); i++) {
|
|
|
|
const char *regname = ptrace_reg_name(i);
|
|
|
|
|
|
|
|
printk(UM_KERN_ERR "\t%s\t(%2d): %lx\n", regname, i, regs[i]);
|
|
|
|
}
|
2007-05-06 14:51:29 -07:00
|
|
|
|
2008-02-04 22:30:58 -08:00
|
|
|
return 0;
|
2007-05-06 14:51:29 -07:00
|
|
|
}
|
|
|
|
|
2007-05-06 14:51:48 -07:00
|
|
|
/*
|
|
|
|
* Signals that are OK to receive in the stub - we'll just continue it.
|
|
|
|
* SIGWINCH will happen when UML is inside a detached screen.
|
|
|
|
*/
|
2015-11-02 16:16:37 +00:00
|
|
|
#define STUB_SIG_MASK ((1 << SIGALRM) | (1 << SIGWINCH))
|
2007-05-06 14:51:48 -07:00
|
|
|
|
|
|
|
/* Signals that the stub will finish with - anything else is an error */
|
2008-02-04 22:30:56 -08:00
|
|
|
#define STUB_DONE_MASK (1 << SIGTRAP)
|
2007-05-06 14:51:48 -07:00
|
|
|
|
|
|
|
/*
 * Wait for the stub child (pid) to finish its current operation.
 *
 * Loops on waitpid: signals in STUB_SIG_MASK (SIGALRM/SIGWINCH) are
 * benign and the child is simply continued; the loop exits on the
 * first stop with any other signal.  A successful stub run ends with
 * SIGTRAP (STUB_DONE_MASK); anything else is a fatal error, in which
 * case the stub's registers are dumped and the UML instance is killed
 * via fatal_sigsegv().
 */
void wait_stub_done(int pid)
{
	int n, status, err;

	while (1) {
		/* CATCH_EINTR retries waitpid if interrupted by a signal */
		CATCH_EINTR(n = waitpid(pid, &status, WUNTRACED | __WALL));
		if ((n < 0) || !WIFSTOPPED(status))
			goto bad_wait;

		/* Stop for a signal outside the benign set ends the loop */
		if (((1 << WSTOPSIG(status)) & STUB_SIG_MASK) == 0)
			break;

		/* Benign signal (e.g. SIGWINCH in a detached screen): resume */
		err = ptrace(PTRACE_CONT, pid, 0, 0);
		if (err) {
			printk(UM_KERN_ERR "%s : continue failed, errno = %d\n",
			       __func__, errno);
			fatal_sigsegv();
		}
	}

	/* SIGTRAP is the expected completion signal from the stub */
	if (((1 << WSTOPSIG(status)) & STUB_DONE_MASK) != 0)
		return;

bad_wait:
	/* Anything else is fatal: dump state, then kill the UML instance */
	err = ptrace_dump_regs(pid);
	if (err)
		printk(UM_KERN_ERR "Failed to get registers from stub, errno = %d\n",
		       -err);
	printk(UM_KERN_ERR "%s : failed to wait for SIGTRAP, pid = %d, n = %d, errno = %d, status = 0x%x\n",
	       __func__, pid, n, errno, status);
	fatal_sigsegv();
}
|
|
|
|
|
|
|
|
extern unsigned long current_stub_stack(void);
|
|
|
|
|
2024-10-05 01:38:21 +02:00
|
|
|
static void get_skas_faultinfo(int pid, struct faultinfo *fi)
|
2006-01-18 17:42:46 -08:00
|
|
|
{
|
|
|
|
int err;
|
|
|
|
|
2015-03-18 21:31:27 +01:00
|
|
|
err = ptrace(PTRACE_CONT, pid, 0, SIGSEGV);
|
|
|
|
if (err) {
|
|
|
|
printk(UM_KERN_ERR "Failed to continue stub, pid = %d, "
|
|
|
|
"errno = %d\n", pid, errno);
|
|
|
|
fatal_sigsegv();
|
|
|
|
}
|
|
|
|
wait_stub_done(pid);
|
2006-01-18 17:42:46 -08:00
|
|
|
|
2015-03-18 21:31:27 +01:00
|
|
|
/*
|
2017-07-06 00:34:05 +02:00
|
|
|
* faultinfo is prepared by the stub_segv_handler at start of
|
2015-03-18 21:31:27 +01:00
|
|
|
* the stub stack page. We just have to copy it.
|
|
|
|
*/
|
|
|
|
memcpy(fi, (void *)current_stub_stack(), sizeof(*fi));
|
2006-01-18 17:42:46 -08:00
|
|
|
}
|
|
|
|
|
2024-10-05 01:38:21 +02:00
|
|
|
static void handle_segv(int pid, struct uml_pt_regs *regs)
|
2006-01-18 17:42:46 -08:00
|
|
|
{
|
2024-10-05 01:38:21 +02:00
|
|
|
get_skas_faultinfo(pid, ®s->faultinfo);
|
2007-10-16 01:26:58 -07:00
|
|
|
segv(regs->faultinfo, 0, 1, NULL);
|
2006-01-18 17:42:46 -08:00
|
|
|
}
|
|
|
|
|
2023-11-10 12:03:38 +01:00
|
|
|
static void handle_trap(int pid, struct uml_pt_regs *regs)
|
2006-01-18 17:42:46 -08:00
|
|
|
{
|
2008-02-04 22:31:12 -08:00
|
|
|
if ((UPT_IP(regs) >= STUB_START) && (UPT_IP(regs) < STUB_END))
|
|
|
|
fatal_sigsegv();
|
|
|
|
|
2006-01-18 17:42:46 -08:00
|
|
|
handle_syscall(regs);
|
|
|
|
}
|
|
|
|
|
2014-10-12 13:02:13 +02:00
|
|
|
extern char __syscall_stub_start[];
|
2006-01-18 17:42:46 -08:00
|
|
|
|
2024-09-19 14:45:03 +02:00
|
|
|
static int stub_exe_fd;
|
|
|
|
|
2006-01-18 17:42:46 -08:00
|
|
|
/*
 * Child-side trampoline run right after clone() in start_userspace().
 *
 * Prepares the stub environment and execs the standalone stub binary
 * (stub_exe_fd): it builds a stub_init_data record describing the stub
 * code/data mappings, arranges FD inheritance (CLOEXEC on everything
 * except the memory-backing FDs), feeds init_data to the new program
 * through a pipe dup2'd onto stdin, and finally execveat()s the stub.
 *
 * Runs with CLONE_VM, so it must not disturb parent state; on any
 * failure it exits with a small distinct code (2-5) for diagnosis.
 */
static int userspace_tramp(void *stack)
{
	char *const argv[] = { "uml-userspace", NULL };
	int pipe_fds[2];
	unsigned long long offset;
	struct stub_init_data init_data = {
		.stub_start = STUB_START,
		/* segv handler address relocated into the stub code mapping */
		.segv_handler = STUB_CODE +
				(unsigned long) stub_segv_handler -
				(unsigned long) __syscall_stub_start,
	};
	struct iomem_region *iomem;
	int ret;

	/* FD/offset pairs the stub needs to mmap its code and data */
	init_data.stub_code_fd = phys_mapping(uml_to_phys(__syscall_stub_start),
					      &offset);
	init_data.stub_code_offset = MMAP_OFFSET(offset);

	init_data.stub_data_fd = phys_mapping(uml_to_phys(stack), &offset);
	init_data.stub_data_offset = MMAP_OFFSET(offset);

	/* Set CLOEXEC on all FDs and then unset on all memory related FDs */
	close_range(0, ~0U, CLOSE_RANGE_CLOEXEC);

	fcntl(init_data.stub_data_fd, F_SETFD, 0);
	for (iomem = iomem_regions; iomem; iomem = iomem->next)
		fcntl(iomem->fd, F_SETFD, 0);

	/* Create a pipe for init_data (no CLOEXEC) and dup2 to STDIN */
	if (pipe(pipe_fds))
		exit(2);

	if (dup2(pipe_fds[0], 0) < 0)
		exit(3);
	close(pipe_fds[0]);

	/* Write init_data and close write side */
	ret = write(pipe_fds[1], &init_data, sizeof(init_data));
	close(pipe_fds[1]);

	if (ret != sizeof(init_data))
		exit(4);

	/* AT_EMPTY_PATH: exec the binary referred to by stub_exe_fd itself */
	execveat(stub_exe_fd, "", argv, NULL, AT_EMPTY_PATH);

	/* Only reached if execveat() failed */
	exit(5);
}
|
|
|
|
|
|
|
|
extern char stub_exe_start[];
|
|
|
|
extern char stub_exe_end[];
|
|
|
|
|
|
|
|
extern char *tempdir;
|
|
|
|
|
|
|
|
#define STUB_EXE_NAME_TEMPLATE "/uml-userspace-XXXXXX"
|
|
|
|
|
|
|
|
#ifndef MFD_EXEC
|
|
|
|
#define MFD_EXEC 0x0010U
|
|
|
|
#endif
|
|
|
|
|
|
|
|
/*
 * One-time init: materialise the embedded stub binary
 * (stub_exe_start..stub_exe_end) into an executable FD (stub_exe_fd)
 * for later execveat() in userspace_tramp().
 *
 * Preferred path is a sealed executable memfd; if memfd_create() is
 * unavailable/fails, falls back to a mkstemp() file under tempdir,
 * which is made read+exec, reopened O_RDONLY, and unlinked so only
 * the FD remains.  Fatal errors panic.  Always returns 0.
 */
static int __init init_stub_exe_fd(void)
{
	size_t written = 0;
	/* non-NULL only on the temp-file fallback path */
	char *tmpfile = NULL;

	stub_exe_fd = memfd_create("uml-userspace",
				   MFD_EXEC | MFD_CLOEXEC | MFD_ALLOW_SEALING);

	if (stub_exe_fd < 0) {
		printk(UM_KERN_INFO "Could not create executable memfd, using temporary file!");

		tmpfile = malloc(strlen(tempdir) +
				 strlen(STUB_EXE_NAME_TEMPLATE) + 1);
		if (tmpfile == NULL)
			panic("Failed to allocate memory for stub binary name");

		strcpy(tmpfile, tempdir);
		strcat(tmpfile, STUB_EXE_NAME_TEMPLATE);

		stub_exe_fd = mkstemp(tmpfile);
		if (stub_exe_fd < 0)
			panic("Could not create temporary file for stub binary: %d",
			      -errno);
	}

	/* Copy the embedded stub image out to the FD, retrying on EINTR */
	while (written < stub_exe_end - stub_exe_start) {
		ssize_t res = write(stub_exe_fd, stub_exe_start + written,
				    stub_exe_end - stub_exe_start - written);
		if (res < 0) {
			if (errno == EINTR)
				continue;

			if (tmpfile)
				unlink(tmpfile);
			panic("Failed write stub binary: %d", -errno);
		}

		written += res;
	}

	if (!tmpfile) {
		/* memfd path: seal the image against any further change */
		fcntl(stub_exe_fd, F_ADD_SEALS,
		      F_SEAL_WRITE | F_SEAL_SHRINK | F_SEAL_GROW | F_SEAL_SEAL);
	} else {
		/* temp-file path: make it exec-able, reopen r/o, unlink */
		if (fchmod(stub_exe_fd, 00500) < 0) {
			unlink(tmpfile);
			panic("Could not make stub binary executable: %d",
			      -errno);
		}

		close(stub_exe_fd);
		stub_exe_fd = open(tmpfile, O_RDONLY | O_CLOEXEC | O_NOFOLLOW);
		if (stub_exe_fd < 0) {
			unlink(tmpfile);
			panic("Could not reopen stub binary: %d", -errno);
		}

		/* unlink now; the open FD keeps the inode alive */
		unlink(tmpfile);
		free(tmpfile);
	}

	return 0;
}
|
2024-09-19 14:45:03 +02:00
|
|
|
__initcall(init_stub_exe_fd);
|
2006-01-18 17:42:46 -08:00
|
|
|
|
|
|
|
int userspace_pid[NR_CPUS];
|
|
|
|
|
2017-07-06 00:34:05 +02:00
|
|
|
/**
|
|
|
|
* start_userspace() - prepare a new userspace process
|
2023-11-10 12:03:39 +01:00
|
|
|
* @stub_stack: pointer to the stub stack.
|
2017-07-06 00:34:05 +02:00
|
|
|
*
|
|
|
|
* Setups a new temporary stack page that is used while userspace_tramp() runs
|
|
|
|
* Clones the kernel process into a new userspace process, with FDs only.
|
|
|
|
*
|
|
|
|
* Return: When positive: the process id of the new userspace process,
|
|
|
|
* when negative: an error number.
|
|
|
|
* FIXME: can PIDs become negative?!
|
|
|
|
*/
|
2006-01-18 17:42:46 -08:00
|
|
|
int start_userspace(unsigned long stub_stack)
{
	void *stack;
	unsigned long sp;
	int pid, status, n, err;

	/* setup a temporary stack page */
	stack = mmap(NULL, UM_KERN_PAGE_SIZE,
		     PROT_READ | PROT_WRITE | PROT_EXEC,
		     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (stack == MAP_FAILED) {
		err = -errno;
		printk(UM_KERN_ERR "%s : mmap failed, errno = %d\n",
		       __func__, errno);
		return err;
	}

	/* set stack pointer to the end of the stack page, so it can grow downwards */
	sp = (unsigned long)stack + UM_KERN_PAGE_SIZE;

	/*
	 * clone into new userspace process; CLONE_VFORK blocks us until
	 * the child has exec'd the stub binary (or exited), so the
	 * temporary stack page can be safely unmapped below
	 */
	pid = clone(userspace_tramp, (void *) sp,
		    CLONE_VFORK | CLONE_VM | SIGCHLD,
		    (void *)stub_stack);
	if (pid < 0) {
		err = -errno;
		printk(UM_KERN_ERR "%s : clone failed, errno = %d\n",
		       __func__, errno);
		return err;
	}

	/* wait for the child's first stop, ignoring stray SIGALRM stops */
	do {
		CATCH_EINTR(n = waitpid(pid, &status, WUNTRACED | __WALL));
		if (n < 0) {
			err = -errno;
			printk(UM_KERN_ERR "%s : wait failed, errno = %d\n",
			       __func__, errno);
			goto out_kill;
		}
	} while (WIFSTOPPED(status) && (WSTOPSIG(status) == SIGALRM));

	/* the stub announces readiness by stopping itself with SIGSTOP */
	if (!WIFSTOPPED(status) || (WSTOPSIG(status) != SIGSTOP)) {
		err = -EINVAL;
		printk(UM_KERN_ERR "%s : expected SIGSTOP, got status = %d\n",
		       __func__, status);
		goto out_kill;
	}

	/* TRACESYSGOOD makes syscall stops report SIGTRAP | 0x80 */
	if (ptrace(PTRACE_SETOPTIONS, pid, NULL,
		   (void *) PTRACE_O_TRACESYSGOOD) < 0) {
		err = -errno;
		printk(UM_KERN_ERR "%s : PTRACE_SETOPTIONS failed, errno = %d\n",
		       __func__, errno);
		goto out_kill;
	}

	/* the temporary trampoline stack is no longer needed */
	if (munmap(stack, UM_KERN_PAGE_SIZE) < 0) {
		err = -errno;
		printk(UM_KERN_ERR "%s : munmap failed, errno = %d\n",
		       __func__, errno);
		goto out_kill;
	}

	return pid;

out_kill:
	os_kill_ptraced_process(pid, 1);
	return err;
}
|
|
|
|
|
2024-10-10 16:25:37 +02:00
|
|
|
int unscheduled_userspace_iterations;
|
|
|
|
extern unsigned long tt_extra_sched_jiffies;
|
|
|
|
|
2024-10-05 01:38:21 +02:00
|
|
|
/*
 * Main ptrace loop driving the userspace child process.
 *
 * Each iteration: flush pending stub syscalls, push the saved register
 * state (GP + FP) into the child, resume it with PTRACE_SYSEMU (or
 * SYSEMU_SINGLESTEP), wait for it to stop, pull the registers back,
 * and dispatch on the stop signal (segfaults, syscall traps, IRQs).
 * Never returns; unrecoverable ptrace errors call fatal_sigsegv().
 */
void userspace(struct uml_pt_regs *regs)
{
	int err, status, op, pid = userspace_pid[0];
	siginfo_t si;

	/* Handle any immediate reschedules or signals */
	interrupt_end();

	while (1) {
		/*
		 * When we are in time-travel mode, userspace can theoretically
		 * do a *lot* of work without being scheduled. The problem with
		 * this is that it will prevent kernel bookkeeping (primarily
		 * the RCU) from running and this can for example cause OOM
		 * situations.
		 *
		 * This code accounts a jiffie against the scheduling clock
		 * after the defined userspace iterations in the same thread.
		 * By doing so the situation is effectively prevented.
		 */
		if (time_travel_mode == TT_MODE_INFCPU ||
		    time_travel_mode == TT_MODE_EXTERNAL) {
#ifdef CONFIG_UML_MAX_USERSPACE_ITERATIONS
			if (CONFIG_UML_MAX_USERSPACE_ITERATIONS &&
			    unscheduled_userspace_iterations++ >
			    CONFIG_UML_MAX_USERSPACE_ITERATIONS) {
				tt_extra_sched_jiffies += 1;
				unscheduled_userspace_iterations = 0;
			}
#endif
		}

		time_travel_print_bc_msg();

		current_mm_sync();

		/* Flush out any pending syscalls */
		err = syscall_stub_flush(current_mm_id());
		if (err) {
			if (err == -ENOMEM)
				report_enomem();

			printk(UM_KERN_ERR "%s - Error flushing stub syscalls: %d",
				__func__, -err);
			fatal_sigsegv();
		}

		/*
		 * This can legitimately fail if the process loads a
		 * bogus value into a segment register. It will
		 * segfault and PTRACE_GETREGS will read that value
		 * out of the process. However, PTRACE_SETREGS will
		 * fail. In this case, there is nothing to do but
		 * just kill the process.
		 */
		if (ptrace(PTRACE_SETREGS, pid, 0, regs->gp)) {
			printk(UM_KERN_ERR "%s - ptrace set regs failed, errno = %d\n",
			       __func__, errno);
			fatal_sigsegv();
		}

		if (put_fp_registers(pid, regs->fp)) {
			printk(UM_KERN_ERR "%s - ptrace set fp regs failed, errno = %d\n",
			       __func__, errno);
			fatal_sigsegv();
		}

		/* SYSEMU stops at syscall entry without executing it */
		if (singlestepping())
			op = PTRACE_SYSEMU_SINGLESTEP;
		else
			op = PTRACE_SYSEMU;

		if (ptrace(op, pid, 0, 0)) {
			printk(UM_KERN_ERR "%s - ptrace continue failed, op = %d, errno = %d\n",
			       __func__, op, errno);
			fatal_sigsegv();
		}

		CATCH_EINTR(err = waitpid(pid, &status, WUNTRACED | __WALL));
		if (err < 0) {
			printk(UM_KERN_ERR "%s - wait failed, errno = %d\n",
			       __func__, errno);
			fatal_sigsegv();
		}

		/* pull the child's register state back into regs */
		regs->is_user = 1;
		if (ptrace(PTRACE_GETREGS, pid, 0, regs->gp)) {
			printk(UM_KERN_ERR "%s - PTRACE_GETREGS failed, errno = %d\n",
			       __func__, errno);
			fatal_sigsegv();
		}

		if (get_fp_registers(pid, regs->fp)) {
			printk(UM_KERN_ERR "%s - get_fp_registers failed, errno = %d\n",
			       __func__, errno);
			fatal_sigsegv();
		}

		UPT_SYSCALL_NR(regs) = -1; /* Assume: It's not a syscall */

		if (WIFSTOPPED(status)) {
			int sig = WSTOPSIG(status);

			/* These signal handlers need the si argument.
			 * The SIGIO and SIGALARM handlers which constitute the
			 * majority of invocations, do not use it.
			 */
			switch (sig) {
			case SIGSEGV:
			case SIGTRAP:
			case SIGILL:
			case SIGBUS:
			case SIGFPE:
			case SIGWINCH:
				ptrace(PTRACE_GETSIGINFO, pid, 0, (struct siginfo *)&si);
				break;
			}

			switch (sig) {
			case SIGSEGV:
				if (PTRACE_FULL_FAULTINFO) {
					get_skas_faultinfo(pid,
							   &regs->faultinfo);
					(*sig_info[SIGSEGV])(SIGSEGV, (struct siginfo *)&si,
							     regs);
				}
				else handle_segv(pid, regs);
				break;
			/* SIGTRAP | 0x80 marks a syscall stop (TRACESYSGOOD) */
			case SIGTRAP + 0x80:
				handle_trap(pid, regs);
				break;
			case SIGTRAP:
				relay_signal(SIGTRAP, (struct siginfo *)&si, regs);
				break;
			case SIGALRM:
				break;
			case SIGIO:
			case SIGILL:
			case SIGBUS:
			case SIGFPE:
			case SIGWINCH:
				block_signals_trace();
				(*sig_info[sig])(sig, (struct siginfo *)&si, regs);
				unblock_signals_trace();
				break;
			default:
				printk(UM_KERN_ERR "%s - child stopped with signal %d\n",
				       __func__, sig);
				fatal_sigsegv();
			}
			/* handlers may have switched the current process */
			pid = userspace_pid[0];
			interrupt_end();

			/* Avoid -ERESTARTSYS handling in host */
			if (PT_SYSCALL_NR_OFFSET != PT_SYSCALL_RET_OFFSET)
				PT_SYSCALL_NR(regs) = -1;
		}
	}
}
|
|
|
|
|
[PATCH] uml: thread creation tidying
fork on UML has always somewhat subtle. The underlying cause has been the
need to initialize a stack for the new process. The only portable way to
initialize a new stack is to set it as the alternate signal stack and take a
signal. The signal handler does whatever initialization is needed and jumps
back to the original stack, where the fork processing is finished. The basic
context switching mechanism is a jmp_buf for each process. You switch to a
new process by longjmping to its jmp_buf.
Now that UML has its own implementation of setjmp and longjmp, and I can poke
around inside a jmp_buf without fear that libc will change the structure, a
much simpler mechanism is possible. The jmpbuf can simply be initialized by
hand.
This eliminates -
the need to set up and remove the alternate signal stack
sending and handling a signal
the signal blocking needed around the stack switching, since
there is no stack switching
setting up the jmp_buf needed to jump back to the original
stack after the new one is set up
In addition, since jmp_buf is now defined by UML, and not by libc, it can be
embedded in the thread struct. This makes it unnecessary to have it exist on
the stack, where it used to be. It also simplifies interfaces, since the
switch jmp_buf used to be a void * inside the thread struct, and functions
which took it as an argument needed to define a jmp_buf variable and assign it
from the void *.
Signed-off-by: Jeff Dike <jdike@addtoit.com>
Cc: Paolo 'Blaisorblade' Giarrusso <blaisorblade@yahoo.it>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2006-09-27 01:50:40 -07:00
|
|
|
void new_thread(void *stack, jmp_buf *buf, void (*handler)(void))
|
2006-01-18 17:42:46 -08:00
|
|
|
{
|
[PATCH] uml: thread creation tidying
fork on UML has always somewhat subtle. The underlying cause has been the
need to initialize a stack for the new process. The only portable way to
initialize a new stack is to set it as the alternate signal stack and take a
signal. The signal handler does whatever initialization is needed and jumps
back to the original stack, where the fork processing is finished. The basic
context switching mechanism is a jmp_buf for each process. You switch to a
new process by longjmping to its jmp_buf.
Now that UML has its own implementation of setjmp and longjmp, and I can poke
around inside a jmp_buf without fear that libc will change the structure, a
much simpler mechanism is possible. The jmpbuf can simply be initialized by
hand.
This eliminates -
the need to set up and remove the alternate signal stack
sending and handling a signal
the signal blocking needed around the stack switching, since
there is no stack switching
setting up the jmp_buf needed to jump back to the original
stack after the new one is set up
In addition, since jmp_buf is now defined by UML, and not by libc, it can be
embedded in the thread struct. This makes it unnecessary to have it exist on
the stack, where it used to be. It also simplifies interfaces, since the
switch jmp_buf used to be a void * inside the thread struct, and functions
which took it as an argument needed to define a jmp_buf variable and assign it
from the void *.
Signed-off-by: Jeff Dike <jdike@addtoit.com>
Cc: Paolo 'Blaisorblade' Giarrusso <blaisorblade@yahoo.it>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2006-09-27 01:50:40 -07:00
|
|
|
(*buf)[0].JB_IP = (unsigned long) handler;
|
2007-05-10 22:22:31 -07:00
|
|
|
(*buf)[0].JB_SP = (unsigned long) stack + UM_THREAD_SIZE -
|
|
|
|
sizeof(void *);
|
2006-01-18 17:42:46 -08:00
|
|
|
}
|
|
|
|
|
2006-02-07 12:58:43 -08:00
|
|
|
#define INIT_JMP_NEW_THREAD 0
|
[PATCH] uml: thread creation tidying
fork on UML has always somewhat subtle. The underlying cause has been the
need to initialize a stack for the new process. The only portable way to
initialize a new stack is to set it as the alternate signal stack and take a
signal. The signal handler does whatever initialization is needed and jumps
back to the original stack, where the fork processing is finished. The basic
context switching mechanism is a jmp_buf for each process. You switch to a
new process by longjmping to its jmp_buf.
Now that UML has its own implementation of setjmp and longjmp, and I can poke
around inside a jmp_buf without fear that libc will change the structure, a
much simpler mechanism is possible. The jmpbuf can simply be initialized by
hand.
This eliminates -
the need to set up and remove the alternate signal stack
sending and handling a signal
the signal blocking needed around the stack switching, since
there is no stack switching
setting up the jmp_buf needed to jump back to the original
stack after the new one is set up
In addition, since jmp_buf is now defined by UML, and not by libc, it can be
embedded in the thread struct. This makes it unnecessary to have it exist on
the stack, where it used to be. It also simplifies interfaces, since the
switch jmp_buf used to be a void * inside the thread struct, and functions
which took it as an argument needed to define a jmp_buf variable and assign it
from the void *.
Signed-off-by: Jeff Dike <jdike@addtoit.com>
Cc: Paolo 'Blaisorblade' Giarrusso <blaisorblade@yahoo.it>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2006-09-27 01:50:40 -07:00
|
|
|
#define INIT_JMP_CALLBACK 1
|
|
|
|
#define INIT_JMP_HALT 2
|
|
|
|
#define INIT_JMP_REBOOT 3
|
2006-01-18 17:42:46 -08:00
|
|
|
|
[PATCH] uml: thread creation tidying
fork on UML has always somewhat subtle. The underlying cause has been the
need to initialize a stack for the new process. The only portable way to
initialize a new stack is to set it as the alternate signal stack and take a
signal. The signal handler does whatever initialization is needed and jumps
back to the original stack, where the fork processing is finished. The basic
context switching mechanism is a jmp_buf for each process. You switch to a
new process by longjmping to its jmp_buf.
Now that UML has its own implementation of setjmp and longjmp, and I can poke
around inside a jmp_buf without fear that libc will change the structure, a
much simpler mechanism is possible. The jmpbuf can simply be initialized by
hand.
This eliminates -
the need to set up and remove the alternate signal stack
sending and handling a signal
the signal blocking needed around the stack switching, since
there is no stack switching
setting up the jmp_buf needed to jump back to the original
stack after the new one is set up
In addition, since jmp_buf is now defined by UML, and not by libc, it can be
embedded in the thread struct. This makes it unnecessary to have it exist on
the stack, where it used to be. It also simplifies interfaces, since the
switch jmp_buf used to be a void * inside the thread struct, and functions
which took it as an argument needed to define a jmp_buf variable and assign it
from the void *.
Signed-off-by: Jeff Dike <jdike@addtoit.com>
Cc: Paolo 'Blaisorblade' Giarrusso <blaisorblade@yahoo.it>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2006-09-27 01:50:40 -07:00
|
|
|
/*
 * Context-switch between two kernel threads: save the current register
 * state into *me, then longjmp to the state previously saved in *you.
 * Control returns here (UML_SETJMP yielding non-zero) only when some
 * other thread later switches back to *me.
 */
void switch_threads(jmp_buf *me, jmp_buf *you)
{
	/*
	 * NOTE(review): resets a counter on every switch; its exact
	 * accounting semantics are defined elsewhere — confirm there.
	 */
	unscheduled_userspace_iterations = 0;

	if (UML_SETJMP(me) == 0)
		UML_LONGJMP(you, 1);
}
|
|
|
|
|
2006-04-18 22:21:41 -07:00
|
|
|
/* Saved context of the initial (idle) thread; set up in start_idle_thread()
 * and longjmp'd to by halt_skas(), reboot_skas() and
 * initial_thread_cb_skas().
 */
static jmp_buf initial_jmpbuf;

/* XXX Make these percpu */
/* Callback slot used by initial_thread_cb_skas() to hand a function,
 * its argument, and a return jmp_buf over to the initial thread.
 */
static void (*cb_proc)(void *arg);
static void *cb_arg;
static jmp_buf *cb_back;
|
2006-01-18 17:42:46 -08:00
|
|
|
|
[PATCH] uml: thread creation tidying
fork on UML has always somewhat subtle. The underlying cause has been the
need to initialize a stack for the new process. The only portable way to
initialize a new stack is to set it as the alternate signal stack and take a
signal. The signal handler does whatever initialization is needed and jumps
back to the original stack, where the fork processing is finished. The basic
context switching mechanism is a jmp_buf for each process. You switch to a
new process by longjmping to its jmp_buf.
Now that UML has its own implementation of setjmp and longjmp, and I can poke
around inside a jmp_buf without fear that libc will change the structure, a
much simpler mechanism is possible. The jmpbuf can simply be initialized by
hand.
This eliminates -
the need to set up and remove the alternate signal stack
sending and handling a signal
the signal blocking needed around the stack switching, since
there is no stack switching
setting up the jmp_buf needed to jump back to the original
stack after the new one is set up
In addition, since jmp_buf is now defined by UML, and not by libc, it can be
embedded in the thread struct. This makes it unnecessary to have it exist on
the stack, where it used to be. It also simplifies interfaces, since the
switch jmp_buf used to be a void * inside the thread struct, and functions
which took it as an argument needed to define a jmp_buf variable and assign it
from the void *.
Signed-off-by: Jeff Dike <jdike@addtoit.com>
Cc: Paolo 'Blaisorblade' Giarrusso <blaisorblade@yahoo.it>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2006-09-27 01:50:40 -07:00
|
|
|
/*
 * Prime the first kernel thread and become the landing point for
 * non-local exits.  *switch_buf is hand-initialized so that jumping to
 * it starts execution in uml_finishsetup() on the supplied stack; the
 * function then services longjmps to initial_jmpbuf:
 *
 *   INIT_JMP_NEW_THREAD - (re)build *switch_buf and jump to it
 *   INIT_JMP_CALLBACK   - run the posted callback, jump back to caller
 *   INIT_JMP_HALT       - return 0
 *   INIT_JMP_REBOOT     - return 1
 *
 * Only the halt/reboot cases return to the caller.
 */
int start_idle_thread(void *stack, jmp_buf *switch_buf)
{
	int n;

	set_handler(SIGWINCH);

	/*
	 * Can't use UML_SETJMP or UML_LONGJMP here because they save
	 * and restore signals, with the possible side-effect of
	 * trying to handle any signals which came when they were
	 * blocked, which can't be done on this stack.
	 * Signals must be blocked when jumping back here and restored
	 * after returning to the jumper.
	 */
	n = setjmp(initial_jmpbuf);
	switch (n) {
	case INIT_JMP_NEW_THREAD:
		/*
		 * Hand-craft the new thread's jmp_buf: entry point is
		 * uml_finishsetup(), stack pointer just below the top
		 * of its UM_THREAD_SIZE stack.
		 */
		(*switch_buf)[0].JB_IP = (unsigned long) uml_finishsetup;
		(*switch_buf)[0].JB_SP = (unsigned long) stack +
			UM_THREAD_SIZE - sizeof(void *);
		break;
	case INIT_JMP_CALLBACK:
		/*
		 * Run the callback posted in cb_proc/cb_arg by
		 * initial_thread_cb_skas(), then jump straight back to
		 * the requesting thread via cb_back.
		 */
		(*cb_proc)(cb_arg);
		longjmp(*cb_back, 1);
		break;
	case INIT_JMP_HALT:
		/*
		 * NOTE(review): kmalloc_ok is defined elsewhere;
		 * clearing it presumably disables kernel allocation
		 * during teardown — confirm.
		 */
		kmalloc_ok = 0;
		return 0;
	case INIT_JMP_REBOOT:
		kmalloc_ok = 0;
		return 1;
	default:
		printk(UM_KERN_ERR "Bad sigsetjmp return in %s - %d\n",
		       __func__, n);
		fatal_sigsegv();
	}
	/* First pass (setjmp returned 0): start the new thread. */
	longjmp(*switch_buf, 1);

	/* unreachable */
	printk(UM_KERN_ERR "impossible long jump!");
	fatal_sigsegv();
	return 0;
}
|
|
|
|
|
|
|
|
/*
 * Execute proc(arg) on the initial thread's stack.  Publishes the
 * callback through cb_proc/cb_arg/cb_back, longjmps to
 * start_idle_thread() (which invokes it and jumps back to `here`),
 * then clears the slot.  Signals are blocked for the whole round trip.
 */
void initial_thread_cb_skas(void (*proc)(void *), void *arg)
{
	jmp_buf here;

	cb_proc = proc;
	cb_arg = arg;
	cb_back = &here;

	block_signals_trace();
	if (UML_SETJMP(&here) == 0)
		UML_LONGJMP(&initial_jmpbuf, INIT_JMP_CALLBACK);
	unblock_signals_trace();

	/* Callback completed — clear the slot for the next user. */
	cb_proc = NULL;
	cb_arg = NULL;
	cb_back = NULL;
}
|
|
|
|
|
|
|
|
/*
 * Halt the UML instance: jump back to start_idle_thread(), making it
 * return 0.  Does not return to the caller.
 */
void halt_skas(void)
{
	block_signals_trace();
	UML_LONGJMP(&initial_jmpbuf, INIT_JMP_HALT);
}
|
|
|
|
|
2022-07-13 13:56:17 +02:00
|
|
|
/* Set by the "noreboot" command line parameter; turns reboot requests
 * into plain halts (see reboot_skas()).
 */
static bool noreboot;

/*
 * Handler for the "noreboot" command line option: record the flag.
 * NOTE(review): *add = 0 presumably tells the option parser not to
 * forward the argument — confirm against __uml_setup handling.
 */
static int __init noreboot_cmd_param(char *str, int *add)
{
	*add = 0;
	noreboot = true;
	return 0;
}

__uml_setup("noreboot", noreboot_cmd_param,
"noreboot\n"
" Rather than rebooting, exit always, akin to QEMU's -no-reboot option.\n"
" This is useful if you're using CONFIG_PANIC_TIMEOUT in order to catch\n"
" crashes in CI\n");
|
|
|
|
|
2006-01-18 17:42:46 -08:00
|
|
|
/*
 * Reboot the UML instance: jump back to start_idle_thread(), making it
 * return 1 — or 0 (halt) if "noreboot" was given on the command line.
 * Does not return to the caller.
 */
void reboot_skas(void)
{
	block_signals_trace();
	UML_LONGJMP(&initial_jmpbuf, noreboot ? INIT_JMP_HALT : INIT_JMP_REBOOT);
}
|
|
|
|
|
2007-10-16 01:26:58 -07:00
|
|
|
/*
 * Switch the active address space: record the pid of the userspace
 * process that backs *mm_idp as the current userspace target.
 */
void __switch_mm(struct mm_id *mm_idp)
{
	userspace_pid[0] = mm_idp->pid;
}
|