2019-06-04 10:11:33 +02:00
|
|
|
// SPDX-License-Identifier: GPL-2.0-only
|
2013-01-18 15:12:18 +05:30
|
|
|
/*
|
|
|
|
* Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
|
|
|
|
*
|
|
|
|
* Amit Bhor, Kanika Nema: Codito Technologies 2004
|
|
|
|
*/
|
|
|
|
|
|
|
|
#include <linux/errno.h>
|
|
|
|
#include <linux/module.h>
|
|
|
|
#include <linux/sched.h>
|
2017-02-08 18:51:36 +01:00
|
|
|
#include <linux/sched/task.h>
|
2017-02-08 18:51:37 +01:00
|
|
|
#include <linux/sched/task_stack.h>
|
2017-02-08 18:51:36 +01:00
|
|
|
|
2013-01-18 15:12:18 +05:30
|
|
|
#include <linux/mm.h>
|
|
|
|
#include <linux/fs.h>
|
|
|
|
#include <linux/unistd.h>
|
|
|
|
#include <linux/ptrace.h>
|
|
|
|
#include <linux/slab.h>
|
|
|
|
#include <linux/syscalls.h>
|
|
|
|
#include <linux/elf.h>
|
|
|
|
#include <linux/tick.h>
|
|
|
|
|
2020-01-17 15:04:03 -08:00
|
|
|
#include <asm/fpu.h>
|
|
|
|
|
2013-01-18 15:12:18 +05:30
|
|
|
/*
 * Record the userland TLS data pointer for the current task.
 *
 * The value is stashed in thread_info and later propagated into r25
 * for the task when it is switched in (see copy_thread()).
 * Always returns 0.
 */
SYSCALL_DEFINE1(arc_settls, void *, user_tls_data_ptr)
{
	struct thread_info *ti = task_thread_info(current);

	ti->thr_ptr = (unsigned int)user_tls_data_ptr;
	return 0;
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* We return the user space TLS data ptr as sys-call return code
|
|
|
|
* Ideally it should be copy to user.
|
|
|
|
* However we can cheat by the fact that some sys-calls do return
|
|
|
|
* absurdly high values
|
|
|
|
* Since the tls dat aptr is not going to be in range of 0xFFFF_xxxx
|
|
|
|
* it won't be considered a sys-call error
|
|
|
|
* and it will be loads better than copy-to-user, which is a definite
|
|
|
|
* D-TLB Miss
|
|
|
|
*/
|
|
|
|
SYSCALL_DEFINE0(arc_gettls)
|
|
|
|
{
|
|
|
|
return task_thread_info(current)->thr_ptr;
|
|
|
|
}
|
2013-01-18 15:12:18 +05:30
|
|
|
|
2022-02-14 20:22:10 +01:00
|
|
|
/*
 * Emulated compare-and-exchange on a user-space word, for cores that
 * lack the LLOCK/SCOND instructions.
 *
 * If *uaddr == expected, store @new into *uaddr. Success is reported to
 * userspace via the Z flag in the saved status32; the syscall return
 * value is the value read from *uaddr (the "old" value). An invalid or
 * unresolvable user address raises SIGSEGV.
 */
SYSCALL_DEFINE3(arc_usr_cmpxchg, int __user *, uaddr, int, expected, int, new)
{
	struct pt_regs *regs = current_pt_regs();
	u32 uval;
	int ret;

	/*
	 * This is only for old cores lacking LLOCK/SCOND, which by definition
	 * can't possibly be SMP. Thus doesn't need to be SMP safe.
	 * And this also helps reduce the overhead for serializing in
	 * the UP case
	 */
	WARN_ON_ONCE(IS_ENABLED(CONFIG_SMP));

	/* Z indicates to userspace if operation succeeded */
	regs->status32 &= ~STATUS_Z_MASK;

	ret = access_ok(uaddr, sizeof(*uaddr));
	if (!ret)
		goto fail;

again:
	/*
	 * Disable preemption so the read-compare-write triple cannot be
	 * interleaved with another task on this (UP-only) CPU.
	 */
	preempt_disable();

	ret = __get_user(uval, uaddr);
	if (ret)
		goto fault;

	/* Mismatch: leave Z clear, still return the observed value */
	if (uval != expected)
		goto out;

	ret = __put_user(new, uaddr);
	if (ret)
		goto fault;

	/* Exchange happened: tell userspace via Z */
	regs->status32 |= STATUS_Z_MASK;

out:
	preempt_enable();
	return uval;

fault:
	/* Must re-enable preemption before potentially sleeping in fault fixup */
	preempt_enable();

	if (unlikely(ret != -EFAULT))
		goto fail;

	/*
	 * -EFAULT from __get_user/__put_user: try to resolve the fault
	 * (fault the page in / break COW for the write) and retry the
	 * whole sequence from scratch.
	 */
	mmap_read_lock(current->mm);
	ret = fixup_user_fault(current->mm, (unsigned long) uaddr,
			       FAULT_FLAG_WRITE, NULL);
	mmap_read_unlock(current->mm);

	if (likely(!ret))
		goto again;

fail:
	force_sig(SIGSEGV);
	return ret;
}
|
|
|
|
|
2017-06-02 11:49:10 -07:00
|
|
|
#ifdef CONFIG_ISA_ARCV2

/*
 * ARCv2 idle: commit a SLEEP instruction with interrupts re-enabled at
 * (or below) the default priority so the core can be woken.
 */
void arch_cpu_idle(void)
{
	/* Re-enable interrupts <= default irq priority before committing SLEEP */
	const unsigned int arg = 0x10 | ARCV2_IRQ_DEF_PRIO;

	__asm__ __volatile__(
		"sleep %0	\n"
		:
		:"I"(arg)); /* can't be "r" has to be embedded const */

	/* Generic idle loop expects to resume with IRQs hard-disabled */
	raw_local_irq_disable();
}
|
|
|
|
|
2017-05-28 09:52:06 +03:00
|
|
|
#else	/* ARC700 */

/*
 * ARC700 idle: SLEEP with both interrupt levels (E1/E2) enabled so the
 * core can be woken by any interrupt.
 */
void arch_cpu_idle(void)
{
	/* sleep, but enable both set E1/E2 (levels of interrupts) before committing */
	__asm__ __volatile__("sleep 0x3	\n");

	/* Generic idle loop expects to resume with IRQs hard-disabled */
	raw_local_irq_disable();
}

#endif
|
|
|
|
|
2013-01-18 15:12:18 +05:30
|
|
|
asmlinkage void ret_from_fork(void);

/*
 * Copy architecture-specific thread state
 *
 * Layout of Child kernel mode stack as setup at the end of this function is
 *
 * |     ...        |
 * |     ...        |
 * |    unused      |
 * |                |
 * ------------------
 * |     r25        |   <==== top of Stack (thread_info.ksp)
 * ~                ~
 * |    --to--      |   (CALLEE Regs of kernel mode)
 * |     r13        |
 * ------------------
 * |     fp         |
 * |    blink       |   @ret_from_fork
 * ------------------
 * |                |
 * ~                ~
 * ~                ~
 * |                |
 * ------------------
 * |     r12        |
 * ~                ~
 * |    --to--      |   (scratch Regs of user mode)
 * |     r0         |
 * ------------------
 * |      SP        |
 * |    orig_r0     |
 * |    event/ECR   |
 * ------------------  <===== END of PAGE
 */
int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
{
	unsigned long clone_flags = args->flags;
	unsigned long usp = args->stack;
	unsigned long tls = args->tls;
	struct pt_regs *c_regs;        /* child's pt_regs */
	unsigned long *childksp;       /* to unwind out of __switch_to() */
	struct callee_regs *c_callee;  /* child's callee regs */
	struct callee_regs *parent_callee;  /* parent's callee regs */
	struct pt_regs *regs = current_pt_regs();

	/* Mark the specific anchors to begin with (see pic above) */
	c_regs = task_pt_regs(p);
	childksp = (unsigned long *)c_regs - 2;  /* 2 words for FP/BLINK */
	c_callee = ((struct callee_regs *)childksp) - 1;

	/*
	 * __switch_to() uses thread_info.ksp to start unwinding stack
	 * For kernel threads we don't need to create callee regs, the
	 * stack layout nevertheless needs to remain the same.
	 * Also, since __switch_to anyways unwinds callee regs, we use
	 * this to populate kernel thread entry-pt/args into callee regs,
	 * so that ret_from_kernel_thread() becomes simpler.
	 */
	task_thread_info(p)->ksp = (unsigned long)c_callee;	/* THREAD_INFO_KSP */

	/* __switch_to expects FP(0), BLINK(return addr) at top */
	childksp[0] = 0;			/* fp */
	childksp[1] = (unsigned long)ret_from_fork; /* blink */

	if (unlikely(args->fn)) {
		/* Kernel thread: no user context to copy, zeroed pt_regs */
		memset(c_regs, 0, sizeof(struct pt_regs));

		/* entry point and its argument, unwound into r13/r14 by __switch_to */
		c_callee->r13 = (unsigned long)args->fn_arg;
		c_callee->r14 = (unsigned long)args->fn;

		return 0;
	}

	/*--------- User Task Only --------------*/

	/* __switch_to expects FP(0), BLINK(return addr) at top of stack */
	/* NOTE(review): these two stores repeat the identical ones above - TODO confirm redundancy */
	childksp[0] = 0;				/* for POP fp */
	childksp[1] = (unsigned long)ret_from_fork; /* for POP blink */

	/* Copy parents pt regs on child's kernel mode stack */
	*c_regs = *regs;

	/* Child gets its own user stack if one was passed (clone), else parent's */
	if (usp)
		c_regs->sp = usp;

	c_regs->r0 = 0;		/* fork returns 0 in child */

	parent_callee = ((struct callee_regs *)regs) - 1;
	*c_callee = *parent_callee;

	if (unlikely(clone_flags & CLONE_SETTLS)) {
		/*
		 * set task's userland tls data ptr from 4th arg
		 * clone C-lib call differs from clone sys-call
		 */
		task_thread_info(p)->thr_ptr = tls;
	} else {
		/* Normal fork case: set parent's TLS ptr in child */
		task_thread_info(p)->thr_ptr =
		task_thread_info(current)->thr_ptr;
	}

	/*
	 * setup usermode thread pointer #1:
	 * when child is picked by scheduler, __switch_to() uses @c_callee to
	 * populate usermode callee regs: this works (despite being in a kernel
	 * function) since special return path for child @ret_from_fork()
	 * ensures those regs are not clobbered all the way to RTIE to usermode
	 */
	c_callee->r25 = task_thread_info(p)->thr_ptr;

	return 0;
}
|
|
|
|
|
2014-04-18 12:19:59 +05:30
|
|
|
/*
 * Do necessary setup to start up a new user task
 *
 * @regs: the task's saved register frame, rewritten for first entry
 * @pc:   user-mode entry point (typically the ELF entry)
 * @usp:  initial user stack pointer
 */
void start_thread(struct pt_regs *regs, unsigned long pc, unsigned long usp)
{
	regs->sp = usp;
	regs->ret = pc;	/* RTIE resumes user execution at @pc */

	/*
	 * [U]ser Mode bit set
	 * [L] ZOL loop inhibited to begin with - cleared by a LP insn
	 * Interrupts enabled
	 */
	regs->status32 = STATUS_U_MASK | STATUS_L_MASK | ISA_INIT_STATUS_BITS;

	/* reset FPU state for the fresh task (after status32 is set up) */
	fpu_init_task(regs);

	/* bogus seed values for debugging */
	regs->lp_start = 0x10;
	regs->lp_end = 0x80;
}
|
|
|
|
|
2013-01-18 15:12:18 +05:30
|
|
|
/*
 * Some archs flush debug and FPU info here
 *
 * ARC has no per-thread debug/FPU state needing teardown on exec(),
 * so this is intentionally a no-op.
 */
void flush_thread(void)
{
}
|
|
|
|
|
|
|
|
int elf_check_arch(const struct elf32_hdr *x)
|
|
|
|
{
|
|
|
|
unsigned int eflags;
|
|
|
|
|
ARCv2: Support for ARCv2 ISA and HS38x cores
The notable features are:
- SMP configurations of upto 4 cores with coherency
- Optional L2 Cache and IO-Coherency
- Revised Interrupt Architecture (multiple priorites, reg banks,
auto stack switch, auto regfile save/restore)
- MMUv4 (PIPT dcache, Huge Pages)
- Instructions for
* 64bit load/store: LDD, STD
* Hardware assisted divide/remainder: DIV, REM
* Function prologue/epilogue: ENTER_S, LEAVE_S
* IRQ enable/disable: CLRI, SETI
* pop count: FFS, FLS
* SETcc, BMSKN, XBFU...
Signed-off-by: Vineet Gupta <vgupta@synopsys.com>
2013-05-13 18:30:41 +05:30
|
|
|
if (x->e_machine != EM_ARC_INUSE) {
|
|
|
|
pr_err("ELF not built for %s ISA\n",
|
|
|
|
is_isa_arcompact() ? "ARCompact":"ARCv2");
|
2013-01-18 15:12:18 +05:30
|
|
|
return 0;
|
ARCv2: Support for ARCv2 ISA and HS38x cores
The notable features are:
- SMP configurations of upto 4 cores with coherency
- Optional L2 Cache and IO-Coherency
- Revised Interrupt Architecture (multiple priorites, reg banks,
auto stack switch, auto regfile save/restore)
- MMUv4 (PIPT dcache, Huge Pages)
- Instructions for
* 64bit load/store: LDD, STD
* Hardware assisted divide/remainder: DIV, REM
* Function prologue/epilogue: ENTER_S, LEAVE_S
* IRQ enable/disable: CLRI, SETI
* pop count: FFS, FLS
* SETcc, BMSKN, XBFU...
Signed-off-by: Vineet Gupta <vgupta@synopsys.com>
2013-05-13 18:30:41 +05:30
|
|
|
}
|
2013-01-18 15:12:18 +05:30
|
|
|
|
|
|
|
eflags = x->e_flags;
|
2016-08-10 14:10:57 -07:00
|
|
|
if ((eflags & EF_ARC_OSABI_MSK) != EF_ARC_OSABI_CURRENT) {
|
2013-01-18 15:12:18 +05:30
|
|
|
pr_err("ABI mismatch - you need newer toolchain\n");
|
2021-10-25 10:50:57 -05:00
|
|
|
force_fatal_sig(SIGSEGV);
|
2013-01-18 15:12:18 +05:30
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL(elf_check_arch);
|