2022-02-07 17:23:17 +01:00
|
|
|
/* SPDX-License-Identifier: LGPL-2.1 OR MIT */
|
|
|
|
/*
|
|
|
|
* MIPS specific definitions for NOLIBC
|
|
|
|
* Copyright (C) 2017-2022 Willy Tarreau <w@1wt.eu>
|
|
|
|
*/
|
|
|
|
|
|
|
|
#ifndef _NOLIBC_ARCH_MIPS_H
|
|
|
|
#define _NOLIBC_ARCH_MIPS_H
|
|
|
|
|
2023-05-21 11:36:34 +02:00
|
|
|
#include "compiler.h"
|
2023-07-16 02:28:55 +08:00
|
|
|
#include "crt.h"
|
2023-05-21 11:36:34 +02:00
|
|
|
|
2023-10-20 09:30:33 +02:00
|
|
|
#if !defined(_ABIO32)
|
|
|
|
#error Unsupported MIPS ABI
|
|
|
|
#endif
|
|
|
|
|
2022-02-07 17:23:17 +01:00
|
|
|
/* Syscalls for MIPS ABI O32 :
|
|
|
|
* - WARNING! there's always a delayed slot!
|
|
|
|
* - WARNING again, the syntax is different, registers take a '$' and numbers
|
|
|
|
* do not.
|
|
|
|
* - registers are 32-bit
|
|
|
|
* - stack is 8-byte aligned
|
|
|
|
* - syscall number is passed in v0 (starts at 0xfa0).
|
|
|
|
* - arguments are in a0, a1, a2, a3, then the stack. The caller needs to
|
|
|
|
* leave some room in the stack for the callee to save a0..a3 if needed.
|
|
|
|
* - Many registers are clobbered, in fact only a0..a2 and s0..s8 are
|
|
|
|
* preserved. See: https://www.linux-mips.org/wiki/Syscall as well as
|
|
|
|
* scall32-o32.S in the kernel sources.
|
|
|
|
* - the system call is performed by calling "syscall"
|
|
|
|
* - syscall return comes in v0, and register a3 needs to be checked to know
|
|
|
|
* if an error occurred, in which case errno is in v0.
|
|
|
|
* - the arguments are cast to long and assigned into the target registers
|
|
|
|
* which are then simply passed as registers to the asm code, so that we
|
|
|
|
* don't have to experience issues with register constraints.
|
|
|
|
*/
|
|
|
|
|
2023-07-07 22:54:26 +08:00
|
|
|
/*
 * Registers the kernel may clobber across a MIPS o32 syscall, shared by all
 * my_syscallN macros below: "at", "v1", "hi"/"lo" and the temporaries
 * t0..t9 are not preserved (see scall32-o32.S in the kernel sources).
 * "memory" and "cc" are listed because the syscall may read/write arbitrary
 * memory and condition state.
 */
#define _NOLIBC_SYSCALL_CLOBBERLIST \
	"memory", "cc", "at", "v1", "hi", "lo", \
	"t0", "t1", "t2", "t3", "t4", "t5", "t6", "t7", "t8", "t9"
|
|
|
|
|
2022-02-07 17:23:17 +01:00
|
|
|
/*
 * Issue syscall <num> with no argument (MIPS o32 ABI): the syscall number
 * goes in $v0 and the result comes back in $v0. $a3 is set non-zero by the
 * kernel on failure, in which case $v0 holds the errno value, hence the
 * negation on return. The stack is temporarily lowered by 32 bytes (keeping
 * 8-byte alignment) because the callee may spill a0..a3 into the
 * caller-reserved area.
 *
 * Consistency fix: the input constraint is "0"(_num) — explicitly tied to
 * output operand 0 — as in my_syscall1..my_syscall6, instead of the
 * previous "r"(_num) which only worked because _num is pinned to $v0.
 */
#define my_syscall0(num)                                                      \
({                                                                            \
	register long _num  __asm__ ("v0") = (num);                           \
	register long _arg4 __asm__ ("a3");                                   \
	                                                                      \
	__asm__ volatile (                                                    \
		"addiu $sp, $sp, -32\n"                                       \
		"syscall\n"                                                   \
		"addiu $sp, $sp, 32\n"                                        \
		: "=r"(_num), "=r"(_arg4)                                     \
		: "0"(_num)                                                   \
		: _NOLIBC_SYSCALL_CLOBBERLIST                                 \
	);                                                                    \
	_arg4 ? -_num : _num;                                                 \
})
|
|
|
|
|
|
|
|
/*
 * Issue syscall <num> with one argument (MIPS o32 ABI): number in $v0,
 * argument in $a0. $a3 is non-zero on failure, in which case $v0 holds the
 * errno value, hence the negation. The stack is lowered by 32 bytes around
 * the syscall so the callee can spill a0..a3 into the caller-reserved area.
 */
#define my_syscall1(num, arg1)                                                \
({                                                                            \
	register long _num __asm__ ("v0") = (num);                            \
	register long _arg1 __asm__ ("a0") = (long)(arg1);                    \
	register long _arg4 __asm__ ("a3");                                   \
	                                                                      \
	__asm__ volatile (                                                    \
		"addiu $sp, $sp, -32\n"                                       \
		"syscall\n"                                                   \
		"addiu $sp, $sp, 32\n"                                        \
		: "=r"(_num), "=r"(_arg4)                                     \
		: "0"(_num),                                                  \
		  "r"(_arg1)                                                  \
		: _NOLIBC_SYSCALL_CLOBBERLIST                                 \
	);                                                                    \
	_arg4 ? -_num : _num;                                                 \
})
|
|
|
|
|
|
|
|
/*
 * Issue syscall <num> with two arguments (MIPS o32 ABI): number in $v0,
 * arguments in $a0..$a1. $a3 is non-zero on failure, in which case $v0
 * holds the errno value, hence the negation. The 32-byte stack adjustment
 * reserves the a0..a3 spill area the callee expects.
 */
#define my_syscall2(num, arg1, arg2)                                          \
({                                                                            \
	register long _num __asm__ ("v0") = (num);                            \
	register long _arg1 __asm__ ("a0") = (long)(arg1);                    \
	register long _arg2 __asm__ ("a1") = (long)(arg2);                    \
	register long _arg4 __asm__ ("a3");                                   \
	                                                                      \
	__asm__ volatile (                                                    \
		"addiu $sp, $sp, -32\n"                                       \
		"syscall\n"                                                   \
		"addiu $sp, $sp, 32\n"                                        \
		: "=r"(_num), "=r"(_arg4)                                     \
		: "0"(_num),                                                  \
		  "r"(_arg1), "r"(_arg2)                                      \
		: _NOLIBC_SYSCALL_CLOBBERLIST                                 \
	);                                                                    \
	_arg4 ? -_num : _num;                                                 \
})
|
|
|
|
|
|
|
|
/*
 * Issue syscall <num> with three arguments (MIPS o32 ABI): number in $v0,
 * arguments in $a0..$a2. $a3 is non-zero on failure, in which case $v0
 * holds the errno value, hence the negation. The 32-byte stack adjustment
 * reserves the a0..a3 spill area the callee expects.
 */
#define my_syscall3(num, arg1, arg2, arg3)                                    \
({                                                                            \
	register long _num __asm__ ("v0") = (num);                            \
	register long _arg1 __asm__ ("a0") = (long)(arg1);                    \
	register long _arg2 __asm__ ("a1") = (long)(arg2);                    \
	register long _arg3 __asm__ ("a2") = (long)(arg3);                    \
	register long _arg4 __asm__ ("a3");                                   \
	                                                                      \
	__asm__ volatile (                                                    \
		"addiu $sp, $sp, -32\n"                                       \
		"syscall\n"                                                   \
		"addiu $sp, $sp, 32\n"                                        \
		: "=r"(_num), "=r"(_arg4)                                     \
		: "0"(_num),                                                  \
		  "r"(_arg1), "r"(_arg2), "r"(_arg3)                          \
		: _NOLIBC_SYSCALL_CLOBBERLIST                                 \
	);                                                                    \
	_arg4 ? -_num : _num;                                                 \
})
|
|
|
|
|
|
|
|
/*
 * Issue syscall <num> with four arguments (MIPS o32 ABI): number in $v0,
 * arguments in $a0..$a3. Note that $a3 is both the fourth argument and the
 * error flag on return: non-zero means failure with the errno value in $v0,
 * hence the negation. The 32-byte stack adjustment reserves the a0..a3
 * spill area the callee expects.
 */
#define my_syscall4(num, arg1, arg2, arg3, arg4)                              \
({                                                                            \
	register long _num __asm__ ("v0") = (num);                            \
	register long _arg1 __asm__ ("a0") = (long)(arg1);                    \
	register long _arg2 __asm__ ("a1") = (long)(arg2);                    \
	register long _arg3 __asm__ ("a2") = (long)(arg3);                    \
	register long _arg4 __asm__ ("a3") = (long)(arg4);                    \
	                                                                      \
	__asm__ volatile (                                                    \
		"addiu $sp, $sp, -32\n"                                       \
		"syscall\n"                                                   \
		"addiu $sp, $sp, 32\n"                                        \
		: "=r" (_num), "=r"(_arg4)                                    \
		: "0"(_num),                                                  \
		  "r"(_arg1), "r"(_arg2), "r"(_arg3), "r"(_arg4)              \
		: _NOLIBC_SYSCALL_CLOBBERLIST                                 \
	);                                                                    \
	_arg4 ? -_num : _num;                                                 \
})
|
|
|
|
|
|
|
|
/*
 * Issue syscall <num> with five arguments (MIPS o32 ABI): number in $v0,
 * arguments 1..4 in $a0..$a3, and the fifth argument on the stack at
 * 16($sp) as the o32 calling convention requires. $a3 doubles as the error
 * flag on return: non-zero means failure with the errno value in $v0,
 * hence the negation.
 */
#define my_syscall5(num, arg1, arg2, arg3, arg4, arg5)                        \
({                                                                            \
	register long _num __asm__ ("v0") = (num);                            \
	register long _arg1 __asm__ ("a0") = (long)(arg1);                    \
	register long _arg2 __asm__ ("a1") = (long)(arg2);                    \
	register long _arg3 __asm__ ("a2") = (long)(arg3);                    \
	register long _arg4 __asm__ ("a3") = (long)(arg4);                    \
	register long _arg5 = (long)(arg5);                                   \
	                                                                      \
	__asm__ volatile (                                                    \
		"addiu $sp, $sp, -32\n"                                       \
		"sw %7, 16($sp)\n" /* %7 = _arg5: 5th arg goes on the stack */\
		"syscall\n"                                                   \
		"addiu $sp, $sp, 32\n"                                        \
		: "=r" (_num), "=r"(_arg4)                                    \
		: "0"(_num),                                                  \
		  "r"(_arg1), "r"(_arg2), "r"(_arg3), "r"(_arg4), "r"(_arg5) \
		: _NOLIBC_SYSCALL_CLOBBERLIST                                 \
	);                                                                    \
	_arg4 ? -_num : _num;                                                 \
})
|
|
|
|
|
2023-07-07 22:55:35 +08:00
|
|
|
/*
 * Issue syscall <num> with six arguments (MIPS o32 ABI): number in $v0,
 * arguments 1..4 in $a0..$a3, arguments 5 and 6 on the stack at 16($sp)
 * and 20($sp) as the o32 calling convention requires. $a3 doubles as the
 * error flag on return: non-zero means failure with the errno value in
 * $v0, hence the negation.
 */
#define my_syscall6(num, arg1, arg2, arg3, arg4, arg5, arg6)                  \
({                                                                            \
	register long _num __asm__ ("v0") = (num);                            \
	register long _arg1 __asm__ ("a0") = (long)(arg1);                    \
	register long _arg2 __asm__ ("a1") = (long)(arg2);                    \
	register long _arg3 __asm__ ("a2") = (long)(arg3);                    \
	register long _arg4 __asm__ ("a3") = (long)(arg4);                    \
	register long _arg5 = (long)(arg5);                                   \
	register long _arg6 = (long)(arg6);                                   \
	                                                                      \
	__asm__ volatile (                                                    \
		"addiu $sp, $sp, -32\n"                                       \
		"sw %7, 16($sp)\n" /* %7 = _arg5: 5th arg on the stack */     \
		"sw %8, 20($sp)\n" /* %8 = _arg6: 6th arg on the stack */     \
		"syscall\n"                                                   \
		"addiu $sp, $sp, 32\n"                                        \
		: "=r" (_num), "=r"(_arg4)                                    \
		: "0"(_num),                                                  \
		  "r"(_arg1), "r"(_arg2), "r"(_arg3), "r"(_arg4), "r"(_arg5), \
		  "r"(_arg6)                                                  \
		: _NOLIBC_SYSCALL_CLOBBERLIST                                 \
	);                                                                    \
	_arg4 ? -_num : _num;                                                 \
})
|
|
|
|
|
2022-02-07 17:23:17 +01:00
|
|
|
/* startup code, note that it's called __start on MIPS */
/*
 * Entry point: sets up $gp for position-independent code (.cpload needs a
 * register holding the current PC, which the "bal 1f" provides in $ra),
 * passes the initial stack pointer to _start_c() as its first argument,
 * aligns the stack to 8 bytes and reserves the a0..a3 save area the o32
 * callee expects. The asm lives inside a C function (rather than a bare
 * top-level asm block) so the compiler and assembler agree on the current
 * section; the optimize/omit-frame-pointer attributes keep the function
 * free of any prologue. Never returns.
 */
void __attribute__((weak, noreturn, optimize("Os", "omit-frame-pointer"))) __no_stack_protector __start(void)
{
	__asm__ volatile (
		".set push\n"
		".set noreorder\n"       /* delay slots are filled by hand below */
		"bal 1f\n"               /* prime $ra for .cpload */
		"nop\n"                  /* delay slot of the bal above */
		"1:\n"
		".cpload $ra\n"          /* compute $gp from the PC now in $ra */
		"move $a0, $sp\n"        /* save stack pointer to $a0, as arg1 of _start_c */
		"addiu $sp, $sp, -4\n"   /* space for .cprestore to store $gp */
		".cprestore 0\n"
		"li $t0, -8\n"
		"and $sp, $sp, $t0\n"    /* $sp must be 8-byte aligned */
		"addiu $sp, $sp, -16\n"  /* the callee expects to save a0..a3 there */
		"jal _start_c\n"         /* transfer to c runtime */
		" nop\n"                 /* delayed slot */
		".set pop\n"
	);
	__builtin_unreachable();
}
|
2022-02-07 17:23:17 +01:00
|
|
|
|
2023-04-06 21:54:49 +00:00
|
|
|
#endif /* _NOLIBC_ARCH_MIPS_H */
|