// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012 Regents of the University of California
 * Copyright (C) 2017 SiFive
 */

#include <linux/kbuild.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/ftrace.h>
#include <linux/suspend.h>
#include <asm/kvm_host.h>
#include <asm/thread_info.h>
#include <asm/ptrace.h>
#include <asm/cpu_ops_sbi.h>
#include <asm/stacktrace.h>
#include <asm/suspend.h>
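
/*
 * OFFSET() and DEFINE() come from <linux/kbuild.h>.  They emit no real code:
 * each expands to an asm() marker that the Kbuild asm-offsets rule turns into
 * a constant in include/generated/asm-offsets.h, so assembly files can refer
 * to C structure members by name.  Roughly:
 *
 *	OFFSET(TASK_THREAD_RA, task_struct, thread.ra);
 *	  generates something like:  #define TASK_THREAD_RA 2344
 *	  (2344 is an illustrative value, not the real offset)
 */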

void asm_offsets(void);

void asm_offsets(void)
{
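	/*
	 * struct task_struct / struct thread_info offsets, used by the
	 * context-switch and low-level kernel-entry assembly.
	 */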
	OFFSET(TASK_THREAD_RA, task_struct, thread.ra);
	OFFSET(TASK_THREAD_SP, task_struct, thread.sp);
	OFFSET(TASK_THREAD_S0, task_struct, thread.s[0]);
	OFFSET(TASK_THREAD_S1, task_struct, thread.s[1]);
	OFFSET(TASK_THREAD_S2, task_struct, thread.s[2]);
	OFFSET(TASK_THREAD_S3, task_struct, thread.s[3]);
	OFFSET(TASK_THREAD_S4, task_struct, thread.s[4]);
	OFFSET(TASK_THREAD_S5, task_struct, thread.s[5]);
	OFFSET(TASK_THREAD_S6, task_struct, thread.s[6]);
	OFFSET(TASK_THREAD_S7, task_struct, thread.s[7]);
	OFFSET(TASK_THREAD_S8, task_struct, thread.s[8]);
	OFFSET(TASK_THREAD_S9, task_struct, thread.s[9]);
	OFFSET(TASK_THREAD_S10, task_struct, thread.s[10]);
	OFFSET(TASK_THREAD_S11, task_struct, thread.s[11]);

	OFFSET(TASK_TI_CPU, task_struct, thread_info.cpu);
	OFFSET(TASK_TI_FLAGS, task_struct, thread_info.flags);
	OFFSET(TASK_TI_PREEMPT_COUNT, task_struct, thread_info.preempt_count);
	OFFSET(TASK_TI_KERNEL_SP, task_struct, thread_info.kernel_sp);
	OFFSET(TASK_TI_USER_SP, task_struct, thread_info.user_sp);
#ifdef CONFIG_SHADOW_CALL_STACK
	OFFSET(TASK_TI_SCS_SP, task_struct, thread_info.scs_sp);
#endif
#ifdef CONFIG_64BIT
	OFFSET(TASK_TI_A0, task_struct, thread_info.a0);
	OFFSET(TASK_TI_A1, task_struct, thread_info.a1);
	OFFSET(TASK_TI_A2, task_struct, thread_info.a2);
#endif

	OFFSET(TASK_TI_CPU_NUM, task_struct, thread_info.cpu);

	OFFSET(TASK_THREAD_F0, task_struct, thread.fstate.f[0]);
	OFFSET(TASK_THREAD_F1, task_struct, thread.fstate.f[1]);
	OFFSET(TASK_THREAD_F2, task_struct, thread.fstate.f[2]);
	OFFSET(TASK_THREAD_F3, task_struct, thread.fstate.f[3]);
	OFFSET(TASK_THREAD_F4, task_struct, thread.fstate.f[4]);
	OFFSET(TASK_THREAD_F5, task_struct, thread.fstate.f[5]);
	OFFSET(TASK_THREAD_F6, task_struct, thread.fstate.f[6]);
	OFFSET(TASK_THREAD_F7, task_struct, thread.fstate.f[7]);
	OFFSET(TASK_THREAD_F8, task_struct, thread.fstate.f[8]);
	OFFSET(TASK_THREAD_F9, task_struct, thread.fstate.f[9]);
	OFFSET(TASK_THREAD_F10, task_struct, thread.fstate.f[10]);
	OFFSET(TASK_THREAD_F11, task_struct, thread.fstate.f[11]);
	OFFSET(TASK_THREAD_F12, task_struct, thread.fstate.f[12]);
	OFFSET(TASK_THREAD_F13, task_struct, thread.fstate.f[13]);
	OFFSET(TASK_THREAD_F14, task_struct, thread.fstate.f[14]);
	OFFSET(TASK_THREAD_F15, task_struct, thread.fstate.f[15]);
	OFFSET(TASK_THREAD_F16, task_struct, thread.fstate.f[16]);
	OFFSET(TASK_THREAD_F17, task_struct, thread.fstate.f[17]);
	OFFSET(TASK_THREAD_F18, task_struct, thread.fstate.f[18]);
	OFFSET(TASK_THREAD_F19, task_struct, thread.fstate.f[19]);
	OFFSET(TASK_THREAD_F20, task_struct, thread.fstate.f[20]);
	OFFSET(TASK_THREAD_F21, task_struct, thread.fstate.f[21]);
	OFFSET(TASK_THREAD_F22, task_struct, thread.fstate.f[22]);
	OFFSET(TASK_THREAD_F23, task_struct, thread.fstate.f[23]);
	OFFSET(TASK_THREAD_F24, task_struct, thread.fstate.f[24]);
	OFFSET(TASK_THREAD_F25, task_struct, thread.fstate.f[25]);
	OFFSET(TASK_THREAD_F26, task_struct, thread.fstate.f[26]);
	OFFSET(TASK_THREAD_F27, task_struct, thread.fstate.f[27]);
	OFFSET(TASK_THREAD_F28, task_struct, thread.fstate.f[28]);
	OFFSET(TASK_THREAD_F29, task_struct, thread.fstate.f[29]);
	OFFSET(TASK_THREAD_F30, task_struct, thread.fstate.f[30]);
	OFFSET(TASK_THREAD_F31, task_struct, thread.fstate.f[31]);
	OFFSET(TASK_THREAD_FCSR, task_struct, thread.fstate.fcsr);

#ifdef CONFIG_STACKPROTECTOR
	OFFSET(TSK_STACK_CANARY, task_struct, stack_canary);
#endif

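	/*
	 * struct pt_regs offsets, used by the trap entry/exit assembly when
	 * saving and restoring the interrupted context.
	 */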
	DEFINE(PT_SIZE, sizeof(struct pt_regs));
	OFFSET(PT_EPC, pt_regs, epc);
	OFFSET(PT_RA, pt_regs, ra);
	OFFSET(PT_FP, pt_regs, s0);
	OFFSET(PT_S0, pt_regs, s0);
	OFFSET(PT_S1, pt_regs, s1);
	OFFSET(PT_S2, pt_regs, s2);
	OFFSET(PT_S3, pt_regs, s3);
	OFFSET(PT_S4, pt_regs, s4);
	OFFSET(PT_S5, pt_regs, s5);
	OFFSET(PT_S6, pt_regs, s6);
	OFFSET(PT_S7, pt_regs, s7);
	OFFSET(PT_S8, pt_regs, s8);
	OFFSET(PT_S9, pt_regs, s9);
	OFFSET(PT_S10, pt_regs, s10);
	OFFSET(PT_S11, pt_regs, s11);
	OFFSET(PT_SP, pt_regs, sp);
	OFFSET(PT_TP, pt_regs, tp);
	OFFSET(PT_A0, pt_regs, a0);
	OFFSET(PT_A1, pt_regs, a1);
	OFFSET(PT_A2, pt_regs, a2);
	OFFSET(PT_A3, pt_regs, a3);
	OFFSET(PT_A4, pt_regs, a4);
	OFFSET(PT_A5, pt_regs, a5);
	OFFSET(PT_A6, pt_regs, a6);
	OFFSET(PT_A7, pt_regs, a7);
	OFFSET(PT_T0, pt_regs, t0);
	OFFSET(PT_T1, pt_regs, t1);
	OFFSET(PT_T2, pt_regs, t2);
	OFFSET(PT_T3, pt_regs, t3);
	OFFSET(PT_T4, pt_regs, t4);
	OFFSET(PT_T5, pt_regs, t5);
	OFFSET(PT_T6, pt_regs, t6);
	OFFSET(PT_GP, pt_regs, gp);
	OFFSET(PT_ORIG_A0, pt_regs, orig_a0);
	OFFSET(PT_STATUS, pt_regs, status);
	OFFSET(PT_BADADDR, pt_regs, badaddr);
	OFFSET(PT_CAUSE, pt_regs, cause);

	OFFSET(SUSPEND_CONTEXT_REGS, suspend_context, regs);

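	/* struct pbe offsets, used when restoring the hibernation image. */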
	OFFSET(HIBERN_PBE_ADDR, pbe, address);
	OFFSET(HIBERN_PBE_ORIG, pbe, orig_address);
	OFFSET(HIBERN_PBE_NEXT, pbe, next);

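	/*
	 * KVM guest/host context offsets, consumed by the KVM world-switch
	 * assembly when entering and leaving a guest.
	 */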
	OFFSET(KVM_ARCH_GUEST_ZERO, kvm_vcpu_arch, guest_context.zero);
	OFFSET(KVM_ARCH_GUEST_RA, kvm_vcpu_arch, guest_context.ra);
	OFFSET(KVM_ARCH_GUEST_SP, kvm_vcpu_arch, guest_context.sp);
	OFFSET(KVM_ARCH_GUEST_GP, kvm_vcpu_arch, guest_context.gp);
	OFFSET(KVM_ARCH_GUEST_TP, kvm_vcpu_arch, guest_context.tp);
	OFFSET(KVM_ARCH_GUEST_T0, kvm_vcpu_arch, guest_context.t0);
	OFFSET(KVM_ARCH_GUEST_T1, kvm_vcpu_arch, guest_context.t1);
	OFFSET(KVM_ARCH_GUEST_T2, kvm_vcpu_arch, guest_context.t2);
	OFFSET(KVM_ARCH_GUEST_S0, kvm_vcpu_arch, guest_context.s0);
	OFFSET(KVM_ARCH_GUEST_S1, kvm_vcpu_arch, guest_context.s1);
	OFFSET(KVM_ARCH_GUEST_A0, kvm_vcpu_arch, guest_context.a0);
	OFFSET(KVM_ARCH_GUEST_A1, kvm_vcpu_arch, guest_context.a1);
	OFFSET(KVM_ARCH_GUEST_A2, kvm_vcpu_arch, guest_context.a2);
	OFFSET(KVM_ARCH_GUEST_A3, kvm_vcpu_arch, guest_context.a3);
	OFFSET(KVM_ARCH_GUEST_A4, kvm_vcpu_arch, guest_context.a4);
	OFFSET(KVM_ARCH_GUEST_A5, kvm_vcpu_arch, guest_context.a5);
	OFFSET(KVM_ARCH_GUEST_A6, kvm_vcpu_arch, guest_context.a6);
	OFFSET(KVM_ARCH_GUEST_A7, kvm_vcpu_arch, guest_context.a7);
	OFFSET(KVM_ARCH_GUEST_S2, kvm_vcpu_arch, guest_context.s2);
	OFFSET(KVM_ARCH_GUEST_S3, kvm_vcpu_arch, guest_context.s3);
	OFFSET(KVM_ARCH_GUEST_S4, kvm_vcpu_arch, guest_context.s4);
	OFFSET(KVM_ARCH_GUEST_S5, kvm_vcpu_arch, guest_context.s5);
	OFFSET(KVM_ARCH_GUEST_S6, kvm_vcpu_arch, guest_context.s6);
	OFFSET(KVM_ARCH_GUEST_S7, kvm_vcpu_arch, guest_context.s7);
	OFFSET(KVM_ARCH_GUEST_S8, kvm_vcpu_arch, guest_context.s8);
	OFFSET(KVM_ARCH_GUEST_S9, kvm_vcpu_arch, guest_context.s9);
	OFFSET(KVM_ARCH_GUEST_S10, kvm_vcpu_arch, guest_context.s10);
	OFFSET(KVM_ARCH_GUEST_S11, kvm_vcpu_arch, guest_context.s11);
	OFFSET(KVM_ARCH_GUEST_T3, kvm_vcpu_arch, guest_context.t3);
	OFFSET(KVM_ARCH_GUEST_T4, kvm_vcpu_arch, guest_context.t4);
	OFFSET(KVM_ARCH_GUEST_T5, kvm_vcpu_arch, guest_context.t5);
	OFFSET(KVM_ARCH_GUEST_T6, kvm_vcpu_arch, guest_context.t6);
	OFFSET(KVM_ARCH_GUEST_SEPC, kvm_vcpu_arch, guest_context.sepc);
	OFFSET(KVM_ARCH_GUEST_SSTATUS, kvm_vcpu_arch, guest_context.sstatus);
	OFFSET(KVM_ARCH_GUEST_HSTATUS, kvm_vcpu_arch, guest_context.hstatus);
	OFFSET(KVM_ARCH_GUEST_SCOUNTEREN, kvm_vcpu_arch, guest_csr.scounteren);

	OFFSET(KVM_ARCH_HOST_ZERO, kvm_vcpu_arch, host_context.zero);
	OFFSET(KVM_ARCH_HOST_RA, kvm_vcpu_arch, host_context.ra);
	OFFSET(KVM_ARCH_HOST_SP, kvm_vcpu_arch, host_context.sp);
	OFFSET(KVM_ARCH_HOST_GP, kvm_vcpu_arch, host_context.gp);
	OFFSET(KVM_ARCH_HOST_TP, kvm_vcpu_arch, host_context.tp);
	OFFSET(KVM_ARCH_HOST_T0, kvm_vcpu_arch, host_context.t0);
	OFFSET(KVM_ARCH_HOST_T1, kvm_vcpu_arch, host_context.t1);
	OFFSET(KVM_ARCH_HOST_T2, kvm_vcpu_arch, host_context.t2);
	OFFSET(KVM_ARCH_HOST_S0, kvm_vcpu_arch, host_context.s0);
	OFFSET(KVM_ARCH_HOST_S1, kvm_vcpu_arch, host_context.s1);
	OFFSET(KVM_ARCH_HOST_A0, kvm_vcpu_arch, host_context.a0);
	OFFSET(KVM_ARCH_HOST_A1, kvm_vcpu_arch, host_context.a1);
	OFFSET(KVM_ARCH_HOST_A2, kvm_vcpu_arch, host_context.a2);
	OFFSET(KVM_ARCH_HOST_A3, kvm_vcpu_arch, host_context.a3);
	OFFSET(KVM_ARCH_HOST_A4, kvm_vcpu_arch, host_context.a4);
	OFFSET(KVM_ARCH_HOST_A5, kvm_vcpu_arch, host_context.a5);
	OFFSET(KVM_ARCH_HOST_A6, kvm_vcpu_arch, host_context.a6);
	OFFSET(KVM_ARCH_HOST_A7, kvm_vcpu_arch, host_context.a7);
	OFFSET(KVM_ARCH_HOST_S2, kvm_vcpu_arch, host_context.s2);
	OFFSET(KVM_ARCH_HOST_S3, kvm_vcpu_arch, host_context.s3);
	OFFSET(KVM_ARCH_HOST_S4, kvm_vcpu_arch, host_context.s4);
	OFFSET(KVM_ARCH_HOST_S5, kvm_vcpu_arch, host_context.s5);
	OFFSET(KVM_ARCH_HOST_S6, kvm_vcpu_arch, host_context.s6);
	OFFSET(KVM_ARCH_HOST_S7, kvm_vcpu_arch, host_context.s7);
	OFFSET(KVM_ARCH_HOST_S8, kvm_vcpu_arch, host_context.s8);
	OFFSET(KVM_ARCH_HOST_S9, kvm_vcpu_arch, host_context.s9);
	OFFSET(KVM_ARCH_HOST_S10, kvm_vcpu_arch, host_context.s10);
	OFFSET(KVM_ARCH_HOST_S11, kvm_vcpu_arch, host_context.s11);
	OFFSET(KVM_ARCH_HOST_T3, kvm_vcpu_arch, host_context.t3);
	OFFSET(KVM_ARCH_HOST_T4, kvm_vcpu_arch, host_context.t4);
	OFFSET(KVM_ARCH_HOST_T5, kvm_vcpu_arch, host_context.t5);
	OFFSET(KVM_ARCH_HOST_T6, kvm_vcpu_arch, host_context.t6);
	OFFSET(KVM_ARCH_HOST_SEPC, kvm_vcpu_arch, host_context.sepc);
	OFFSET(KVM_ARCH_HOST_SSTATUS, kvm_vcpu_arch, host_context.sstatus);
	OFFSET(KVM_ARCH_HOST_HSTATUS, kvm_vcpu_arch, host_context.hstatus);
	OFFSET(KVM_ARCH_HOST_SSCRATCH, kvm_vcpu_arch, host_sscratch);
	OFFSET(KVM_ARCH_HOST_STVEC, kvm_vcpu_arch, host_stvec);
	OFFSET(KVM_ARCH_HOST_SCOUNTEREN, kvm_vcpu_arch, host_scounteren);

	OFFSET(KVM_ARCH_TRAP_SEPC, kvm_cpu_trap, sepc);
	OFFSET(KVM_ARCH_TRAP_SCAUSE, kvm_cpu_trap, scause);
	OFFSET(KVM_ARCH_TRAP_STVAL, kvm_cpu_trap, stval);
	OFFSET(KVM_ARCH_TRAP_HTVAL, kvm_cpu_trap, htval);
	OFFSET(KVM_ARCH_TRAP_HTINST, kvm_cpu_trap, htinst);

	/* F extension */

	OFFSET(KVM_ARCH_FP_F_F0, kvm_cpu_context, fp.f.f[0]);
	OFFSET(KVM_ARCH_FP_F_F1, kvm_cpu_context, fp.f.f[1]);
	OFFSET(KVM_ARCH_FP_F_F2, kvm_cpu_context, fp.f.f[2]);
	OFFSET(KVM_ARCH_FP_F_F3, kvm_cpu_context, fp.f.f[3]);
	OFFSET(KVM_ARCH_FP_F_F4, kvm_cpu_context, fp.f.f[4]);
	OFFSET(KVM_ARCH_FP_F_F5, kvm_cpu_context, fp.f.f[5]);
	OFFSET(KVM_ARCH_FP_F_F6, kvm_cpu_context, fp.f.f[6]);
	OFFSET(KVM_ARCH_FP_F_F7, kvm_cpu_context, fp.f.f[7]);
	OFFSET(KVM_ARCH_FP_F_F8, kvm_cpu_context, fp.f.f[8]);
	OFFSET(KVM_ARCH_FP_F_F9, kvm_cpu_context, fp.f.f[9]);
	OFFSET(KVM_ARCH_FP_F_F10, kvm_cpu_context, fp.f.f[10]);
	OFFSET(KVM_ARCH_FP_F_F11, kvm_cpu_context, fp.f.f[11]);
	OFFSET(KVM_ARCH_FP_F_F12, kvm_cpu_context, fp.f.f[12]);
	OFFSET(KVM_ARCH_FP_F_F13, kvm_cpu_context, fp.f.f[13]);
	OFFSET(KVM_ARCH_FP_F_F14, kvm_cpu_context, fp.f.f[14]);
	OFFSET(KVM_ARCH_FP_F_F15, kvm_cpu_context, fp.f.f[15]);
	OFFSET(KVM_ARCH_FP_F_F16, kvm_cpu_context, fp.f.f[16]);
	OFFSET(KVM_ARCH_FP_F_F17, kvm_cpu_context, fp.f.f[17]);
	OFFSET(KVM_ARCH_FP_F_F18, kvm_cpu_context, fp.f.f[18]);
	OFFSET(KVM_ARCH_FP_F_F19, kvm_cpu_context, fp.f.f[19]);
	OFFSET(KVM_ARCH_FP_F_F20, kvm_cpu_context, fp.f.f[20]);
	OFFSET(KVM_ARCH_FP_F_F21, kvm_cpu_context, fp.f.f[21]);
	OFFSET(KVM_ARCH_FP_F_F22, kvm_cpu_context, fp.f.f[22]);
	OFFSET(KVM_ARCH_FP_F_F23, kvm_cpu_context, fp.f.f[23]);
	OFFSET(KVM_ARCH_FP_F_F24, kvm_cpu_context, fp.f.f[24]);
	OFFSET(KVM_ARCH_FP_F_F25, kvm_cpu_context, fp.f.f[25]);
	OFFSET(KVM_ARCH_FP_F_F26, kvm_cpu_context, fp.f.f[26]);
	OFFSET(KVM_ARCH_FP_F_F27, kvm_cpu_context, fp.f.f[27]);
	OFFSET(KVM_ARCH_FP_F_F28, kvm_cpu_context, fp.f.f[28]);
	OFFSET(KVM_ARCH_FP_F_F29, kvm_cpu_context, fp.f.f[29]);
	OFFSET(KVM_ARCH_FP_F_F30, kvm_cpu_context, fp.f.f[30]);
	OFFSET(KVM_ARCH_FP_F_F31, kvm_cpu_context, fp.f.f[31]);
	OFFSET(KVM_ARCH_FP_F_FCSR, kvm_cpu_context, fp.f.fcsr);

	/* D extension */

	OFFSET(KVM_ARCH_FP_D_F0, kvm_cpu_context, fp.d.f[0]);
	OFFSET(KVM_ARCH_FP_D_F1, kvm_cpu_context, fp.d.f[1]);
	OFFSET(KVM_ARCH_FP_D_F2, kvm_cpu_context, fp.d.f[2]);
	OFFSET(KVM_ARCH_FP_D_F3, kvm_cpu_context, fp.d.f[3]);
	OFFSET(KVM_ARCH_FP_D_F4, kvm_cpu_context, fp.d.f[4]);
	OFFSET(KVM_ARCH_FP_D_F5, kvm_cpu_context, fp.d.f[5]);
	OFFSET(KVM_ARCH_FP_D_F6, kvm_cpu_context, fp.d.f[6]);
	OFFSET(KVM_ARCH_FP_D_F7, kvm_cpu_context, fp.d.f[7]);
	OFFSET(KVM_ARCH_FP_D_F8, kvm_cpu_context, fp.d.f[8]);
	OFFSET(KVM_ARCH_FP_D_F9, kvm_cpu_context, fp.d.f[9]);
	OFFSET(KVM_ARCH_FP_D_F10, kvm_cpu_context, fp.d.f[10]);
	OFFSET(KVM_ARCH_FP_D_F11, kvm_cpu_context, fp.d.f[11]);
	OFFSET(KVM_ARCH_FP_D_F12, kvm_cpu_context, fp.d.f[12]);
	OFFSET(KVM_ARCH_FP_D_F13, kvm_cpu_context, fp.d.f[13]);
	OFFSET(KVM_ARCH_FP_D_F14, kvm_cpu_context, fp.d.f[14]);
	OFFSET(KVM_ARCH_FP_D_F15, kvm_cpu_context, fp.d.f[15]);
	OFFSET(KVM_ARCH_FP_D_F16, kvm_cpu_context, fp.d.f[16]);
	OFFSET(KVM_ARCH_FP_D_F17, kvm_cpu_context, fp.d.f[17]);
	OFFSET(KVM_ARCH_FP_D_F18, kvm_cpu_context, fp.d.f[18]);
	OFFSET(KVM_ARCH_FP_D_F19, kvm_cpu_context, fp.d.f[19]);
	OFFSET(KVM_ARCH_FP_D_F20, kvm_cpu_context, fp.d.f[20]);
	OFFSET(KVM_ARCH_FP_D_F21, kvm_cpu_context, fp.d.f[21]);
	OFFSET(KVM_ARCH_FP_D_F22, kvm_cpu_context, fp.d.f[22]);
	OFFSET(KVM_ARCH_FP_D_F23, kvm_cpu_context, fp.d.f[23]);
	OFFSET(KVM_ARCH_FP_D_F24, kvm_cpu_context, fp.d.f[24]);
	OFFSET(KVM_ARCH_FP_D_F25, kvm_cpu_context, fp.d.f[25]);
	OFFSET(KVM_ARCH_FP_D_F26, kvm_cpu_context, fp.d.f[26]);
	OFFSET(KVM_ARCH_FP_D_F27, kvm_cpu_context, fp.d.f[27]);
	OFFSET(KVM_ARCH_FP_D_F28, kvm_cpu_context, fp.d.f[28]);
	OFFSET(KVM_ARCH_FP_D_F29, kvm_cpu_context, fp.d.f[29]);
	OFFSET(KVM_ARCH_FP_D_F30, kvm_cpu_context, fp.d.f[30]);
	OFFSET(KVM_ARCH_FP_D_F31, kvm_cpu_context, fp.d.f[31]);
	OFFSET(KVM_ARCH_FP_D_FCSR, kvm_cpu_context, fp.d.fcsr);

	/*
	 * THREAD_{F,X}* might be larger than an S-type offset can handle, but
	 * these are used in performance-sensitive assembly, so we can't resort
	 * to loading the long immediate every time.
	 */
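	/*
	 * A rough sketch of how these relative offsets are meant to be used
	 * from assembly (register names and the exact sequence are
	 * illustrative, not taken from the real save/restore code): the base
	 * offset is loaded once, and the small *_RA/*_F0 deltas then fit in
	 * the 12-bit S-type immediate of each store:
	 *
	 *	li	t0, TASK_THREAD_F0		# may not fit in 12 bits
	 *	add	t0, a0, t0			# a0 = &task_struct
	 *	fsd	f0, TASK_THREAD_F0_F0(t0)
	 *	fsd	f1, TASK_THREAD_F1_F0(t0)
	 */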
	DEFINE(TASK_THREAD_RA_RA,
	       offsetof(struct task_struct, thread.ra)
	       - offsetof(struct task_struct, thread.ra)
	);
	DEFINE(TASK_THREAD_SP_RA,
	       offsetof(struct task_struct, thread.sp)
	       - offsetof(struct task_struct, thread.ra)
	);
	DEFINE(TASK_THREAD_S0_RA,
	       offsetof(struct task_struct, thread.s[0])
	       - offsetof(struct task_struct, thread.ra)
	);
	DEFINE(TASK_THREAD_S1_RA,
	       offsetof(struct task_struct, thread.s[1])
	       - offsetof(struct task_struct, thread.ra)
	);
	DEFINE(TASK_THREAD_S2_RA,
	       offsetof(struct task_struct, thread.s[2])
	       - offsetof(struct task_struct, thread.ra)
	);
	DEFINE(TASK_THREAD_S3_RA,
	       offsetof(struct task_struct, thread.s[3])
	       - offsetof(struct task_struct, thread.ra)
	);
	DEFINE(TASK_THREAD_S4_RA,
	       offsetof(struct task_struct, thread.s[4])
	       - offsetof(struct task_struct, thread.ra)
	);
	DEFINE(TASK_THREAD_S5_RA,
	       offsetof(struct task_struct, thread.s[5])
	       - offsetof(struct task_struct, thread.ra)
	);
	DEFINE(TASK_THREAD_S6_RA,
	       offsetof(struct task_struct, thread.s[6])
	       - offsetof(struct task_struct, thread.ra)
	);
	DEFINE(TASK_THREAD_S7_RA,
	       offsetof(struct task_struct, thread.s[7])
	       - offsetof(struct task_struct, thread.ra)
	);
	DEFINE(TASK_THREAD_S8_RA,
	       offsetof(struct task_struct, thread.s[8])
	       - offsetof(struct task_struct, thread.ra)
	);
	DEFINE(TASK_THREAD_S9_RA,
	       offsetof(struct task_struct, thread.s[9])
	       - offsetof(struct task_struct, thread.ra)
	);
	DEFINE(TASK_THREAD_S10_RA,
	       offsetof(struct task_struct, thread.s[10])
	       - offsetof(struct task_struct, thread.ra)
	);
	DEFINE(TASK_THREAD_S11_RA,
	       offsetof(struct task_struct, thread.s[11])
	       - offsetof(struct task_struct, thread.ra)
	);

	DEFINE(TASK_THREAD_F0_F0,
	       offsetof(struct task_struct, thread.fstate.f[0])
	       - offsetof(struct task_struct, thread.fstate.f[0])
	);
	DEFINE(TASK_THREAD_F1_F0,
	       offsetof(struct task_struct, thread.fstate.f[1])
	       - offsetof(struct task_struct, thread.fstate.f[0])
	);
	DEFINE(TASK_THREAD_F2_F0,
	       offsetof(struct task_struct, thread.fstate.f[2])
	       - offsetof(struct task_struct, thread.fstate.f[0])
	);
	DEFINE(TASK_THREAD_F3_F0,
	       offsetof(struct task_struct, thread.fstate.f[3])
	       - offsetof(struct task_struct, thread.fstate.f[0])
	);
	DEFINE(TASK_THREAD_F4_F0,
	       offsetof(struct task_struct, thread.fstate.f[4])
	       - offsetof(struct task_struct, thread.fstate.f[0])
	);
	DEFINE(TASK_THREAD_F5_F0,
	       offsetof(struct task_struct, thread.fstate.f[5])
	       - offsetof(struct task_struct, thread.fstate.f[0])
	);
	DEFINE(TASK_THREAD_F6_F0,
	       offsetof(struct task_struct, thread.fstate.f[6])
	       - offsetof(struct task_struct, thread.fstate.f[0])
	);
	DEFINE(TASK_THREAD_F7_F0,
	       offsetof(struct task_struct, thread.fstate.f[7])
	       - offsetof(struct task_struct, thread.fstate.f[0])
	);
	DEFINE(TASK_THREAD_F8_F0,
	       offsetof(struct task_struct, thread.fstate.f[8])
	       - offsetof(struct task_struct, thread.fstate.f[0])
	);
	DEFINE(TASK_THREAD_F9_F0,
	       offsetof(struct task_struct, thread.fstate.f[9])
	       - offsetof(struct task_struct, thread.fstate.f[0])
	);
	DEFINE(TASK_THREAD_F10_F0,
	       offsetof(struct task_struct, thread.fstate.f[10])
	       - offsetof(struct task_struct, thread.fstate.f[0])
	);
	DEFINE(TASK_THREAD_F11_F0,
	       offsetof(struct task_struct, thread.fstate.f[11])
	       - offsetof(struct task_struct, thread.fstate.f[0])
	);
	DEFINE(TASK_THREAD_F12_F0,
	       offsetof(struct task_struct, thread.fstate.f[12])
	       - offsetof(struct task_struct, thread.fstate.f[0])
	);
	DEFINE(TASK_THREAD_F13_F0,
	       offsetof(struct task_struct, thread.fstate.f[13])
	       - offsetof(struct task_struct, thread.fstate.f[0])
	);
	DEFINE(TASK_THREAD_F14_F0,
	       offsetof(struct task_struct, thread.fstate.f[14])
	       - offsetof(struct task_struct, thread.fstate.f[0])
	);
	DEFINE(TASK_THREAD_F15_F0,
	       offsetof(struct task_struct, thread.fstate.f[15])
	       - offsetof(struct task_struct, thread.fstate.f[0])
	);
	DEFINE(TASK_THREAD_F16_F0,
	       offsetof(struct task_struct, thread.fstate.f[16])
	       - offsetof(struct task_struct, thread.fstate.f[0])
	);
	DEFINE(TASK_THREAD_F17_F0,
	       offsetof(struct task_struct, thread.fstate.f[17])
	       - offsetof(struct task_struct, thread.fstate.f[0])
	);
	DEFINE(TASK_THREAD_F18_F0,
	       offsetof(struct task_struct, thread.fstate.f[18])
	       - offsetof(struct task_struct, thread.fstate.f[0])
	);
	DEFINE(TASK_THREAD_F19_F0,
	       offsetof(struct task_struct, thread.fstate.f[19])
	       - offsetof(struct task_struct, thread.fstate.f[0])
	);
	DEFINE(TASK_THREAD_F20_F0,
	       offsetof(struct task_struct, thread.fstate.f[20])
	       - offsetof(struct task_struct, thread.fstate.f[0])
	);
	DEFINE(TASK_THREAD_F21_F0,
	       offsetof(struct task_struct, thread.fstate.f[21])
	       - offsetof(struct task_struct, thread.fstate.f[0])
	);
	DEFINE(TASK_THREAD_F22_F0,
	       offsetof(struct task_struct, thread.fstate.f[22])
	       - offsetof(struct task_struct, thread.fstate.f[0])
	);
	DEFINE(TASK_THREAD_F23_F0,
	       offsetof(struct task_struct, thread.fstate.f[23])
	       - offsetof(struct task_struct, thread.fstate.f[0])
	);
	DEFINE(TASK_THREAD_F24_F0,
	       offsetof(struct task_struct, thread.fstate.f[24])
	       - offsetof(struct task_struct, thread.fstate.f[0])
	);
	DEFINE(TASK_THREAD_F25_F0,
	       offsetof(struct task_struct, thread.fstate.f[25])
	       - offsetof(struct task_struct, thread.fstate.f[0])
	);
	DEFINE(TASK_THREAD_F26_F0,
	       offsetof(struct task_struct, thread.fstate.f[26])
	       - offsetof(struct task_struct, thread.fstate.f[0])
	);
	DEFINE(TASK_THREAD_F27_F0,
	       offsetof(struct task_struct, thread.fstate.f[27])
	       - offsetof(struct task_struct, thread.fstate.f[0])
	);
	DEFINE(TASK_THREAD_F28_F0,
	       offsetof(struct task_struct, thread.fstate.f[28])
	       - offsetof(struct task_struct, thread.fstate.f[0])
	);
	DEFINE(TASK_THREAD_F29_F0,
	       offsetof(struct task_struct, thread.fstate.f[29])
	       - offsetof(struct task_struct, thread.fstate.f[0])
	);
	DEFINE(TASK_THREAD_F30_F0,
	       offsetof(struct task_struct, thread.fstate.f[30])
	       - offsetof(struct task_struct, thread.fstate.f[0])
	);
	DEFINE(TASK_THREAD_F31_F0,
	       offsetof(struct task_struct, thread.fstate.f[31])
	       - offsetof(struct task_struct, thread.fstate.f[0])
	);
	DEFINE(TASK_THREAD_FCSR_F0,
	       offsetof(struct task_struct, thread.fstate.fcsr)
	       - offsetof(struct task_struct, thread.fstate.f[0])
	);

	/*
	 * We allocate a pt_regs on the stack when entering the kernel. This
	 * ensures the alignment is sane.
	 */
	DEFINE(PT_SIZE_ON_STACK, ALIGN(sizeof(struct pt_regs), STACK_ALIGN));

	OFFSET(KERNEL_MAP_VIRT_ADDR, kernel_mapping, virt_addr);

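	/*
	 * Boot data handed to secondary harts started through the SBI HSM
	 * extension (see <asm/cpu_ops_sbi.h>).
	 */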
	OFFSET(SBI_HART_BOOT_TASK_PTR_OFFSET, sbi_hart_boot_data, task_ptr);
	OFFSET(SBI_HART_BOOT_STACK_PTR_OFFSET, sbi_hart_boot_data, stack_ptr);

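	/* struct stackframe layout, used when walking kernel stacks from assembly. */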
	DEFINE(STACKFRAME_SIZE_ON_STACK, ALIGN(sizeof(struct stackframe), STACK_ALIGN));
	OFFSET(STACKFRAME_FP, stackframe, fp);
	OFFSET(STACKFRAME_RA, stackframe, ra);

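	/*
	 * struct __arch_ftrace_regs layout, saved and restored by the dynamic
	 * ftrace trampoline.
	 */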
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_ARGS
	DEFINE(FREGS_SIZE_ON_STACK, ALIGN(sizeof(struct __arch_ftrace_regs), STACK_ALIGN));
	DEFINE(FREGS_EPC, offsetof(struct __arch_ftrace_regs, epc));
	DEFINE(FREGS_RA, offsetof(struct __arch_ftrace_regs, ra));
	DEFINE(FREGS_SP, offsetof(struct __arch_ftrace_regs, sp));
	DEFINE(FREGS_S0, offsetof(struct __arch_ftrace_regs, s0));
	DEFINE(FREGS_T1, offsetof(struct __arch_ftrace_regs, t1));
	DEFINE(FREGS_A0, offsetof(struct __arch_ftrace_regs, a0));
	DEFINE(FREGS_A1, offsetof(struct __arch_ftrace_regs, a1));
	DEFINE(FREGS_A2, offsetof(struct __arch_ftrace_regs, a2));
	DEFINE(FREGS_A3, offsetof(struct __arch_ftrace_regs, a3));
	DEFINE(FREGS_A4, offsetof(struct __arch_ftrace_regs, a4));
	DEFINE(FREGS_A5, offsetof(struct __arch_ftrace_regs, a5));
	DEFINE(FREGS_A6, offsetof(struct __arch_ftrace_regs, a6));
	DEFINE(FREGS_A7, offsetof(struct __arch_ftrace_regs, a7));
#endif
}