2019-05-29 07:18:00 -07:00
|
|
|
/* SPDX-License-Identifier: GPL-2.0-only */
|
2017-07-10 18:05:09 -07:00
|
|
|
/*
|
|
|
|
* Copyright (C) 2015 Regents of the University of California
|
2020-03-17 18:11:34 -07:00
|
|
|
* Copyright (c) 2020 Western Digital Corporation or its affiliates.
|
2017-07-10 18:05:09 -07:00
|
|
|
*/
|
|
|
|
|
|
|
|
#ifndef _ASM_RISCV_SBI_H
|
|
|
|
#define _ASM_RISCV_SBI_H
|
|
|
|
|
|
|
|
#include <linux/types.h>
|
2022-01-20 01:09:18 -08:00
|
|
|
#include <linux/cpumask.h>
|
2024-08-29 18:50:48 +02:00
|
|
|
#include <linux/jump_label.h>
|
2017-07-10 18:05:09 -07:00
|
|
|
|
2019-10-28 13:10:34 +01:00
|
|
|
#ifdef CONFIG_RISCV_SBI
|
2020-03-17 18:11:35 -07:00
|
|
|
/*
 * SBI extension IDs (EIDs).
 * Multi-letter EIDs encode the extension's name in ASCII (e.g. 0x54494D45
 * is "TIME"); the legacy v0.1 calls each use a small dedicated EID with no
 * function ID.
 */
enum sbi_ext_id {
#ifdef CONFIG_RISCV_SBI_V01
	/* Legacy (v0.1) extensions: one EID per call */
	SBI_EXT_0_1_SET_TIMER = 0x0,
	SBI_EXT_0_1_CONSOLE_PUTCHAR = 0x1,
	SBI_EXT_0_1_CONSOLE_GETCHAR = 0x2,
	SBI_EXT_0_1_CLEAR_IPI = 0x3,
	SBI_EXT_0_1_SEND_IPI = 0x4,
	SBI_EXT_0_1_REMOTE_FENCE_I = 0x5,
	SBI_EXT_0_1_REMOTE_SFENCE_VMA = 0x6,
	SBI_EXT_0_1_REMOTE_SFENCE_VMA_ASID = 0x7,
	SBI_EXT_0_1_SHUTDOWN = 0x8,
#endif
	SBI_EXT_BASE = 0x10,
	SBI_EXT_TIME = 0x54494D45,	/* "TIME" */
	SBI_EXT_IPI = 0x735049,		/* "sPI" */
	SBI_EXT_RFENCE = 0x52464E43,	/* "RFNC" */
	SBI_EXT_HSM = 0x48534D,		/* "HSM" */
	SBI_EXT_SRST = 0x53525354,	/* "SRST" */
	SBI_EXT_SUSP = 0x53555350,	/* "SUSP" */
	SBI_EXT_PMU = 0x504D55,		/* "PMU" */
	SBI_EXT_DBCN = 0x4442434E,	/* "DBCN" */
	SBI_EXT_STA = 0x535441,		/* "STA" */
	SBI_EXT_NACL = 0x4E41434C,	/* "NACL" */
	SBI_EXT_FWFT = 0x46574654,	/* "FWFT" */

	/* Experimentals extensions must lie within this range */
	SBI_EXT_EXPERIMENTAL_START = 0x08000000,
	SBI_EXT_EXPERIMENTAL_END = 0x08FFFFFF,

	/* Vendor extensions must lie within this range */
	SBI_EXT_VENDOR_START = 0x09000000,
	SBI_EXT_VENDOR_END = 0x09FFFFFF,
};
|
2017-07-10 18:05:09 -07:00
|
|
|
|
2020-03-17 18:11:35 -07:00
|
|
|
enum sbi_ext_base_fid {
|
|
|
|
SBI_EXT_BASE_GET_SPEC_VERSION = 0,
|
|
|
|
SBI_EXT_BASE_GET_IMP_ID,
|
|
|
|
SBI_EXT_BASE_GET_IMP_VERSION,
|
|
|
|
SBI_EXT_BASE_PROBE_EXT,
|
|
|
|
SBI_EXT_BASE_GET_MVENDORID,
|
|
|
|
SBI_EXT_BASE_GET_MARCHID,
|
|
|
|
SBI_EXT_BASE_GET_MIMPID,
|
|
|
|
};
|
2017-07-10 18:05:09 -07:00
|
|
|
|
2020-03-17 18:11:36 -07:00
|
|
|
enum sbi_ext_time_fid {
|
|
|
|
SBI_EXT_TIME_SET_TIMER = 0,
|
|
|
|
};
|
|
|
|
|
|
|
|
enum sbi_ext_ipi_fid {
|
|
|
|
SBI_EXT_IPI_SEND_IPI = 0,
|
|
|
|
};
|
|
|
|
|
|
|
|
enum sbi_ext_rfence_fid {
|
|
|
|
SBI_EXT_RFENCE_REMOTE_FENCE_I = 0,
|
|
|
|
SBI_EXT_RFENCE_REMOTE_SFENCE_VMA,
|
|
|
|
SBI_EXT_RFENCE_REMOTE_SFENCE_VMA_ASID,
|
|
|
|
SBI_EXT_RFENCE_REMOTE_HFENCE_GVMA_VMID,
|
2021-03-06 06:48:01 +01:00
|
|
|
SBI_EXT_RFENCE_REMOTE_HFENCE_GVMA,
|
2020-03-17 18:11:36 -07:00
|
|
|
SBI_EXT_RFENCE_REMOTE_HFENCE_VVMA_ASID,
|
2021-03-06 06:48:01 +01:00
|
|
|
SBI_EXT_RFENCE_REMOTE_HFENCE_VVMA,
|
2020-03-17 18:11:36 -07:00
|
|
|
};
|
|
|
|
|
2020-03-17 18:11:42 -07:00
|
|
|
enum sbi_ext_hsm_fid {
|
|
|
|
SBI_EXT_HSM_HART_START = 0,
|
|
|
|
SBI_EXT_HSM_HART_STOP,
|
|
|
|
SBI_EXT_HSM_HART_STATUS,
|
2021-02-15 10:43:39 +05:30
|
|
|
SBI_EXT_HSM_HART_SUSPEND,
|
2020-03-17 18:11:42 -07:00
|
|
|
};
|
|
|
|
|
2021-02-15 10:43:39 +05:30
|
|
|
enum sbi_hsm_hart_state {
|
|
|
|
SBI_HSM_STATE_STARTED = 0,
|
|
|
|
SBI_HSM_STATE_STOPPED,
|
|
|
|
SBI_HSM_STATE_START_PENDING,
|
|
|
|
SBI_HSM_STATE_STOP_PENDING,
|
|
|
|
SBI_HSM_STATE_SUSPENDED,
|
|
|
|
SBI_HSM_STATE_SUSPEND_PENDING,
|
|
|
|
SBI_HSM_STATE_RESUME_PENDING,
|
2020-03-17 18:11:42 -07:00
|
|
|
};
|
|
|
|
|
2021-02-15 10:43:39 +05:30
|
|
|
#define SBI_HSM_SUSP_BASE_MASK 0x7fffffff
|
|
|
|
#define SBI_HSM_SUSP_NON_RET_BIT 0x80000000
|
|
|
|
#define SBI_HSM_SUSP_PLAT_BASE 0x10000000
|
|
|
|
|
|
|
|
#define SBI_HSM_SUSPEND_RET_DEFAULT 0x00000000
|
|
|
|
#define SBI_HSM_SUSPEND_RET_PLATFORM SBI_HSM_SUSP_PLAT_BASE
|
|
|
|
#define SBI_HSM_SUSPEND_RET_LAST SBI_HSM_SUSP_BASE_MASK
|
|
|
|
#define SBI_HSM_SUSPEND_NON_RET_DEFAULT SBI_HSM_SUSP_NON_RET_BIT
|
|
|
|
#define SBI_HSM_SUSPEND_NON_RET_PLATFORM (SBI_HSM_SUSP_NON_RET_BIT | \
|
|
|
|
SBI_HSM_SUSP_PLAT_BASE)
|
|
|
|
#define SBI_HSM_SUSPEND_NON_RET_LAST (SBI_HSM_SUSP_NON_RET_BIT | \
|
|
|
|
SBI_HSM_SUSP_BASE_MASK)
|
|
|
|
|
2021-06-09 17:43:22 +05:30
|
|
|
enum sbi_ext_srst_fid {
|
|
|
|
SBI_EXT_SRST_RESET = 0,
|
|
|
|
};
|
|
|
|
|
|
|
|
enum sbi_srst_reset_type {
|
|
|
|
SBI_SRST_RESET_TYPE_SHUTDOWN = 0,
|
|
|
|
SBI_SRST_RESET_TYPE_COLD_REBOOT,
|
|
|
|
SBI_SRST_RESET_TYPE_WARM_REBOOT,
|
|
|
|
};
|
|
|
|
|
|
|
|
enum sbi_srst_reset_reason {
|
|
|
|
SBI_SRST_RESET_REASON_NONE = 0,
|
|
|
|
SBI_SRST_RESET_REASON_SYS_FAILURE,
|
|
|
|
};
|
|
|
|
|
2023-12-06 12:08:09 +01:00
|
|
|
enum sbi_ext_susp_fid {
|
|
|
|
SBI_EXT_SUSP_SYSTEM_SUSPEND = 0,
|
|
|
|
};
|
|
|
|
|
|
|
|
enum sbi_ext_susp_sleep_type {
|
|
|
|
SBI_SUSP_SLEEP_TYPE_SUSPEND_TO_RAM = 0,
|
|
|
|
};
|
|
|
|
|
2022-02-18 16:46:56 -08:00
|
|
|
enum sbi_ext_pmu_fid {
|
|
|
|
SBI_EXT_PMU_NUM_COUNTERS = 0,
|
|
|
|
SBI_EXT_PMU_COUNTER_GET_INFO,
|
|
|
|
SBI_EXT_PMU_COUNTER_CFG_MATCH,
|
|
|
|
SBI_EXT_PMU_COUNTER_START,
|
|
|
|
SBI_EXT_PMU_COUNTER_STOP,
|
|
|
|
SBI_EXT_PMU_COUNTER_FW_READ,
|
2024-04-20 08:17:18 -07:00
|
|
|
SBI_EXT_PMU_COUNTER_FW_READ_HI,
|
2024-04-20 08:17:21 -07:00
|
|
|
SBI_EXT_PMU_SNAPSHOT_SET_SHMEM,
|
2022-02-18 16:46:56 -08:00
|
|
|
};
|
|
|
|
|
2022-07-11 10:46:31 -07:00
|
|
|
/*
 * Counter information word returned by SBI_EXT_PMU_COUNTER_GET_INFO,
 * either accessed as a raw value or decoded via the bitfields.
 */
union sbi_pmu_ctr_info {
	unsigned long value;
	struct {
		unsigned long csr:12;	/* CSR number backing the counter */
		unsigned long width:6;	/* counter width — NOTE(review): SBI spec encodes this as (number of bits - 1); confirm */
#if __riscv_xlen == 32
		unsigned long reserved:13;
#else
		unsigned long reserved:45;
#endif
		unsigned long type:1;	/* see enum sbi_pmu_ctr_type: 0 = HW, 1 = FW */
	};
};
|
|
|
|
|
2024-04-20 08:17:21 -07:00
|
|
|
/* Data structure to contain the pmu snapshot data */
struct riscv_pmu_snapshot_data {
	u64 ctr_overflow_mask;	/* bitmask of overflowed counters */
	u64 ctr_values[64];	/* latest value for each counter */
	u64 reserved[447];	/* pads the structure to 4096 bytes */
};
|
|
|
|
|
2022-07-11 10:46:32 -07:00
|
|
|
#define RISCV_PMU_RAW_EVENT_MASK GENMASK_ULL(47, 0)
|
2024-12-12 16:09:32 -08:00
|
|
|
#define RISCV_PMU_PLAT_FW_EVENT_MASK GENMASK_ULL(61, 0)
|
2022-02-18 16:46:56 -08:00
|
|
|
#define RISCV_PMU_RAW_EVENT_IDX 0x20000
|
2024-08-12 05:11:09 +00:00
|
|
|
#define RISCV_PLAT_FW_EVENT 0xFFFF
|
2022-02-18 16:46:56 -08:00
|
|
|
|
|
|
|
/** General pmu event codes specified in SBI PMU extension */
|
|
|
|
enum sbi_pmu_hw_generic_events_t {
|
|
|
|
SBI_PMU_HW_NO_EVENT = 0,
|
|
|
|
SBI_PMU_HW_CPU_CYCLES = 1,
|
|
|
|
SBI_PMU_HW_INSTRUCTIONS = 2,
|
|
|
|
SBI_PMU_HW_CACHE_REFERENCES = 3,
|
|
|
|
SBI_PMU_HW_CACHE_MISSES = 4,
|
|
|
|
SBI_PMU_HW_BRANCH_INSTRUCTIONS = 5,
|
|
|
|
SBI_PMU_HW_BRANCH_MISSES = 6,
|
|
|
|
SBI_PMU_HW_BUS_CYCLES = 7,
|
|
|
|
SBI_PMU_HW_STALLED_CYCLES_FRONTEND = 8,
|
|
|
|
SBI_PMU_HW_STALLED_CYCLES_BACKEND = 9,
|
|
|
|
SBI_PMU_HW_REF_CPU_CYCLES = 10,
|
|
|
|
|
|
|
|
SBI_PMU_HW_GENERAL_MAX,
|
|
|
|
};
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Special "firmware" events provided by the firmware, even if the hardware
|
|
|
|
* does not support performance events. These events are encoded as a raw
|
|
|
|
* event type in Linux kernel perf framework.
|
|
|
|
*/
|
|
|
|
enum sbi_pmu_fw_generic_events_t {
|
|
|
|
SBI_PMU_FW_MISALIGNED_LOAD = 0,
|
|
|
|
SBI_PMU_FW_MISALIGNED_STORE = 1,
|
|
|
|
SBI_PMU_FW_ACCESS_LOAD = 2,
|
|
|
|
SBI_PMU_FW_ACCESS_STORE = 3,
|
|
|
|
SBI_PMU_FW_ILLEGAL_INSN = 4,
|
|
|
|
SBI_PMU_FW_SET_TIMER = 5,
|
|
|
|
SBI_PMU_FW_IPI_SENT = 6,
|
2023-02-04 17:15:04 -08:00
|
|
|
SBI_PMU_FW_IPI_RCVD = 7,
|
2022-02-18 16:46:56 -08:00
|
|
|
SBI_PMU_FW_FENCE_I_SENT = 8,
|
2023-02-04 17:15:04 -08:00
|
|
|
SBI_PMU_FW_FENCE_I_RCVD = 9,
|
2022-02-18 16:46:56 -08:00
|
|
|
SBI_PMU_FW_SFENCE_VMA_SENT = 10,
|
|
|
|
SBI_PMU_FW_SFENCE_VMA_RCVD = 11,
|
|
|
|
SBI_PMU_FW_SFENCE_VMA_ASID_SENT = 12,
|
|
|
|
SBI_PMU_FW_SFENCE_VMA_ASID_RCVD = 13,
|
|
|
|
|
|
|
|
SBI_PMU_FW_HFENCE_GVMA_SENT = 14,
|
|
|
|
SBI_PMU_FW_HFENCE_GVMA_RCVD = 15,
|
|
|
|
SBI_PMU_FW_HFENCE_GVMA_VMID_SENT = 16,
|
|
|
|
SBI_PMU_FW_HFENCE_GVMA_VMID_RCVD = 17,
|
|
|
|
|
|
|
|
SBI_PMU_FW_HFENCE_VVMA_SENT = 18,
|
|
|
|
SBI_PMU_FW_HFENCE_VVMA_RCVD = 19,
|
|
|
|
SBI_PMU_FW_HFENCE_VVMA_ASID_SENT = 20,
|
|
|
|
SBI_PMU_FW_HFENCE_VVMA_ASID_RCVD = 21,
|
|
|
|
SBI_PMU_FW_MAX,
|
|
|
|
};
|
|
|
|
|
|
|
|
/* SBI PMU event types */
|
|
|
|
enum sbi_pmu_event_type {
|
|
|
|
SBI_PMU_EVENT_TYPE_HW = 0x0,
|
|
|
|
SBI_PMU_EVENT_TYPE_CACHE = 0x1,
|
|
|
|
SBI_PMU_EVENT_TYPE_RAW = 0x2,
|
|
|
|
SBI_PMU_EVENT_TYPE_FW = 0xf,
|
|
|
|
};
|
|
|
|
|
|
|
|
/* SBI PMU event types */
|
|
|
|
enum sbi_pmu_ctr_type {
|
|
|
|
SBI_PMU_CTR_TYPE_HW = 0x0,
|
|
|
|
SBI_PMU_CTR_TYPE_FW,
|
|
|
|
};
|
|
|
|
|
2022-07-11 10:46:32 -07:00
|
|
|
/* Helper macros to decode event idx */
|
|
|
|
#define SBI_PMU_EVENT_IDX_OFFSET 20
|
|
|
|
#define SBI_PMU_EVENT_IDX_MASK 0xFFFFF
|
|
|
|
#define SBI_PMU_EVENT_IDX_CODE_MASK 0xFFFF
|
|
|
|
#define SBI_PMU_EVENT_IDX_TYPE_MASK 0xF0000
|
|
|
|
#define SBI_PMU_EVENT_RAW_IDX 0x20000
|
|
|
|
#define SBI_PMU_FIXED_CTR_MASK 0x07
|
|
|
|
|
|
|
|
#define SBI_PMU_EVENT_CACHE_ID_CODE_MASK 0xFFF8
|
|
|
|
#define SBI_PMU_EVENT_CACHE_OP_ID_CODE_MASK 0x06
|
|
|
|
#define SBI_PMU_EVENT_CACHE_RESULT_ID_CODE_MASK 0x01
|
|
|
|
|
2023-02-04 17:15:04 -08:00
|
|
|
#define SBI_PMU_EVENT_CACHE_ID_SHIFT 3
|
|
|
|
#define SBI_PMU_EVENT_CACHE_OP_SHIFT 1
|
|
|
|
|
2022-07-11 10:46:32 -07:00
|
|
|
#define SBI_PMU_EVENT_IDX_INVALID 0xFFFFFFFF
|
|
|
|
|
2022-02-18 16:46:56 -08:00
|
|
|
/* Flags defined for config matching function */
|
2024-04-20 08:17:20 -07:00
|
|
|
#define SBI_PMU_CFG_FLAG_SKIP_MATCH BIT(0)
|
|
|
|
#define SBI_PMU_CFG_FLAG_CLEAR_VALUE BIT(1)
|
|
|
|
#define SBI_PMU_CFG_FLAG_AUTO_START BIT(2)
|
|
|
|
#define SBI_PMU_CFG_FLAG_SET_VUINH BIT(3)
|
|
|
|
#define SBI_PMU_CFG_FLAG_SET_VSINH BIT(4)
|
|
|
|
#define SBI_PMU_CFG_FLAG_SET_UINH BIT(5)
|
|
|
|
#define SBI_PMU_CFG_FLAG_SET_SINH BIT(6)
|
|
|
|
#define SBI_PMU_CFG_FLAG_SET_MINH BIT(7)
|
2022-02-18 16:46:56 -08:00
|
|
|
|
|
|
|
/* Flags defined for counter start function */
|
2024-04-20 08:17:20 -07:00
|
|
|
#define SBI_PMU_START_FLAG_SET_INIT_VALUE BIT(0)
|
2024-04-20 08:17:21 -07:00
|
|
|
#define SBI_PMU_START_FLAG_INIT_SNAPSHOT BIT(1)
|
2022-02-18 16:46:56 -08:00
|
|
|
|
|
|
|
/* Flags defined for counter stop function */
|
2024-04-20 08:17:20 -07:00
|
|
|
#define SBI_PMU_STOP_FLAG_RESET BIT(0)
|
2024-04-20 08:17:21 -07:00
|
|
|
#define SBI_PMU_STOP_FLAG_TAKE_SNAPSHOT BIT(1)
|
2022-02-18 16:46:56 -08:00
|
|
|
|
2022-07-22 18:54:54 +05:30
|
|
|
enum sbi_ext_dbcn_fid {
|
|
|
|
SBI_EXT_DBCN_CONSOLE_WRITE = 0,
|
|
|
|
SBI_EXT_DBCN_CONSOLE_READ = 1,
|
|
|
|
SBI_EXT_DBCN_CONSOLE_WRITE_BYTE = 2,
|
|
|
|
};
|
|
|
|
|
2023-12-20 17:00:15 +01:00
|
|
|
/* SBI STA (steal-time accounting) extension */
|
|
|
|
enum sbi_ext_sta_fid {
|
|
|
|
SBI_EXT_STA_STEAL_TIME_SET_SHMEM = 0,
|
|
|
|
};
|
|
|
|
|
|
|
|
/*
 * Per-hart steal-time shared memory record (SBI STA extension).
 * Layout is fixed by the SBI spec: little-endian fields, 64 bytes, packed.
 */
struct sbi_sta_struct {
	__le32 sequence;	/* update counter — NOTE(review): spec likely makes this odd while an update is in flight; confirm */
	__le32 flags;
	__le64 steal;		/* accumulated steal time — TODO confirm units (nanoseconds) against STA spec */
	u8 preempted;		/* non-zero while the vCPU is preempted — TODO confirm */
	u8 pad[47];		/* pads the record to 64 bytes */
} __packed;
|
|
|
|
|
2024-04-20 08:17:22 -07:00
|
|
|
#define SBI_SHMEM_DISABLE -1
|
2023-12-20 17:00:15 +01:00
|
|
|
|
2024-10-21 01:17:28 +05:30
|
|
|
enum sbi_ext_nacl_fid {
|
|
|
|
SBI_EXT_NACL_PROBE_FEATURE = 0x0,
|
|
|
|
SBI_EXT_NACL_SET_SHMEM = 0x1,
|
|
|
|
SBI_EXT_NACL_SYNC_CSR = 0x2,
|
|
|
|
SBI_EXT_NACL_SYNC_HFENCE = 0x3,
|
|
|
|
SBI_EXT_NACL_SYNC_SRET = 0x4,
|
|
|
|
};
|
|
|
|
|
|
|
|
enum sbi_ext_nacl_feature {
|
|
|
|
SBI_NACL_FEAT_SYNC_CSR = 0x0,
|
|
|
|
SBI_NACL_FEAT_SYNC_HFENCE = 0x1,
|
|
|
|
SBI_NACL_FEAT_SYNC_SRET = 0x2,
|
|
|
|
SBI_NACL_FEAT_AUTOSWAP_CSR = 0x3,
|
|
|
|
};
|
|
|
|
|
|
|
|
#define SBI_NACL_SHMEM_ADDR_SHIFT 12
|
|
|
|
#define SBI_NACL_SHMEM_SCRATCH_OFFSET 0x0000
|
|
|
|
#define SBI_NACL_SHMEM_SCRATCH_SIZE 0x1000
|
|
|
|
#define SBI_NACL_SHMEM_SRET_OFFSET 0x0000
|
|
|
|
#define SBI_NACL_SHMEM_SRET_SIZE 0x0200
|
|
|
|
#define SBI_NACL_SHMEM_AUTOSWAP_OFFSET (SBI_NACL_SHMEM_SRET_OFFSET + \
|
|
|
|
SBI_NACL_SHMEM_SRET_SIZE)
|
|
|
|
#define SBI_NACL_SHMEM_AUTOSWAP_SIZE 0x0080
|
|
|
|
#define SBI_NACL_SHMEM_UNUSED_OFFSET (SBI_NACL_SHMEM_AUTOSWAP_OFFSET + \
|
|
|
|
SBI_NACL_SHMEM_AUTOSWAP_SIZE)
|
|
|
|
#define SBI_NACL_SHMEM_UNUSED_SIZE 0x0580
|
|
|
|
#define SBI_NACL_SHMEM_HFENCE_OFFSET (SBI_NACL_SHMEM_UNUSED_OFFSET + \
|
|
|
|
SBI_NACL_SHMEM_UNUSED_SIZE)
|
|
|
|
#define SBI_NACL_SHMEM_HFENCE_SIZE 0x0780
|
|
|
|
#define SBI_NACL_SHMEM_DBITMAP_OFFSET (SBI_NACL_SHMEM_HFENCE_OFFSET + \
|
|
|
|
SBI_NACL_SHMEM_HFENCE_SIZE)
|
|
|
|
#define SBI_NACL_SHMEM_DBITMAP_SIZE 0x0080
|
|
|
|
#define SBI_NACL_SHMEM_CSR_OFFSET (SBI_NACL_SHMEM_DBITMAP_OFFSET + \
|
|
|
|
SBI_NACL_SHMEM_DBITMAP_SIZE)
|
|
|
|
#define SBI_NACL_SHMEM_CSR_SIZE ((__riscv_xlen / 8) * 1024)
|
|
|
|
#define SBI_NACL_SHMEM_SIZE (SBI_NACL_SHMEM_CSR_OFFSET + \
|
|
|
|
SBI_NACL_SHMEM_CSR_SIZE)
|
|
|
|
|
|
|
|
#define SBI_NACL_SHMEM_CSR_INDEX(__csr_num) \
|
|
|
|
((((__csr_num) & 0xc00) >> 2) | ((__csr_num) & 0xff))
|
|
|
|
|
|
|
|
#define SBI_NACL_SHMEM_HFENCE_ENTRY_SZ ((__riscv_xlen / 8) * 4)
|
|
|
|
#define SBI_NACL_SHMEM_HFENCE_ENTRY_MAX \
|
|
|
|
(SBI_NACL_SHMEM_HFENCE_SIZE / \
|
|
|
|
SBI_NACL_SHMEM_HFENCE_ENTRY_SZ)
|
|
|
|
#define SBI_NACL_SHMEM_HFENCE_ENTRY(__num) \
|
|
|
|
(SBI_NACL_SHMEM_HFENCE_OFFSET + \
|
|
|
|
(__num) * SBI_NACL_SHMEM_HFENCE_ENTRY_SZ)
|
|
|
|
#define SBI_NACL_SHMEM_HFENCE_ENTRY_CONFIG(__num) \
|
|
|
|
SBI_NACL_SHMEM_HFENCE_ENTRY(__num)
|
|
|
|
#define SBI_NACL_SHMEM_HFENCE_ENTRY_PNUM(__num)\
|
|
|
|
(SBI_NACL_SHMEM_HFENCE_ENTRY(__num) + (__riscv_xlen / 8))
|
|
|
|
#define SBI_NACL_SHMEM_HFENCE_ENTRY_PCOUNT(__num)\
|
|
|
|
(SBI_NACL_SHMEM_HFENCE_ENTRY(__num) + \
|
|
|
|
((__riscv_xlen / 8) * 3))
|
|
|
|
|
|
|
|
#define SBI_NACL_SHMEM_HFENCE_CONFIG_PEND_BITS 1
|
|
|
|
#define SBI_NACL_SHMEM_HFENCE_CONFIG_PEND_SHIFT \
|
|
|
|
(__riscv_xlen - SBI_NACL_SHMEM_HFENCE_CONFIG_PEND_BITS)
|
|
|
|
#define SBI_NACL_SHMEM_HFENCE_CONFIG_PEND_MASK \
|
|
|
|
((1UL << SBI_NACL_SHMEM_HFENCE_CONFIG_PEND_BITS) - 1)
|
|
|
|
#define SBI_NACL_SHMEM_HFENCE_CONFIG_PEND \
|
|
|
|
(SBI_NACL_SHMEM_HFENCE_CONFIG_PEND_MASK << \
|
|
|
|
SBI_NACL_SHMEM_HFENCE_CONFIG_PEND_SHIFT)
|
|
|
|
|
|
|
|
#define SBI_NACL_SHMEM_HFENCE_CONFIG_RSVD1_BITS 3
|
|
|
|
#define SBI_NACL_SHMEM_HFENCE_CONFIG_RSVD1_SHIFT \
|
|
|
|
(SBI_NACL_SHMEM_HFENCE_CONFIG_PEND_SHIFT - \
|
|
|
|
SBI_NACL_SHMEM_HFENCE_CONFIG_RSVD1_BITS)
|
|
|
|
|
|
|
|
#define SBI_NACL_SHMEM_HFENCE_CONFIG_TYPE_BITS 4
|
|
|
|
#define SBI_NACL_SHMEM_HFENCE_CONFIG_TYPE_SHIFT \
|
|
|
|
(SBI_NACL_SHMEM_HFENCE_CONFIG_RSVD1_SHIFT - \
|
|
|
|
SBI_NACL_SHMEM_HFENCE_CONFIG_TYPE_BITS)
|
|
|
|
#define SBI_NACL_SHMEM_HFENCE_CONFIG_TYPE_MASK \
|
|
|
|
((1UL << SBI_NACL_SHMEM_HFENCE_CONFIG_TYPE_BITS) - 1)
|
|
|
|
|
|
|
|
#define SBI_NACL_SHMEM_HFENCE_TYPE_GVMA 0x0
|
|
|
|
#define SBI_NACL_SHMEM_HFENCE_TYPE_GVMA_ALL 0x1
|
|
|
|
#define SBI_NACL_SHMEM_HFENCE_TYPE_GVMA_VMID 0x2
|
|
|
|
#define SBI_NACL_SHMEM_HFENCE_TYPE_GVMA_VMID_ALL 0x3
|
|
|
|
#define SBI_NACL_SHMEM_HFENCE_TYPE_VVMA 0x4
|
|
|
|
#define SBI_NACL_SHMEM_HFENCE_TYPE_VVMA_ALL 0x5
|
|
|
|
#define SBI_NACL_SHMEM_HFENCE_TYPE_VVMA_ASID 0x6
|
|
|
|
#define SBI_NACL_SHMEM_HFENCE_TYPE_VVMA_ASID_ALL 0x7
|
|
|
|
|
|
|
|
#define SBI_NACL_SHMEM_HFENCE_CONFIG_RSVD2_BITS 1
|
|
|
|
#define SBI_NACL_SHMEM_HFENCE_CONFIG_RSVD2_SHIFT \
|
|
|
|
(SBI_NACL_SHMEM_HFENCE_CONFIG_TYPE_SHIFT - \
|
|
|
|
SBI_NACL_SHMEM_HFENCE_CONFIG_RSVD2_BITS)
|
|
|
|
|
|
|
|
#define SBI_NACL_SHMEM_HFENCE_CONFIG_ORDER_BITS 7
|
|
|
|
#define SBI_NACL_SHMEM_HFENCE_CONFIG_ORDER_SHIFT \
|
|
|
|
(SBI_NACL_SHMEM_HFENCE_CONFIG_RSVD2_SHIFT - \
|
|
|
|
SBI_NACL_SHMEM_HFENCE_CONFIG_ORDER_BITS)
|
|
|
|
#define SBI_NACL_SHMEM_HFENCE_CONFIG_ORDER_MASK \
|
|
|
|
((1UL << SBI_NACL_SHMEM_HFENCE_CONFIG_ORDER_BITS) - 1)
|
|
|
|
#define SBI_NACL_SHMEM_HFENCE_ORDER_BASE 12
|
|
|
|
|
|
|
|
#if __riscv_xlen == 32
|
|
|
|
#define SBI_NACL_SHMEM_HFENCE_CONFIG_ASID_BITS 9
|
|
|
|
#define SBI_NACL_SHMEM_HFENCE_CONFIG_VMID_BITS 7
|
|
|
|
#else
|
|
|
|
#define SBI_NACL_SHMEM_HFENCE_CONFIG_ASID_BITS 16
|
|
|
|
#define SBI_NACL_SHMEM_HFENCE_CONFIG_VMID_BITS 14
|
|
|
|
#endif
|
|
|
|
#define SBI_NACL_SHMEM_HFENCE_CONFIG_VMID_SHIFT \
|
|
|
|
SBI_NACL_SHMEM_HFENCE_CONFIG_ASID_BITS
|
|
|
|
#define SBI_NACL_SHMEM_HFENCE_CONFIG_ASID_MASK \
|
|
|
|
((1UL << SBI_NACL_SHMEM_HFENCE_CONFIG_ASID_BITS) - 1)
|
|
|
|
#define SBI_NACL_SHMEM_HFENCE_CONFIG_VMID_MASK \
|
|
|
|
((1UL << SBI_NACL_SHMEM_HFENCE_CONFIG_VMID_BITS) - 1)
|
|
|
|
|
|
|
|
#define SBI_NACL_SHMEM_AUTOSWAP_FLAG_HSTATUS BIT(0)
|
|
|
|
#define SBI_NACL_SHMEM_AUTOSWAP_HSTATUS ((__riscv_xlen / 8) * 1)
|
|
|
|
|
|
|
|
#define SBI_NACL_SHMEM_SRET_X(__i) ((__riscv_xlen / 8) * (__i))
|
|
|
|
#define SBI_NACL_SHMEM_SRET_X_LAST 31
|
|
|
|
|
2025-05-23 12:19:18 +02:00
|
|
|
/* SBI function IDs for FW feature extension */
|
|
|
|
#define SBI_EXT_FWFT_SET 0x0
|
|
|
|
#define SBI_EXT_FWFT_GET 0x1
|
|
|
|
|
|
|
|
enum sbi_fwft_feature_t {
|
|
|
|
SBI_FWFT_MISALIGNED_EXC_DELEG = 0x0,
|
|
|
|
SBI_FWFT_LANDING_PAD = 0x1,
|
|
|
|
SBI_FWFT_SHADOW_STACK = 0x2,
|
|
|
|
SBI_FWFT_DOUBLE_TRAP = 0x3,
|
|
|
|
SBI_FWFT_PTE_AD_HW_UPDATING = 0x4,
|
|
|
|
SBI_FWFT_POINTER_MASKING_PMLEN = 0x5,
|
|
|
|
SBI_FWFT_LOCAL_RESERVED_START = 0x6,
|
|
|
|
SBI_FWFT_LOCAL_RESERVED_END = 0x3fffffff,
|
|
|
|
SBI_FWFT_LOCAL_PLATFORM_START = 0x40000000,
|
|
|
|
SBI_FWFT_LOCAL_PLATFORM_END = 0x7fffffff,
|
|
|
|
|
|
|
|
SBI_FWFT_GLOBAL_RESERVED_START = 0x80000000,
|
|
|
|
SBI_FWFT_GLOBAL_RESERVED_END = 0xbfffffff,
|
|
|
|
SBI_FWFT_GLOBAL_PLATFORM_START = 0xc0000000,
|
|
|
|
SBI_FWFT_GLOBAL_PLATFORM_END = 0xffffffff,
|
|
|
|
};
|
|
|
|
|
|
|
|
#define SBI_FWFT_PLATFORM_FEATURE_BIT BIT(30)
|
|
|
|
#define SBI_FWFT_GLOBAL_FEATURE_BIT BIT(31)
|
|
|
|
|
|
|
|
#define SBI_FWFT_SET_FLAG_LOCK BIT(0)
|
|
|
|
|
2023-12-20 17:00:15 +01:00
|
|
|
/* SBI spec version fields */
|
2020-03-17 18:11:35 -07:00
|
|
|
#define SBI_SPEC_VERSION_DEFAULT 0x1
|
|
|
|
#define SBI_SPEC_VERSION_MAJOR_SHIFT 24
|
|
|
|
#define SBI_SPEC_VERSION_MAJOR_MASK 0x7f
|
|
|
|
#define SBI_SPEC_VERSION_MINOR_MASK 0xffffff
|
2017-07-10 18:05:09 -07:00
|
|
|
|
2020-03-17 18:11:35 -07:00
|
|
|
/* SBI return error codes */
|
|
|
|
#define SBI_SUCCESS 0
|
|
|
|
#define SBI_ERR_FAILURE -1
|
|
|
|
#define SBI_ERR_NOT_SUPPORTED -2
|
|
|
|
#define SBI_ERR_INVALID_PARAM -3
|
|
|
|
#define SBI_ERR_DENIED -4
|
|
|
|
#define SBI_ERR_INVALID_ADDRESS -5
|
2021-11-18 00:39:12 -08:00
|
|
|
#define SBI_ERR_ALREADY_AVAILABLE -6
|
2022-02-18 16:46:56 -08:00
|
|
|
#define SBI_ERR_ALREADY_STARTED -7
|
|
|
|
#define SBI_ERR_ALREADY_STOPPED -8
|
2024-04-20 08:17:21 -07:00
|
|
|
#define SBI_ERR_NO_SHMEM -9
|
2025-05-23 12:19:18 +02:00
|
|
|
#define SBI_ERR_INVALID_STATE -10
|
|
|
|
#define SBI_ERR_BAD_RANGE -11
|
|
|
|
#define SBI_ERR_TIMEOUT -12
|
|
|
|
#define SBI_ERR_IO -13
|
|
|
|
#define SBI_ERR_DENIED_LOCKED -14
|
2017-07-10 18:05:09 -07:00
|
|
|
|
2020-03-17 18:11:35 -07:00
|
|
|
/* SBI specification version implemented by the firmware, probed at boot */
extern unsigned long sbi_spec_version;

/* Standard two-register return of an SBI call */
struct sbiret {
	long error;	/* SBI_SUCCESS or a negative SBI_ERR_* code */
	long value;	/* call-specific result; meaningful when error == SBI_SUCCESS */
};
|
2017-07-10 18:05:09 -07:00
|
|
|
|
2020-11-26 10:40:38 +08:00
|
|
|
void sbi_init(void);
|
2024-08-29 18:50:48 +02:00
|
|
|
long __sbi_base_ecall(int fid);
|
riscv: Improve sbi_ecall() code generation by reordering arguments
The sbi_ecall() function arguments are not in the same order as the
ecall arguments, so we end up re-ordering the registers before the
ecall which is useless and costly.
So simply reorder the arguments in the same way as expected by ecall.
Instead of reordering directly the arguments of sbi_ecall(), use a proxy
macro since the current ordering is more natural.
Before:
Dump of assembler code for function sbi_ecall:
0xffffffff800085e0 <+0>: add sp,sp,-32
0xffffffff800085e2 <+2>: sd s0,24(sp)
0xffffffff800085e4 <+4>: mv t1,a0
0xffffffff800085e6 <+6>: add s0,sp,32
0xffffffff800085e8 <+8>: mv t3,a1
0xffffffff800085ea <+10>: mv a0,a2
0xffffffff800085ec <+12>: mv a1,a3
0xffffffff800085ee <+14>: mv a2,a4
0xffffffff800085f0 <+16>: mv a3,a5
0xffffffff800085f2 <+18>: mv a4,a6
0xffffffff800085f4 <+20>: mv a5,a7
0xffffffff800085f6 <+22>: mv a6,t3
0xffffffff800085f8 <+24>: mv a7,t1
0xffffffff800085fa <+26>: ecall
0xffffffff800085fe <+30>: ld s0,24(sp)
0xffffffff80008600 <+32>: add sp,sp,32
0xffffffff80008602 <+34>: ret
After:
Dump of assembler code for function __sbi_ecall:
0xffffffff8000b6b2 <+0>: add sp,sp,-32
0xffffffff8000b6b4 <+2>: sd s0,24(sp)
0xffffffff8000b6b6 <+4>: add s0,sp,32
0xffffffff8000b6b8 <+6>: ecall
0xffffffff8000b6bc <+10>: ld s0,24(sp)
0xffffffff8000b6be <+12>: add sp,sp,32
0xffffffff8000b6c0 <+14>: ret
Signed-off-by: Alexandre Ghiti <alexghiti@rivosinc.com>
Reviewed-by: Atish Patra <atishp@rivosinc.com>
Reviewed-by: Yunhui Cui <cuiyunhui@bytedance.com>
Link: https://lore.kernel.org/r/20240322112629.68170-1-alexghiti@rivosinc.com
Signed-off-by: Palmer Dabbelt <palmer@rivosinc.com>
2024-03-22 12:26:29 +01:00
|
|
|
struct sbiret __sbi_ecall(unsigned long arg0, unsigned long arg1,
|
|
|
|
unsigned long arg2, unsigned long arg3,
|
|
|
|
unsigned long arg4, unsigned long arg5,
|
|
|
|
int fid, int ext);
|
|
|
|
#define sbi_ecall(e, f, a0, a1, a2, a3, a4, a5) \
|
|
|
|
__sbi_ecall(a0, a1, a2, a3, a4, a5, f, e)
|
2017-07-10 18:05:09 -07:00
|
|
|
|
2023-11-24 12:39:01 +05:30
|
|
|
/*
 * Legacy (v0.1) console calls; real implementations exist only when the
 * deprecated v0.1 extensions are built in, otherwise they degrade to no-ops.
 */
#ifdef CONFIG_RISCV_SBI_V01
void sbi_console_putchar(int ch);
int sbi_console_getchar(void);
#else
static inline void sbi_console_putchar(int ch) { }
static inline int sbi_console_getchar(void) { return -ENOENT; }
#endif
|
2021-03-22 22:26:02 +08:00
|
|
|
long sbi_get_mvendorid(void);
|
|
|
|
long sbi_get_marchid(void);
|
|
|
|
long sbi_get_mimpid(void);
|
2020-03-17 18:11:35 -07:00
|
|
|
void sbi_set_timer(uint64_t stime_value);
|
|
|
|
void sbi_shutdown(void);
|
2023-03-28 09:22:19 +05:30
|
|
|
void sbi_send_ipi(unsigned int cpu);
|
2022-01-20 01:09:18 -08:00
|
|
|
int sbi_remote_fence_i(const struct cpumask *cpu_mask);
|
2017-07-10 18:05:09 -07:00
|
|
|
|
2022-01-20 01:09:18 -08:00
|
|
|
int sbi_remote_sfence_vma_asid(const struct cpumask *cpu_mask,
|
2020-03-17 18:11:35 -07:00
|
|
|
unsigned long start,
|
|
|
|
unsigned long size,
|
|
|
|
unsigned long asid);
|
2022-01-20 01:09:18 -08:00
|
|
|
int sbi_remote_hfence_gvma(const struct cpumask *cpu_mask,
|
2020-03-17 18:11:38 -07:00
|
|
|
unsigned long start,
|
|
|
|
unsigned long size);
|
2022-01-20 01:09:18 -08:00
|
|
|
int sbi_remote_hfence_gvma_vmid(const struct cpumask *cpu_mask,
|
2020-03-17 18:11:38 -07:00
|
|
|
unsigned long start,
|
|
|
|
unsigned long size,
|
|
|
|
unsigned long vmid);
|
2022-01-20 01:09:18 -08:00
|
|
|
int sbi_remote_hfence_vvma(const struct cpumask *cpu_mask,
|
2020-03-17 18:11:38 -07:00
|
|
|
unsigned long start,
|
|
|
|
unsigned long size);
|
2022-01-20 01:09:18 -08:00
|
|
|
int sbi_remote_hfence_vvma_asid(const struct cpumask *cpu_mask,
|
2020-03-17 18:11:38 -07:00
|
|
|
unsigned long start,
|
|
|
|
unsigned long size,
|
|
|
|
unsigned long asid);
|
2023-04-27 18:36:26 +02:00
|
|
|
long sbi_probe_extension(int ext);
|
2017-07-10 18:05:09 -07:00
|
|
|
|
2025-05-23 12:19:21 +02:00
|
|
|
int sbi_fwft_set(u32 feature, unsigned long value, unsigned long flags);
|
|
|
|
int sbi_fwft_set_cpumask(const cpumask_t *mask, u32 feature,
|
|
|
|
unsigned long value, unsigned long flags);
|
|
|
|
/**
 * sbi_fwft_set_online_cpus() - Set a feature on all online cpus
 * @feature: The feature to be set
 * @value: The feature value to be set
 * @flags: FWFT feature set flags
 *
 * Return: 0 on success, appropriate linux error code otherwise.
 */
static inline int sbi_fwft_set_online_cpus(u32 feature, unsigned long value,
					   unsigned long flags)
{
	/* Thin wrapper: applies the feature over the current online mask */
	return sbi_fwft_set_cpumask(cpu_online_mask, feature, value, flags);
}
|
|
|
|
|
2020-03-17 18:11:35 -07:00
|
|
|
/* Check if current SBI specification version is 0.1 or not */
|
|
|
|
static inline int sbi_spec_is_0_1(void)
|
2017-07-10 18:05:09 -07:00
|
|
|
{
|
2020-03-17 18:11:35 -07:00
|
|
|
return (sbi_spec_version == SBI_SPEC_VERSION_DEFAULT) ? 1 : 0;
|
2017-07-10 18:05:09 -07:00
|
|
|
}
|
|
|
|
|
2020-03-17 18:11:35 -07:00
|
|
|
/* Get the major version of SBI */
|
|
|
|
static inline unsigned long sbi_major_version(void)
|
2017-07-10 18:05:09 -07:00
|
|
|
{
|
2020-03-17 18:11:35 -07:00
|
|
|
return (sbi_spec_version >> SBI_SPEC_VERSION_MAJOR_SHIFT) &
|
|
|
|
SBI_SPEC_VERSION_MAJOR_MASK;
|
2017-07-10 18:05:09 -07:00
|
|
|
}
|
|
|
|
|
2020-03-17 18:11:35 -07:00
|
|
|
/* Get the minor version of SBI */
|
|
|
|
static inline unsigned long sbi_minor_version(void)
|
2017-07-10 18:05:09 -07:00
|
|
|
{
|
2020-03-17 18:11:35 -07:00
|
|
|
return sbi_spec_version & SBI_SPEC_VERSION_MINOR_MASK;
|
2017-07-10 18:05:09 -07:00
|
|
|
}
|
2020-03-17 18:11:41 -07:00
|
|
|
|
2021-06-09 17:43:22 +05:30
|
|
|
/* Make SBI version */
|
|
|
|
static inline unsigned long sbi_mk_version(unsigned long major,
|
|
|
|
unsigned long minor)
|
|
|
|
{
|
2024-04-20 08:17:23 -07:00
|
|
|
return ((major & SBI_SPEC_VERSION_MAJOR_MASK) << SBI_SPEC_VERSION_MAJOR_SHIFT)
|
|
|
|
| (minor & SBI_SPEC_VERSION_MINOR_MASK);
|
2021-06-09 17:43:22 +05:30
|
|
|
}
|
|
|
|
|
2024-08-29 18:50:48 +02:00
|
|
|
static inline int sbi_err_map_linux_errno(int err)
|
|
|
|
{
|
|
|
|
switch (err) {
|
|
|
|
case SBI_SUCCESS:
|
|
|
|
return 0;
|
|
|
|
case SBI_ERR_DENIED:
|
2025-05-23 12:19:20 +02:00
|
|
|
case SBI_ERR_DENIED_LOCKED:
|
2024-08-29 18:50:48 +02:00
|
|
|
return -EPERM;
|
|
|
|
case SBI_ERR_INVALID_PARAM:
|
2025-05-23 12:19:20 +02:00
|
|
|
case SBI_ERR_INVALID_STATE:
|
2024-08-29 18:50:48 +02:00
|
|
|
return -EINVAL;
|
2025-05-23 12:19:20 +02:00
|
|
|
case SBI_ERR_BAD_RANGE:
|
|
|
|
return -ERANGE;
|
2024-08-29 18:50:48 +02:00
|
|
|
case SBI_ERR_INVALID_ADDRESS:
|
|
|
|
return -EFAULT;
|
2025-05-23 12:19:20 +02:00
|
|
|
case SBI_ERR_NO_SHMEM:
|
|
|
|
return -ENOMEM;
|
|
|
|
case SBI_ERR_TIMEOUT:
|
|
|
|
return -ETIMEDOUT;
|
|
|
|
case SBI_ERR_IO:
|
|
|
|
return -EIO;
|
2024-08-29 18:50:48 +02:00
|
|
|
case SBI_ERR_NOT_SUPPORTED:
|
|
|
|
case SBI_ERR_FAILURE:
|
|
|
|
default:
|
|
|
|
return -ENOTSUPP;
|
|
|
|
};
|
|
|
|
}
|
2023-11-24 12:39:02 +05:30
|
|
|
|
|
|
|
extern bool sbi_debug_console_available;
|
|
|
|
int sbi_debug_console_write(const char *bytes, unsigned int num_bytes);
|
|
|
|
int sbi_debug_console_read(char *bytes, unsigned int num_bytes);
|
|
|
|
|
2019-10-28 13:10:36 +01:00
|
|
|
#else /* CONFIG_RISCV_SBI */
|
2022-01-20 01:09:18 -08:00
|
|
|
/* Stubs used when the kernel is built without SBI support */
static inline int sbi_remote_fence_i(const struct cpumask *cpu_mask) { return -1; }
static inline void sbi_init(void) {}
|
2019-10-28 13:10:34 +01:00
|
|
|
#endif /* CONFIG_RISCV_SBI */
|
2022-10-12 01:18:40 +02:00
|
|
|
|
2024-05-02 21:50:50 -07:00
|
|
|
unsigned long riscv_get_mvendorid(void);
|
|
|
|
unsigned long riscv_get_marchid(void);
|
2022-10-12 01:18:40 +02:00
|
|
|
unsigned long riscv_cached_mvendorid(unsigned int cpu_id);
|
|
|
|
unsigned long riscv_cached_marchid(unsigned int cpu_id);
|
|
|
|
unsigned long riscv_cached_mimpid(unsigned int cpu_id);
|
|
|
|
|
2023-03-28 09:22:19 +05:30
|
|
|
#if IS_ENABLED(CONFIG_SMP) && IS_ENABLED(CONFIG_RISCV_SBI)
|
riscv: Use IPIs for remote cache/TLB flushes by default
An IPI backend is always required in an SMP configuration, but an SBI
implementation is not. For example, SBI will be unavailable when the
kernel runs in M mode. For this reason, consider IPI delivery of cache
and TLB flushes to be the base case, and any other implementation (such
as the SBI remote fence extension) to be an optimization.
Generally, if IPIs can be delivered without firmware assistance, they
are assumed to be faster than SBI calls due to the SBI context switch
overhead. However, when SBI is used as the IPI backend, then the context
switch cost must be paid anyway, and performing the cache/TLB flush
directly in the SBI implementation is more efficient than injecting an
interrupt to S-mode. This is the only existing scenario where
riscv_ipi_set_virq_range() is called with use_for_rfence set to false.
sbi_ipi_init() already checks riscv_ipi_have_virq_range(), so it only
calls riscv_ipi_set_virq_range() when no other IPI device is available.
This allows moving the static key and dropping the use_for_rfence
parameter. This decouples the static key from the irqchip driver probe
order.
Furthermore, the static branch only makes sense when CONFIG_RISCV_SBI is
enabled. Optherwise, IPIs must be used. Add a fallback definition of
riscv_use_sbi_for_rfence() which handles this case and removes the need
to check CONFIG_RISCV_SBI elsewhere, such as in cacheflush.c.
Reviewed-by: Anup Patel <anup@brainfault.org>
Signed-off-by: Samuel Holland <samuel.holland@sifive.com>
Reviewed-by: Alexandre Ghiti <alexghiti@rivosinc.com>
Link: https://lore.kernel.org/r/20240327045035.368512-4-samuel.holland@sifive.com
Signed-off-by: Palmer Dabbelt <palmer@rivosinc.com>
2024-03-26 21:49:44 -07:00
|
|
|
DECLARE_STATIC_KEY_FALSE(riscv_sbi_for_rfence);
|
|
|
|
#define riscv_use_sbi_for_rfence() \
|
|
|
|
static_branch_unlikely(&riscv_sbi_for_rfence)
|
2023-03-28 09:22:19 +05:30
|
|
|
void sbi_ipi_init(void);
|
|
|
|
#else
|
riscv: Use IPIs for remote cache/TLB flushes by default
An IPI backend is always required in an SMP configuration, but an SBI
implementation is not. For example, SBI will be unavailable when the
kernel runs in M mode. For this reason, consider IPI delivery of cache
and TLB flushes to be the base case, and any other implementation (such
as the SBI remote fence extension) to be an optimization.
Generally, if IPIs can be delivered without firmware assistance, they
are assumed to be faster than SBI calls due to the SBI context switch
overhead. However, when SBI is used as the IPI backend, then the context
switch cost must be paid anyway, and performing the cache/TLB flush
directly in the SBI implementation is more efficient than injecting an
interrupt to S-mode. This is the only existing scenario where
riscv_ipi_set_virq_range() is called with use_for_rfence set to false.
sbi_ipi_init() already checks riscv_ipi_have_virq_range(), so it only
calls riscv_ipi_set_virq_range() when no other IPI device is available.
This allows moving the static key and dropping the use_for_rfence
parameter. This decouples the static key from the irqchip driver probe
order.
Furthermore, the static branch only makes sense when CONFIG_RISCV_SBI is
enabled. Optherwise, IPIs must be used. Add a fallback definition of
riscv_use_sbi_for_rfence() which handles this case and removes the need
to check CONFIG_RISCV_SBI elsewhere, such as in cacheflush.c.
Reviewed-by: Anup Patel <anup@brainfault.org>
Signed-off-by: Samuel Holland <samuel.holland@sifive.com>
Reviewed-by: Alexandre Ghiti <alexghiti@rivosinc.com>
Link: https://lore.kernel.org/r/20240327045035.368512-4-samuel.holland@sifive.com
Signed-off-by: Palmer Dabbelt <palmer@rivosinc.com>
2024-03-26 21:49:44 -07:00
|
|
|
/* Without SMP+SBI, remote fences never go through SBI and there is no SBI IPI backend */
static inline bool riscv_use_sbi_for_rfence(void) { return false; }
static inline void sbi_ipi_init(void) { }
|
|
|
|
#endif
|
|
|
|
|
2019-10-28 13:10:34 +01:00
|
|
|
#endif /* _ASM_RISCV_SBI_H */
|