/* SPDX-License-Identifier: GPL-2.0-only */
/**
 * Copyright (c) 2021 Western Digital Corporation or its affiliates.
 *
 * Authors:
 *	Atish Patra <atish.patra@wdc.com>
 */

#ifndef __RISCV_KVM_VCPU_SBI_H__
#define __RISCV_KVM_VCPU_SBI_H__
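
/* SBI implementation ID assigned to KVM in the SBI specification's implementation ID list */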
#define KVM_SBI_IMPID 3
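
/* SBI specification version implemented by KVM's SBI layer */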
#define KVM_SBI_VERSION_MAJOR 2
#define KVM_SBI_VERSION_MINOR 0
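
/*
 * Per-VCPU state of an SBI extension: an extension starts UNINITIALIZED,
 * becomes UNAVAILABLE if its probe fails, and is otherwise ENABLED or
 * DISABLED (see default_disabled and the SBI_EXT ONE_REG interface).
 */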
enum kvm_riscv_sbi_ext_status {
	KVM_RISCV_SBI_EXT_STATUS_UNINITIALIZED,
	KVM_RISCV_SBI_EXT_STATUS_UNAVAILABLE,
	KVM_RISCV_SBI_EXT_STATUS_ENABLED,
	KVM_RISCV_SBI_EXT_STATUS_DISABLED,
};
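
/*
 * Per-VCPU SBI state: return_handled records whether the result of an SBI
 * call forwarded to userspace has already been written back to the guest,
 * and ext_status tracks the state of each SBI extension.
 */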
struct kvm_vcpu_sbi_context {
	int return_handled;
	enum kvm_riscv_sbi_ext_status ext_status[KVM_RISCV_SBI_EXT_MAX];
};
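
/*
 * Values a handler passes back to the common SBI dispatch code: out_val and
 * err_val become the SBI return value and error code seen by the guest,
 * utrap carries a trap that should be redirected to the guest, and uexit
 * requests an exit to userspace.
 */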
struct kvm_vcpu_sbi_return {
	unsigned long out_val;
	unsigned long err_val;
	struct kvm_cpu_trap *utrap;
	bool uexit;
};
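
/*
 * Describes one SBI extension (or a contiguous range of extension IDs)
 * implemented by KVM for its guests.
 */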
struct kvm_vcpu_sbi_extension {
	unsigned long extid_start;
	unsigned long extid_end;
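
	/* When set, the extension starts in the DISABLED state and must be enabled by userspace */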
	bool default_disabled;

	/**
	 * SBI extension handler. It can be defined for a given extension or
	 * for a group of extensions, but it should always return Linux error
	 * codes rather than SBI-specific error codes.
	 */
	int (*handler)(struct kvm_vcpu *vcpu, struct kvm_run *run,
		       struct kvm_vcpu_sbi_return *retdata);

	/* Extension-specific probe function */
	unsigned long (*probe)(struct kvm_vcpu *vcpu);

	/*
	 * Init/deinit functions called once during VCPU init/destroy. These
	 * may be used if the SBI extension needs to allocate resources or
	 * perform init-time-only configuration.
	 */
	int (*init)(struct kvm_vcpu *vcpu);
	void (*deinit)(struct kvm_vcpu *vcpu);
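
	/* Called on VCPU reset to return the extension's state to its initial values */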
	void (*reset)(struct kvm_vcpu *vcpu);
};
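
/*
 * Illustrative sketch only (the names kvm_sbi_ext_example_handler and
 * vcpu_sbi_ext_example are hypothetical): an extension is described by a
 * const instance whose handler reports its results through retdata and
 * returns a Linux error code to the host.
 *
 *	static int kvm_sbi_ext_example_handler(struct kvm_vcpu *vcpu,
 *					       struct kvm_run *run,
 *					       struct kvm_vcpu_sbi_return *retdata)
 *	{
 *		retdata->err_val = SBI_SUCCESS;
 *		retdata->out_val = 0;
 *		return 0;
 *	}
 *
 *	static const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_example = {
 *		.extid_start = SBI_EXT_EXPERIMENTAL_START,
 *		.extid_end = SBI_EXT_EXPERIMENTAL_START,
 *		.handler = kvm_sbi_ext_example_handler,
 *	};
 */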

void kvm_riscv_vcpu_sbi_forward(struct kvm_vcpu *vcpu, struct kvm_run *run);
void kvm_riscv_vcpu_sbi_system_reset(struct kvm_vcpu *vcpu,
				     struct kvm_run *run,
				     u32 type, u64 flags);
void kvm_riscv_vcpu_sbi_request_reset(struct kvm_vcpu *vcpu,
				      unsigned long pc, unsigned long a1);
void kvm_riscv_vcpu_sbi_load_reset_state(struct kvm_vcpu *vcpu);
int kvm_riscv_vcpu_sbi_return(struct kvm_vcpu *vcpu, struct kvm_run *run);
int kvm_riscv_vcpu_set_reg_sbi_ext(struct kvm_vcpu *vcpu,
				   const struct kvm_one_reg *reg);
int kvm_riscv_vcpu_get_reg_sbi_ext(struct kvm_vcpu *vcpu,
				   const struct kvm_one_reg *reg);
int kvm_riscv_vcpu_set_reg_sbi(struct kvm_vcpu *vcpu,
			       const struct kvm_one_reg *reg);
int kvm_riscv_vcpu_get_reg_sbi(struct kvm_vcpu *vcpu,
			       const struct kvm_one_reg *reg);
const struct kvm_vcpu_sbi_extension *kvm_vcpu_sbi_find_ext(
				struct kvm_vcpu *vcpu, unsigned long extid);
bool riscv_vcpu_supports_sbi_ext(struct kvm_vcpu *vcpu, int idx);
int kvm_riscv_vcpu_sbi_ecall(struct kvm_vcpu *vcpu, struct kvm_run *run);
void kvm_riscv_vcpu_sbi_init(struct kvm_vcpu *vcpu);
void kvm_riscv_vcpu_sbi_deinit(struct kvm_vcpu *vcpu);
void kvm_riscv_vcpu_sbi_reset(struct kvm_vcpu *vcpu);

int kvm_riscv_vcpu_get_reg_sbi_sta(struct kvm_vcpu *vcpu, unsigned long reg_num,
				   unsigned long *reg_val);
int kvm_riscv_vcpu_set_reg_sbi_sta(struct kvm_vcpu *vcpu, unsigned long reg_num,
				   unsigned long reg_val);
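
/* SBI extension implementations provided by KVM */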
#ifdef CONFIG_RISCV_SBI_V01
extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_v01;
#endif
extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_base;
extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_time;
extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_ipi;
extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_rfence;
extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_srst;
extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_hsm;
extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_dbcn;
extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_susp;
extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_sta;
extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_experimental;
extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_vendor;

#ifdef CONFIG_RISCV_PMU_SBI
extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_pmu;
#endif

#endif /* __RISCV_KVM_VCPU_SBI_H__ */