// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2019 Western Digital Corporation or its affiliates.
 *
 * Authors:
 *     Atish Patra <atish.patra@wdc.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <asm/sbi.h>
#include <asm/kvm_vcpu_sbi.h>
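
/*
 * Placeholder definitions: when SBI v0.1 or SBI PMU support is compiled
 * out, the sbi_ext[] table below still references these symbols. An extid
 * range of [-1UL, -1UL] and a NULL handler ensure a stub never matches or
 * services a real SBI call.
 */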
#ifndef CONFIG_RISCV_SBI_V01
static const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_v01 = {
	.extid_start = -1UL,
	.extid_end = -1UL,
	.handler = NULL,
};
#endif

#ifndef CONFIG_RISCV_PMU_SBI
static const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_pmu = {
	.extid_start = -1UL,
	.extid_end = -1UL,
	.handler = NULL,
};
#endif

struct kvm_riscv_sbi_extension_entry {
	enum KVM_RISCV_SBI_EXT_ID ext_idx;
	const struct kvm_vcpu_sbi_extension *ext_ptr;
};
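
/*
 * Table of all SBI extensions known to KVM, keyed by the user-visible
 * KVM_RISCV_SBI_EXT_* index. The base extension is deliberately mapped
 * to KVM_RISCV_SBI_EXT_MAX so that it can never be looked up (and thus
 * never disabled) through the ONE_REG interface.
 */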
static const struct kvm_riscv_sbi_extension_entry sbi_ext[] = {
	{
		.ext_idx = KVM_RISCV_SBI_EXT_V01,
		.ext_ptr = &vcpu_sbi_ext_v01,
	},
	{
		.ext_idx = KVM_RISCV_SBI_EXT_MAX, /* Can't be disabled */
		.ext_ptr = &vcpu_sbi_ext_base,
	},
	{
		.ext_idx = KVM_RISCV_SBI_EXT_TIME,
		.ext_ptr = &vcpu_sbi_ext_time,
	},
	{
		.ext_idx = KVM_RISCV_SBI_EXT_IPI,
		.ext_ptr = &vcpu_sbi_ext_ipi,
	},
	{
		.ext_idx = KVM_RISCV_SBI_EXT_RFENCE,
		.ext_ptr = &vcpu_sbi_ext_rfence,
	},
	{
		.ext_idx = KVM_RISCV_SBI_EXT_SRST,
		.ext_ptr = &vcpu_sbi_ext_srst,
	},
	{
		.ext_idx = KVM_RISCV_SBI_EXT_HSM,
		.ext_ptr = &vcpu_sbi_ext_hsm,
	},
	{
		.ext_idx = KVM_RISCV_SBI_EXT_PMU,
		.ext_ptr = &vcpu_sbi_ext_pmu,
	},
	{
		.ext_idx = KVM_RISCV_SBI_EXT_DBCN,
		.ext_ptr = &vcpu_sbi_ext_dbcn,
	},
	{
		.ext_idx = KVM_RISCV_SBI_EXT_SUSP,
		.ext_ptr = &vcpu_sbi_ext_susp,
	},
	{
		.ext_idx = KVM_RISCV_SBI_EXT_STA,
		.ext_ptr = &vcpu_sbi_ext_sta,
	},
	{
		.ext_idx = KVM_RISCV_SBI_EXT_EXPERIMENTAL,
		.ext_ptr = &vcpu_sbi_ext_experimental,
	},
	{
		.ext_idx = KVM_RISCV_SBI_EXT_VENDOR,
		.ext_ptr = &vcpu_sbi_ext_vendor,
	},
};
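
/*
 * Map a userspace extension index (KVM_RISCV_SBI_EXT_*) to its table
 * entry, or return NULL if the index is out of range or unknown.
 */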
static const struct kvm_riscv_sbi_extension_entry *
riscv_vcpu_get_sbi_ext(struct kvm_vcpu *vcpu, unsigned long idx)
{
	const struct kvm_riscv_sbi_extension_entry *sext = NULL;

	if (idx >= KVM_RISCV_SBI_EXT_MAX)
		return NULL;

	for (int i = 0; i < ARRAY_SIZE(sbi_ext); i++) {
		if (sbi_ext[i].ext_idx == idx) {
			sext = &sbi_ext[i];
			break;
		}
	}

	return sext;
}
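
/*
 * An extension is supported if it exists in the table and was not marked
 * unavailable by probing at vCPU init time.
 */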
bool riscv_vcpu_supports_sbi_ext(struct kvm_vcpu *vcpu, int idx)
{
	struct kvm_vcpu_sbi_context *scontext = &vcpu->arch.sbi_context;
	const struct kvm_riscv_sbi_extension_entry *sext;

	sext = riscv_vcpu_get_sbi_ext(vcpu, idx);

	return sext && scontext->ext_status[sext->ext_idx] != KVM_RISCV_SBI_EXT_STATUS_UNAVAILABLE;
}
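
/*
 * Forward an unhandled SBI call to userspace as a KVM_EXIT_RISCV_SBI
 * exit. The return values are preset to "not supported" so that a
 * userspace which ignores the exit still produces a sane guest-visible
 * result via kvm_riscv_vcpu_sbi_return().
 */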
void kvm_riscv_vcpu_sbi_forward(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	struct kvm_cpu_context *cp = &vcpu->arch.guest_context;

	vcpu->arch.sbi_context.return_handled = 0;
	vcpu->stat.ecall_exit_stat++;
	run->exit_reason = KVM_EXIT_RISCV_SBI;
	run->riscv_sbi.extension_id = cp->a7;
	run->riscv_sbi.function_id = cp->a6;
	run->riscv_sbi.args[0] = cp->a0;
	run->riscv_sbi.args[1] = cp->a1;
	run->riscv_sbi.args[2] = cp->a2;
	run->riscv_sbi.args[3] = cp->a3;
	run->riscv_sbi.args[4] = cp->a4;
	run->riscv_sbi.args[5] = cp->a5;
	run->riscv_sbi.ret[0] = SBI_ERR_NOT_SUPPORTED;
	run->riscv_sbi.ret[1] = 0;
}
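
/*
 * Stop all vCPUs and report a system-level event (e.g. reset or
 * shutdown) to userspace as a KVM_EXIT_SYSTEM_EVENT exit, with a single
 * data word carrying the reason.
 */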
void kvm_riscv_vcpu_sbi_system_reset(struct kvm_vcpu *vcpu,
				     struct kvm_run *run,
				     u32 type, u64 reason)
{
	unsigned long i;
	struct kvm_vcpu *tmp;

	kvm_for_each_vcpu(i, tmp, vcpu->kvm) {
		spin_lock(&tmp->arch.mp_state_lock);
		WRITE_ONCE(tmp->arch.mp_state.mp_state, KVM_MP_STATE_STOPPED);
		spin_unlock(&tmp->arch.mp_state_lock);
	}
	kvm_make_all_cpus_request(vcpu->kvm, KVM_REQ_SLEEP);

	memset(&run->system_event, 0, sizeof(run->system_event));
	run->system_event.type = type;
	run->system_event.ndata = 1;
	run->system_event.data[0] = reason;
	run->exit_reason = KVM_EXIT_SYSTEM_EVENT;
}
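
/*
 * Record the requested reset PC and opaque argument (a1) under the
 * reset-state lock, then ask the vCPU to reset itself on its next run.
 */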
void kvm_riscv_vcpu_sbi_request_reset(struct kvm_vcpu *vcpu,
				      unsigned long pc, unsigned long a1)
{
	spin_lock(&vcpu->arch.reset_state.lock);
	vcpu->arch.reset_state.pc = pc;
	vcpu->arch.reset_state.a1 = a1;
	spin_unlock(&vcpu->arch.reset_state.lock);

	kvm_make_request(KVM_REQ_VCPU_RESET, vcpu);
}
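
/*
 * Apply the previously requested reset state: a0 carries the vCPU ID,
 * sepc/a1 come from the reset request, interrupts are masked and the
 * guest MMU (vsatp) is disabled, matching the SBI HSM hart start
 * convention (a0 = hartid, a1 = opaque argument).
 */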
void kvm_riscv_vcpu_sbi_load_reset_state(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
	struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
	struct kvm_vcpu_reset_state *reset_state = &vcpu->arch.reset_state;

	cntx->a0 = vcpu->vcpu_id;

	spin_lock(&vcpu->arch.reset_state.lock);
	cntx->sepc = reset_state->pc;
	cntx->a1 = reset_state->a1;
	spin_unlock(&vcpu->arch.reset_state.lock);

	cntx->sstatus &= ~SR_SIE;
	csr->vsatp = 0;
}
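
/*
 * Complete an SBI call that was forwarded to userspace: copy the
 * userspace-provided return values into a0/a1 exactly once and step the
 * guest past the ecall instruction.
 */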
int kvm_riscv_vcpu_sbi_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	struct kvm_cpu_context *cp = &vcpu->arch.guest_context;

	/* Handle SBI return only once */
	if (vcpu->arch.sbi_context.return_handled)
		return 0;
	vcpu->arch.sbi_context.return_handled = 1;

	/* Update return values */
	cp->a0 = run->riscv_sbi.ret[0];
	cp->a1 = run->riscv_sbi.ret[1];

	/* Move to next instruction */
	vcpu->arch.guest_context.sepc += 4;

	return 0;
}
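
/*
 * ONE_REG helpers for a single SBI extension: enable/disable or query
 * its status. The base extension is unreachable here because its table
 * index is KVM_RISCV_SBI_EXT_MAX, which riscv_vcpu_get_sbi_ext() rejects.
 */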
static int riscv_vcpu_set_sbi_ext_single(struct kvm_vcpu *vcpu,
					 unsigned long reg_num,
					 unsigned long reg_val)
{
	struct kvm_vcpu_sbi_context *scontext = &vcpu->arch.sbi_context;
	const struct kvm_riscv_sbi_extension_entry *sext;

	if (reg_val != 1 && reg_val != 0)
		return -EINVAL;

	sext = riscv_vcpu_get_sbi_ext(vcpu, reg_num);
	if (!sext || scontext->ext_status[sext->ext_idx] == KVM_RISCV_SBI_EXT_STATUS_UNAVAILABLE)
		return -ENOENT;

	scontext->ext_status[sext->ext_idx] = (reg_val) ?
			KVM_RISCV_SBI_EXT_STATUS_ENABLED :
			KVM_RISCV_SBI_EXT_STATUS_DISABLED;

	return 0;
}

static int riscv_vcpu_get_sbi_ext_single(struct kvm_vcpu *vcpu,
					 unsigned long reg_num,
					 unsigned long *reg_val)
{
	struct kvm_vcpu_sbi_context *scontext = &vcpu->arch.sbi_context;
	const struct kvm_riscv_sbi_extension_entry *sext;

	sext = riscv_vcpu_get_sbi_ext(vcpu, reg_num);
	if (!sext || scontext->ext_status[sext->ext_idx] == KVM_RISCV_SBI_EXT_STATUS_UNAVAILABLE)
		return -ENOENT;

	*reg_val = scontext->ext_status[sext->ext_idx] ==
				KVM_RISCV_SBI_EXT_STATUS_ENABLED;

	return 0;
}
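
/*
 * Multi-register variants: each reg_num covers BITS_PER_LONG extension
 * IDs, one bit per extension.
 */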
static int riscv_vcpu_set_sbi_ext_multi(struct kvm_vcpu *vcpu,
					unsigned long reg_num,
					unsigned long reg_val, bool enable)
{
	unsigned long i, ext_id;

	if (reg_num > KVM_REG_RISCV_SBI_MULTI_REG_LAST)
		return -ENOENT;

	for_each_set_bit(i, &reg_val, BITS_PER_LONG) {
		ext_id = i + reg_num * BITS_PER_LONG;
		if (ext_id >= KVM_RISCV_SBI_EXT_MAX)
			break;

		riscv_vcpu_set_sbi_ext_single(vcpu, ext_id, enable);
	}

	return 0;
}

static int riscv_vcpu_get_sbi_ext_multi(struct kvm_vcpu *vcpu,
					unsigned long reg_num,
					unsigned long *reg_val)
{
	unsigned long i, ext_id, ext_val;

	if (reg_num > KVM_REG_RISCV_SBI_MULTI_REG_LAST)
		return -ENOENT;

	for (i = 0; i < BITS_PER_LONG; i++) {
		ext_id = i + reg_num * BITS_PER_LONG;
		if (ext_id >= KVM_RISCV_SBI_EXT_MAX)
			break;

		ext_val = 0;
		riscv_vcpu_get_sbi_ext_single(vcpu, ext_id, &ext_val);
		if (ext_val)
			*reg_val |= KVM_REG_RISCV_SBI_MULTI_MASK(ext_id);
	}

	return 0;
}
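
/*
 * KVM_REG_RISCV_SBI_EXT accessors. Extension enablement may only be
 * changed before the vCPU has run for the first time, hence the -EBUSY
 * check against ran_atleast_once in the set path.
 */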
int kvm_riscv_vcpu_set_reg_sbi_ext(struct kvm_vcpu *vcpu,
				   const struct kvm_one_reg *reg)
{
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_SBI_EXT);
	unsigned long reg_val, reg_subtype;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;

	if (vcpu->arch.ran_atleast_once)
		return -EBUSY;

	reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
	reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;

	if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	switch (reg_subtype) {
	case KVM_REG_RISCV_SBI_SINGLE:
		return riscv_vcpu_set_sbi_ext_single(vcpu, reg_num, reg_val);
	case KVM_REG_RISCV_SBI_MULTI_EN:
		return riscv_vcpu_set_sbi_ext_multi(vcpu, reg_num, reg_val, true);
	case KVM_REG_RISCV_SBI_MULTI_DIS:
		return riscv_vcpu_set_sbi_ext_multi(vcpu, reg_num, reg_val, false);
	default:
		return -ENOENT;
	}

	return 0;
}

int kvm_riscv_vcpu_get_reg_sbi_ext(struct kvm_vcpu *vcpu,
				   const struct kvm_one_reg *reg)
{
	int rc;
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_SBI_EXT);
	unsigned long reg_val, reg_subtype;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;

	reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
	reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;

	reg_val = 0;
	switch (reg_subtype) {
	case KVM_REG_RISCV_SBI_SINGLE:
		rc = riscv_vcpu_get_sbi_ext_single(vcpu, reg_num, &reg_val);
		break;
	case KVM_REG_RISCV_SBI_MULTI_EN:
	case KVM_REG_RISCV_SBI_MULTI_DIS:
		rc = riscv_vcpu_get_sbi_ext_multi(vcpu, reg_num, &reg_val);
		if (!rc && reg_subtype == KVM_REG_RISCV_SBI_MULTI_DIS)
			reg_val = ~reg_val;
		break;
	default:
		rc = -ENOENT;
	}
	if (rc)
		return rc;

	if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	return 0;
}
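
/*
 * KVM_REG_RISCV_SBI_STATE accessors: currently only the STA (steal-time
 * accounting) subtype has per-vCPU state to get/set.
 */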
int kvm_riscv_vcpu_set_reg_sbi(struct kvm_vcpu *vcpu,
			       const struct kvm_one_reg *reg)
{
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_SBI_STATE);
	unsigned long reg_subtype, reg_val;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;

	if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
	reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;

	switch (reg_subtype) {
	case KVM_REG_RISCV_SBI_STA:
		return kvm_riscv_vcpu_set_reg_sbi_sta(vcpu, reg_num, reg_val);
	default:
		return -EINVAL;
	}

	return 0;
}

int kvm_riscv_vcpu_get_reg_sbi(struct kvm_vcpu *vcpu,
			       const struct kvm_one_reg *reg)
{
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_SBI_STATE);
	unsigned long reg_subtype, reg_val;
	int ret;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;

	reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
	reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;

	switch (reg_subtype) {
	case KVM_REG_RISCV_SBI_STA:
		ret = kvm_riscv_vcpu_get_reg_sbi_sta(vcpu, reg_num, &reg_val);
		break;
	default:
		return -EINVAL;
	}

	if (ret)
		return ret;

	if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	return 0;
}
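
/*
 * Find the extension that owns SBI extension ID @extid, returning it
 * only if the extension is enabled for this vCPU. Entries mapped to
 * KVM_RISCV_SBI_EXT_MAX (the base extension) bypass the status check.
 */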
const struct kvm_vcpu_sbi_extension *kvm_vcpu_sbi_find_ext(
				struct kvm_vcpu *vcpu, unsigned long extid)
{
	struct kvm_vcpu_sbi_context *scontext = &vcpu->arch.sbi_context;
	const struct kvm_riscv_sbi_extension_entry *entry;
	const struct kvm_vcpu_sbi_extension *ext;
	int i;

	for (i = 0; i < ARRAY_SIZE(sbi_ext); i++) {
		entry = &sbi_ext[i];
		ext = entry->ext_ptr;

		if (ext->extid_start <= extid && ext->extid_end >= extid) {
			if (entry->ext_idx >= KVM_RISCV_SBI_EXT_MAX ||
			    scontext->ext_status[entry->ext_idx] ==
						KVM_RISCV_SBI_EXT_STATUS_ENABLED)
				return ext;

			return NULL;
		}
	}

	return NULL;
}
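
/*
 * Top-level SBI ecall dispatch. Returns < 0 on error (exit to userspace
 * with the error), 0 to exit the ioctl loop without an error, or 1 to
 * continue running the guest.
 */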
int kvm_riscv_vcpu_sbi_ecall(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	int ret = 1;
	bool next_sepc = true;
	struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
	const struct kvm_vcpu_sbi_extension *sbi_ext;
	struct kvm_cpu_trap utrap = {0};
	struct kvm_vcpu_sbi_return sbi_ret = {
		.out_val = 0,
		.err_val = 0,
		.utrap = &utrap,
	};
	bool ext_is_v01 = false;

	sbi_ext = kvm_vcpu_sbi_find_ext(vcpu, cp->a7);
	if (sbi_ext && sbi_ext->handler) {
#ifdef CONFIG_RISCV_SBI_V01
		if (cp->a7 >= SBI_EXT_0_1_SET_TIMER &&
		    cp->a7 <= SBI_EXT_0_1_SHUTDOWN)
			ext_is_v01 = true;
#endif
		ret = sbi_ext->handler(vcpu, run, &sbi_ret);
	} else {
		/* Return error for unsupported SBI calls */
		cp->a0 = SBI_ERR_NOT_SUPPORTED;
		goto ecall_done;
	}

	/*
	 * When the SBI extension returns a Linux error code, exit the ioctl
	 * loop and forward the error to userspace.
	 */
	if (ret < 0) {
		next_sepc = false;
		goto ecall_done;
	}

	/* Handle special error cases i.e. trap, exit or userspace forward */
	if (sbi_ret.utrap->scause) {
		/* No need to increment sepc or exit ioctl loop */
		ret = 1;
		sbi_ret.utrap->sepc = cp->sepc;
		kvm_riscv_vcpu_trap_redirect(vcpu, sbi_ret.utrap);
		next_sepc = false;
		goto ecall_done;
	}

	/* Exit the ioctl loop or propagate the error code to the guest */
	if (sbi_ret.uexit) {
		next_sepc = false;
		ret = 0;
	} else {
		cp->a0 = sbi_ret.err_val;
		ret = 1;
	}
ecall_done:
	if (next_sepc)
		cp->sepc += 4;
	/* a1 should only be updated when we continue the ioctl loop */
	if (!ext_is_v01 && ret == 1)
		cp->a1 = sbi_ret.out_val;

	return ret;
}
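
/*
 * vCPU lifecycle hooks: probe and initialize every table entry at vCPU
 * creation, tear extensions down at destruction, and reset per-extension
 * state when the vCPU is reset.
 */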
void kvm_riscv_vcpu_sbi_init(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_sbi_context *scontext = &vcpu->arch.sbi_context;
	const struct kvm_riscv_sbi_extension_entry *entry;
	const struct kvm_vcpu_sbi_extension *ext;
	int idx, i;

	for (i = 0; i < ARRAY_SIZE(sbi_ext); i++) {
		entry = &sbi_ext[i];
		ext = entry->ext_ptr;
		idx = entry->ext_idx;

		if (idx < 0 || idx >= ARRAY_SIZE(scontext->ext_status))
			continue;

		if (ext->probe && !ext->probe(vcpu)) {
			scontext->ext_status[idx] = KVM_RISCV_SBI_EXT_STATUS_UNAVAILABLE;
			continue;
		}

		scontext->ext_status[idx] = ext->default_disabled ?
					KVM_RISCV_SBI_EXT_STATUS_DISABLED :
					KVM_RISCV_SBI_EXT_STATUS_ENABLED;

		if (ext->init && ext->init(vcpu) != 0)
			scontext->ext_status[idx] = KVM_RISCV_SBI_EXT_STATUS_UNAVAILABLE;
	}
}

void kvm_riscv_vcpu_sbi_deinit(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_sbi_context *scontext = &vcpu->arch.sbi_context;
	const struct kvm_riscv_sbi_extension_entry *entry;
	const struct kvm_vcpu_sbi_extension *ext;
	int idx, i;

	for (i = 0; i < ARRAY_SIZE(sbi_ext); i++) {
		entry = &sbi_ext[i];
		ext = entry->ext_ptr;
		idx = entry->ext_idx;

		if (idx < 0 || idx >= ARRAY_SIZE(scontext->ext_status))
			continue;

		if (scontext->ext_status[idx] == KVM_RISCV_SBI_EXT_STATUS_UNAVAILABLE ||
		    !ext->deinit)
			continue;

		ext->deinit(vcpu);
	}
}

void kvm_riscv_vcpu_sbi_reset(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_sbi_context *scontext = &vcpu->arch.sbi_context;
	const struct kvm_riscv_sbi_extension_entry *entry;
	const struct kvm_vcpu_sbi_extension *ext;
	int idx, i;

	for (i = 0; i < ARRAY_SIZE(sbi_ext); i++) {
		entry = &sbi_ext[i];
		ext = entry->ext_ptr;
		idx = entry->ext_idx;

		if (idx < 0 || idx >= ARRAY_SIZE(scontext->ext_status))
			continue;

		if (scontext->ext_status[idx] != KVM_RISCV_SBI_EXT_STATUS_ENABLED ||
		    !ext->reset)
			continue;

		ext->reset(vcpu);
	}
}