linux/arch/loongarch/include/asm/kvm_para.h
Bibo Mao 2737dee106 LoongArch: KVM: Add hypercall service support for usermode VMM
Some VMMs provide special hypercall services in usermode. KVM should not
handle such usermode hypercall services itself; instead it passes them to
usermode and lets the usermode VMM handle them.

Here a new code, KVM_HCALL_CODE_USER_SERVICE, is added for the user-mode
hypercall service, and KVM makes all six registers visible to the usermode VMM.

Signed-off-by: Bibo Mao <maobibo@loongson.cn>
Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
2025-01-13 21:37:17 +08:00

187 lines
4.3 KiB
C

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_LOONGARCH_KVM_PARA_H
#define _ASM_LOONGARCH_KVM_PARA_H
#include <uapi/asm/kvm_para.h>
/*
 * Hypercall code field
 *
 * A hypercall code is (vendor << HYPERVISOR_VENDOR_SHIFT) + code;
 * HYPERVISOR_KVM is the only vendor defined here.  The encoded value is
 * the immediate operand of the hvcl instruction (see kvm_hypercall*()).
 */
#define HYPERVISOR_KVM			1
#define HYPERVISOR_VENDOR_SHIFT		8
/* Arguments fully parenthesized so expression operands expand safely. */
#define HYPERCALL_ENCODE(vendor, code)	(((vendor) << HYPERVISOR_VENDOR_SHIFT) + (code))

#define KVM_HCALL_CODE_SERVICE		0
#define KVM_HCALL_CODE_SWDBG		1
#define KVM_HCALL_CODE_USER_SERVICE	2

#define KVM_HCALL_SERVICE	HYPERCALL_ENCODE(HYPERVISOR_KVM, KVM_HCALL_CODE_SERVICE)
#define KVM_HCALL_FUNC_IPI	1
#define KVM_HCALL_FUNC_NOTIFY	2

#define KVM_HCALL_SWDBG		HYPERCALL_ENCODE(HYPERVISOR_KVM, KVM_HCALL_CODE_SWDBG)
/* Hypercall handled by the usermode VMM rather than by KVM itself. */
#define KVM_HCALL_USER_SERVICE	HYPERCALL_ENCODE(HYPERVISOR_KVM, KVM_HCALL_CODE_USER_SERVICE)
/*
 * LoongArch hypercall return code (delivered back in $a0, see the
 * interface comment below)
 *
 * Negative constants parenthesized so they expand safely inside
 * surrounding expressions.
 */
#define KVM_HCALL_SUCCESS		0
#define KVM_HCALL_INVALID_CODE		(-1UL)
#define KVM_HCALL_INVALID_PARAMETER	(-2UL)

/*
 * Steal-time register format: bit 0 marks the record valid, bits 63:6
 * hold the (64-byte aligned) guest physical address of the record.
 */
#define KVM_STEAL_PHYS_VALID		BIT_ULL(0)
#define KVM_STEAL_PHYS_MASK		GENMASK_ULL(63, 6)
/*
 * Steal-time record shared between guest and hypervisor.
 *
 * NOTE(review): the record is registered via a guest physical address
 * (see KVM_STEAL_PHYS_VALID/KVM_STEAL_PHYS_MASK above); its layout is
 * ABI and must not change -- confirm field semantics against the
 * hypervisor-side implementation.
 */
struct kvm_steal_time {
	__u64 steal;	/* accumulated stolen time, presumably in ns -- TODO confirm */
	__u32 version;	/* presumably a seqcount-style update counter -- verify */
	__u32 flags;
	__u32 pad[12];	/* pads the record; reserved room for future extension */
};
/*
* Hypercall interface for KVM hypervisor
*
* a0: function identifier
* a1-a5: args
* Return value will be placed in a0.
* Up to 5 arguments are passed in a1, a2, a3, a4, a5.
*/
/*
 * Issue hypercall KVM_HCALL_SERVICE with no arguments.
 *
 * @fid: hypercall function identifier (e.g. KVM_HCALL_FUNC_*), passed in $a0.
 * Return: hypervisor result, delivered back in $a0.
 *
 * "ret" and "fun" are both pinned to register $a0: it carries the
 * function id in and the result out.  The "memory" clobber keeps the
 * compiler from caching memory contents across the hypercall.
 */
static __always_inline long kvm_hypercall0(u64 fid)
{
	register long ret asm("a0");
	register unsigned long fun asm("a0") = fid;

	__asm__ __volatile__(
		"hvcl "__stringify(KVM_HCALL_SERVICE)
		: "=r" (ret)
		: "r" (fun)
		: "memory"
		);

	return ret;
}
/*
 * Issue hypercall KVM_HCALL_SERVICE with one argument.
 *
 * @fid:  function identifier, passed in $a0.
 * @arg0: argument, passed in $a1.
 * Return: hypervisor result, delivered back in $a0.
 *
 * "ret" and "fun" share $a0 (id in, result out); the "memory" clobber
 * prevents caching of memory contents across the hypercall.
 */
static __always_inline long kvm_hypercall1(u64 fid, unsigned long arg0)
{
	register long ret asm("a0");
	register unsigned long fun asm("a0") = fid;
	register unsigned long a1 asm("a1") = arg0;

	__asm__ __volatile__(
		"hvcl "__stringify(KVM_HCALL_SERVICE)
		: "=r" (ret)
		: "r" (fun), "r" (a1)
		: "memory"
		);

	return ret;
}
/*
 * Issue hypercall KVM_HCALL_SERVICE with two arguments.
 *
 * @fid:  function identifier, passed in $a0.
 * @arg0, @arg1: arguments, passed in $a1 and $a2.
 * Return: hypervisor result, delivered back in $a0.
 *
 * "ret" and "fun" share $a0 (id in, result out); the "memory" clobber
 * prevents caching of memory contents across the hypercall.
 */
static __always_inline long kvm_hypercall2(u64 fid,
		unsigned long arg0, unsigned long arg1)
{
	register long ret asm("a0");
	register unsigned long fun asm("a0") = fid;
	register unsigned long a1 asm("a1") = arg0;
	register unsigned long a2 asm("a2") = arg1;

	__asm__ __volatile__(
		"hvcl "__stringify(KVM_HCALL_SERVICE)
		: "=r" (ret)
		: "r" (fun), "r" (a1), "r" (a2)
		: "memory"
		);

	return ret;
}
/*
 * Issue hypercall KVM_HCALL_SERVICE with three arguments.
 *
 * @fid:  function identifier, passed in $a0.
 * @arg0..@arg2: arguments, passed in $a1-$a3.
 * Return: hypervisor result, delivered back in $a0.
 *
 * "ret" and "fun" share $a0 (id in, result out); the "memory" clobber
 * prevents caching of memory contents across the hypercall.
 */
static __always_inline long kvm_hypercall3(u64 fid,
		unsigned long arg0, unsigned long arg1, unsigned long arg2)
{
	register long ret asm("a0");
	register unsigned long fun asm("a0") = fid;
	register unsigned long a1 asm("a1") = arg0;
	register unsigned long a2 asm("a2") = arg1;
	register unsigned long a3 asm("a3") = arg2;

	__asm__ __volatile__(
		"hvcl "__stringify(KVM_HCALL_SERVICE)
		: "=r" (ret)
		: "r" (fun), "r" (a1), "r" (a2), "r" (a3)
		: "memory"
		);

	return ret;
}
/*
 * Issue hypercall KVM_HCALL_SERVICE with four arguments.
 *
 * @fid:  function identifier, passed in $a0.
 * @arg0..@arg3: arguments, passed in $a1-$a4.
 * Return: hypervisor result, delivered back in $a0.
 *
 * "ret" and "fun" share $a0 (id in, result out); the "memory" clobber
 * prevents caching of memory contents across the hypercall.
 * Constraint spacing normalized ("r" (fun)) to match kvm_hypercall0..3.
 */
static __always_inline long kvm_hypercall4(u64 fid,
		unsigned long arg0, unsigned long arg1,
		unsigned long arg2, unsigned long arg3)
{
	register long ret asm("a0");
	register unsigned long fun asm("a0") = fid;
	register unsigned long a1 asm("a1") = arg0;
	register unsigned long a2 asm("a2") = arg1;
	register unsigned long a3 asm("a3") = arg2;
	register unsigned long a4 asm("a4") = arg3;

	__asm__ __volatile__(
		"hvcl "__stringify(KVM_HCALL_SERVICE)
		: "=r" (ret)
		: "r" (fun), "r" (a1), "r" (a2), "r" (a3), "r" (a4)
		: "memory"
		);

	return ret;
}
/*
 * Issue hypercall KVM_HCALL_SERVICE with five arguments (the maximum,
 * per the interface comment above).
 *
 * @fid:  function identifier, passed in $a0.
 * @arg0..@arg4: arguments, passed in $a1-$a5.
 * Return: hypervisor result, delivered back in $a0.
 *
 * "ret" and "fun" share $a0 (id in, result out); the "memory" clobber
 * prevents caching of memory contents across the hypercall.
 * Constraint spacing normalized ("r" (fun)) to match kvm_hypercall0..3.
 */
static __always_inline long kvm_hypercall5(u64 fid,
		unsigned long arg0, unsigned long arg1,
		unsigned long arg2, unsigned long arg3, unsigned long arg4)
{
	register long ret asm("a0");
	register unsigned long fun asm("a0") = fid;
	register unsigned long a1 asm("a1") = arg0;
	register unsigned long a2 asm("a2") = arg1;
	register unsigned long a3 asm("a3") = arg2;
	register unsigned long a4 asm("a4") = arg3;
	register unsigned long a5 asm("a5") = arg4;

	__asm__ __volatile__(
		"hvcl "__stringify(KVM_HCALL_SERVICE)
		: "=r" (ret)
		: "r" (fun), "r" (a1), "r" (a2), "r" (a3), "r" (a4), "r" (a5)
		: "memory"
		);

	return ret;
}
#ifdef CONFIG_PARAVIRT
/* Real implementations provided by the arch paravirt code elsewhere. */
bool kvm_para_available(void);
unsigned int kvm_arch_para_features(void);
#else
/* Without CONFIG_PARAVIRT: no hypervisor detected, no features offered. */
static inline bool kvm_para_available(void)
{
	return false;
}

static inline unsigned int kvm_arch_para_features(void)
{
	return 0;
}
#endif
/* No paravirt hints are advertised on LoongArch. */
static inline unsigned int kvm_arch_para_hints(void)
{
	return 0;
}
/* Guest-paused detection is not implemented here; always report false. */
static inline bool kvm_check_and_clear_guest_paused(void)
{
	return false;
}
#endif /* _ASM_LOONGARCH_KVM_PARA_H */