/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2020-2023 Loongson Technology Corporation Limited
 */

#ifndef __ASM_LOONGARCH_KVM_VCPU_H__
#define __ASM_LOONGARCH_KVM_VCPU_H__

#include <linux/kvm_host.h>
#include <asm/loongarch.h>

/* Controlled by 0x5 guest estat */
#define CPU_SIP0			(_ULCAST_(1))
#define CPU_SIP1			(_ULCAST_(1) << 1)
#define CPU_PMU				(_ULCAST_(1) << 10)
#define CPU_TIMER			(_ULCAST_(1) << 11)
#define CPU_IPI				(_ULCAST_(1) << 12)

/* Controlled by 0x52 guest exception VIP, aligned to estat bits 5~12 */
#define CPU_IP0				(_ULCAST_(1))
#define CPU_IP1				(_ULCAST_(1) << 1)
#define CPU_IP2				(_ULCAST_(1) << 2)
#define CPU_IP3				(_ULCAST_(1) << 3)
#define CPU_IP4				(_ULCAST_(1) << 4)
#define CPU_IP5				(_ULCAST_(1) << 5)
#define CPU_IP6				(_ULCAST_(1) << 6)
#define CPU_IP7				(_ULCAST_(1) << 7)
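
/*
 * NSEC_PER_SEC scaled down by 2^20 (about 0.95 * 10^6), presumably so the
 * timer code can convert with shifts instead of 64-bit division; the
 * power-of-two divisor trades roughly 5% of scale accuracy for speed.
 */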
#define MNSEC_PER_SEC			(NSEC_PER_SEC >> 20)

/* KVM_IRQ_LINE irq field index values */
#define KVM_LOONGSON_IRQ_TYPE_SHIFT	24
#define KVM_LOONGSON_IRQ_TYPE_MASK	0xff
#define KVM_LOONGSON_IRQ_VCPU_SHIFT	16
#define KVM_LOONGSON_IRQ_VCPU_MASK	0xff
#define KVM_LOONGSON_IRQ_NUM_SHIFT	0
#define KVM_LOONGSON_IRQ_NUM_MASK	0xffff
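
/*
 * Field layout implied by the shifts/masks above (a sketch, not uapi
 * documentation): bits 31..24 hold the irq type, bits 23..16 the target
 * vcpu index, bits 15..0 the irq number.  A decoder would look like:
 *
 *	type = (irq >> KVM_LOONGSON_IRQ_TYPE_SHIFT) & KVM_LOONGSON_IRQ_TYPE_MASK;
 *	vcpu = (irq >> KVM_LOONGSON_IRQ_VCPU_SHIFT) & KVM_LOONGSON_IRQ_VCPU_MASK;
 *	num  = (irq >> KVM_LOONGSON_IRQ_NUM_SHIFT)  & KVM_LOONGSON_IRQ_NUM_MASK;
 */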

typedef union loongarch_instruction  larch_inst;
typedef int (*exit_handle_fn)(struct kvm_vcpu *, int);

int kvm_emu_mmio_read(struct kvm_vcpu *vcpu, larch_inst inst);
int kvm_emu_mmio_write(struct kvm_vcpu *vcpu, larch_inst inst);
int kvm_complete_mmio_read(struct kvm_vcpu *vcpu, struct kvm_run *run);
int kvm_complete_iocsr_read(struct kvm_vcpu *vcpu, struct kvm_run *run);
int kvm_complete_user_service(struct kvm_vcpu *vcpu, struct kvm_run *run);
int kvm_emu_idle(struct kvm_vcpu *vcpu);
int kvm_pending_timer(struct kvm_vcpu *vcpu);
int kvm_handle_fault(struct kvm_vcpu *vcpu, int fault);
void kvm_deliver_intr(struct kvm_vcpu *vcpu);
void kvm_deliver_exception(struct kvm_vcpu *vcpu);
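
/*
 * Typical MMIO-read flow (a sketch of expected usage, not normative): a
 * trapped guest load exits to userspace as KVM_EXIT_MMIO; once userspace
 * fills run->mmio.data and re-enters, kvm_complete_mmio_read() copies the
 * data into the destination GPR recorded at emulation time, e.g. via
 * kvm_write_reg(vcpu, rd, val).  kvm_complete_iocsr_read() plays the same
 * role for emulated iocsr accesses.
 */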

void kvm_own_fpu(struct kvm_vcpu *vcpu);
void kvm_lose_fpu(struct kvm_vcpu *vcpu);
void kvm_save_fpu(struct loongarch_fpu *fpu);
void kvm_restore_fpu(struct loongarch_fpu *fpu);
void kvm_restore_fcsr(struct loongarch_fpu *fpu);
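
/*
 * The own/lose pair above suggests lazy FPU switching: kvm_own_fpu() is
 * expected to run when the guest first touches the FPU, enabling the unit
 * and restoring guest state, while kvm_lose_fpu() saves state back and
 * disables the unit when the vCPU stops running.
 */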

#ifdef CONFIG_CPU_HAS_LSX
int kvm_own_lsx(struct kvm_vcpu *vcpu);
void kvm_save_lsx(struct loongarch_fpu *fpu);
void kvm_restore_lsx(struct loongarch_fpu *fpu);
#else
static inline int kvm_own_lsx(struct kvm_vcpu *vcpu) { return -EINVAL; }
static inline void kvm_save_lsx(struct loongarch_fpu *fpu) { }
static inline void kvm_restore_lsx(struct loongarch_fpu *fpu) { }
#endif

#ifdef CONFIG_CPU_HAS_LASX
int kvm_own_lasx(struct kvm_vcpu *vcpu);
void kvm_save_lasx(struct loongarch_fpu *fpu);
void kvm_restore_lasx(struct loongarch_fpu *fpu);
#else
static inline int kvm_own_lasx(struct kvm_vcpu *vcpu) { return -EINVAL; }
static inline void kvm_save_lasx(struct loongarch_fpu *fpu) { }
static inline void kvm_restore_lasx(struct loongarch_fpu *fpu) { }
#endif

#ifdef CONFIG_CPU_HAS_LBT
int kvm_own_lbt(struct kvm_vcpu *vcpu);
#else
static inline int kvm_own_lbt(struct kvm_vcpu *vcpu) { return -EINVAL; }
#endif
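
/*
 * When the kernel is built without LSX/LASX/LBT, the stubs above make
 * kvm_own_*() fail with -EINVAL, so a guest cannot enable an extension the
 * host cannot context-switch; the empty save/restore stubs let common code
 * call them unconditionally.
 */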

void kvm_init_timer(struct kvm_vcpu *vcpu, unsigned long hz);
void kvm_save_timer(struct kvm_vcpu *vcpu);
void kvm_restore_timer(struct kvm_vcpu *vcpu);

int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq);
struct kvm_vcpu *kvm_get_vcpu_by_cpuid(struct kvm *kvm, int cpuid);

/*
 * LoongArch KVM guest interrupt handling
 */
static inline void kvm_queue_irq(struct kvm_vcpu *vcpu, unsigned int irq)
{
	set_bit(irq, &vcpu->arch.irq_pending);
	clear_bit(irq, &vcpu->arch.irq_clear);
}

static inline void kvm_dequeue_irq(struct kvm_vcpu *vcpu, unsigned int irq)
{
	clear_bit(irq, &vcpu->arch.irq_pending);
	set_bit(irq, &vcpu->arch.irq_clear);
}
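
/*
 * Illustrative use (INT_TI is assumed to come from asm/loongarch.h): when
 * the emulated timer fires, timer code can call kvm_queue_irq(vcpu, INT_TI);
 * kvm_deliver_intr() then folds irq_pending into the guest's CSR.ESTAT
 * before the next guest entry, and kvm_dequeue_irq() reverses the marking
 * once the interrupt source is cleared.
 */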

static inline int kvm_queue_exception(struct kvm_vcpu *vcpu,
				      unsigned int code, unsigned int subcode)
{
	/* Only one exception can be injected at a time */
	if (vcpu->arch.exception_pending)
		return -1;

	set_bit(code, &vcpu->arch.exception_pending);
	vcpu->arch.esubcode = subcode;

	return 0;
}

static inline unsigned long kvm_read_reg(struct kvm_vcpu *vcpu, int num)
{
	return vcpu->arch.gprs[num];
}

static inline void kvm_write_reg(struct kvm_vcpu *vcpu, int num, unsigned long val)
{
	vcpu->arch.gprs[num] = val;
}
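
/*
 * PV steal-time support is gated on the host's scheduler statistics:
 * sched_info_on() reports whether run-delay accounting is available, which
 * is presumably what the steal-time value exposed to the guest is computed
 * from.
 */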
static inline bool kvm_pvtime_supported(void)
{
	return !!sched_info_on();
}

static inline bool kvm_guest_has_pv_feature(struct kvm_vcpu *vcpu, unsigned int feature)
{
	return vcpu->kvm->arch.pv_features & BIT(feature);
}
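
/*
 * Illustrative check (KVM_FEATURE_IPI and KVM_HCALL_INVALID_CODE are taken
 * from the arch's kvm_para definitions and assumed here): a hypercall
 * handler can refuse PV IPI service unless userspace enabled the feature,
 * e.g.
 *
 *	if (!kvm_guest_has_pv_feature(vcpu, KVM_FEATURE_IPI))
 *		return KVM_HCALL_INVALID_CODE;
 */
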
#endif /* __ASM_LOONGARCH_KVM_VCPU_H__ */