/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2020-2023 Loongson Technology Corporation Limited
 */

#ifndef __ASM_LOONGARCH_KVM_HOST_H__
#define __ASM_LOONGARCH_KVM_HOST_H__

#include <linux/cpumask.h>
#include <linux/hrtimer.h>
#include <linux/interrupt.h>
#include <linux/kvm.h>
#include <linux/kvm_types.h>
#include <linux/mutex.h>
#include <linux/perf_event.h>
#include <linux/spinlock.h>
#include <linux/threads.h>
#include <linux/types.h>

#include <asm/inst.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_ipi.h>
#include <asm/kvm_eiointc.h>
#include <asm/kvm_pch_pic.h>
#include <asm/loongarch.h>

#define __KVM_HAVE_ARCH_INTC_INITIALIZED

/* Loongarch KVM register ids */
#define KVM_GET_IOC_CSR_IDX(id) ((id & KVM_CSR_IDX_MASK) >> LOONGARCH_REG_SHIFT)
#define KVM_GET_IOC_CPUCFG_IDX(id) ((id & KVM_CPUCFG_IDX_MASK) >> LOONGARCH_REG_SHIFT)
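/*
 * Example (illustrative, not part of the original header): user space
 * addresses a guest CSR through the KVM_GET_ONE_REG/KVM_SET_ONE_REG id
 * encoding, e.g. an id built with KVM_IOC_CSRID(LOONGARCH_CSR_ESTAT) from the
 * uapi header. KVM_GET_IOC_CSR_IDX() then recovers the raw CSR number (0x5
 * for ESTAT), which can be used to index the software CSR array defined
 * below, assuming the standard uapi id layout.
 */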
#define KVM_MAX_VCPUS 256
#define KVM_MAX_CPUCFG_REGS 21

#define KVM_HALT_POLL_NS_DEFAULT 500000
#define KVM_REQ_TLB_FLUSH_GPA KVM_ARCH_REQ(0)
#define KVM_REQ_STEAL_UPDATE KVM_ARCH_REQ(1)
#define KVM_REQ_PMU KVM_ARCH_REQ(2)
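/*
 * Note (illustrative, an assumption rather than text from this header): these
 * are arch-specific deferred-work requests. A typical pattern is that some
 * path calls, say, kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu), and the vCPU
 * thread consumes it with kvm_check_request() shortly before the next guest
 * entry.
 */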
#define KVM_GUESTDBG_SW_BP_MASK \
	(KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP)
#define KVM_GUESTDBG_VALID_MASK \
	(KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP | KVM_GUESTDBG_SINGLESTEP)

#define KVM_DIRTY_LOG_MANUAL_CAPS \
	(KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE | KVM_DIRTY_LOG_INITIALLY_SET)

struct kvm_vm_stat {
	struct kvm_vm_stat_generic generic;
	u64 pages;
	u64 hugepages;
};

struct kvm_vcpu_stat {
	struct kvm_vcpu_stat_generic generic;
	u64 int_exits;
	u64 idle_exits;
	u64 cpucfg_exits;
	u64 signal_exits;
	u64 hypercall_exits;
	u64 ipi_read_exits;
	u64 ipi_write_exits;
	u64 eiointc_read_exits;
	u64 eiointc_write_exits;
	u64 pch_pic_read_exits;
	u64 pch_pic_write_exits;
};

#define KVM_MEM_HUGEPAGE_CAPABLE (1UL << 0)
#define KVM_MEM_HUGEPAGE_INCAPABLE (1UL << 1)
struct kvm_arch_memory_slot {
	unsigned long flags;
};

#define HOST_MAX_PMNUM 16
struct kvm_context {
	unsigned long vpid_cache;
	struct kvm_vcpu *last_vcpu;
	/* Host PMU CSR */
	u64 perf_ctrl[HOST_MAX_PMNUM];
	u64 perf_cntr[HOST_MAX_PMNUM];
};

struct kvm_world_switch {
	int (*exc_entry)(void);
	int (*enter_guest)(struct kvm_run *run, struct kvm_vcpu *vcpu);
	unsigned long page_order;
};
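/*
 * Illustrative note (an assumption, based on the kvm_exception_size and
 * kvm_enter_guest_size symbols exported below): the world-switch code is
 * copied into dedicated pages at init time and kvm_loongarch_ops then points
 * at that copy; exc_entry is the guest exception/interrupt entry stub,
 * enter_guest is the host-to-guest switch routine, and page_order records the
 * allocation order of the pages holding the copied text.
 */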

#define MAX_PGTABLE_LEVELS 4

/*
 * Physical CPUID is used for interrupt routing, and there are different
 * definitions of the physical cpuid on different hardware:
 *
 *  For the LOONGARCH_CSR_CPUID register, the max CPUID size is 512
 *  For the IPI hardware, the max destination CPUID size is 1024
 *  For the eiointc interrupt controller, the max destination CPUID size is 256
 *  For the msgint interrupt controller, the max supported CPUID size is 65536
 *
 * Currently the max CPUID is defined as 256 for the KVM hypervisor; in the
 * future it will be expanded to 4096, covering at most 16 packages with up to
 * 256 vcpus per package.
 */
#define KVM_MAX_PHYID 256

struct kvm_phyid_info {
	struct kvm_vcpu *vcpu;
	bool enabled;
};

struct kvm_phyid_map {
	int max_phyid;
	struct kvm_phyid_info phys_map[KVM_MAX_PHYID];
};
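/*
 * Illustrative note (assumption, not from the original header): this table
 * maps a guest physical cpuid, as programmed by the guest, back to the owning
 * vcpu and whether that id is currently enabled, so that IPIs and other
 * routed interrupts can find their destination; max_phyid tracks the highest
 * id in use to bound lookups.
 */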

struct kvm_arch {
	/* Guest physical mm */
	kvm_pte_t *pgd;
	unsigned long gpa_size;
	unsigned long invalid_ptes[MAX_PGTABLE_LEVELS];
	unsigned int pte_shifts[MAX_PGTABLE_LEVELS];
	unsigned int root_level;
	spinlock_t phyid_map_lock;
	struct kvm_phyid_map *phyid_map;
	/* Enabled PV features */
	unsigned long pv_features;

	s64 time_offset;
	struct kvm_context __percpu *vmcs;
	struct loongarch_ipi *ipi;
	struct loongarch_eiointc *eiointc;
	struct loongarch_pch_pic *pch_pic;
};

#define CSR_MAX_NUMS 0x800

struct loongarch_csrs {
	unsigned long csrs[CSR_MAX_NUMS];
};

/* Resume Flags */
#define RESUME_HOST 0
#define RESUME_GUEST 1
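/*
 * Illustrative note (assumption): exit handlers return RESUME_GUEST when the
 * exit has been handled in the kernel and the vCPU can re-enter guest mode,
 * and RESUME_HOST when control must first go back to the host side, e.g. to
 * let user space complete an MMIO or IOCSR access.
 */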

enum emulation_result {
	EMULATE_DONE,		/* no further processing */
	EMULATE_DO_MMIO,	/* kvm_run filled with MMIO request */
	EMULATE_DO_IOCSR,	/* handle IOCSR request */
	EMULATE_FAIL,		/* can't emulate this instruction */
	EMULATE_EXCEPT,		/* A guest exception has been generated */
};

#define KVM_LARCH_FPU (0x1 << 0)
#define KVM_LARCH_LSX (0x1 << 1)
#define KVM_LARCH_LASX (0x1 << 2)
#define KVM_LARCH_LBT (0x1 << 3)
#define KVM_LARCH_PMU (0x1 << 4)
#define KVM_LARCH_SWCSR_LATEST (0x1 << 5)
#define KVM_LARCH_HWCSR_USABLE (0x1 << 6)
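/*
 * Illustrative note (assumption): these bits are tracked in the per-vCPU
 * aux_inuse mask below, e.g. (vcpu->arch.aux_inuse & KVM_LARCH_FPU) being set
 * means the guest FPU context is currently live in the hardware registers and
 * must be saved before the host or another vCPU may clobber it.
 */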

#define LOONGARCH_PV_FEAT_UPDATED BIT_ULL(63)
#define LOONGARCH_PV_FEAT_MASK (BIT(KVM_FEATURE_IPI) |		\
				BIT(KVM_FEATURE_STEAL_TIME) |	\
				BIT(KVM_FEATURE_USER_HCALL) |	\
				BIT(KVM_FEATURE_VIRT_EXTIOI))
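/*
 * Illustrative note (assumption): kvm->arch.pv_features above holds which of
 * these paravirt features are enabled for the VM, so a check like
 * (kvm->arch.pv_features & BIT(KVM_FEATURE_STEAL_TIME)) gates steal-time
 * accounting, while LOONGARCH_PV_FEAT_UPDATED marks that the feature set has
 * been explicitly configured by user space.
 */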

struct kvm_vcpu_arch {
	/*
	 * Host and guest exception entry points are kept as unsigned long
	 * (rather than function pointers) so the value can be loaded into a
	 * register directly.
	 */
	unsigned long host_eentry;
	unsigned long guest_eentry;

	/* Pointers stored here for easy accessing from assembly code */
	int (*handle_exit)(struct kvm_run *run, struct kvm_vcpu *vcpu);

	/* GPA (=HVA) of PGD for secondary mmu */
	unsigned long kvm_pgd;

	/* Host registers preserved across guest mode execution */
	unsigned long host_sp;
	unsigned long host_tp;
	unsigned long host_pgd;

	/* Host CSRs are used when handling exits from guest */
	unsigned long badi;
	unsigned long badv;
	unsigned long host_ecfg;
	unsigned long host_estat;
	unsigned long host_percpu;

	/* GPRs */
	unsigned long gprs[32];
	unsigned long pc;

	/* Which auxiliary state is loaded (KVM_LARCH_*) */
	unsigned int aux_inuse;

	/* FPU state */
	struct loongarch_fpu fpu FPU_ALIGN;
	struct loongarch_lbt lbt;

	/* CSR state */
	struct loongarch_csrs *csr;

	/* Guest max PMU CSR id */
	int max_pmu_csrid;

	/* GPR used as IO source/target */
	u32 io_gpr;

	/* KVM register to control count timer */
	u32 count_ctl;
	struct hrtimer swtimer;

	/* Bitmask of intr that are pending */
	unsigned long irq_pending;
	/* Bitmask of pending intr to be cleared */
	unsigned long irq_clear;

	/* Bitmask of exceptions that are pending */
	unsigned long exception_pending;
	unsigned int esubcode;

	/* Cache for pages needed inside spinlock regions */
	struct kvm_mmu_memory_cache mmu_page_cache;

	/* vcpu's vpid */
	u64 vpid;
	gpa_t flush_gpa;
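	/*
	 * The flush_gpa field above pairs with KVM_REQ_TLB_FLUSH_GPA: it
	 * records the guest physical address whose secondary-mmu TLB entry is
	 * stale. The flush is delayed until guest entry, when the VMID in
	 * CSR.GSTAT is known to be current for this vCPU (summarized from the
	 * change description that added the field).
	 */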

	/* Frequency of stable timer in Hz */
	u64 timer_mhz;
	ktime_t expire;

	/* Last CPU the vCPU state was loaded on */
	int last_sched_cpu;
	/* mp state */
	struct kvm_mp_state mp_state;
	/* ipi state */
	struct ipi_state ipi_state;
	/* cpucfg */
	u32 cpucfg[KVM_MAX_CPUCFG_REGS];

	/* paravirt steal time */
	struct {
		u64 guest_addr;
		u64 last_steal;
		struct gfn_to_hva_cache cache;
	} st;
};

static inline unsigned long readl_sw_gcsr(struct loongarch_csrs *csr, int reg)
{
	return csr->csrs[reg];
}

static inline void writel_sw_gcsr(struct loongarch_csrs *csr, int reg, unsigned long val)
{
	csr->csrs[reg] = val;
}
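/*
 * Example (illustrative): these accessors read and write the software copy of
 * a guest CSR kept in struct loongarch_csrs, indexed by the raw CSR number,
 * e.g. writel_sw_gcsr(vcpu->arch.csr, LOONGARCH_CSR_ESTAT, val) updates the
 * in-memory ESTAT image without touching hardware.
 */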

static inline bool kvm_guest_has_fpu(struct kvm_vcpu_arch *arch)
{
	return arch->cpucfg[2] & CPUCFG2_FP;
}

static inline bool kvm_guest_has_lsx(struct kvm_vcpu_arch *arch)
{
	return arch->cpucfg[2] & CPUCFG2_LSX;
}

static inline bool kvm_guest_has_lasx(struct kvm_vcpu_arch *arch)
{
	return arch->cpucfg[2] & CPUCFG2_LASX;
}

static inline bool kvm_guest_has_lbt(struct kvm_vcpu_arch *arch)
{
	return arch->cpucfg[2] & (CPUCFG2_X86BT | CPUCFG2_ARMBT | CPUCFG2_MIPSBT);
}

static inline bool kvm_guest_has_pmu(struct kvm_vcpu_arch *arch)
{
	return arch->cpucfg[6] & CPUCFG6_PMP;
}

static inline int kvm_get_pmu_num(struct kvm_vcpu_arch *arch)
{
	return (arch->cpucfg[6] & CPUCFG6_PMNUM) >> CPUCFG6_PMNUM_SHIFT;
}

bool kvm_arch_pmi_in_guest(struct kvm_vcpu *vcpu);

/* Debug: dump vcpu state */
int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu);

/* MMU handling */
void kvm_flush_tlb_all(void);
void kvm_flush_tlb_gpa(struct kvm_vcpu *vcpu, unsigned long gpa);
int kvm_handle_mm_fault(struct kvm_vcpu *vcpu, unsigned long badv, bool write, int ecode);

int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end, bool blockable);
int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);

static inline void update_pc(struct kvm_vcpu_arch *arch)
{
	arch->pc += 4;
}
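/*
 * Illustrative note: every LoongArch instruction is 32 bits wide, so
 * update_pc() steps the guest PC past one instruction; it is typically called
 * after a trapped instruction (MMIO, IOCSR, CPUCFG, ...) has been emulated so
 * the guest resumes at the next instruction.
 */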

/*
 * kvm_is_ifetch_fault() - Find whether a TLBL exception is due to ifetch fault.
 * @arch: Virtual CPU architecture state.
 *
 * Returns: Whether the TLBL exception was likely due to an instruction
 *	    fetch fault rather than a data load fault.
 */
static inline bool kvm_is_ifetch_fault(struct kvm_vcpu_arch *arch)
{
	return arch->pc == arch->badv;
}

/* Misc */
static inline void kvm_arch_hardware_unsetup(void) {}
static inline void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen) {}
static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) {}
static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) {}
static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {}
static inline void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot) {}
void kvm_check_vpid(struct kvm_vcpu *vcpu);
enum hrtimer_restart kvm_swtimer_wakeup(struct hrtimer *timer);
void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm, const struct kvm_memory_slot *memslot);
void kvm_init_vmcs(struct kvm *kvm);
void kvm_exc_entry(void);
int kvm_enter_guest(struct kvm_run *run, struct kvm_vcpu *vcpu);

extern unsigned long vpid_mask;
extern const unsigned long kvm_exception_size;
extern const unsigned long kvm_enter_guest_size;
extern struct kvm_world_switch *kvm_loongarch_ops;

#define SW_GCSR (1 << 0)
#define HW_GCSR (1 << 1)
#define INVALID_GCSR (1 << 2)

int get_gcsr_flag(int csr);
void set_hw_gcsr(int csr_id, unsigned long val);
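/*
 * Illustrative note (assumption): get_gcsr_flag() classifies a guest CSR
 * number as SW_GCSR (kept only in the software csrs[] image), HW_GCSR (backed
 * by a hardware guest CSR, writable via set_hw_gcsr()), or INVALID_GCSR (not
 * a valid guest CSR for this configuration).
 */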
#endif /* __ASM_LOONGARCH_KVM_HOST_H__ */