Mirror of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git (synced 2025-08-05 16:54:27 +00:00)

Currently, all kvm_riscv_hfence_xyz() APIs assume VMID to be the host VMID of the Guest/VM, which restricts use of these APIs to host TLB maintenance only. Let's allow passing VMID as a parameter to all kvm_riscv_hfence_xyz() APIs so that they can be re-used for nested virtualization related TLB maintenance.

Signed-off-by: Anup Patel <apatel@ventanamicro.com>
Tested-by: Atish Patra <atishp@rivosinc.com>
Reviewed-by: Nutty Liu <liujingqi@lanxincomputing.com>
Link: https://lore.kernel.org/r/20250618113532.471448-13-apatel@ventanamicro.com
Signed-off-by: Anup Patel <anup@brainfault.org>
84 lines
2.7 KiB
C
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2025 Ventana Micro Systems Inc.
 */

#ifndef __RISCV_KVM_TLB_H_
#define __RISCV_KVM_TLB_H_

#include <linux/kvm_types.h>

enum kvm_riscv_hfence_type {
	KVM_RISCV_HFENCE_UNKNOWN = 0,
	KVM_RISCV_HFENCE_GVMA_VMID_GPA,
	KVM_RISCV_HFENCE_GVMA_VMID_ALL,
	KVM_RISCV_HFENCE_VVMA_ASID_GVA,
	KVM_RISCV_HFENCE_VVMA_ASID_ALL,
	KVM_RISCV_HFENCE_VVMA_GVA,
	KVM_RISCV_HFENCE_VVMA_ALL
};

struct kvm_riscv_hfence {
	enum kvm_riscv_hfence_type type;
	unsigned long asid;
	unsigned long vmid;
	unsigned long order;
	gpa_t addr;
	gpa_t size;
};

#define KVM_RISCV_VCPU_MAX_HFENCE	64

#define KVM_RISCV_GSTAGE_TLB_MIN_ORDER	12

void kvm_riscv_local_hfence_gvma_vmid_gpa(unsigned long vmid,
					  gpa_t gpa, gpa_t gpsz,
					  unsigned long order);
void kvm_riscv_local_hfence_gvma_vmid_all(unsigned long vmid);
void kvm_riscv_local_hfence_gvma_gpa(gpa_t gpa, gpa_t gpsz,
				     unsigned long order);
void kvm_riscv_local_hfence_gvma_all(void);
void kvm_riscv_local_hfence_vvma_asid_gva(unsigned long vmid,
					  unsigned long asid,
					  unsigned long gva,
					  unsigned long gvsz,
					  unsigned long order);
void kvm_riscv_local_hfence_vvma_asid_all(unsigned long vmid,
					  unsigned long asid);
void kvm_riscv_local_hfence_vvma_gva(unsigned long vmid,
				     unsigned long gva, unsigned long gvsz,
				     unsigned long order);
void kvm_riscv_local_hfence_vvma_all(unsigned long vmid);

void kvm_riscv_tlb_flush_process(struct kvm_vcpu *vcpu);

void kvm_riscv_fence_i_process(struct kvm_vcpu *vcpu);
void kvm_riscv_hfence_vvma_all_process(struct kvm_vcpu *vcpu);
void kvm_riscv_hfence_process(struct kvm_vcpu *vcpu);

void kvm_riscv_fence_i(struct kvm *kvm,
		       unsigned long hbase, unsigned long hmask);
void kvm_riscv_hfence_gvma_vmid_gpa(struct kvm *kvm,
				    unsigned long hbase, unsigned long hmask,
				    gpa_t gpa, gpa_t gpsz,
				    unsigned long order, unsigned long vmid);
void kvm_riscv_hfence_gvma_vmid_all(struct kvm *kvm,
				    unsigned long hbase, unsigned long hmask,
				    unsigned long vmid);
void kvm_riscv_hfence_vvma_asid_gva(struct kvm *kvm,
				    unsigned long hbase, unsigned long hmask,
				    unsigned long gva, unsigned long gvsz,
				    unsigned long order, unsigned long asid,
				    unsigned long vmid);
void kvm_riscv_hfence_vvma_asid_all(struct kvm *kvm,
				    unsigned long hbase, unsigned long hmask,
				    unsigned long asid, unsigned long vmid);
void kvm_riscv_hfence_vvma_gva(struct kvm *kvm,
			       unsigned long hbase, unsigned long hmask,
			       unsigned long gva, unsigned long gvsz,
			       unsigned long order, unsigned long vmid);
void kvm_riscv_hfence_vvma_all(struct kvm *kvm,
			       unsigned long hbase, unsigned long hmask,
			       unsigned long vmid);

#endif
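
For context on the commit message above, here is a minimal usage sketch of the updated API. It is not part of the header: the wrapper function example_flush_gpa_range() and its arguments are invented for illustration, and the hbase == -1UL / hmask == 0 convention for "target every vCPU" is an assumption about how in-kernel callers typically invoke these helpers. The point is only that the caller now supplies the VMID explicitly (the host VMID for ordinary G-stage maintenance, or some other VMID for nested virtualization) instead of the API assuming the host VMID.

/* Hypothetical usage sketch -- not part of kvm_tlb.h above. */
static void example_flush_gpa_range(struct kvm *kvm, unsigned long vmid,
				    gpa_t gpa, gpa_t size)
{
	/*
	 * hbase == -1UL with hmask == 0 is assumed to mean "send the
	 * fence to every vCPU of this VM"; which VMID gets flushed is
	 * now chosen entirely by the caller via the last argument.
	 */
	kvm_riscv_hfence_gvma_vmid_gpa(kvm, -1UL, 0, gpa, size,
				       KVM_RISCV_GSTAGE_TLB_MIN_ORDER, vmid);
}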