/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2021 Google LLC
 * Author: Fuad Tabba <tabba@google.com>
 */

#ifndef __ARM64_KVM_NVHE_PKVM_H__
#define __ARM64_KVM_NVHE_PKVM_H__

#include <asm/kvm_pkvm.h>

#include <nvhe/gfp.h>
#include <nvhe/spinlock.h>

/*
 * Holds the relevant data for maintaining the vcpu state completely at hyp.
 */
struct pkvm_hyp_vcpu {
	struct kvm_vcpu vcpu;

	/* Backpointer to the host's (untrusted) vCPU instance. */
	struct kvm_vcpu *host_vcpu;

	/*
	 * If this hyp vCPU is loaded, then this is a backpointer to the
	 * per-cpu pointer tracking us. Otherwise, NULL if not loaded.
	 */
	struct pkvm_hyp_vcpu **loaded_hyp_vcpu;
};
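
/*
 * Illustrative sketch, not part of this header's API: a kvm_vcpu that is
 * known to be embedded in a pkvm_hyp_vcpu can be converted back to its
 * wrapper with container_of(). The helper name below is hypothetical.
 */
static inline struct pkvm_hyp_vcpu *to_pkvm_hyp_vcpu(struct kvm_vcpu *vcpu)
{
	return container_of(vcpu, struct pkvm_hyp_vcpu, vcpu);
}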

/*
 * Holds the relevant data for running a protected vm.
 */
struct pkvm_hyp_vm {
	struct kvm kvm;

	/* Backpointer to the host's (untrusted) KVM instance. */
	struct kvm *host_kvm;

	/* The guest's stage-2 page-table managed by the hypervisor. */
	struct kvm_pgtable pgt;
	struct kvm_pgtable_mm_ops mm_ops;
	struct hyp_pool pool;
	hyp_spinlock_t lock;

	/* Array of the hyp vCPU structures for this VM. */
	struct pkvm_hyp_vcpu *vcpus[];
};
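
/*
 * Illustrative sketch, not part of this header's API: because vcpus[] is a
 * flexible array member, the allocation size of a pkvm_hyp_vm depends on
 * the number of vCPUs. The helper below is a hypothetical example; real
 * code would also guard against arithmetic overflow.
 */
static inline size_t pkvm_hyp_vm_size_example(unsigned int nr_vcpus)
{
	return sizeof(struct pkvm_hyp_vm) +
	       nr_vcpus * sizeof(struct pkvm_hyp_vcpu *);
}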

extern hyp_spinlock_t vm_table_lock;

static inline struct pkvm_hyp_vm *
pkvm_hyp_vcpu_to_hyp_vm(struct pkvm_hyp_vcpu *hyp_vcpu)
{
	return container_of(hyp_vcpu->vcpu.kvm, struct pkvm_hyp_vm, kvm);
}

static inline bool pkvm_hyp_vcpu_is_protected(struct pkvm_hyp_vcpu *hyp_vcpu)
{
	return vcpu_is_protected(&hyp_vcpu->vcpu);
}

static inline bool pkvm_hyp_vm_is_protected(struct pkvm_hyp_vm *hyp_vm)
{
	return kvm_vm_is_protected(&hyp_vm->kvm);
}

void pkvm_hyp_vm_table_init(void *tbl);

int __pkvm_init_vm(struct kvm *host_kvm, unsigned long vm_hva,
		   unsigned long pgd_hva);
int __pkvm_init_vcpu(pkvm_handle_t handle, struct kvm_vcpu *host_vcpu,
		     unsigned long vcpu_hva);
int __pkvm_teardown_vm(pkvm_handle_t handle);

struct pkvm_hyp_vcpu *pkvm_load_hyp_vcpu(pkvm_handle_t handle,
					 unsigned int vcpu_idx);
void pkvm_put_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu);
struct pkvm_hyp_vcpu *pkvm_get_loaded_hyp_vcpu(void);
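
/*
 * Illustrative usage sketch, not part of this header's API: a hypothetical
 * hypercall handler pins the hyp vCPU for the duration of an operation and
 * releases it before returning. The function name and error code are
 * assumptions for the example only.
 */
static inline int pkvm_example_vcpu_op(pkvm_handle_t handle,
				       unsigned int vcpu_idx)
{
	struct pkvm_hyp_vcpu *hyp_vcpu;

	hyp_vcpu = pkvm_load_hyp_vcpu(handle, vcpu_idx);
	if (!hyp_vcpu)
		return -EINVAL;

	/* ... operate on hyp_vcpu->vcpu while it is loaded ... */

	pkvm_put_hyp_vcpu(hyp_vcpu);
	return 0;
}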

struct pkvm_hyp_vm *get_pkvm_hyp_vm(pkvm_handle_t handle);
struct pkvm_hyp_vm *get_np_pkvm_hyp_vm(pkvm_handle_t handle);
void put_pkvm_hyp_vm(struct pkvm_hyp_vm *hyp_vm);
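
/*
 * Illustrative usage sketch, not part of this header's API: a successful
 * get_pkvm_hyp_vm() pairs with put_pkvm_hyp_vm() once the caller is done
 * with the VM. The function name is an assumption for the example only.
 */
static inline bool pkvm_example_vm_is_protected(pkvm_handle_t handle)
{
	struct pkvm_hyp_vm *hyp_vm;
	bool ret;

	hyp_vm = get_pkvm_hyp_vm(handle);
	if (!hyp_vm)
		return false;

	ret = pkvm_hyp_vm_is_protected(hyp_vm);
	put_pkvm_hyp_vm(hyp_vm);
	return ret;
}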

bool kvm_handle_pvm_sysreg(struct kvm_vcpu *vcpu, u64 *exit_code);
bool kvm_handle_pvm_restricted(struct kvm_vcpu *vcpu, u64 *exit_code);
void kvm_init_pvm_id_regs(struct kvm_vcpu *vcpu);
int kvm_check_pvm_sysreg_table(void);

#endif /* __ARM64_KVM_NVHE_PKVM_H__ */