/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ARM64_KVM_NESTED_H
#define __ARM64_KVM_NESTED_H

#include <linux/bitfield.h>
#include <linux/kvm_host.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_pgtable.h>

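/*
 * A vcpu runs with nested virt support if the host has the
 * ARM64_HAS_NESTED_VIRT capability and the vcpu was created with the
 * KVM_ARM_VCPU_HAS_EL2 feature. This is compile-time false in the
 * nVHE hypervisor object.
 */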
static inline bool vcpu_has_nv(const struct kvm_vcpu *vcpu)
{
	return (!__is_defined(__KVM_NVHE_HYPERVISOR__) &&
		cpus_have_final_cap(ARM64_HAS_NESTED_VIRT) &&
		vcpu_has_feature(vcpu, KVM_ARM_VCPU_HAS_EL2));
}

/* Translation helpers from non-VHE EL2 to EL1 */
static inline u64 tcr_el2_ps_to_tcr_el1_ips(u64 tcr_el2)
{
	return (u64)FIELD_GET(TCR_EL2_PS_MASK, tcr_el2) << TCR_IPS_SHIFT;
}

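/*
 * Build an EL1 view of a non-VHE guest hypervisor's TCR_EL2: the
 * TBI/PS/TG0/ORGN0/IRGN0/T0SZ controls map onto their TTBR0_EL1
 * equivalents, and TTBR1_EL1 is disabled via EPD1, non-VHE EL2 only
 * having a single translation table base.
 */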
static inline u64 translate_tcr_el2_to_tcr_el1(u64 tcr)
{
	return TCR_EPD1_MASK |				/* disable TTBR1_EL1 */
	       ((tcr & TCR_EL2_TBI) ? TCR_TBI0 : 0) |
	       tcr_el2_ps_to_tcr_el1_ips(tcr) |
	       (tcr & TCR_EL2_TG0_MASK) |
	       (tcr & TCR_EL2_ORGN0_MASK) |
	       (tcr & TCR_EL2_IRGN0_MASK) |
	       (tcr & TCR_EL2_T0SZ_MASK);
}

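/*
 * The nVHE layout of CPTR_EL2 uses trap bits with the opposite polarity
 * to CPACR_EL1's enable fields: TFP/TZ set means "trap FP/SVE", while
 * FPEN/ZEN set means "don't trap". Hence the negations below.
 *
 * For example, a guest hypervisor setting CPTR_EL2.TFP results in a
 * CPACR_EL1 value with FPEN clear, so FP/SIMD accesses from the L2
 * guest still trap, as expected.
 */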
static inline u64 translate_cptr_el2_to_cpacr_el1(u64 cptr_el2)
{
	u64 cpacr_el1 = CPACR_EL1_RES1;

	if (cptr_el2 & CPTR_EL2_TTA)
		cpacr_el1 |= CPACR_EL1_TTA;
	if (!(cptr_el2 & CPTR_EL2_TFP))
		cpacr_el1 |= CPACR_EL1_FPEN;
	if (!(cptr_el2 & CPTR_EL2_TZ))
		cpacr_el1 |= CPACR_EL1_ZEN;

	cpacr_el1 |= cptr_el2 & (CPTR_EL2_TCPAC | CPTR_EL2_TAM);

	return cpacr_el1;
}

static inline u64 translate_sctlr_el2_to_sctlr_el1(u64 val)
{
	/* Only preserve the minimal set of bits we support */
	val &= (SCTLR_ELx_M | SCTLR_ELx_A | SCTLR_ELx_C | SCTLR_ELx_SA |
		SCTLR_ELx_I | SCTLR_ELx_IESB | SCTLR_ELx_WXN | SCTLR_ELx_EE);
	val |= SCTLR_EL1_RES1;

	return val;
}

static inline u64 translate_ttbr0_el2_to_ttbr0_el1(u64 ttbr0)
{
	/* Clear the ASID field */
	return ttbr0 & ~GENMASK_ULL(63, 48);
}

extern bool forward_smc_trap(struct kvm_vcpu *vcpu);
extern bool forward_debug_exception(struct kvm_vcpu *vcpu);
extern void kvm_init_nested(struct kvm *kvm);
extern int kvm_vcpu_init_nested(struct kvm_vcpu *vcpu);
extern void kvm_init_nested_s2_mmu(struct kvm_s2_mmu *mmu);
extern struct kvm_s2_mmu *lookup_s2_mmu(struct kvm_vcpu *vcpu);

union tlbi_info;

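/*
 * Iterate over the shadow stage-2 MMUs tagged with the given guest VMID
 * and invoke the callback with the decoded TLBI payload for each match.
 */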
extern void kvm_s2_mmu_iterate_by_vmid(struct kvm *kvm, u16 vmid,
				       const union tlbi_info *info,
				       void (*)(struct kvm_s2_mmu *,
						const union tlbi_info *));
extern void kvm_vcpu_load_hw_mmu(struct kvm_vcpu *vcpu);
extern void kvm_vcpu_put_hw_mmu(struct kvm_vcpu *vcpu);

extern void check_nested_vcpu_requests(struct kvm_vcpu *vcpu);
extern void kvm_nested_flush_hwstate(struct kvm_vcpu *vcpu);
extern void kvm_nested_sync_hwstate(struct kvm_vcpu *vcpu);

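/*
 * Result of walking the guest hypervisor's stage-2 page tables for a
 * given L2 IPA, as filled in by kvm_walk_nested_s2(): the output PA,
 * the size and level of the mapping, its permissions, the raw
 * descriptor, and the ESR to inject if the walk faulted.
 */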
struct kvm_s2_trans {
	phys_addr_t output;
	unsigned long block_size;
	bool writable;
	bool readable;
	int level;
	u32 esr;
	u64 desc;
};

static inline phys_addr_t kvm_s2_trans_output(struct kvm_s2_trans *trans)
{
	return trans->output;
}

static inline unsigned long kvm_s2_trans_size(struct kvm_s2_trans *trans)
{
	return trans->block_size;
}

static inline u32 kvm_s2_trans_esr(struct kvm_s2_trans *trans)
{
	return trans->esr;
}

static inline bool kvm_s2_trans_readable(struct kvm_s2_trans *trans)
{
	return trans->readable;
}

static inline bool kvm_s2_trans_writable(struct kvm_s2_trans *trans)
{
	return trans->writable;
}

static inline bool kvm_s2_trans_executable(struct kvm_s2_trans *trans)
{
	return !(trans->desc & BIT(54));
}

extern int kvm_walk_nested_s2(struct kvm_vcpu *vcpu, phys_addr_t gipa,
			      struct kvm_s2_trans *result);
extern int kvm_s2_handle_perm_fault(struct kvm_vcpu *vcpu,
				    struct kvm_s2_trans *trans);
extern int kvm_inject_s2_fault(struct kvm_vcpu *vcpu, u64 esr_el2);
extern void kvm_nested_s2_wp(struct kvm *kvm);
extern void kvm_nested_s2_unmap(struct kvm *kvm, bool may_block);
extern void kvm_nested_s2_flush(struct kvm *kvm);

unsigned long compute_tlb_inval_range(struct kvm_s2_mmu *mmu, u64 val);

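/*
 * Sanity-check a trapped EL1 TLBI encoding against the feature set
 * exposed to the guest: nXS forms require FEAT_XS, outer-shareable
 * forms require FEAT_TLBIOS, and range forms require FEAT_TLBIRANGE.
 */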
static inline bool kvm_supported_tlbi_s1e1_op(struct kvm_vcpu *vcpu, u32 instr)
{
	struct kvm *kvm = vcpu->kvm;
	u8 CRm = sys_reg_CRm(instr);

	if (!(sys_reg_Op0(instr) == TLBI_Op0 &&
	      sys_reg_Op1(instr) == TLBI_Op1_EL1))
		return false;

	if (!(sys_reg_CRn(instr) == TLBI_CRn_XS ||
	      (sys_reg_CRn(instr) == TLBI_CRn_nXS &&
	       kvm_has_feat(kvm, ID_AA64ISAR1_EL1, XS, IMP))))
		return false;

	if (CRm == TLBI_CRm_nROS &&
	    !kvm_has_feat(kvm, ID_AA64ISAR0_EL1, TLB, OS))
		return false;

	if ((CRm == TLBI_CRm_RIS || CRm == TLBI_CRm_ROS ||
	     CRm == TLBI_CRm_RNS) &&
	    !kvm_has_feat(kvm, ID_AA64ISAR0_EL1, TLB, RANGE))
		return false;

	return true;
}

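/*
 * Same checks as the EL1 variant above, but for the EL2 translation
 * regime. The IPA-based (IPAS2E1*) encodings are not stage-1 EL2
 * operations and are rejected here.
 */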
static inline bool kvm_supported_tlbi_s1e2_op(struct kvm_vcpu *vcpu, u32 instr)
{
	struct kvm *kvm = vcpu->kvm;
	u8 CRm = sys_reg_CRm(instr);

	if (!(sys_reg_Op0(instr) == TLBI_Op0 &&
	      sys_reg_Op1(instr) == TLBI_Op1_EL2))
		return false;

	if (!(sys_reg_CRn(instr) == TLBI_CRn_XS ||
	      (sys_reg_CRn(instr) == TLBI_CRn_nXS &&
	       kvm_has_feat(kvm, ID_AA64ISAR1_EL1, XS, IMP))))
		return false;

	if (CRm == TLBI_CRm_IPAIS || CRm == TLBI_CRm_IPAONS)
		return false;

	if (CRm == TLBI_CRm_nROS &&
	    !kvm_has_feat(kvm, ID_AA64ISAR0_EL1, TLB, OS))
		return false;

	if ((CRm == TLBI_CRm_RIS || CRm == TLBI_CRm_ROS ||
	     CRm == TLBI_CRm_RNS) &&
	    !kvm_has_feat(kvm, ID_AA64ISAR0_EL1, TLB, RANGE))
		return false;

	return true;
}

int kvm_init_nv_sysregs(struct kvm_vcpu *vcpu);
u64 limit_nv_id_reg(struct kvm *kvm, u32 reg, u64 val);

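/*
 * ERETAA/ERETAB emulation: authenticate the guest hypervisor's ELR
 * before performing the exception return. Without pointer auth support
 * this path must never be reached, hence the WARN in the stub.
 */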
#ifdef CONFIG_ARM64_PTR_AUTH
bool kvm_auth_eretax(struct kvm_vcpu *vcpu, u64 *elr);
#else
static inline bool kvm_auth_eretax(struct kvm_vcpu *vcpu, u64 *elr)
{
	/* We really should never execute this... */
	WARN_ON_ONCE(1);
	*elr = 0xbad9acc0debadbad;
	return false;
}
#endif

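/*
 * The level of the guest hypervisor's own stage-2 mapping is stashed in
 * the software bits of the shadow stage-2 PTE, so that the size of the
 * guest's mapping can be recovered later (e.g. on invalidation).
 */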
#define KVM_NV_GUEST_MAP_SZ	(KVM_PGTABLE_PROT_SW1 | KVM_PGTABLE_PROT_SW0)

static inline u64 kvm_encode_nested_level(struct kvm_s2_trans *trans)
{
	return FIELD_PREP(KVM_NV_GUEST_MAP_SZ, trans->level);
}

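/*
 * Example: with a 4K granule, a block/page descriptor with the
 * contiguous bit set covers 16 adjacent entries, so the macro below
 * returns an extra alignment of 4 bits (16 * 4KiB = 64KiB at level 3).
 */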
/* Adjust alignment for the contiguous bit as per StageOA() */
#define contiguous_bit_shift(d, wi, l)					\
	({								\
		u8 shift = 0;						\
									\
		if ((d) & PTE_CONT) {					\
			switch (BIT((wi)->pgshift)) {			\
			case SZ_4K:					\
				shift = 4;				\
				break;					\
			case SZ_16K:					\
				shift = (l) == 2 ? 5 : 7;		\
				break;					\
			case SZ_64K:					\
				shift = 5;				\
				break;					\
			}						\
		}							\
									\
		shift;							\
	})

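/*
 * Decode the payload of a range TLBI: extract TG, SCALE, NUM and
 * BaseADDR, return the base address scaled by the translation granule,
 * and report the span of the invalidation through @range (and the ASID
 * through @asid when requested).
 *
 * For example, TG=0b01 (4K), SCALE=0, NUM=0 yields a 2-page (8KiB)
 * range starting at BaseADDR << 12, per the usual __TLBI_RANGE_PAGES()
 * expansion.
 */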
static inline u64 decode_range_tlbi(u64 val, u64 *range, u16 *asid)
{
	u64 base, tg, num, scale;
	int shift;

	tg = FIELD_GET(GENMASK(47, 46), val);

	switch (tg) {
	case 1:
		shift = 12;
		break;
	case 2:
		shift = 14;
		break;
	case 3:
	default:		/* IMPDEF: handle tg==0 as 64k */
		shift = 16;
		break;
	}

	base = (val & GENMASK(36, 0)) << shift;

	if (asid)
		*asid = FIELD_GET(TLBIR_ASID_MASK, val);

	scale = FIELD_GET(GENMASK(45, 44), val);
	num = FIELD_GET(GENMASK(43, 39), val);
	*range = __TLBI_RANGE_PAGES(num, scale) << shift;

	return base;
}

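/*
 * Convert a PS/IPS field encoding into an output address size in bits,
 * clamping anything larger than 48 bits to 48.
 */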
static inline unsigned int ps_to_output_size(unsigned int ps)
{
	switch (ps) {
	case 0: return 32;
	case 1: return 36;
	case 2: return 40;
	case 3: return 42;
	case 4: return 44;
	case 5:
	default:
		return 48;
	}
}

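/*
 * State for walking a guest's stage-1 page tables in software (see
 * __kvm_translate_va() below): the translation regime being walked,
 * the walk parameters derived from the guest's system registers
 * (s1_walk_info), and either the resulting descriptor and permissions
 * or the fault to report (s1_walk_result).
 */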
enum trans_regime {
	TR_EL10,
	TR_EL20,
	TR_EL2,
};

struct s1_walk_info {
	u64 baddr;
	enum trans_regime regime;
	unsigned int max_oa_bits;
	unsigned int pgshift;
	unsigned int txsz;
	int sl;
	bool as_el0;
	bool hpd;
	bool e0poe;
	bool poe;
	bool pan;
	bool be;
	bool s2;
};

struct s1_walk_result {
	union {
		struct {
			u64 desc;
			u64 pa;
			s8 level;
			u8 APTable;
			bool nG;
			u16 asid;
			bool UXNTable;
			bool PXNTable;
			bool uwxn;
			bool uov;
			bool ur;
			bool uw;
			bool ux;
			bool pwxn;
			bool pov;
			bool pr;
			bool pw;
			bool px;
		};
		struct {
			u8 fst;
			bool ptw;
			bool s2;
		};
	};
	bool failed;
};

int __kvm_translate_va(struct kvm_vcpu *vcpu, struct s1_walk_info *wi,
		       struct s1_walk_result *wr, u64 va);

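/*
 * VNCR_EL2.BADDR is a virtual address in the guest hypervisor's EL2&0
 * translation regime, so KVM keeps a single per-vcpu pseudo-TLB entry
 * describing that mapping. EL2 TLBIs and MMU notifier invalidations
 * must be able to shoot it down, and the backing page is mapped at EL2
 * through a per-CPU fixmap slot (see vncr_fixmap() below).
 */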
/* VNCR management */
int kvm_vcpu_allocate_vncr_tlb(struct kvm_vcpu *vcpu);
int kvm_handle_vncr_abort(struct kvm_vcpu *vcpu);
void kvm_handle_s1e2_tlbi(struct kvm_vcpu *vcpu, u32 inst, u64 val);

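/* Per-CPU fixmap slot used to map the guest's VNCR page on CPU @c */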
#define vncr_fixmap(c)						\
	({							\
		u32 __c = (c);					\
		BUG_ON(__c >= NR_CPUS);				\
		(FIX_VNCR - __c);				\
	})

#endif /* __ARM64_KVM_NESTED_H */