KVM: arm64: gic-v5: Support GICv3 compat

Add support for GICv3 compat mode (FEAT_GCIE_LEGACY) which allows a
GICv5 host to run GICv3-based VMs. This change enables the
VHE/nVHE/hVHE/protected modes, but does not support nested
virtualization.

A lazy-disable approach is taken for compat mode; it is enabled on the
vgic_v3_load path but not disabled on the vgic_v3_put path. A
non-GICv3 VM, i.e., one based on GICv5, is responsible for disabling
compat mode on the corresponding vgic_v5_load path. Currently, GICv5
is not supported, and hence compat mode is not disabled again once it
is enabled, and this function is intentionally omitted from the code.

Co-authored-by: Timothy Hayes <timothy.hayes@arm.com>
Signed-off-by: Timothy Hayes <timothy.hayes@arm.com>
Signed-off-by: Sascha Bischoff <sascha.bischoff@arm.com>
Link: https://lore.kernel.org/r/20250627100847.1022515-5-sascha.bischoff@arm.com
Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
This commit is contained in:
Sascha Bischoff 2025-06-27 10:09:02 +00:00 committed by Oliver Upton
parent b62f4b5dec
commit c017e49ed1
5 changed files with 72 additions and 12 deletions

View file

@ -296,12 +296,19 @@ void __vgic_v3_activate_traps(struct vgic_v3_cpu_if *cpu_if)
}
/*
* Prevent the guest from touching the ICC_SRE_EL1 system
* register. Note that this may not have any effect, as
* ICC_SRE_EL2.Enable being RAO/WI is a valid implementation.
* GICv5 BET0 FEAT_GCIE_LEGACY doesn't include ICC_SRE_EL2. This is due
* to be relaxed in a future spec release, at which point this
* condition can be dropped.
*/
write_gicreg(read_gicreg(ICC_SRE_EL2) & ~ICC_SRE_EL2_ENABLE,
ICC_SRE_EL2);
if (!cpus_have_final_cap(ARM64_HAS_GICV5_CPUIF)) {
/*
* Prevent the guest from touching the ICC_SRE_EL1 system
* register. Note that this may not have any effect, as
* ICC_SRE_EL2.Enable being RAO/WI is a valid implementation.
*/
write_gicreg(read_gicreg(ICC_SRE_EL2) & ~ICC_SRE_EL2_ENABLE,
ICC_SRE_EL2);
}
/*
* If we need to trap system registers, we must write
@ -322,8 +329,14 @@ void __vgic_v3_deactivate_traps(struct vgic_v3_cpu_if *cpu_if)
cpu_if->vgic_vmcr = read_gicreg(ICH_VMCR_EL2);
}
val = read_gicreg(ICC_SRE_EL2);
write_gicreg(val | ICC_SRE_EL2_ENABLE, ICC_SRE_EL2);
/*
* Can be dropped in the future when GICv5 spec is relaxed. See comment
* above.
*/
if (!cpus_have_final_cap(ARM64_HAS_GICV5_CPUIF)) {
val = read_gicreg(ICC_SRE_EL2);
write_gicreg(val | ICC_SRE_EL2_ENABLE, ICC_SRE_EL2);
}
if (!cpu_if->vgic_sre) {
/* Make sure ENABLE is set at EL2 before setting SRE at EL1 */
@ -423,9 +436,19 @@ void __vgic_v3_init_lrs(void)
*/
u64 __vgic_v3_get_gic_config(void)
{
u64 val, sre = read_gicreg(ICC_SRE_EL1);
u64 val, sre;
unsigned long flags = 0;
/*
 * In compat mode, we cannot access ICC_SRE_EL1 at any EL
 * other than EL1 itself; just return
 * ICH_VTR_EL2. ICC_IDR0_EL1 is only implemented on a GICv5
 * system, so we first check if we have GICv5 support.
 */
if (cpus_have_final_cap(ARM64_HAS_GICV5_CPUIF))
return read_gicreg(ICH_VTR_EL2);
sre = read_gicreg(ICC_SRE_EL1);
/*
* To check whether we have a MMIO-based (GICv2 compatible)
* CPU interface, we need to disable the system register
@ -471,6 +494,16 @@ u64 __vgic_v3_get_gic_config(void)
return val;
}
static void __vgic_v3_compat_mode_enable(void)
{
if (!cpus_have_final_cap(ARM64_HAS_GICV5_CPUIF))
return;
sysreg_clear_set_s(SYS_ICH_VCTLR_EL2, 0, ICH_VCTLR_EL2_V3);
/* Wait for V3 to become enabled */
isb();
}
static u64 __vgic_v3_read_vmcr(void)
{
return read_gicreg(ICH_VMCR_EL2);
@ -490,6 +523,8 @@ void __vgic_v3_save_vmcr_aprs(struct vgic_v3_cpu_if *cpu_if)
void __vgic_v3_restore_vmcr_aprs(struct vgic_v3_cpu_if *cpu_if)
{
__vgic_v3_compat_mode_enable();
/*
* If dealing with a GICv2 emulation on GICv3, VMCR_EL2.VFIQen
* is dependent on ICC_SRE_EL1.SRE, and we have to perform the

View file

@ -1811,7 +1811,7 @@ static u64 sanitise_id_aa64pfr0_el1(const struct kvm_vcpu *vcpu, u64 val)
val |= SYS_FIELD_PREP_ENUM(ID_AA64PFR0_EL1, CSV3, IMP);
}
if (kvm_vgic_global_state.type == VGIC_V3) {
if (vgic_is_v3(vcpu->kvm)) {
val &= ~ID_AA64PFR0_EL1_GIC_MASK;
val |= SYS_FIELD_PREP_ENUM(ID_AA64PFR0_EL1, GIC, IMP);
}
@ -1953,6 +1953,14 @@ static int set_id_aa64pfr0_el1(struct kvm_vcpu *vcpu,
(vcpu_has_nv(vcpu) && !FIELD_GET(ID_AA64PFR0_EL1_EL2, user_val)))
return -EINVAL;
/*
* If we are running on a GICv5 host and support FEAT_GCIE_LEGACY, then
* we support GICv3. Fail attempts to do anything but set that to IMP.
*/
if (vgic_is_v3_compat(vcpu->kvm) &&
FIELD_GET(ID_AA64PFR0_EL1_GIC_MASK, user_val) != ID_AA64PFR0_EL1_GIC_IMP)
return -EINVAL;
return set_id_reg(vcpu, rd, user_val);
}

View file

@ -674,10 +674,12 @@ void kvm_vgic_init_cpu_hardware(void)
* We want to make sure the list registers start out clear so that we
* only have to program the used registers.
*/
if (kvm_vgic_global_state.type == VGIC_V2)
if (kvm_vgic_global_state.type == VGIC_V2) {
vgic_v2_init_lrs();
else
} else if (kvm_vgic_global_state.type == VGIC_V3 ||
kvm_vgic_global_state.has_gcie_v3_compat) {
kvm_call_hyp(__vgic_v3_init_lrs);
}
}
/**

View file

@ -389,6 +389,17 @@ void vgic_v3_put_nested(struct kvm_vcpu *vcpu);
void vgic_v3_handle_nested_maint_irq(struct kvm_vcpu *vcpu);
void vgic_v3_nested_update_mi(struct kvm_vcpu *vcpu);
static inline bool vgic_is_v3_compat(struct kvm *kvm)
{
return cpus_have_final_cap(ARM64_HAS_GICV5_CPUIF) &&
kvm_vgic_global_state.has_gcie_v3_compat;
}
static inline bool vgic_is_v3(struct kvm *kvm)
{
return kvm_vgic_global_state.type == VGIC_V3 || vgic_is_v3_compat(kvm);
}
int vgic_its_debug_init(struct kvm_device *dev);
void vgic_its_debug_destroy(struct kvm_device *dev);

View file

@ -38,6 +38,7 @@
enum vgic_type {
VGIC_V2, /* Good ol' GICv2 */
VGIC_V3, /* New fancy GICv3 */
VGIC_V5, /* Newer, fancier GICv5 */
};
/* same for all guests, as depending only on the _host's_ GIC model */
@ -77,9 +78,12 @@ struct vgic_global {
/* Pseudo GICv3 from outer space */
bool no_hw_deactivation;
/* GIC system register CPU interface */
/* GICv3 system register CPU interface */
struct static_key_false gicv3_cpuif;
/* GICv3 compat mode on a GICv5 host */
bool has_gcie_v3_compat;
u32 ich_vtr_el2;
};