mirror of
git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
KVM/arm64 fixes for 6.16, take #2
- Rework of system register accessors for system registers that are
  directly written to memory, so that sanitisation of the in-memory
  value happens at the correct time (after the read, or before the
  write). For convenience, RMW-style accessors are also provided.

- Multiple fixes for the so-called "arch-timer-edge-cases" selftest,
  which was always broken.

-----BEGIN PGP SIGNATURE-----

iQIzBAABCgAdFiEEn9UcU+C1Yxj9lZw9I9DQutE9ekMFAmhCs3IACgkQI9DQutE9
ekMxlBAApd03crgHQy8V7I997D9TA/Ph4PkUOZOg091JAABkOZBCLd3H8hbe7Va6
2XPD7IeTQUEP/8Xwc0+sWF3X4bIqU3PlxZ/TI4IgNDxazz2l+1LTHCrWrP47VXMr
j5czEzWkSX/59LFc0jL3T0VxKhN9fI+aSE9UZCCXc0BGyLIlRNclO4ho87xkgbxM
AuhM0VslXtAZBF9DBrtOQ1EodI5Cc7vH38id/8SCL9f74rKln4UViSuPhRQxgzgy
7T523OERyAINJ8e6UNd0Tg5GFYdj2bMeivnTleaFFxmCH+tAKYtSTV8d6n0fzsOF
1D+6uU93v4ky3DWwCvmEXLzijH6pRrLjMLsC4Sx1kFCPe05Zaui/g65n4REflZm6
0xZ2bnTsZP1/MYrZya/XpXipF0EGITqsOuKpHgEO495TIgmAZKev+GIp3NDooSYk
dZWN0U0ctePV2+WFoxNyN+r9nrg/xSujnyU0k3kMmRcfRHcATzZG6jYOj8CrLdNO
jWZ56XhghiJj01B1IjVskuSyTwcoRMH4h//C7oAAFQoOuZtEgduGeZUQxz7EoBxX
/I4Cg4+9P/m310gjdEVMGPdvrFQgweJc8K3+mT3WGRA8AT4Nhi6pxZxnzWeABuUD
4HpVruNxygMwODilk3YruJ/yat7FqTBTdRZt4w+cwpBTi8VPPqs=
=OMHL
-----END PGP SIGNATURE-----

Merge tag 'kvmarm-fixes-6.16-2' of git://git.kernel.org/pub/scm/linux/kernel/git/kvmarm/kvmarm into HEAD
commit ce360c2bfd
16 changed files with 151 additions and 116 deletions
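As a quick orientation before the diff: the accessor rework replaces direct writes through __vcpu_sys_reg() (which used to yield an lvalue) with purpose-built accessors, so that kvm_vcpu_apply_reg_masks() sanitisation is applied at the point of the write. A minimal sketch of the conversion pattern, using register names that appear in the hunks below (illustrative only, not a compilable unit):

	/* Old pattern: writes went straight to the in-memory copy. */
	__vcpu_sys_reg(vcpu, SPSR_EL1) = val;
	__vcpu_sys_reg(vcpu, OSLSR_EL1) |= OSLSR_EL1_OSLK;

	/* New pattern: dedicated accessors sanitise on write; reads keep using
	 * __vcpu_sys_reg(), which is now a pure rvalue. */
	__vcpu_assign_sys_reg(vcpu, SPSR_EL1, val);
	__vcpu_rmw_sys_reg(vcpu, OSLSR_EL1, |=, OSLSR_EL1_OSLK);
	val = __vcpu_sys_reg(vcpu, PMOVSSET_EL0);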
@@ -1107,14 +1107,36 @@ static inline u64 *___ctxt_sys_reg(const struct kvm_cpu_context *ctxt, int r)
 #define ctxt_sys_reg(c,r) (*__ctxt_sys_reg(c,r))
 
 u64 kvm_vcpu_apply_reg_masks(const struct kvm_vcpu *, enum vcpu_sysreg, u64);
-#define __vcpu_sys_reg(v,r) \
-	(*({ \
+
+#define __vcpu_assign_sys_reg(v, r, val) \
+	do { \
+		const struct kvm_cpu_context *ctxt = &(v)->arch.ctxt; \
+		u64 __v = (val); \
+		if (vcpu_has_nv((v)) && (r) >= __SANITISED_REG_START__) \
+			__v = kvm_vcpu_apply_reg_masks((v), (r), __v); \
+		\
+		ctxt_sys_reg(ctxt, (r)) = __v; \
+	} while (0)
+
+#define __vcpu_rmw_sys_reg(v, r, op, val) \
+	do { \
+		const struct kvm_cpu_context *ctxt = &(v)->arch.ctxt; \
+		u64 __v = ctxt_sys_reg(ctxt, (r)); \
+		__v op (val); \
+		if (vcpu_has_nv((v)) && (r) >= __SANITISED_REG_START__) \
+			__v = kvm_vcpu_apply_reg_masks((v), (r), __v); \
+		\
+		ctxt_sys_reg(ctxt, (r)) = __v; \
+	} while (0)
+
+#define __vcpu_sys_reg(v,r) \
+	({ \
 		const struct kvm_cpu_context *ctxt = &(v)->arch.ctxt; \
-		u64 *__r = __ctxt_sys_reg(ctxt, (r)); \
+		u64 __v = ctxt_sys_reg(ctxt, (r)); \
 		if (vcpu_has_nv((v)) && (r) >= __SANITISED_REG_START__) \
-			*__r = kvm_vcpu_apply_reg_masks((v), (r), *__r);\
-		__r; \
-	}))
+			__v = kvm_vcpu_apply_reg_masks((v), (r), __v); \
+		__v; \
+	})
 
 u64 vcpu_read_sys_reg(const struct kvm_vcpu *vcpu, int reg);
 void vcpu_write_sys_reg(struct kvm_vcpu *vcpu, u64 val, int reg);
@@ -108,16 +108,16 @@ static void timer_set_ctl(struct arch_timer_context *ctxt, u32 ctl)
 
 	switch(arch_timer_ctx_index(ctxt)) {
 	case TIMER_VTIMER:
-		__vcpu_sys_reg(vcpu, CNTV_CTL_EL0) = ctl;
+		__vcpu_assign_sys_reg(vcpu, CNTV_CTL_EL0, ctl);
 		break;
 	case TIMER_PTIMER:
-		__vcpu_sys_reg(vcpu, CNTP_CTL_EL0) = ctl;
+		__vcpu_assign_sys_reg(vcpu, CNTP_CTL_EL0, ctl);
 		break;
 	case TIMER_HVTIMER:
-		__vcpu_sys_reg(vcpu, CNTHV_CTL_EL2) = ctl;
+		__vcpu_assign_sys_reg(vcpu, CNTHV_CTL_EL2, ctl);
 		break;
 	case TIMER_HPTIMER:
-		__vcpu_sys_reg(vcpu, CNTHP_CTL_EL2) = ctl;
+		__vcpu_assign_sys_reg(vcpu, CNTHP_CTL_EL2, ctl);
 		break;
 	default:
 		WARN_ON(1);
@@ -130,16 +130,16 @@ static void timer_set_cval(struct arch_timer_context *ctxt, u64 cval)
 
 	switch(arch_timer_ctx_index(ctxt)) {
 	case TIMER_VTIMER:
-		__vcpu_sys_reg(vcpu, CNTV_CVAL_EL0) = cval;
+		__vcpu_assign_sys_reg(vcpu, CNTV_CVAL_EL0, cval);
 		break;
 	case TIMER_PTIMER:
-		__vcpu_sys_reg(vcpu, CNTP_CVAL_EL0) = cval;
+		__vcpu_assign_sys_reg(vcpu, CNTP_CVAL_EL0, cval);
 		break;
 	case TIMER_HVTIMER:
-		__vcpu_sys_reg(vcpu, CNTHV_CVAL_EL2) = cval;
+		__vcpu_assign_sys_reg(vcpu, CNTHV_CVAL_EL2, cval);
 		break;
 	case TIMER_HPTIMER:
-		__vcpu_sys_reg(vcpu, CNTHP_CVAL_EL2) = cval;
+		__vcpu_assign_sys_reg(vcpu, CNTHP_CVAL_EL2, cval);
 		break;
 	default:
 		WARN_ON(1);
@@ -1036,7 +1036,7 @@ void kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu)
 	if (vcpu_has_nv(vcpu)) {
 		struct arch_timer_offset *offs = &vcpu_vtimer(vcpu)->offset;
 
-		offs->vcpu_offset = &__vcpu_sys_reg(vcpu, CNTVOFF_EL2);
+		offs->vcpu_offset = __ctxt_sys_reg(&vcpu->arch.ctxt, CNTVOFF_EL2);
 		offs->vm_offset = &vcpu->kvm->arch.timer_data.poffset;
 	}
 
@@ -216,9 +216,9 @@ void kvm_debug_set_guest_ownership(struct kvm_vcpu *vcpu)
 void kvm_debug_handle_oslar(struct kvm_vcpu *vcpu, u64 val)
 {
 	if (val & OSLAR_EL1_OSLK)
-		__vcpu_sys_reg(vcpu, OSLSR_EL1) |= OSLSR_EL1_OSLK;
+		__vcpu_rmw_sys_reg(vcpu, OSLSR_EL1, |=, OSLSR_EL1_OSLK);
 	else
-		__vcpu_sys_reg(vcpu, OSLSR_EL1) &= ~OSLSR_EL1_OSLK;
+		__vcpu_rmw_sys_reg(vcpu, OSLSR_EL1, &=, ~OSLSR_EL1_OSLK);
 
 	preempt_disable();
 	kvm_arch_vcpu_put(vcpu);
@@ -103,8 +103,8 @@ void kvm_arch_vcpu_ctxsync_fp(struct kvm_vcpu *vcpu)
 	fp_state.sve_state = vcpu->arch.sve_state;
 	fp_state.sve_vl = vcpu->arch.sve_max_vl;
 	fp_state.sme_state = NULL;
-	fp_state.svcr = &__vcpu_sys_reg(vcpu, SVCR);
-	fp_state.fpmr = &__vcpu_sys_reg(vcpu, FPMR);
+	fp_state.svcr = __ctxt_sys_reg(&vcpu->arch.ctxt, SVCR);
+	fp_state.fpmr = __ctxt_sys_reg(&vcpu->arch.ctxt, FPMR);
 	fp_state.fp_type = &vcpu->arch.fp_type;
 
 	if (vcpu_has_sve(vcpu))
@@ -37,7 +37,7 @@ static inline void __vcpu_write_sys_reg(struct kvm_vcpu *vcpu, u64 val, int reg)
 	if (unlikely(vcpu_has_nv(vcpu)))
 		vcpu_write_sys_reg(vcpu, val, reg);
 	else if (!__vcpu_write_sys_reg_to_cpu(val, reg))
-		__vcpu_sys_reg(vcpu, reg) = val;
+		__vcpu_assign_sys_reg(vcpu, reg, val);
 }
 
 static void __vcpu_write_spsr(struct kvm_vcpu *vcpu, unsigned long target_mode,
@@ -51,7 +51,7 @@ static void __vcpu_write_spsr(struct kvm_vcpu *vcpu, unsigned long target_mode,
 	} else if (has_vhe()) {
 		write_sysreg_el1(val, SYS_SPSR);
 	} else {
-		__vcpu_sys_reg(vcpu, SPSR_EL1) = val;
+		__vcpu_assign_sys_reg(vcpu, SPSR_EL1, val);
 	}
 }
 
@@ -45,7 +45,7 @@ static inline void __fpsimd_save_fpexc32(struct kvm_vcpu *vcpu)
 	if (!vcpu_el1_is_32bit(vcpu))
 		return;
 
-	__vcpu_sys_reg(vcpu, FPEXC32_EL2) = read_sysreg(fpexc32_el2);
+	__vcpu_assign_sys_reg(vcpu, FPEXC32_EL2, read_sysreg(fpexc32_el2));
 }
 
 static inline void __activate_traps_fpsimd32(struct kvm_vcpu *vcpu)
@@ -456,7 +456,7 @@ static inline void fpsimd_lazy_switch_to_host(struct kvm_vcpu *vcpu)
 	 */
 	if (vcpu_has_sve(vcpu)) {
 		zcr_el1 = read_sysreg_el1(SYS_ZCR);
-		__vcpu_sys_reg(vcpu, vcpu_sve_zcr_elx(vcpu)) = zcr_el1;
+		__vcpu_assign_sys_reg(vcpu, vcpu_sve_zcr_elx(vcpu), zcr_el1);
 
 		/*
 		 * The guest's state is always saved using the guest's max VL.
@@ -307,11 +307,11 @@ static inline void __sysreg32_save_state(struct kvm_vcpu *vcpu)
 	vcpu->arch.ctxt.spsr_irq = read_sysreg(spsr_irq);
 	vcpu->arch.ctxt.spsr_fiq = read_sysreg(spsr_fiq);
 
-	__vcpu_sys_reg(vcpu, DACR32_EL2) = read_sysreg(dacr32_el2);
-	__vcpu_sys_reg(vcpu, IFSR32_EL2) = read_sysreg(ifsr32_el2);
+	__vcpu_assign_sys_reg(vcpu, DACR32_EL2, read_sysreg(dacr32_el2));
+	__vcpu_assign_sys_reg(vcpu, IFSR32_EL2, read_sysreg(ifsr32_el2));
 
 	if (has_vhe() || kvm_debug_regs_in_use(vcpu))
-		__vcpu_sys_reg(vcpu, DBGVCR32_EL2) = read_sysreg(dbgvcr32_el2);
+		__vcpu_assign_sys_reg(vcpu, DBGVCR32_EL2, read_sysreg(dbgvcr32_el2));
 }
 
 static inline void __sysreg32_restore_state(struct kvm_vcpu *vcpu)
@@ -26,7 +26,7 @@ void __kvm_hyp_host_forward_smc(struct kvm_cpu_context *host_ctxt);
 
 static void __hyp_sve_save_guest(struct kvm_vcpu *vcpu)
 {
-	__vcpu_sys_reg(vcpu, ZCR_EL1) = read_sysreg_el1(SYS_ZCR);
+	__vcpu_assign_sys_reg(vcpu, ZCR_EL1, read_sysreg_el1(SYS_ZCR));
 	/*
 	 * On saving/restoring guest sve state, always use the maximum VL for
 	 * the guest. The layout of the data when saving the sve state depends
@@ -79,7 +79,7 @@ static void fpsimd_sve_sync(struct kvm_vcpu *vcpu)
 
 	has_fpmr = kvm_has_fpmr(kern_hyp_va(vcpu->kvm));
 	if (has_fpmr)
-		__vcpu_sys_reg(vcpu, FPMR) = read_sysreg_s(SYS_FPMR);
+		__vcpu_assign_sys_reg(vcpu, FPMR, read_sysreg_s(SYS_FPMR));
 
 	if (system_supports_sve())
 		__hyp_sve_restore_host();
@@ -223,9 +223,9 @@ static void __deactivate_traps(struct kvm_vcpu *vcpu)
 		 */
 		val = read_sysreg_el0(SYS_CNTP_CVAL);
 		if (map.direct_ptimer == vcpu_ptimer(vcpu))
-			__vcpu_sys_reg(vcpu, CNTP_CVAL_EL0) = val;
+			__vcpu_assign_sys_reg(vcpu, CNTP_CVAL_EL0, val);
 		if (map.direct_ptimer == vcpu_hptimer(vcpu))
-			__vcpu_sys_reg(vcpu, CNTHP_CVAL_EL2) = val;
+			__vcpu_assign_sys_reg(vcpu, CNTHP_CVAL_EL2, val);
 
 		offset = read_sysreg_s(SYS_CNTPOFF_EL2);
 
@@ -18,17 +18,17 @@
 static void __sysreg_save_vel2_state(struct kvm_vcpu *vcpu)
 {
 	/* These registers are common with EL1 */
-	__vcpu_sys_reg(vcpu, PAR_EL1) = read_sysreg(par_el1);
-	__vcpu_sys_reg(vcpu, TPIDR_EL1) = read_sysreg(tpidr_el1);
+	__vcpu_assign_sys_reg(vcpu, PAR_EL1, read_sysreg(par_el1));
+	__vcpu_assign_sys_reg(vcpu, TPIDR_EL1, read_sysreg(tpidr_el1));
 
-	__vcpu_sys_reg(vcpu, ESR_EL2) = read_sysreg_el1(SYS_ESR);
-	__vcpu_sys_reg(vcpu, AFSR0_EL2) = read_sysreg_el1(SYS_AFSR0);
-	__vcpu_sys_reg(vcpu, AFSR1_EL2) = read_sysreg_el1(SYS_AFSR1);
-	__vcpu_sys_reg(vcpu, FAR_EL2) = read_sysreg_el1(SYS_FAR);
-	__vcpu_sys_reg(vcpu, MAIR_EL2) = read_sysreg_el1(SYS_MAIR);
-	__vcpu_sys_reg(vcpu, VBAR_EL2) = read_sysreg_el1(SYS_VBAR);
-	__vcpu_sys_reg(vcpu, CONTEXTIDR_EL2) = read_sysreg_el1(SYS_CONTEXTIDR);
-	__vcpu_sys_reg(vcpu, AMAIR_EL2) = read_sysreg_el1(SYS_AMAIR);
+	__vcpu_assign_sys_reg(vcpu, ESR_EL2, read_sysreg_el1(SYS_ESR));
+	__vcpu_assign_sys_reg(vcpu, AFSR0_EL2, read_sysreg_el1(SYS_AFSR0));
+	__vcpu_assign_sys_reg(vcpu, AFSR1_EL2, read_sysreg_el1(SYS_AFSR1));
+	__vcpu_assign_sys_reg(vcpu, FAR_EL2, read_sysreg_el1(SYS_FAR));
+	__vcpu_assign_sys_reg(vcpu, MAIR_EL2, read_sysreg_el1(SYS_MAIR));
+	__vcpu_assign_sys_reg(vcpu, VBAR_EL2, read_sysreg_el1(SYS_VBAR));
+	__vcpu_assign_sys_reg(vcpu, CONTEXTIDR_EL2, read_sysreg_el1(SYS_CONTEXTIDR));
+	__vcpu_assign_sys_reg(vcpu, AMAIR_EL2, read_sysreg_el1(SYS_AMAIR));
 
 	/*
 	 * In VHE mode those registers are compatible between EL1 and EL2,
@@ -46,21 +46,21 @@ static void __sysreg_save_vel2_state(struct kvm_vcpu *vcpu)
 	 * are always trapped, ensuring that the in-memory
 	 * copy is always up-to-date. A small blessing...
 	 */
-	__vcpu_sys_reg(vcpu, SCTLR_EL2) = read_sysreg_el1(SYS_SCTLR);
-	__vcpu_sys_reg(vcpu, TTBR0_EL2) = read_sysreg_el1(SYS_TTBR0);
-	__vcpu_sys_reg(vcpu, TTBR1_EL2) = read_sysreg_el1(SYS_TTBR1);
-	__vcpu_sys_reg(vcpu, TCR_EL2) = read_sysreg_el1(SYS_TCR);
+	__vcpu_assign_sys_reg(vcpu, SCTLR_EL2, read_sysreg_el1(SYS_SCTLR));
+	__vcpu_assign_sys_reg(vcpu, TTBR0_EL2, read_sysreg_el1(SYS_TTBR0));
+	__vcpu_assign_sys_reg(vcpu, TTBR1_EL2, read_sysreg_el1(SYS_TTBR1));
+	__vcpu_assign_sys_reg(vcpu, TCR_EL2, read_sysreg_el1(SYS_TCR));
 
 	if (ctxt_has_tcrx(&vcpu->arch.ctxt)) {
-		__vcpu_sys_reg(vcpu, TCR2_EL2) = read_sysreg_el1(SYS_TCR2);
+		__vcpu_assign_sys_reg(vcpu, TCR2_EL2, read_sysreg_el1(SYS_TCR2));
 
 		if (ctxt_has_s1pie(&vcpu->arch.ctxt)) {
-			__vcpu_sys_reg(vcpu, PIRE0_EL2) = read_sysreg_el1(SYS_PIRE0);
-			__vcpu_sys_reg(vcpu, PIR_EL2) = read_sysreg_el1(SYS_PIR);
+			__vcpu_assign_sys_reg(vcpu, PIRE0_EL2, read_sysreg_el1(SYS_PIRE0));
+			__vcpu_assign_sys_reg(vcpu, PIR_EL2, read_sysreg_el1(SYS_PIR));
 		}
 
 		if (ctxt_has_s1poe(&vcpu->arch.ctxt))
-			__vcpu_sys_reg(vcpu, POR_EL2) = read_sysreg_el1(SYS_POR);
+			__vcpu_assign_sys_reg(vcpu, POR_EL2, read_sysreg_el1(SYS_POR));
 	}
 
 	/*
@@ -70,13 +70,13 @@ static void __sysreg_save_vel2_state(struct kvm_vcpu *vcpu)
 		 */
 		val = read_sysreg_el1(SYS_CNTKCTL);
 		val &= CNTKCTL_VALID_BITS;
-		__vcpu_sys_reg(vcpu, CNTHCTL_EL2) &= ~CNTKCTL_VALID_BITS;
-		__vcpu_sys_reg(vcpu, CNTHCTL_EL2) |= val;
+		__vcpu_rmw_sys_reg(vcpu, CNTHCTL_EL2, &=, ~CNTKCTL_VALID_BITS);
+		__vcpu_rmw_sys_reg(vcpu, CNTHCTL_EL2, |=, val);
 	}
 
-	__vcpu_sys_reg(vcpu, SP_EL2) = read_sysreg(sp_el1);
-	__vcpu_sys_reg(vcpu, ELR_EL2) = read_sysreg_el1(SYS_ELR);
-	__vcpu_sys_reg(vcpu, SPSR_EL2) = read_sysreg_el1(SYS_SPSR);
+	__vcpu_assign_sys_reg(vcpu, SP_EL2, read_sysreg(sp_el1));
+	__vcpu_assign_sys_reg(vcpu, ELR_EL2, read_sysreg_el1(SYS_ELR));
+	__vcpu_assign_sys_reg(vcpu, SPSR_EL2, read_sysreg_el1(SYS_SPSR));
 }
 
 static void __sysreg_restore_vel2_state(struct kvm_vcpu *vcpu)
@@ -1757,7 +1757,7 @@ int kvm_init_nv_sysregs(struct kvm_vcpu *vcpu)
 
 out:
 	for (enum vcpu_sysreg sr = __SANITISED_REG_START__; sr < NR_SYS_REGS; sr++)
-		(void)__vcpu_sys_reg(vcpu, sr);
+		__vcpu_rmw_sys_reg(vcpu, sr, |=, 0);
 
 	return 0;
 }
@@ -178,7 +178,7 @@ static void kvm_pmu_set_pmc_value(struct kvm_pmc *pmc, u64 val, bool force)
 			val |= lower_32_bits(val);
 	}
 
-	__vcpu_sys_reg(vcpu, reg) = val;
+	__vcpu_assign_sys_reg(vcpu, reg, val);
 
 	/* Recreate the perf event to reflect the updated sample_period */
 	kvm_pmu_create_perf_event(pmc);
@@ -204,7 +204,7 @@ void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val)
 void kvm_pmu_set_counter_value_user(struct kvm_vcpu *vcpu, u64 select_idx, u64 val)
 {
 	kvm_pmu_release_perf_event(kvm_vcpu_idx_to_pmc(vcpu, select_idx));
-	__vcpu_sys_reg(vcpu, counter_index_to_reg(select_idx)) = val;
+	__vcpu_assign_sys_reg(vcpu, counter_index_to_reg(select_idx), val);
 	kvm_make_request(KVM_REQ_RELOAD_PMU, vcpu);
 }
 
@@ -239,7 +239,7 @@ static void kvm_pmu_stop_counter(struct kvm_pmc *pmc)
 
 	reg = counter_index_to_reg(pmc->idx);
 
-	__vcpu_sys_reg(vcpu, reg) = val;
+	__vcpu_assign_sys_reg(vcpu, reg, val);
 
 	kvm_pmu_release_perf_event(pmc);
 }
@@ -503,14 +503,14 @@ static void kvm_pmu_counter_increment(struct kvm_vcpu *vcpu,
 		reg = __vcpu_sys_reg(vcpu, counter_index_to_reg(i)) + 1;
 		if (!kvm_pmc_is_64bit(pmc))
 			reg = lower_32_bits(reg);
-		__vcpu_sys_reg(vcpu, counter_index_to_reg(i)) = reg;
+		__vcpu_assign_sys_reg(vcpu, counter_index_to_reg(i), reg);
 
 		/* No overflow? move on */
 		if (kvm_pmc_has_64bit_overflow(pmc) ? reg : lower_32_bits(reg))
 			continue;
 
 		/* Mark overflow */
-		__vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= BIT(i);
+		__vcpu_rmw_sys_reg(vcpu, PMOVSSET_EL0, |=, BIT(i));
 
 		if (kvm_pmu_counter_can_chain(pmc))
 			kvm_pmu_counter_increment(vcpu, BIT(i + 1),
@@ -556,7 +556,7 @@ static void kvm_pmu_perf_overflow(struct perf_event *perf_event,
 		perf_event->attr.sample_period = period;
 		perf_event->hw.sample_period = period;
 
-		__vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= BIT(idx);
+		__vcpu_rmw_sys_reg(vcpu, PMOVSSET_EL0, |=, BIT(idx));
 
 		if (kvm_pmu_counter_can_chain(pmc))
 			kvm_pmu_counter_increment(vcpu, BIT(idx + 1),
@@ -602,7 +602,7 @@ void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val)
 		kvm_make_request(KVM_REQ_RELOAD_PMU, vcpu);
 
 	/* The reset bits don't indicate any state, and shouldn't be saved. */
-	__vcpu_sys_reg(vcpu, PMCR_EL0) = val & ~(ARMV8_PMU_PMCR_C | ARMV8_PMU_PMCR_P);
+	__vcpu_assign_sys_reg(vcpu, PMCR_EL0, (val & ~(ARMV8_PMU_PMCR_C | ARMV8_PMU_PMCR_P)));
 
 	if (val & ARMV8_PMU_PMCR_C)
 		kvm_pmu_set_counter_value(vcpu, ARMV8_PMU_CYCLE_IDX, 0);
@@ -779,7 +779,7 @@ void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data,
 	u64 reg;
 
 	reg = counter_index_to_evtreg(pmc->idx);
-	__vcpu_sys_reg(vcpu, reg) = data & kvm_pmu_evtyper_mask(vcpu->kvm);
+	__vcpu_assign_sys_reg(vcpu, reg, (data & kvm_pmu_evtyper_mask(vcpu->kvm)));
 
 	kvm_pmu_create_perf_event(pmc);
 }
@@ -914,9 +914,9 @@ void kvm_vcpu_reload_pmu(struct kvm_vcpu *vcpu)
 {
 	u64 mask = kvm_pmu_implemented_counter_mask(vcpu);
 
-	__vcpu_sys_reg(vcpu, PMOVSSET_EL0) &= mask;
-	__vcpu_sys_reg(vcpu, PMINTENSET_EL1) &= mask;
-	__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) &= mask;
+	__vcpu_rmw_sys_reg(vcpu, PMOVSSET_EL0, &=, mask);
+	__vcpu_rmw_sys_reg(vcpu, PMINTENSET_EL1, &=, mask);
+	__vcpu_rmw_sys_reg(vcpu, PMCNTENSET_EL0, &=, mask);
 
 	kvm_pmu_reprogram_counter_mask(vcpu, mask);
 }
@@ -1038,7 +1038,7 @@ static void kvm_arm_set_nr_counters(struct kvm *kvm, unsigned int nr)
 			u64 val = __vcpu_sys_reg(vcpu, MDCR_EL2);
 			val &= ~MDCR_EL2_HPMN;
 			val |= FIELD_PREP(MDCR_EL2_HPMN, kvm->arch.nr_pmu_counters);
-			__vcpu_sys_reg(vcpu, MDCR_EL2) = val;
+			__vcpu_assign_sys_reg(vcpu, MDCR_EL2, val);
 		}
 	}
 }
@@ -228,7 +228,7 @@ void vcpu_write_sys_reg(struct kvm_vcpu *vcpu, u64 val, int reg)
 	 * to reverse-translate virtual EL2 system registers for a
 	 * non-VHE guest hypervisor.
 	 */
-	__vcpu_sys_reg(vcpu, reg) = val;
+	__vcpu_assign_sys_reg(vcpu, reg, val);
 
 	switch (reg) {
 	case CNTHCTL_EL2:
@@ -263,7 +263,7 @@ void vcpu_write_sys_reg(struct kvm_vcpu *vcpu, u64 val, int reg)
 		return;
 
 memory_write:
-	__vcpu_sys_reg(vcpu, reg) = val;
+	__vcpu_assign_sys_reg(vcpu, reg, val);
 }
 
 /* CSSELR values; used to index KVM_REG_ARM_DEMUX_ID_CCSIDR */
@@ -605,7 +605,7 @@ static int set_oslsr_el1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
 	if ((val ^ rd->val) & ~OSLSR_EL1_OSLK)
 		return -EINVAL;
 
-	__vcpu_sys_reg(vcpu, rd->reg) = val;
+	__vcpu_assign_sys_reg(vcpu, rd->reg, val);
 	return 0;
 }
 
@@ -791,7 +791,7 @@ static u64 reset_pmu_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
 		mask |= GENMASK(n - 1, 0);
 
 	reset_unknown(vcpu, r);
-	__vcpu_sys_reg(vcpu, r->reg) &= mask;
+	__vcpu_rmw_sys_reg(vcpu, r->reg, &=, mask);
 
 	return __vcpu_sys_reg(vcpu, r->reg);
 }
@@ -799,7 +799,7 @@ static u64 reset_pmu_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
 static u64 reset_pmevcntr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
 {
 	reset_unknown(vcpu, r);
-	__vcpu_sys_reg(vcpu, r->reg) &= GENMASK(31, 0);
+	__vcpu_rmw_sys_reg(vcpu, r->reg, &=, GENMASK(31, 0));
 
 	return __vcpu_sys_reg(vcpu, r->reg);
 }
@@ -811,7 +811,7 @@ static u64 reset_pmevtyper(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
 		return 0;
 
 	reset_unknown(vcpu, r);
-	__vcpu_sys_reg(vcpu, r->reg) &= kvm_pmu_evtyper_mask(vcpu->kvm);
+	__vcpu_rmw_sys_reg(vcpu, r->reg, &=, kvm_pmu_evtyper_mask(vcpu->kvm));
 
 	return __vcpu_sys_reg(vcpu, r->reg);
 }
@@ -819,7 +819,7 @@ static u64 reset_pmevtyper(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
 static u64 reset_pmselr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
 {
 	reset_unknown(vcpu, r);
-	__vcpu_sys_reg(vcpu, r->reg) &= PMSELR_EL0_SEL_MASK;
+	__vcpu_rmw_sys_reg(vcpu, r->reg, &=, PMSELR_EL0_SEL_MASK);
 
 	return __vcpu_sys_reg(vcpu, r->reg);
 }
@@ -835,7 +835,7 @@ static u64 reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
 	 * The value of PMCR.N field is included when the
 	 * vCPU register is read via kvm_vcpu_read_pmcr().
 	 */
-	__vcpu_sys_reg(vcpu, r->reg) = pmcr;
+	__vcpu_assign_sys_reg(vcpu, r->reg, pmcr);
 
 	return __vcpu_sys_reg(vcpu, r->reg);
 }
@@ -907,7 +907,7 @@ static bool access_pmselr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 		return false;
 
 	if (p->is_write)
-		__vcpu_sys_reg(vcpu, PMSELR_EL0) = p->regval;
+		__vcpu_assign_sys_reg(vcpu, PMSELR_EL0, p->regval);
 	else
 		/* return PMSELR.SEL field */
 		p->regval = __vcpu_sys_reg(vcpu, PMSELR_EL0)
@@ -1076,7 +1076,7 @@ static int set_pmreg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r, u64 va
 {
 	u64 mask = kvm_pmu_accessible_counter_mask(vcpu);
 
-	__vcpu_sys_reg(vcpu, r->reg) = val & mask;
+	__vcpu_assign_sys_reg(vcpu, r->reg, val & mask);
 	kvm_make_request(KVM_REQ_RELOAD_PMU, vcpu);
 
 	return 0;
@@ -1103,10 +1103,10 @@ static bool access_pmcnten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 		val = p->regval & mask;
 		if (r->Op2 & 0x1)
 			/* accessing PMCNTENSET_EL0 */
-			__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) |= val;
+			__vcpu_rmw_sys_reg(vcpu, PMCNTENSET_EL0, |=, val);
 		else
 			/* accessing PMCNTENCLR_EL0 */
-			__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) &= ~val;
+			__vcpu_rmw_sys_reg(vcpu, PMCNTENSET_EL0, &=, ~val);
 
 		kvm_pmu_reprogram_counter_mask(vcpu, val);
 	} else {
@@ -1129,10 +1129,10 @@ static bool access_pminten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 
 		if (r->Op2 & 0x1)
 			/* accessing PMINTENSET_EL1 */
-			__vcpu_sys_reg(vcpu, PMINTENSET_EL1) |= val;
+			__vcpu_rmw_sys_reg(vcpu, PMINTENSET_EL1, |=, val);
 		else
 			/* accessing PMINTENCLR_EL1 */
-			__vcpu_sys_reg(vcpu, PMINTENSET_EL1) &= ~val;
+			__vcpu_rmw_sys_reg(vcpu, PMINTENSET_EL1, &=, ~val);
 	} else {
 		p->regval = __vcpu_sys_reg(vcpu, PMINTENSET_EL1);
 	}
@@ -1151,10 +1151,10 @@ static bool access_pmovs(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 	if (p->is_write) {
 		if (r->CRm & 0x2)
 			/* accessing PMOVSSET_EL0 */
-			__vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= (p->regval & mask);
+			__vcpu_rmw_sys_reg(vcpu, PMOVSSET_EL0, |=, (p->regval & mask));
 		else
 			/* accessing PMOVSCLR_EL0 */
-			__vcpu_sys_reg(vcpu, PMOVSSET_EL0) &= ~(p->regval & mask);
+			__vcpu_rmw_sys_reg(vcpu, PMOVSSET_EL0, &=, ~(p->regval & mask));
 	} else {
 		p->regval = __vcpu_sys_reg(vcpu, PMOVSSET_EL0);
 	}
@@ -1185,8 +1185,8 @@ static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 		if (!vcpu_mode_priv(vcpu))
 			return undef_access(vcpu, p, r);
 
-		__vcpu_sys_reg(vcpu, PMUSERENR_EL0) =
-			p->regval & ARMV8_PMU_USERENR_MASK;
+		__vcpu_assign_sys_reg(vcpu, PMUSERENR_EL0,
+				      (p->regval & ARMV8_PMU_USERENR_MASK));
 	} else {
 		p->regval = __vcpu_sys_reg(vcpu, PMUSERENR_EL0)
 			    & ARMV8_PMU_USERENR_MASK;
@@ -1237,7 +1237,7 @@ static int set_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
 	if (!kvm_supports_32bit_el0())
 		val |= ARMV8_PMU_PMCR_LC;
 
-	__vcpu_sys_reg(vcpu, r->reg) = val;
+	__vcpu_assign_sys_reg(vcpu, r->reg, val);
 	kvm_make_request(KVM_REQ_RELOAD_PMU, vcpu);
 
 	return 0;
@@ -2213,7 +2213,7 @@ static u64 reset_clidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
 	if (kvm_has_mte(vcpu->kvm))
 		clidr |= 2ULL << CLIDR_TTYPE_SHIFT(loc);
 
-	__vcpu_sys_reg(vcpu, r->reg) = clidr;
+	__vcpu_assign_sys_reg(vcpu, r->reg, clidr);
 
 	return __vcpu_sys_reg(vcpu, r->reg);
 }
@@ -2227,7 +2227,7 @@ static int set_clidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
 	if ((val & CLIDR_EL1_RES0) || (!(ctr_el0 & CTR_EL0_IDC) && idc))
 		return -EINVAL;
 
-	__vcpu_sys_reg(vcpu, rd->reg) = val;
+	__vcpu_assign_sys_reg(vcpu, rd->reg, val);
 
 	return 0;
 }
@@ -2404,7 +2404,7 @@ static bool access_sp_el1(struct kvm_vcpu *vcpu,
 			  const struct sys_reg_desc *r)
 {
 	if (p->is_write)
-		__vcpu_sys_reg(vcpu, SP_EL1) = p->regval;
+		__vcpu_assign_sys_reg(vcpu, SP_EL1, p->regval);
 	else
 		p->regval = __vcpu_sys_reg(vcpu, SP_EL1);
 
@@ -2428,7 +2428,7 @@ static bool access_spsr(struct kvm_vcpu *vcpu,
 			const struct sys_reg_desc *r)
 {
 	if (p->is_write)
-		__vcpu_sys_reg(vcpu, SPSR_EL1) = p->regval;
+		__vcpu_assign_sys_reg(vcpu, SPSR_EL1, p->regval);
 	else
 		p->regval = __vcpu_sys_reg(vcpu, SPSR_EL1);
 
@@ -2440,7 +2440,7 @@ static bool access_cntkctl_el12(struct kvm_vcpu *vcpu,
 				const struct sys_reg_desc *r)
 {
 	if (p->is_write)
-		__vcpu_sys_reg(vcpu, CNTKCTL_EL1) = p->regval;
+		__vcpu_assign_sys_reg(vcpu, CNTKCTL_EL1, p->regval);
 	else
 		p->regval = __vcpu_sys_reg(vcpu, CNTKCTL_EL1);
 
@@ -2454,7 +2454,9 @@ static u64 reset_hcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
 	if (!cpus_have_final_cap(ARM64_HAS_HCR_NV1))
 		val |= HCR_E2H;
 
-	return __vcpu_sys_reg(vcpu, r->reg) = val;
+	__vcpu_assign_sys_reg(vcpu, r->reg, val);
+
+	return __vcpu_sys_reg(vcpu, r->reg);
 }
 
 static unsigned int __el2_visibility(const struct kvm_vcpu *vcpu,
@@ -2625,7 +2627,7 @@ static bool access_mdcr(struct kvm_vcpu *vcpu,
 		u64_replace_bits(val, hpmn, MDCR_EL2_HPMN);
 	}
 
-	__vcpu_sys_reg(vcpu, MDCR_EL2) = val;
+	__vcpu_assign_sys_reg(vcpu, MDCR_EL2, val);
 
 	/*
 	 * Request a reload of the PMU to enable/disable the counters
@@ -2754,7 +2756,7 @@ static int set_imp_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
 
 static u64 reset_mdcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
 {
-	__vcpu_sys_reg(vcpu, r->reg) = vcpu->kvm->arch.nr_pmu_counters;
+	__vcpu_assign_sys_reg(vcpu, r->reg, vcpu->kvm->arch.nr_pmu_counters);
 	return vcpu->kvm->arch.nr_pmu_counters;
 }
 
@@ -4790,7 +4792,7 @@ void kvm_reset_sys_regs(struct kvm_vcpu *vcpu)
 			r->reset(vcpu, r);
 
 		if (r->reg >= __SANITISED_REG_START__ && r->reg < NR_SYS_REGS)
-			(void)__vcpu_sys_reg(vcpu, r->reg);
+			__vcpu_rmw_sys_reg(vcpu, r->reg, |=, 0);
 	}
 
 	set_bit(KVM_ARCH_FLAG_ID_REGS_INITIALIZED, &kvm->arch.flags);
@@ -5012,7 +5014,7 @@ int kvm_sys_reg_set_user(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg,
 	if (r->set_user) {
 		ret = (r->set_user)(vcpu, r, val);
 	} else {
-		__vcpu_sys_reg(vcpu, r->reg) = val;
+		__vcpu_assign_sys_reg(vcpu, r->reg, val);
 		ret = 0;
 	}
 
@@ -137,7 +137,7 @@ static inline u64 reset_unknown(struct kvm_vcpu *vcpu,
 {
 	BUG_ON(!r->reg);
 	BUG_ON(r->reg >= NR_SYS_REGS);
-	__vcpu_sys_reg(vcpu, r->reg) = 0x1de7ec7edbadc0deULL;
+	__vcpu_assign_sys_reg(vcpu, r->reg, 0x1de7ec7edbadc0deULL);
 	return __vcpu_sys_reg(vcpu, r->reg);
 }
 
@@ -145,7 +145,7 @@ static inline u64 reset_val(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
 {
 	BUG_ON(!r->reg);
 	BUG_ON(r->reg >= NR_SYS_REGS);
-	__vcpu_sys_reg(vcpu, r->reg) = r->val;
+	__vcpu_assign_sys_reg(vcpu, r->reg, r->val);
 	return __vcpu_sys_reg(vcpu, r->reg);
 }
 
@@ -356,12 +356,12 @@ void vgic_v3_put_nested(struct kvm_vcpu *vcpu)
 	val = __vcpu_sys_reg(vcpu, ICH_HCR_EL2);
 	val &= ~ICH_HCR_EL2_EOIcount_MASK;
 	val |= (s_cpu_if->vgic_hcr & ICH_HCR_EL2_EOIcount_MASK);
-	__vcpu_sys_reg(vcpu, ICH_HCR_EL2) = val;
-	__vcpu_sys_reg(vcpu, ICH_VMCR_EL2) = s_cpu_if->vgic_vmcr;
+	__vcpu_assign_sys_reg(vcpu, ICH_HCR_EL2, val);
+	__vcpu_assign_sys_reg(vcpu, ICH_VMCR_EL2, s_cpu_if->vgic_vmcr);
 
 	for (i = 0; i < 4; i++) {
-		__vcpu_sys_reg(vcpu, ICH_AP0RN(i)) = s_cpu_if->vgic_ap0r[i];
-		__vcpu_sys_reg(vcpu, ICH_AP1RN(i)) = s_cpu_if->vgic_ap1r[i];
+		__vcpu_assign_sys_reg(vcpu, ICH_AP0RN(i), s_cpu_if->vgic_ap0r[i]);
+		__vcpu_assign_sys_reg(vcpu, ICH_AP1RN(i), s_cpu_if->vgic_ap1r[i]);
 	}
 
 	for_each_set_bit(i, &shadow_if->lr_map, kvm_vgic_global_state.nr_lr) {
@@ -370,7 +370,7 @@ void vgic_v3_put_nested(struct kvm_vcpu *vcpu)
 		val &= ~ICH_LR_STATE;
 		val |= s_cpu_if->vgic_lr[i] & ICH_LR_STATE;
 
-		__vcpu_sys_reg(vcpu, ICH_LRN(i)) = val;
+		__vcpu_assign_sys_reg(vcpu, ICH_LRN(i), val);
 		s_cpu_if->vgic_lr[i] = 0;
 	}
 
@@ -22,7 +22,8 @@
 #include "gic.h"
 #include "vgic.h"
 
-static const uint64_t CVAL_MAX = ~0ULL;
+/* Depends on counter width. */
+static uint64_t CVAL_MAX;
 /* tval is a signed 32-bit int. */
 static const int32_t TVAL_MAX = INT32_MAX;
 static const int32_t TVAL_MIN = INT32_MIN;
@@ -30,8 +31,8 @@ static const int32_t TVAL_MIN = INT32_MIN;
 /* After how much time we say there is no IRQ. */
 static const uint32_t TIMEOUT_NO_IRQ_US = 50000;
 
-/* A nice counter value to use as the starting one for most tests. */
-static const uint64_t DEF_CNT = (CVAL_MAX / 2);
+/* Counter value to use as the starting one for most tests. Set to CVAL_MAX/2 */
+static uint64_t DEF_CNT;
 
 /* Number of runs. */
 static const uint32_t NR_TEST_ITERS_DEF = 5;
@@ -191,8 +192,8 @@ static void set_tval_irq(enum arch_timer timer, uint64_t tval_cycles,
 {
 	atomic_set(&shared_data.handled, 0);
 	atomic_set(&shared_data.spurious, 0);
-	timer_set_ctl(timer, ctl);
 	timer_set_tval(timer, tval_cycles);
+	timer_set_ctl(timer, ctl);
 }
 
 static void set_xval_irq(enum arch_timer timer, uint64_t xval, uint32_t ctl,
@@ -732,12 +733,6 @@ static void test_move_counters_ahead_of_timers(enum arch_timer timer)
 		test_set_cnt_after_tval(timer, 0, tval, (uint64_t) tval + 1,
 					wm);
 	}
-
-	for (i = 0; i < ARRAY_SIZE(sleep_method); i++) {
-		sleep_method_t sm = sleep_method[i];
-
-		test_set_cnt_after_cval_no_irq(timer, 0, DEF_CNT, CVAL_MAX, sm);
-	}
 }
 
 /*
@@ -849,17 +844,17 @@ static void guest_code(enum arch_timer timer)
 	GUEST_DONE();
 }
 
+static cpu_set_t default_cpuset;
+
 static uint32_t next_pcpu(void)
 {
 	uint32_t max = get_nprocs();
 	uint32_t cur = sched_getcpu();
 	uint32_t next = cur;
-	cpu_set_t cpuset;
+	cpu_set_t cpuset = default_cpuset;
 
 	TEST_ASSERT(max > 1, "Need at least two physical cpus");
 
-	sched_getaffinity(0, sizeof(cpuset), &cpuset);
-
 	do {
 		next = (next + 1) % CPU_SETSIZE;
 	} while (!CPU_ISSET(next, &cpuset));
@@ -975,6 +970,8 @@ static void test_vm_create(struct kvm_vm **vm, struct kvm_vcpu **vcpu,
 	test_init_timer_irq(*vm, *vcpu);
 	vgic_v3_setup(*vm, 1, 64);
 	sync_global_to_guest(*vm, test_args);
+	sync_global_to_guest(*vm, CVAL_MAX);
+	sync_global_to_guest(*vm, DEF_CNT);
 }
 
 static void test_print_help(char *name)
@@ -986,7 +983,7 @@ static void test_print_help(char *name)
 	pr_info("\t-b: Test both physical and virtual timers (default: true)\n");
 	pr_info("\t-l: Delta (in ms) used for long wait time test (default: %u)\n",
 		LONG_WAIT_TEST_MS);
-	pr_info("\t-l: Delta (in ms) used for wait times (default: %u)\n",
+	pr_info("\t-w: Delta (in ms) used for wait times (default: %u)\n",
 		WAIT_TEST_MS);
 	pr_info("\t-p: Test physical timer (default: true)\n");
 	pr_info("\t-v: Test virtual timer (default: true)\n");
@@ -1035,6 +1032,17 @@ static bool parse_args(int argc, char *argv[])
 	return false;
 }
 
+static void set_counter_defaults(void)
+{
+	const uint64_t MIN_ROLLOVER_SECS = 40ULL * 365 * 24 * 3600;
+	uint64_t freq = read_sysreg(CNTFRQ_EL0);
+	uint64_t width = ilog2(MIN_ROLLOVER_SECS * freq);
+
+	width = clamp(width, 56, 64);
+	CVAL_MAX = GENMASK_ULL(width - 1, 0);
+	DEF_CNT = CVAL_MAX / 2;
+}
+
 int main(int argc, char *argv[])
 {
 	struct kvm_vcpu *vcpu;
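A short worked example of what the new set_counter_defaults() above computes (the 1 GHz counter frequency here is purely an assumed figure for illustration):

	/*
	 * With freq = 1 GHz: MIN_ROLLOVER_SECS * freq
	 *   = 40 * 31,536,000 s * 1e9 Hz ~= 1.26e18 cycles,
	 * so ilog2() gives 60, which already sits inside the [56, 64] clamp.
	 * CVAL_MAX then becomes GENMASK_ULL(59, 0) = 2^60 - 1,
	 * and DEF_CNT = CVAL_MAX / 2 ~= 2^59.
	 */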
@@ -1046,6 +1054,9 @@ int main(int argc, char *argv[])
 	if (!parse_args(argc, argv))
 		exit(KSFT_SKIP);
 
+	sched_getaffinity(0, sizeof(default_cpuset), &default_cpuset);
+	set_counter_defaults();
+
 	if (test_args.test_virtual) {
 		test_vm_create(&vm, &vcpu, VIRTUAL);
 		test_run(vm, vcpu);