mirror of
git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
synced 2025-08-05 16:54:27 +00:00

Perf and PMU updates: - Add support for new (v3) Hisilicon SLLC and DDRC PMUs - Add support for Arm-NI PMU integrations that share interrupts between clock domains within a given instance - Allow SPE to be configured with a lower sample period than the minimum recommendation advertised by PMSIDR_EL1.Interval - Add suppport for Arm's "Branch Record Buffer Extension" (BRBE) - Adjust the perf watchdog period according to cpu frequency changes - Minor driver fixes and cleanups Hardware features: - Support for MTE store-only checking (FEAT_MTE_STORE_ONLY) - Support for reporting the non-address bits during a synchronous MTE tag check fault (FEAT_MTE_TAGGED_FAR) - Optimise the TLBI when folding/unfolding contiguous PTEs on hardware with FEAT_BBM (break-before-make) level 2 and no TLB conflict aborts Software features: - Enable HAVE_LIVEPATCH after implementing arch_stack_walk_reliable() and using the text-poke API for late module relocations - Force VMAP_STACK always on and change arm64_efi_rt_init() to use arch_alloc_vmap_stack() in order to avoid KASAN false positives ACPI: - Improve SPCR handling and messaging on systems lacking an SPCR table Debug: - Simplify the debug exception entry path - Drop redundant DBG_MDSCR_* macros Kselftests: - Cleanups and improvements for SME, SVE and FPSIMD tests Miscellaneous: - Optimise loop to reduce redundant operations in contpte_ptep_get() - Remove ISB when resetting POR_EL0 during signal handling - Mark the kernel as tainted on SEA and SError panic - Remove redundant gcs_free() call -----BEGIN PGP SIGNATURE----- iQIzBAABCgAdFiEE5RElWfyWxS+3PLO2a9axLQDIXvEFAmiDkgoACgkQa9axLQDI XvFucQ//bYugRP5/Sdlrq5eDKWBGi1HufYzwfDEBLc4S75Eu8mGL/tuThfu9yFn+ qCowtt4U84HdWsZDTSVo6lym6v2vJUpGOMgXzepvJaFBRnqGv9X9NxH6RQO1LTnu Pm7rO+7I9tNpfuc7Zu9pHDggsJEw+WzVfmEF6WPSFlT9mUNv6NbSx4rbLQKU86Dm ouTqXaePEQZ5oiRXVasxyT0otGtiACD20WpgOtNjYGzsfUVwCf/C83V/2DLwwbhr 9cW9lCtFxA/yFdQcA9ThRzWZ9Eo5LAHqjGIq00+zOjuzgDbBtcTT79gpChkhovIR 
FBIsWHd9j9i3nYxzf4V4eRKQnyqS3NQWv7g7uKFwNgARif1Zk0VJ77QIlAYk5xLI ENTRjLKz5WNGGnhdkeCvDlVyxX+OktgcVTp3vqRxAKCRahMMUqBrwxiM8RzVF37e yzkEQayL8F7uZqy9H7Sjn48UpHZux6frJ1bBQw1oEvR9QmAoAdqavPMSAYIOT3Zr ze4WIljq/cFr3kBPIFP5pK1e0qYMHXZpSKIm8MAv6y/7KmQuVbMjZthpuPbLSIw0 Q7C0KalB8lToPIbO7qMni/he0dCN4K2+E1YHFTR+pzfcoLuW4rjSg7i8tqMLKMJ8 H+SeGLyPtM5A6bdAPTTpqefcgUUe7064ENUqrGUpDEynGXA7boE= =5h1C -----END PGP SIGNATURE----- Merge tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux Pull arm64 updates from Catalin Marinas: "A quick summary: perf support for Branch Record Buffer Extensions (BRBE), typical PMU hardware updates, small additions to MTE for store-only tag checking and exposing non-address bits to signal handlers, HAVE_LIVEPATCH enabled on arm64, VMAP_STACK forced on. There is also a TLBI optimisation on hardware that does not require break-before-make when changing the user PTEs between contiguous and non-contiguous. More details: Perf and PMU updates: - Add support for new (v3) Hisilicon SLLC and DDRC PMUs - Add support for Arm-NI PMU integrations that share interrupts between clock domains within a given instance - Allow SPE to be configured with a lower sample period than the minimum recommendation advertised by PMSIDR_EL1.Interval - Add suppport for Arm's "Branch Record Buffer Extension" (BRBE) - Adjust the perf watchdog period according to cpu frequency changes - Minor driver fixes and cleanups Hardware features: - Support for MTE store-only checking (FEAT_MTE_STORE_ONLY) - Support for reporting the non-address bits during a synchronous MTE tag check fault (FEAT_MTE_TAGGED_FAR) - Optimise the TLBI when folding/unfolding contiguous PTEs on hardware with FEAT_BBM (break-before-make) level 2 and no TLB conflict aborts Software features: - Enable HAVE_LIVEPATCH after implementing arch_stack_walk_reliable() and using the text-poke API for late module relocations - Force VMAP_STACK always on and change arm64_efi_rt_init() to use 
arch_alloc_vmap_stack() in order to avoid KASAN false positives ACPI: - Improve SPCR handling and messaging on systems lacking an SPCR table Debug: - Simplify the debug exception entry path - Drop redundant DBG_MDSCR_* macros Kselftests: - Cleanups and improvements for SME, SVE and FPSIMD tests Miscellaneous: - Optimise loop to reduce redundant operations in contpte_ptep_get() - Remove ISB when resetting POR_EL0 during signal handling - Mark the kernel as tainted on SEA and SError panic - Remove redundant gcs_free() call" * tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux: (93 commits) arm64/gcs: task_gcs_el0_enable() should use passed task arm64: Kconfig: Keep selects somewhat alphabetically ordered arm64: signal: Remove ISB when resetting POR_EL0 kselftest/arm64: Handle attempts to disable SM on SME only systems kselftest/arm64: Fix SVE write data generation for SME only systems kselftest/arm64: Test SME on SME only systems in fp-ptrace kselftest/arm64: Test FPSIMD format data writes via NT_ARM_SVE in fp-ptrace kselftest/arm64: Allow sve-ptrace to run on SME only systems arm64/mm: Drop redundant addr increment in set_huge_pte_at() kselftest/arm4: Provide local defines for AT_HWCAP3 arm64: Mark kernel as tainted on SAE and SError panic arm64/gcs: Don't call gcs_free() when releasing task_struct drivers/perf: hisi: Support PMUs with no interrupt drivers/perf: hisi: Relax the event number check of v2 PMUs drivers/perf: hisi: Add support for HiSilicon SLLC v3 PMU driver drivers/perf: hisi: Use ACPI driver_data to retrieve SLLC PMU information drivers/perf: hisi: Add support for HiSilicon DDRC v3 PMU driver drivers/perf: hisi: Simplify the probe process for each DDRC version perf/arm-ni: Support sharing IRQs within an NI instance perf/arm-ni: Consolidate CPU affinity handling ...
269 lines
7.7 KiB
C
// SPDX-License-Identifier: GPL-2.0-only
|
|
/*
|
|
* Debug and Guest Debug support
|
|
*
|
|
* Copyright (C) 2015 - Linaro Ltd
|
|
* Authors: Alex Bennée <alex.bennee@linaro.org>
|
|
* Oliver Upton <oliver.upton@linux.dev>
|
|
*/
|
|
|
|
#include <linux/kvm_host.h>
|
|
#include <linux/hw_breakpoint.h>
|
|
|
|
#include <asm/debug-monitors.h>
|
|
#include <asm/kvm_asm.h>
|
|
#include <asm/kvm_arm.h>
|
|
#include <asm/kvm_emulate.h>
|
|
|
|
/**
 * kvm_arm_setup_mdcr_el2 - configure vcpu mdcr_el2 value
 *
 * @vcpu: the vcpu pointer
 *
 * This ensures we will trap access to:
 *  - Performance monitors (MDCR_EL2_TPM/MDCR_EL2_TPMCR)
 *  - Debug ROM Address (MDCR_EL2_TDRA)
 *  - OS related registers (MDCR_EL2_TDOSA)
 *  - Statistical profiler (MDCR_EL2_TPMS/MDCR_EL2_E2PB)
 *  - Self-hosted Trace Filter controls (MDCR_EL2_TTRF)
 *  - Self-hosted Trace (MDCR_EL2_TTRF/MDCR_EL2_E2TB)
 *
 * The computed value is cached in vcpu->arch.mdcr_el2 and, on VHE,
 * written straight to the hardware register.
 */
static void kvm_arm_setup_mdcr_el2(struct kvm_vcpu *vcpu)
{
	/*
	 * Disable preemption around the per-CPU host data read and the
	 * (VHE) sysreg write so both happen on the same physical CPU.
	 */
	preempt_disable();

	/*
	 * This also clears MDCR_EL2_E2PB_MASK and MDCR_EL2_E2TB_MASK
	 * to disable guest access to the profiling and trace buffers
	 */
	vcpu->arch.mdcr_el2 = FIELD_PREP(MDCR_EL2_HPMN,
					 *host_data_ptr(nr_event_counters));
	vcpu->arch.mdcr_el2 |= (MDCR_EL2_TPM |
				MDCR_EL2_TPMS |
				MDCR_EL2_TTRF |
				MDCR_EL2_TPMCR |
				MDCR_EL2_TDRA |
				MDCR_EL2_TDOSA);

	/* Is the VM being debugged by userspace? */
	if (vcpu->guest_debug)
		/* Route all software debug exceptions to EL2 */
		vcpu->arch.mdcr_el2 |= MDCR_EL2_TDE;

	/*
	 * Trap debug registers if the guest doesn't have ownership of them.
	 */
	if (!kvm_guest_owns_debug_regs(vcpu))
		vcpu->arch.mdcr_el2 |= MDCR_EL2_TDA;

	/* Write MDCR_EL2 directly if we're already at EL2 */
	if (has_vhe())
		write_sysreg(vcpu->arch.mdcr_el2, mdcr_el2);

	preempt_enable();
}
|
|
|
|
/*
 * Snapshot the host's debug-related feature state (from ID_AA64DFR0_EL1
 * and friends) into the per-CPU host data, so later trap configuration
 * doesn't need to re-read the ID registers.
 */
void kvm_init_host_debug_data(void)
{
	u64 dfr0 = read_sysreg(id_aa64dfr0_el1);

	/* Record the number of PMU event counters, if a PMU is implemented */
	if (cpuid_feature_extract_signed_field(dfr0, ID_AA64DFR0_EL1_PMUVer_SHIFT) > 0)
		*host_data_ptr(nr_event_counters) = FIELD_GET(ARMV8_PMU_PMCR_N,
							      read_sysreg(pmcr_el0));

	/* Number of implemented breakpoint/watchpoint register pairs */
	*host_data_ptr(debug_brps) = SYS_FIELD_GET(ID_AA64DFR0_EL1, BRPs, dfr0);
	*host_data_ptr(debug_wrps) = SYS_FIELD_GET(ID_AA64DFR0_EL1, WRPs, dfr0);

	/* The remaining flags are only consulted on non-VHE (nVHE/hVHE) */
	if (has_vhe())
		return;

	/* SPE present and its buffer not prohibited (PMBIDR_EL1.P clear) */
	if (cpuid_feature_extract_unsigned_field(dfr0, ID_AA64DFR0_EL1_PMSVer_SHIFT) &&
	    !(read_sysreg_s(SYS_PMBIDR_EL1) & PMBIDR_EL1_P))
		host_data_set_flag(HAS_SPE);

	/* Check if we have BRBE implemented and available at the host */
	if (cpuid_feature_extract_unsigned_field(dfr0, ID_AA64DFR0_EL1_BRBE_SHIFT))
		host_data_set_flag(HAS_BRBE);

	if (cpuid_feature_extract_unsigned_field(dfr0, ID_AA64DFR0_EL1_TraceFilt_SHIFT)) {
		/* Force disable trace in protected mode in case of no TRBE */
		if (is_protected_kvm_enabled())
			host_data_set_flag(EL1_TRACING_CONFIGURED);

		/* TRBE present and its buffer not prohibited (TRBIDR_EL1.P clear) */
		if (cpuid_feature_extract_unsigned_field(dfr0, ID_AA64DFR0_EL1_TraceBuffer_SHIFT) &&
		    !(read_sysreg_s(SYS_TRBIDR_EL1) & TRBIDR_EL1_P))
			host_data_set_flag(HAS_TRBE);
	}
}
|
|
|
|
/*
 * Configures the 'external' MDSCR_EL1 value for the guest, i.e. when the host
 * has taken over MDSCR_EL1.
 *
 * - Userspace is single-stepping the guest, and MDSCR_EL1.SS is forced to 1.
 *
 * - Userspace is using the breakpoint/watchpoint registers to debug the
 *   guest, and MDSCR_EL1.MDE is forced to 1.
 *
 * - The guest has enabled the OS Lock, and KVM is forcing MDSCR_EL1.MDE to 0,
 *   masking all debug exceptions affected by the OS Lock.
 *
 * The result is stashed in vcpu->arch.external_mdscr_el1 (presumably
 * installed in place of the guest's value while the host owns debug —
 * the consumer is outside this file).
 */
static void setup_external_mdscr(struct kvm_vcpu *vcpu)
{
	/*
	 * Use the guest's MDSCR_EL1 as a starting point, since there are
	 * several other features controlled by MDSCR_EL1 that are not relevant
	 * to the host.
	 *
	 * Clear the bits that KVM may use which also satisfies emulation of
	 * the OS Lock as MDSCR_EL1.MDE is cleared.
	 */
	u64 mdscr = vcpu_read_sys_reg(vcpu, MDSCR_EL1) & ~(MDSCR_EL1_SS |
							   MDSCR_EL1_MDE |
							   MDSCR_EL1_KDE);

	/* Force software step on behalf of a single-stepping VMM */
	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
		mdscr |= MDSCR_EL1_SS;

	/* Force debug exceptions on when the VMM uses HW breakpoints */
	if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW)
		mdscr |= MDSCR_EL1_MDE | MDSCR_EL1_KDE;

	vcpu->arch.external_mdscr_el1 = mdscr;
}
|
|
|
|
/*
 * Set up the vcpu's debug state on vcpu_load(): decide who owns the debug
 * registers for this run, take over the single-step state machine if
 * userspace is stepping the guest, and program MDCR_EL2 accordingly.
 */
void kvm_vcpu_load_debug(struct kvm_vcpu *vcpu)
{
	u64 mdscr;

	/* Must be called before kvm_vcpu_load_vhe() */
	KVM_BUG_ON(vcpu_get_flag(vcpu, SYSREGS_ON_CPU), vcpu->kvm);

	/*
	 * Determine which of the possible debug states we're in:
	 *
	 *  - VCPU_DEBUG_HOST_OWNED: KVM has taken ownership of the guest's
	 *    breakpoint/watchpoint registers, or needs to use MDSCR_EL1 to do
	 *    software step or emulate the effects of the OS Lock being enabled.
	 *
	 *  - VCPU_DEBUG_GUEST_OWNED: The guest has debug exceptions enabled, and
	 *    the breakpoint/watchpoint registers need to be loaded eagerly.
	 *
	 *  - VCPU_DEBUG_FREE: Neither of the above apply, no breakpoint/watchpoint
	 *    context needs to be loaded on the CPU.
	 */
	if (vcpu->guest_debug || kvm_vcpu_os_lock_enabled(vcpu)) {
		vcpu->arch.debug_owner = VCPU_DEBUG_HOST_OWNED;
		setup_external_mdscr(vcpu);

		/*
		 * Steal the guest's single-step state machine if userspace wants
		 * single-step the guest.
		 */
		if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
			/*
			 * Stash the guest's PSTATE.SS as a flag (SS clear means
			 * a step is active-pending), then install the host's
			 * saved step state in its place.
			 */
			if (*vcpu_cpsr(vcpu) & DBG_SPSR_SS)
				vcpu_clear_flag(vcpu, GUEST_SS_ACTIVE_PENDING);
			else
				vcpu_set_flag(vcpu, GUEST_SS_ACTIVE_PENDING);

			if (!vcpu_get_flag(vcpu, HOST_SS_ACTIVE_PENDING))
				*vcpu_cpsr(vcpu) |= DBG_SPSR_SS;
			else
				*vcpu_cpsr(vcpu) &= ~DBG_SPSR_SS;
		}
	} else {
		mdscr = vcpu_read_sys_reg(vcpu, MDSCR_EL1);

		/* Debug exceptions enabled by the guest => load regs eagerly */
		if (mdscr & (MDSCR_EL1_KDE | MDSCR_EL1_MDE))
			vcpu->arch.debug_owner = VCPU_DEBUG_GUEST_OWNED;
		else
			vcpu->arch.debug_owner = VCPU_DEBUG_FREE;
	}

	kvm_arm_setup_mdcr_el2(vcpu);
}
|
|
|
|
/*
 * Undo kvm_vcpu_load_debug()'s theft of the single-step state machine on
 * vcpu_put(): the inverse of the PSTATE.SS swap done at load time.
 */
void kvm_vcpu_put_debug(struct kvm_vcpu *vcpu)
{
	/* Nothing was stolen unless userspace is single-stepping */
	if (likely(!(vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)))
		return;

	/*
	 * Save the host's software step state and restore the guest's before
	 * potentially returning to userspace.
	 */
	if (!(*vcpu_cpsr(vcpu) & DBG_SPSR_SS))
		vcpu_set_flag(vcpu, HOST_SS_ACTIVE_PENDING);
	else
		vcpu_clear_flag(vcpu, HOST_SS_ACTIVE_PENDING);

	/* Re-install the guest's stashed PSTATE.SS (flag set => SS clear) */
	if (vcpu_get_flag(vcpu, GUEST_SS_ACTIVE_PENDING))
		*vcpu_cpsr(vcpu) &= ~DBG_SPSR_SS;
	else
		*vcpu_cpsr(vcpu) |= DBG_SPSR_SS;
}
|
|
|
|
/*
|
|
* Updates ownership of the debug registers after a trapped guest access to a
|
|
* breakpoint/watchpoint register. Host ownership of the debug registers is of
|
|
* strictly higher priority, and it is the responsibility of the VMM to emulate
|
|
* guest debug exceptions in this configuration.
|
|
*/
|
|
void kvm_debug_set_guest_ownership(struct kvm_vcpu *vcpu)
|
|
{
|
|
if (kvm_host_owns_debug_regs(vcpu))
|
|
return;
|
|
|
|
vcpu->arch.debug_owner = VCPU_DEBUG_GUEST_OWNED;
|
|
kvm_arm_setup_mdcr_el2(vcpu);
|
|
}
|
|
|
|
/*
 * Handle a guest write to OSLAR_EL1: mirror the requested OS Lock state
 * into the emulated OSLSR_EL1, then cycle the vcpu context so the new
 * lock state is reflected in the loaded debug configuration.
 */
void kvm_debug_handle_oslar(struct kvm_vcpu *vcpu, u64 val)
{
	if (!(val & OSLAR_EL1_OSLK))
		__vcpu_rmw_sys_reg(vcpu, OSLSR_EL1, &=, ~OSLSR_EL1_OSLK);
	else
		__vcpu_rmw_sys_reg(vcpu, OSLSR_EL1, |=, OSLSR_EL1_OSLK);

	/* Reload the vcpu on this CPU to pick up the new OS Lock state */
	preempt_disable();
	kvm_arch_vcpu_put(vcpu);
	kvm_arch_vcpu_load(vcpu, smp_processor_id());
	preempt_enable();
}
|
|
|
|
void kvm_enable_trbe(void)
|
|
{
|
|
if (has_vhe() || is_protected_kvm_enabled() ||
|
|
WARN_ON_ONCE(preemptible()))
|
|
return;
|
|
|
|
host_data_set_flag(TRBE_ENABLED);
|
|
}
|
|
EXPORT_SYMBOL_GPL(kvm_enable_trbe);
|
|
|
|
void kvm_disable_trbe(void)
|
|
{
|
|
if (has_vhe() || is_protected_kvm_enabled() ||
|
|
WARN_ON_ONCE(preemptible()))
|
|
return;
|
|
|
|
host_data_clear_flag(TRBE_ENABLED);
|
|
}
|
|
EXPORT_SYMBOL_GPL(kvm_disable_trbe);
|
|
|
|
/*
 * Set the TRFCR_EL1 value that should be in effect while a guest runs on
 * this CPU. No-op in protected mode; must be called with preemption
 * disabled (per-CPU state).
 */
void kvm_tracing_set_el1_configuration(u64 trfcr_while_in_guest)
{
	if (is_protected_kvm_enabled() || WARN_ON_ONCE(preemptible()))
		return;

	if (has_vhe()) {
		/* With VHE the guest's EL1 view can be written directly */
		write_sysreg_s(trfcr_while_in_guest, SYS_TRFCR_EL12);
		return;
	}

	/* nVHE: stash the value for the hypervisor to install on guest entry */
	*host_data_ptr(trfcr_while_in_guest) = trfcr_while_in_guest;

	/* Only flag a swap as needed when guest and host values differ */
	if (read_sysreg_s(SYS_TRFCR_EL1) == trfcr_while_in_guest)
		host_data_clear_flag(EL1_TRACING_CONFIGURED);
	else
		host_data_set_flag(EL1_TRACING_CONFIGURED);
}
EXPORT_SYMBOL_GPL(kvm_tracing_set_el1_configuration);
|