	ARM: 6521/1: perf: use raw_spinlock_t for pmu_lock
For kernels built with PREEMPT_RT, critical sections protected by
standard spinlocks are preemptible. This is not acceptable for perf
as (a) we may be scheduled onto a different CPU whilst reading/writing
banked PMU registers and (b) the latency when reading the PMU
registers becomes unpredictable.

This patch upgrades the pmu_lock spinlock to a raw_spinlock instead.

Reported-by: Jamie Iles <jamie@jamieiles.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
parent 4d6b7a779b
commit 961ec6daa7
4 changed files with 35 additions and 35 deletions
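The conversion is mechanical: pmu_lock is declared with DEFINE_RAW_SPINLOCK() and every spin_lock_irqsave()/spin_unlock_irqrestore() pair taken on it becomes the raw_ variant. A minimal sketch of the resulting pattern follows (not part of the patch; pmcr_read() and pmcr_write() are hypothetical stand-ins for the per-architecture PMU accessors such as armv6_pmcr_read() in the diff below):

#include <linux/spinlock.h>
#include <linux/types.h>

/* Hypothetical PMU control-register accessors, standing in for the
 * architecture-specific helpers used in the diff below. */
extern u32 pmcr_read(void);
extern void pmcr_write(u32 val);

/* A raw_spinlock_t stays a true spinning lock even on PREEMPT_RT,
 * where an ordinary spinlock_t becomes a preemptible sleeping lock. */
static DEFINE_RAW_SPINLOCK(example_pmu_lock);

static void example_pmu_rmw(u32 mask, u32 evt)
{
	unsigned long flags;
	u32 val;

	/* Interrupts off and spinning: the read/modify/write of the
	 * banked PMU register must not be preempted or migrated to
	 * another CPU mid-sequence. */
	raw_spin_lock_irqsave(&example_pmu_lock, flags);
	val = pmcr_read();
	val &= ~mask;
	val |= evt;
	pmcr_write(val);
	raw_spin_unlock_irqrestore(&example_pmu_lock, flags);
}

Because raw_spin_lock_irqsave() keeps the section non-preemptible even on PREEMPT_RT, both problems named in the commit message are addressed: the CPU cannot change underneath the banked register access, and the lock hold time stays bounded.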
@@ -32,7 +32,7 @@ static struct platform_device *pmu_device;
  * Hardware lock to serialize accesses to PMU registers. Needed for the
  * read/modify/write sequences.
  */
-static DEFINE_SPINLOCK(pmu_lock);
+static DEFINE_RAW_SPINLOCK(pmu_lock);
 
 /*
  * ARMv6 supports a maximum of 3 events, starting from index 1. If we add
@@ -426,12 +426,12 @@ armv6pmu_enable_event(struct hw_perf_event *hwc,
 	 * Mask out the current event and set the counter to count the event
 	 * that we're interested in.
 	 */
-	spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&pmu_lock, flags);
 	val = armv6_pmcr_read();
 	val &= ~mask;
 	val |= evt;
 	armv6_pmcr_write(val);
-	spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&pmu_lock, flags);
 }
 
 static irqreturn_t
@@ -500,11 +500,11 @@ armv6pmu_start(void)
 {
 	unsigned long flags, val;
 
-	spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&pmu_lock, flags);
 	val = armv6_pmcr_read();
 	val |= ARMV6_PMCR_ENABLE;
 	armv6_pmcr_write(val);
-	spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&pmu_lock, flags);
 }
 
 static void
@@ -512,11 +512,11 @@ armv6pmu_stop(void)
 {
 	unsigned long flags, val;
 
-	spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&pmu_lock, flags);
 	val = armv6_pmcr_read();
 	val &= ~ARMV6_PMCR_ENABLE;
 	armv6_pmcr_write(val);
-	spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&pmu_lock, flags);
 }
 
 static int
@@ -570,12 +570,12 @@ armv6pmu_disable_event(struct hw_perf_event *hwc,
 	 * of ETM bus signal assertion cycles. The external reporting should
 	 * be disabled and so this should never increment.
 	 */
-	spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&pmu_lock, flags);
 	val = armv6_pmcr_read();
 	val &= ~mask;
 	val |= evt;
 	armv6_pmcr_write(val);
-	spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&pmu_lock, flags);
 }
 
 static void
@@ -599,12 +599,12 @@ armv6mpcore_pmu_disable_event(struct hw_perf_event *hwc,
 	 * Unlike UP ARMv6, we don't have a way of stopping the counters. We
 	 * simply disable the interrupt reporting.
 	 */
-	spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&pmu_lock, flags);
 	val = armv6_pmcr_read();
 	val &= ~mask;
 	val |= evt;
 	armv6_pmcr_write(val);
-	spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&pmu_lock, flags);
 }
 
 static const struct arm_pmu armv6pmu = {
@@ -689,7 +689,7 @@ static void armv7pmu_enable_event(struct hw_perf_event *hwc, int idx)
 	 * Enable counter and interrupt, and set the counter to count
 	 * the event that we're interested in.
 	 */
-	spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&pmu_lock, flags);
 
 	/*
 	 * Disable counter
@@ -713,7 +713,7 @@ static void armv7pmu_enable_event(struct hw_perf_event *hwc, int idx)
 	 */
 	armv7_pmnc_enable_counter(idx);
 
-	spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&pmu_lock, flags);
 }
 
 static void armv7pmu_disable_event(struct hw_perf_event *hwc, int idx)
@@ -723,7 +723,7 @@ static void armv7pmu_disable_event(struct hw_perf_event *hwc, int idx)
 	/*
 	 * Disable counter and interrupt
 	 */
-	spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&pmu_lock, flags);
 
 	/*
 	 * Disable counter
@@ -735,7 +735,7 @@ static void armv7pmu_disable_event(struct hw_perf_event *hwc, int idx)
 	 */
 	armv7_pmnc_disable_intens(idx);
 
-	spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&pmu_lock, flags);
 }
 
 static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
@@ -805,20 +805,20 @@ static void armv7pmu_start(void)
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&pmu_lock, flags);
 	/* Enable all counters */
 	armv7_pmnc_write(armv7_pmnc_read() | ARMV7_PMNC_E);
-	spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&pmu_lock, flags);
 }
 
 static void armv7pmu_stop(void)
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&pmu_lock, flags);
 	/* Disable all counters */
 	armv7_pmnc_write(armv7_pmnc_read() & ~ARMV7_PMNC_E);
-	spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&pmu_lock, flags);
 }
 
 static int armv7pmu_get_event_idx(struct cpu_hw_events *cpuc,
@@ -291,12 +291,12 @@ xscale1pmu_enable_event(struct hw_perf_event *hwc, int idx)
 		return;
 	}
 
-	spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&pmu_lock, flags);
 	val = xscale1pmu_read_pmnc();
 	val &= ~mask;
 	val |= evt;
 	xscale1pmu_write_pmnc(val);
-	spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&pmu_lock, flags);
 }
 
 static void
@@ -322,12 +322,12 @@ xscale1pmu_disable_event(struct hw_perf_event *hwc, int idx)
 		return;
 	}
 
-	spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&pmu_lock, flags);
 	val = xscale1pmu_read_pmnc();
 	val &= ~mask;
 	val |= evt;
 	xscale1pmu_write_pmnc(val);
-	spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&pmu_lock, flags);
 }
 
 static int
@@ -355,11 +355,11 @@ xscale1pmu_start(void)
 {
 	unsigned long flags, val;
 
-	spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&pmu_lock, flags);
 	val = xscale1pmu_read_pmnc();
 	val |= XSCALE_PMU_ENABLE;
 	xscale1pmu_write_pmnc(val);
-	spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&pmu_lock, flags);
 }
 
 static void
@@ -367,11 +367,11 @@ xscale1pmu_stop(void)
 {
 	unsigned long flags, val;
 
-	spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&pmu_lock, flags);
 	val = xscale1pmu_read_pmnc();
 	val &= ~XSCALE_PMU_ENABLE;
 	xscale1pmu_write_pmnc(val);
-	spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&pmu_lock, flags);
 }
 
 static inline u32
@@ -635,10 +635,10 @@ xscale2pmu_enable_event(struct hw_perf_event *hwc, int idx)
 		return;
 	}
 
-	spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&pmu_lock, flags);
 	xscale2pmu_write_event_select(evtsel);
 	xscale2pmu_write_int_enable(ien);
-	spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&pmu_lock, flags);
 }
 
 static void
@@ -678,10 +678,10 @@ xscale2pmu_disable_event(struct hw_perf_event *hwc, int idx)
 		return;
 	}
 
-	spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&pmu_lock, flags);
 	xscale2pmu_write_event_select(evtsel);
 	xscale2pmu_write_int_enable(ien);
-	spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&pmu_lock, flags);
 }
 
 static int
@@ -705,11 +705,11 @@ xscale2pmu_start(void)
 {
 	unsigned long flags, val;
 
-	spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&pmu_lock, flags);
 	val = xscale2pmu_read_pmnc() & ~XSCALE_PMU_CNT64;
 	val |= XSCALE_PMU_ENABLE;
 	xscale2pmu_write_pmnc(val);
-	spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&pmu_lock, flags);
 }
 
 static void
@@ -717,11 +717,11 @@ xscale2pmu_stop(void)
 {
 	unsigned long flags, val;
 
-	spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&pmu_lock, flags);
 	val = xscale2pmu_read_pmnc();
 	val &= ~XSCALE_PMU_ENABLE;
 	xscale2pmu_write_pmnc(val);
-	spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&pmu_lock, flags);
 }
 
 static inline u32