mirror of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
synced 2025-10-31 08:44:41 +00:00
24811637db
Instead of playing silly games with CONFIG_DEBUG_PREEMPT toggling between this_cpu_*() and __this_cpu_*() use raw_cpu_*(), which is exactly what we want here.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Davidlohr Bueso <dave@stgolabs.net>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Tim Chen <tim.c.chen@linux.intel.com>
Cc: Waiman Long <longman@redhat.com>
Cc: Will Deacon <will.deacon@arm.com>
Cc: huang ying <huang.ying.caritas@gmail.com>
Link: https://lkml.kernel.org/r/20190527082326.GP2623@hirez.programming.kicks-ass.net
Signed-off-by: Ingo Molnar <mingo@kernel.org>
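For context: this_cpu_*() handles preemption internally, __this_cpu_*() requires the caller to already have preemption disabled (and warns otherwise when CONFIG_DEBUG_PREEMPT is set), while raw_cpu_*() performs no check at all. A minimal sketch of the before/after pattern the commit message describes; the helper name stat_inc() is hypothetical, used only to illustrate the three flavours, not a name from this file:

	/* Before: pick a flavour by config to keep CONFIG_DEBUG_PREEMPT quiet.
	 * stat_inc() is a hypothetical helper for illustration. */
	#ifdef CONFIG_DEBUG_PREEMPT
	#define stat_inc(x)	this_cpu_inc(x)		/* safe from any context */
	#else
	#define stat_inc(x)	__this_cpu_inc(x)	/* caller must be non-preemptible */
	#endif

	/* After: one definition, no config toggle. raw_cpu_inc() skips the
	 * preemption check unconditionally, trading a rare lost increment
	 * for lower overhead, which is fine for statistics. */
	#undef stat_inc
	#define stat_inc(x)	raw_cpu_inc(x)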
		
			
				
	
	
		
60 lines · 1.7 KiB · C
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Authors: Waiman Long <longman@redhat.com>
 */

#ifndef __LOCKING_LOCK_EVENTS_H
#define __LOCKING_LOCK_EVENTS_H

enum lock_events {

#include "lock_events_list.h"

	lockevent_num,	/* Total number of lock event counts */
	LOCKEVENT_reset_cnts = lockevent_num,
};

#ifdef CONFIG_LOCK_EVENT_COUNTS
/*
 * Per-cpu counters
 */
DECLARE_PER_CPU(unsigned long, lockevents[lockevent_num]);

/*
 * Increment the statistical counters. Use raw_cpu_inc() because of lower
 * overhead and we don't care if we lose the occasional update.
 */
static inline void __lockevent_inc(enum lock_events event, bool cond)
{
	if (cond)
		raw_cpu_inc(lockevents[event]);
}

#define lockevent_inc(ev)	  __lockevent_inc(LOCKEVENT_ ##ev, true)
#define lockevent_cond_inc(ev, c) __lockevent_inc(LOCKEVENT_ ##ev, c)

static inline void __lockevent_add(enum lock_events event, int inc)
{
	raw_cpu_add(lockevents[event], inc);
}

#define lockevent_add(ev, c)	__lockevent_add(LOCKEVENT_ ##ev, c)

#else  /* CONFIG_LOCK_EVENT_COUNTS */

#define lockevent_inc(ev)
#define lockevent_add(ev, c)
#define lockevent_cond_inc(ev, c)

#endif /* CONFIG_LOCK_EVENT_COUNTS */
#endif /* __LOCKING_LOCK_EVENTS_H */
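When CONFIG_LOCK_EVENT_COUNTS is off, the three macros expand to nothing, so call sites compile away entirely. The enum values themselves come from the x-macro include above: since lockevent_inc(ev) pastes LOCKEVENT_ onto the event name, each entry in lock_events_list.h must expand to an enumerator of the form LOCKEVENT_<name>. A minimal caller sketch; the event names below are illustrative stand-ins for entries lock_events_list.h would actually define:

	/*
	 * Hypothetical caller showing the three entry points. The event
	 * names (lock_slowpath, lock_pending, spin_loops) are assumptions
	 * for illustration; real names come from lock_events_list.h.
	 */
	#include "lock_events.h"

	static void example_slowpath(bool contended, int loops)
	{
		lockevent_inc(lock_slowpath);			/* always count */
		lockevent_cond_inc(lock_pending, contended);	/* count only if true */
		lockevent_add(spin_loops, loops);		/* add an amount */
	}

Because the counters are pure statistics, a raw_cpu_inc() that races with a migration and lands on the wrong CPU's counter, or drops an update, is an acceptable trade for keeping the fast path cheap.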