	local_t: Remove cpu_local_xx macros
These macros have not been used for a while now.

Signed-off-by: Christoph Lameter <cl@linux-foundation.org>
Signed-off-by: Tejun Heo <tj@kernel.org>
parent 32032df6c2
commit 38b7827fcd

6 changed files with 0 additions and 148 deletions
@@ -98,21 +98,4 @@ static __inline__ long local_sub_return(long i, local_t * l)
 #define __local_add(i,l)	((l)->a.counter+=(i))
 #define __local_sub(i,l)	((l)->a.counter-=(i))
 
-/* Use these for per-cpu local_t variables: on some archs they are
- * much more efficient than these naive implementations.  Note they take
- * a variable, not an address.
- */
-#define cpu_local_read(l)	local_read(&__get_cpu_var(l))
-#define cpu_local_set(l, i)	local_set(&__get_cpu_var(l), (i))
-
-#define cpu_local_inc(l)	local_inc(&__get_cpu_var(l))
-#define cpu_local_dec(l)	local_dec(&__get_cpu_var(l))
-#define cpu_local_add(i, l)	local_add((i), &__get_cpu_var(l))
-#define cpu_local_sub(i, l)	local_sub((i), &__get_cpu_var(l))
-
-#define __cpu_local_inc(l)	__local_inc(&__get_cpu_var(l))
-#define __cpu_local_dec(l)	__local_dec(&__get_cpu_var(l))
-#define __cpu_local_add(i, l)	__local_add((i), &__get_cpu_var(l))
-#define __cpu_local_sub(i, l)	__local_sub((i), &__get_cpu_var(l))
-
 #endif /* _ALPHA_LOCAL_H */

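For orientation, the alpha wrappers above were purely syntactic: cpu_local_inc(l) expanded to local_inc(&__get_cpu_var(l)) with no extra protection. Below is a minimal sketch of how a caller of that era would have used the wrapper, and the open-coded equivalent that remains after this removal; the nr_events counter is hypothetical, while DEFINE_PER_CPU, local_t, local_inc and __get_cpu_var are the kernel interfaces of this period.

#include <linux/percpu.h>
#include <asm/local.h>

/* Hypothetical per-CPU event counter. */
static DEFINE_PER_CPU(local_t, nr_events);

static void count_event(void)
{
	/* With the removed wrapper: cpu_local_inc(nr_events); */

	/* Open-coded equivalent that the wrapper expanded to: */
	local_inc(&__get_cpu_var(nr_events));
}
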
@@ -338,29 +338,4 @@ static inline void local_set_mask(unsigned long  mask, local_t *addr)
  * a variable, not an address.
  */
 
-/* Need to disable preemption for the cpu local counters otherwise we could
-   still access a variable of a previous CPU in a non local way. */
-#define cpu_local_wrap_v(l)	 	\
-	({ local_t res__;		\
-	   preempt_disable(); 		\
-	   res__ = (l);			\
-	   preempt_enable();		\
-	   res__; })
-#define cpu_local_wrap(l)		\
-	({ preempt_disable();		\
-	   l;				\
-	   preempt_enable(); })		\
-
-#define cpu_local_read(l)    cpu_local_wrap_v(local_read(&__get_cpu_var(l)))
-#define cpu_local_set(l, i)  cpu_local_wrap(local_set(&__get_cpu_var(l), (i)))
-#define cpu_local_inc(l)     cpu_local_wrap(local_inc(&__get_cpu_var(l)))
-#define cpu_local_dec(l)     cpu_local_wrap(local_dec(&__get_cpu_var(l)))
-#define cpu_local_add(i, l)  cpu_local_wrap(local_add((i), &__get_cpu_var(l)))
-#define cpu_local_sub(i, l)  cpu_local_wrap(local_sub((i), &__get_cpu_var(l)))
-
-#define __cpu_local_inc(l)	cpu_local_inc(l)
-#define __cpu_local_dec(l)	cpu_local_dec(l)
-#define __cpu_local_add(i, l)	cpu_local_add((i), (l))
-#define __cpu_local_sub(i, l)	cpu_local_sub((i), (l))
-
 #endif /* __M32R_LOCAL_H */

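Unlike alpha, the m32r version (and the near-identical mips and powerpc copies below) wraps every access in preempt_disable()/preempt_enable(), so the task cannot migrate between resolving this CPU's copy via __get_cpu_var() and updating it. A hedged sketch of what cpu_local_inc() expanded to here, written out by hand (the counter name is hypothetical):

#include <linux/percpu.h>
#include <linux/preempt.h>
#include <asm/local.h>

/* Hypothetical per-CPU event counter. */
static DEFINE_PER_CPU(local_t, nr_events);

static void count_event(void)
{
	/*
	 * Rough equivalent of cpu_local_inc(nr_events) via cpu_local_wrap():
	 * stay on the current CPU while locating and bumping its counter.
	 */
	preempt_disable();
	local_inc(&__get_cpu_var(nr_events));
	preempt_enable();
}
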
@@ -193,29 +193,4 @@ static __inline__ long local_sub_return(long i, local_t * l)
 #define __local_add(i, l)	((l)->a.counter+=(i))
 #define __local_sub(i, l)	((l)->a.counter-=(i))
 
-/* Need to disable preemption for the cpu local counters otherwise we could
-   still access a variable of a previous CPU in a non atomic way. */
-#define cpu_local_wrap_v(l)	 	\
-	({ local_t res__;		\
-	   preempt_disable(); 		\
-	   res__ = (l);			\
-	   preempt_enable();		\
-	   res__; })
-#define cpu_local_wrap(l)		\
-	({ preempt_disable();		\
-	   l;				\
-	   preempt_enable(); })		\
-
-#define cpu_local_read(l)    cpu_local_wrap_v(local_read(&__get_cpu_var(l)))
-#define cpu_local_set(l, i)  cpu_local_wrap(local_set(&__get_cpu_var(l), (i)))
-#define cpu_local_inc(l)     cpu_local_wrap(local_inc(&__get_cpu_var(l)))
-#define cpu_local_dec(l)     cpu_local_wrap(local_dec(&__get_cpu_var(l)))
-#define cpu_local_add(i, l)  cpu_local_wrap(local_add((i), &__get_cpu_var(l)))
-#define cpu_local_sub(i, l)  cpu_local_wrap(local_sub((i), &__get_cpu_var(l)))
-
-#define __cpu_local_inc(l)	cpu_local_inc(l)
-#define __cpu_local_dec(l)	cpu_local_dec(l)
-#define __cpu_local_add(i, l)	cpu_local_add((i), (l))
-#define __cpu_local_sub(i, l)	cpu_local_sub((i), (l))
-
 #endif /* _ARCH_MIPS_LOCAL_H */

@@ -172,29 +172,4 @@ static __inline__ long local_dec_if_positive(local_t *l)
 #define __local_add(i,l)	((l)->a.counter+=(i))
 #define __local_sub(i,l)	((l)->a.counter-=(i))
 
-/* Need to disable preemption for the cpu local counters otherwise we could
-   still access a variable of a previous CPU in a non atomic way. */
-#define cpu_local_wrap_v(l)	 	\
-	({ local_t res__;		\
-	   preempt_disable(); 		\
-	   res__ = (l);			\
-	   preempt_enable();		\
-	   res__; })
-#define cpu_local_wrap(l)		\
-	({ preempt_disable();		\
-	   l;				\
-	   preempt_enable(); })		\
-
-#define cpu_local_read(l)    cpu_local_wrap_v(local_read(&__get_cpu_var(l)))
-#define cpu_local_set(l, i)  cpu_local_wrap(local_set(&__get_cpu_var(l), (i)))
-#define cpu_local_inc(l)     cpu_local_wrap(local_inc(&__get_cpu_var(l)))
-#define cpu_local_dec(l)     cpu_local_wrap(local_dec(&__get_cpu_var(l)))
-#define cpu_local_add(i, l)  cpu_local_wrap(local_add((i), &__get_cpu_var(l)))
-#define cpu_local_sub(i, l)  cpu_local_wrap(local_sub((i), &__get_cpu_var(l)))
-
-#define __cpu_local_inc(l)	cpu_local_inc(l)
-#define __cpu_local_dec(l)	cpu_local_dec(l)
-#define __cpu_local_add(i, l)	cpu_local_add((i), (l))
-#define __cpu_local_sub(i, l)	cpu_local_sub((i), (l))
-
 #endif /* _ARCH_POWERPC_LOCAL_H */

@@ -195,41 +195,4 @@ static inline long local_sub_return(long i, local_t *l)
 #define __local_add(i, l)	local_add((i), (l))
 #define __local_sub(i, l)	local_sub((i), (l))
 
-/* Use these for per-cpu local_t variables: on some archs they are
- * much more efficient than these naive implementations.  Note they take
- * a variable, not an address.
- *
- * X86_64: This could be done better if we moved the per cpu data directly
- * after GS.
- */
-
-/* Need to disable preemption for the cpu local counters otherwise we could
-   still access a variable of a previous CPU in a non atomic way. */
-#define cpu_local_wrap_v(l)		\
-({					\
-	local_t res__;			\
-	preempt_disable(); 		\
-	res__ = (l);			\
-	preempt_enable();		\
-	res__;				\
-})
-#define cpu_local_wrap(l)		\
-({					\
-	preempt_disable();		\
-	(l);				\
-	preempt_enable();		\
-})					\
-
-#define cpu_local_read(l)    cpu_local_wrap_v(local_read(&__get_cpu_var((l))))
-#define cpu_local_set(l, i)  cpu_local_wrap(local_set(&__get_cpu_var((l)), (i)))
-#define cpu_local_inc(l)     cpu_local_wrap(local_inc(&__get_cpu_var((l))))
-#define cpu_local_dec(l)     cpu_local_wrap(local_dec(&__get_cpu_var((l))))
-#define cpu_local_add(i, l)  cpu_local_wrap(local_add((i), &__get_cpu_var((l))))
-#define cpu_local_sub(i, l)  cpu_local_wrap(local_sub((i), &__get_cpu_var((l))))
-
-#define __cpu_local_inc(l)	cpu_local_inc((l))
-#define __cpu_local_dec(l)	cpu_local_dec((l))
-#define __cpu_local_add(i, l)	cpu_local_add((i), (l))
-#define __cpu_local_sub(i, l)	cpu_local_sub((i), (l))
-
 #endif /* _ASM_X86_LOCAL_H */

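The removed X86_64 comment about placing per-CPU data "directly after GS" describes roughly what the this_cpu_*() operations of the same era provide: on x86 they compile to a single %gs-relative instruction, needing neither an explicit preempt_disable() nor a local_t for a simple counter. A hedged sketch of that replacement style (the counter name is hypothetical):

#include <linux/percpu.h>

/* Hypothetical per-CPU event counter. */
static DEFINE_PER_CPU(unsigned long, nr_events);

static void count_event(void)
{
	/* Safe against preemption and interrupts; one instruction on x86. */
	this_cpu_inc(nr_events);
}

static unsigned long events_on(int cpu)
{
	/* Read another CPU's copy (the value may be slightly stale). */
	return per_cpu(nr_events, cpu);
}
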
@@ -52,23 +52,4 @@ typedef struct
 #define __local_add(i,l)	local_set((l), local_read(l) + (i))
 #define __local_sub(i,l)	local_set((l), local_read(l) - (i))
 
-/* Use these for per-cpu local_t variables: on some archs they are
- * much more efficient than these naive implementations.  Note they take
- * a variable (eg. mystruct.foo), not an address.
- */
-#define cpu_local_read(l)	local_read(&__get_cpu_var(l))
-#define cpu_local_set(l, i)	local_set(&__get_cpu_var(l), (i))
-#define cpu_local_inc(l)	local_inc(&__get_cpu_var(l))
-#define cpu_local_dec(l)	local_dec(&__get_cpu_var(l))
-#define cpu_local_add(i, l)	local_add((i), &__get_cpu_var(l))
-#define cpu_local_sub(i, l)	local_sub((i), &__get_cpu_var(l))
-
-/* Non-atomic increments, ie. preemption disabled and won't be touched
- * in interrupt, etc.  Some archs can optimize this case well.
- */
-#define __cpu_local_inc(l)	__local_inc(&__get_cpu_var(l))
-#define __cpu_local_dec(l)	__local_dec(&__get_cpu_var(l))
-#define __cpu_local_add(i, l)	__local_add((i), &__get_cpu_var(l))
-#define __cpu_local_sub(i, l)	__local_sub((i), &__get_cpu_var(l))
-
 #endif /* _ASM_GENERIC_LOCAL_H */

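For completeness: the "typedef struct" in the generic hunk header is local_t itself, which the generic header builds on atomic_long_t; that is why the __local_*/__cpu_local_* variants above exist as a cheaper, non-atomic path for callers who already run with preemption disabled and never touch the counter from interrupt context. A paraphrased sketch of that layering (not the verbatim header):

#include <asm/atomic.h>

/* Roughly how asm-generic/local.h of this era defines local_t. */
typedef struct {
	atomic_long_t a;
} local_t;

#define local_read(l)	atomic_long_read(&(l)->a)	/* atomic */
#define local_set(l, i)	atomic_long_set(&(l)->a, (i))	/* atomic */
#define local_inc(l)	atomic_long_inc(&(l)->a)	/* atomic */

/* Non-atomic fast path: caller guarantees exclusive access. */
#define __local_inc(l)	local_set((l), local_read(l) + 1)
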