	kernel: remove fastcall in kernel/*
[akpm@linux-foundation.org: coding-style fixes]
Signed-off-by: Harvey Harrison <harvey.harrison@gmail.com>
Acked-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit 7ad5b3a505
parent fc9b52cd8f

12 changed files with 67 additions and 68 deletions
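For context on what the patch deletes: fastcall was a calling-convention annotation that only ever did something on 32-bit x86, where it expanded to GCC's regparm(3) attribute (pass the first three arguments in registers); every other architecture defined it away to nothing. A rough sketch of the old definitions (paraphrased from the pre-2.6.25 linkage headers, not a verbatim copy):

	/* 32-bit x86 (formerly include/asm-i386/linkage.h), roughly: */
	#define fastcall	__attribute__((regparm(3)))	/* args in %eax, %edx, %ecx */

	/* every other architecture (include/linux/linkage.h), roughly: */
	#ifndef fastcall
	#define fastcall	/* expands to nothing */
	#endif

Once x86-32 kernels were built with -mregparm=3 unconditionally, regparm(3) became the default convention for every function, so the annotation stopped affecting code generation and each removal below is purely mechanical.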
				
			
diff --git a/kernel/exit.c b/kernel/exit.c
@@ -458,7 +458,7 @@ struct files_struct *get_files_struct(struct task_struct *task)
 	return files;
 }
 
-void fastcall put_files_struct(struct files_struct *files)
+void put_files_struct(struct files_struct *files)
 {
 	struct fdtable *fdt;
 
@@ -887,7 +887,7 @@ static inline void exit_child_reaper(struct task_struct *tsk)
 	zap_pid_ns_processes(tsk->nsproxy->pid_ns);
 }
 
-fastcall NORET_TYPE void do_exit(long code)
+NORET_TYPE void do_exit(long code)
 {
 	struct task_struct *tsk = current;
 	int group_dead;

diff --git a/kernel/fork.c b/kernel/fork.c
@@ -390,7 +390,7 @@ struct mm_struct * mm_alloc(void)
  * is dropped: either by a lazy thread or by
  * mmput. Free the page directory and the mm.
  */
-void fastcall __mmdrop(struct mm_struct *mm)
+void __mmdrop(struct mm_struct *mm)
 {
 	BUG_ON(mm == &init_mm);
 	mm_free_pgd(mm);

diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
@@ -286,7 +286,7 @@ static inline void mask_ack_irq(struct irq_desc *desc, int irq)
 *	Note: The caller is expected to handle the ack, clear, mask and
 *	unmask issues if necessary.
 */
-void fastcall
+void
 handle_simple_irq(unsigned int irq, struct irq_desc *desc)
 {
 	struct irqaction *action;
@@ -327,7 +327,7 @@ out_unlock:
 *	it after the associated handler has acknowledged the device, so the
 *	interrupt line is back to inactive.
 */
-void fastcall
+void
 handle_level_irq(unsigned int irq, struct irq_desc *desc)
 {
 	unsigned int cpu = smp_processor_id();
@@ -375,7 +375,7 @@ out_unlock:
 *	for modern forms of interrupt handlers, which handle the flow
 *	details in hardware, transparently.
 */
-void fastcall
+void
 handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc)
 {
 	unsigned int cpu = smp_processor_id();
@@ -434,7 +434,7 @@ out:
 *	the handler was running. If all pending interrupts are handled, the
 *	loop is left.
 */
-void fastcall
+void
 handle_edge_irq(unsigned int irq, struct irq_desc *desc)
 {
 	const unsigned int cpu = smp_processor_id();
@@ -505,7 +505,7 @@ out_unlock:
 *
 *	Per CPU interrupts on SMP machines without locking requirements
 */
-void fastcall
+void
 handle_percpu_irq(unsigned int irq, struct irq_desc *desc)
 {
 	irqreturn_t action_ret;

diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
@@ -25,7 +25,7 @@
 *
 * Handles spurious and unhandled IRQ's. It also prints a debugmessage.
 */
-void fastcall
+void
 handle_bad_irq(unsigned int irq, struct irq_desc *desc)
 {
 	print_irq_desc(irq, desc);
@@ -163,7 +163,7 @@ irqreturn_t handle_IRQ_event(unsigned int irq, struct irqaction *action)
 * This is the original x86 implementation which is used for every
 * interrupt type.
 */
-fastcall unsigned int __do_IRQ(unsigned int irq)
+unsigned int __do_IRQ(unsigned int irq)
 {
 	struct irq_desc *desc = irq_desc + irq;
 	struct irqaction *action;

diff --git a/kernel/mutex-debug.c b/kernel/mutex-debug.c
@@ -107,7 +107,7 @@ void debug_mutex_init(struct mutex *lock, const char *name,
 * use of the mutex is forbidden. The mutex must not be locked when
 * this function is called.
 */
-void fastcall mutex_destroy(struct mutex *lock)
+void mutex_destroy(struct mutex *lock)
 {
 	DEBUG_LOCKS_WARN_ON(mutex_is_locked(lock));
 	lock->magic = NULL;

diff --git a/kernel/mutex.c b/kernel/mutex.c
@@ -58,7 +58,7 @@ EXPORT_SYMBOL(__mutex_init);
 * We also put the fastpath first in the kernel image, to make sure the
 * branch is predicted by the CPU as default-untaken.
 */
-static void fastcall noinline __sched
+static void noinline __sched
 __mutex_lock_slowpath(atomic_t *lock_count);
 
 /***
@@ -82,7 +82,7 @@ __mutex_lock_slowpath(atomic_t *lock_count);
 *
 * This function is similar to (but not equivalent to) down().
 */
-void inline fastcall __sched mutex_lock(struct mutex *lock)
+void inline __sched mutex_lock(struct mutex *lock)
 {
 	might_sleep();
 	/*
@@ -95,8 +95,7 @@ void inline fastcall __sched mutex_lock(struct mutex *lock)
 EXPORT_SYMBOL(mutex_lock);
 #endif
 
-static void fastcall noinline __sched
-__mutex_unlock_slowpath(atomic_t *lock_count);
+static noinline void __sched __mutex_unlock_slowpath(atomic_t *lock_count);
 
 /***
 * mutex_unlock - release the mutex
@@ -109,7 +108,7 @@ __mutex_unlock_slowpath(atomic_t *lock_count);
 *
 * This function is similar to (but not equivalent to) up().
 */
-void fastcall __sched mutex_unlock(struct mutex *lock)
+void __sched mutex_unlock(struct mutex *lock)
 {
 	/*
 	 * The unlocking fastpath is the 0->1 transition from 'locked'
@@ -234,7 +233,7 @@ EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);
 /*
 * Release the lock, slowpath:
 */
-static fastcall inline void
+static inline void
 __mutex_unlock_common_slowpath(atomic_t *lock_count, int nested)
 {
 	struct mutex *lock = container_of(lock_count, struct mutex, count);
@@ -271,7 +270,7 @@ __mutex_unlock_common_slowpath(atomic_t *lock_count, int nested)
 /*
 * Release the lock, slowpath:
 */
-static fastcall noinline void
+static noinline void
 __mutex_unlock_slowpath(atomic_t *lock_count)
 {
 	__mutex_unlock_common_slowpath(lock_count, 1);
@@ -282,10 +281,10 @@ __mutex_unlock_slowpath(atomic_t *lock_count)
 * Here come the less common (and hence less performance-critical) APIs:
 * mutex_lock_interruptible() and mutex_trylock().
 */
-static int fastcall noinline __sched
+static noinline int __sched
 __mutex_lock_killable_slowpath(atomic_t *lock_count);
 
-static noinline int fastcall __sched
+static noinline int __sched
 __mutex_lock_interruptible_slowpath(atomic_t *lock_count);
 
 /***
@@ -299,7 +298,7 @@ __mutex_lock_interruptible_slowpath(atomic_t *lock_count);
 *
 * This function is similar to (but not equivalent to) down_interruptible().
 */
-int fastcall __sched mutex_lock_interruptible(struct mutex *lock)
+int __sched mutex_lock_interruptible(struct mutex *lock)
 {
 	might_sleep();
 	return __mutex_fastpath_lock_retval
@@ -308,7 +307,7 @@ int fastcall __sched mutex_lock_interruptible(struct mutex *lock)
 
 EXPORT_SYMBOL(mutex_lock_interruptible);
 
-int fastcall __sched mutex_lock_killable(struct mutex *lock)
+int __sched mutex_lock_killable(struct mutex *lock)
 {
 	might_sleep();
 	return __mutex_fastpath_lock_retval
@@ -316,7 +315,7 @@ int fastcall __sched mutex_lock_killable(struct mutex *lock)
 }
 EXPORT_SYMBOL(mutex_lock_killable);
 
-static void fastcall noinline __sched
+static noinline void __sched
 __mutex_lock_slowpath(atomic_t *lock_count)
 {
 	struct mutex *lock = container_of(lock_count, struct mutex, count);
@@ -324,7 +323,7 @@ __mutex_lock_slowpath(atomic_t *lock_count)
 	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0, _RET_IP_);
 }
 
-static int fastcall noinline __sched
+static noinline int __sched
 __mutex_lock_killable_slowpath(atomic_t *lock_count)
 {
 	struct mutex *lock = container_of(lock_count, struct mutex, count);
@@ -332,7 +331,7 @@ __mutex_lock_killable_slowpath(atomic_t *lock_count)
 	return __mutex_lock_common(lock, TASK_KILLABLE, 0, _RET_IP_);
 }
 
-static noinline int fastcall __sched
+static noinline int __sched
 __mutex_lock_interruptible_slowpath(atomic_t *lock_count)
 {
 	struct mutex *lock = container_of(lock_count, struct mutex, count);
@@ -381,7 +380,7 @@ static inline int __mutex_trylock_slowpath(atomic_t *lock_count)
 * This function must not be used in interrupt context. The
 * mutex must be released by the same task that acquired it.
 */
-int fastcall __sched mutex_trylock(struct mutex *lock)
+int __sched mutex_trylock(struct mutex *lock)
 {
 	return __mutex_fastpath_trylock(&lock->count,
 					__mutex_trylock_slowpath);

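The declarations retouched above are the out-of-line slowpaths behind the mutex fastpath helpers, and the comment about keeping the fastpath branch "default-untaken" is why they are marked noinline. As a hedged illustration, modeled on the asm-generic decrement-based variant of that era (not a verbatim copy), the inline fastpath only calls through to a slowpath when the atomic count shows contention:

	/* Sketch of the generic fastpath (asm-generic/mutex-dec.h style):
	 * the uncontended case is a single atomic op; a count that goes
	 * negative means waiters exist, and only then do we branch to an
	 * out-of-line slowpath such as __mutex_lock_slowpath() above. */
	static inline void
	__mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
	{
		if (unlikely(atomic_dec_return(count) < 0))
			fail_fn(count);
	}
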
							
								
								
									
diff --git a/kernel/pid.c b/kernel/pid.c
@@ -111,7 +111,7 @@ EXPORT_SYMBOL(is_container_init);
 
 static  __cacheline_aligned_in_smp DEFINE_SPINLOCK(pidmap_lock);
 
-static fastcall void free_pidmap(struct pid_namespace *pid_ns, int pid)
+static void free_pidmap(struct pid_namespace *pid_ns, int pid)
 {
 	struct pidmap *map = pid_ns->pidmap + pid / BITS_PER_PAGE;
 	int offset = pid & BITS_PER_PAGE_MASK;
@@ -198,7 +198,7 @@ int next_pidmap(struct pid_namespace *pid_ns, int last)
 	return -1;
 }
 
-fastcall void put_pid(struct pid *pid)
+void put_pid(struct pid *pid)
 {
 	struct pid_namespace *ns;
 
@@ -220,7 +220,7 @@ static void delayed_put_pid(struct rcu_head *rhp)
 	put_pid(pid);
 }
 
-fastcall void free_pid(struct pid *pid)
+void free_pid(struct pid *pid)
 {
 	/* We can be called with write_lock_irq(&tasklist_lock) held */
 	int i;
@@ -286,7 +286,7 @@ out_free:
 	goto out;
 }
 
-struct pid * fastcall find_pid_ns(int nr, struct pid_namespace *ns)
+struct pid *find_pid_ns(int nr, struct pid_namespace *ns)
 {
 	struct hlist_node *elem;
 	struct upid *pnr;
@@ -316,7 +316,7 @@ EXPORT_SYMBOL_GPL(find_pid);
 /*
 * attach_pid() must be called with the tasklist_lock write-held.
 */
-int fastcall attach_pid(struct task_struct *task, enum pid_type type,
+int attach_pid(struct task_struct *task, enum pid_type type,
 		struct pid *pid)
 {
 	struct pid_link *link;
@@ -328,7 +328,7 @@ int fastcall attach_pid(struct task_struct *task, enum pid_type type,
 	return 0;
 }
 
-void fastcall detach_pid(struct task_struct *task, enum pid_type type)
+void detach_pid(struct task_struct *task, enum pid_type type)
 {
 	struct pid_link *link;
 	struct pid *pid;
@@ -348,7 +348,7 @@ void fastcall detach_pid(struct task_struct *task, enum pid_type type)
 }
 
 /* transfer_pid is an optimization of attach_pid(new), detach_pid(old) */
-void fastcall transfer_pid(struct task_struct *old, struct task_struct *new,
+void transfer_pid(struct task_struct *old, struct task_struct *new,
 			   enum pid_type type)
 {
 	new->pids[type].pid = old->pids[type].pid;
@@ -356,7 +356,7 @@ void fastcall transfer_pid(struct task_struct *old, struct task_struct *new,
 	old->pids[type].pid = NULL;
 }
 
-struct task_struct * fastcall pid_task(struct pid *pid, enum pid_type type)
+struct task_struct *pid_task(struct pid *pid, enum pid_type type)
 {
 	struct task_struct *result = NULL;
 	if (pid) {
@@ -408,7 +408,7 @@ struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
 	return pid;
 }
 
-struct task_struct *fastcall get_pid_task(struct pid *pid, enum pid_type type)
+struct task_struct *get_pid_task(struct pid *pid, enum pid_type type)
 {
 	struct task_struct *result;
 	rcu_read_lock();

diff --git a/kernel/sched.c b/kernel/sched.c
@@ -1893,13 +1893,13 @@ out:
 	return success;
 }
 
-int fastcall wake_up_process(struct task_struct *p)
+int wake_up_process(struct task_struct *p)
 {
 	return try_to_wake_up(p, TASK_ALL, 0);
 }
 EXPORT_SYMBOL(wake_up_process);
 
-int fastcall wake_up_state(struct task_struct *p, unsigned int state)
+int wake_up_state(struct task_struct *p, unsigned int state)
 {
 	return try_to_wake_up(p, state, 0);
 }
@@ -1986,7 +1986,7 @@ void sched_fork(struct task_struct *p, int clone_flags)
 * that must be done for every newly created context, then puts the task
 * on the runqueue and wakes it.
 */
-void fastcall wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
+void wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
 {
 	unsigned long flags;
 	struct rq *rq;
@@ -3753,7 +3753,7 @@ void scheduler_tick(void)
 
 #if defined(CONFIG_PREEMPT) && defined(CONFIG_DEBUG_PREEMPT)
 
-void fastcall add_preempt_count(int val)
+void add_preempt_count(int val)
 {
 	/*
 	 * Underflow?
@@ -3769,7 +3769,7 @@ void fastcall add_preempt_count(int val)
 }
 EXPORT_SYMBOL(add_preempt_count);
 
-void fastcall sub_preempt_count(int val)
+void sub_preempt_count(int val)
 {
 	/*
 	 * Underflow?
@@ -4067,7 +4067,7 @@ static void __wake_up_common(wait_queue_head_t *q, unsigned int mode,
 * @nr_exclusive: how many wake-one or wake-many threads to wake up
 * @key: is directly passed to the wakeup function
 */
-void fastcall __wake_up(wait_queue_head_t *q, unsigned int mode,
+void __wake_up(wait_queue_head_t *q, unsigned int mode,
 			int nr_exclusive, void *key)
 {
 	unsigned long flags;
@@ -4081,7 +4081,7 @@ EXPORT_SYMBOL(__wake_up);
 /*
 * Same as __wake_up but called with the spinlock in wait_queue_head_t held.
 */
-void fastcall __wake_up_locked(wait_queue_head_t *q, unsigned int mode)
+void __wake_up_locked(wait_queue_head_t *q, unsigned int mode)
 {
 	__wake_up_common(q, mode, 1, 0, NULL);
 }
@@ -4099,7 +4099,7 @@ void fastcall __wake_up_locked(wait_queue_head_t *q, unsigned int mode)
 *
 * On UP it can prevent extra preemption.
 */
-void fastcall
+void
 __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr_exclusive)
 {
 	unsigned long flags;

diff --git a/kernel/softirq.c b/kernel/softirq.c
@@ -320,7 +320,7 @@ void irq_exit(void)
 /*
 * This function must run with irqs disabled!
 */
-inline fastcall void raise_softirq_irqoff(unsigned int nr)
+inline void raise_softirq_irqoff(unsigned int nr)
 {
 	__raise_softirq_irqoff(nr);
 
@@ -337,7 +337,7 @@ inline fastcall void raise_softirq_irqoff(unsigned int nr)
 		wakeup_softirqd();
 }
 
-void fastcall raise_softirq(unsigned int nr)
+void raise_softirq(unsigned int nr)
 {
 	unsigned long flags;
 
@@ -363,7 +363,7 @@ struct tasklet_head
 static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec) = { NULL };
 static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec) = { NULL };
 
-void fastcall __tasklet_schedule(struct tasklet_struct *t)
+void __tasklet_schedule(struct tasklet_struct *t)
 {
 	unsigned long flags;
 
@@ -376,7 +376,7 @@ void fastcall __tasklet_schedule(struct tasklet_struct *t)
 
 EXPORT_SYMBOL(__tasklet_schedule);
 
-void fastcall __tasklet_hi_schedule(struct tasklet_struct *t)
+void __tasklet_hi_schedule(struct tasklet_struct *t)
 {
 	unsigned long flags;
 

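__tasklet_schedule() above is the slow path of the tasklet_schedule() wrapper, entered only when TASKLET_STATE_SCHED was not already set. A minimal usage sketch against the tasklet API of this era (the handler name and cookie value are made up for illustration):

	#include <linux/interrupt.h>

	/* hypothetical handler: tasklets of this vintage take an unsigned long cookie */
	static void my_tasklet_fn(unsigned long data)
	{
		/* runs in softirq context, so it must not sleep */
	}

	static DECLARE_TASKLET(my_tasklet, my_tasklet_fn, 0);

	/* typically called from an interrupt handler: */
	/* tasklet_schedule(&my_tasklet); */
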
diff --git a/kernel/timer.c b/kernel/timer.c
@@ -327,7 +327,7 @@ static void timer_stats_account_timer(struct timer_list *timer) {}
 * init_timer() must be done to a timer prior calling *any* of the
 * other timer functions.
 */
-void fastcall init_timer(struct timer_list *timer)
+void init_timer(struct timer_list *timer)
 {
 	timer->entry.next = NULL;
 	timer->base = __raw_get_cpu_var(tvec_bases);
@@ -339,7 +339,7 @@ void fastcall init_timer(struct timer_list *timer)
 }
 EXPORT_SYMBOL(init_timer);
 
-void fastcall init_timer_deferrable(struct timer_list *timer)
+void init_timer_deferrable(struct timer_list *timer)
 {
 	init_timer(timer);
 	timer_set_deferrable(timer);
@@ -1042,7 +1042,7 @@ static void process_timeout(unsigned long __data)
 *
 * In all cases the return value is guaranteed to be non-negative.
 */
-fastcall signed long __sched schedule_timeout(signed long timeout)
+signed long __sched schedule_timeout(signed long timeout)
 {
 	struct timer_list timer;
 	unsigned long expire;

diff --git a/kernel/wait.c b/kernel/wait.c
@@ -18,7 +18,7 @@ void init_waitqueue_head(wait_queue_head_t *q)
 
 EXPORT_SYMBOL(init_waitqueue_head);
 
-void fastcall add_wait_queue(wait_queue_head_t *q, wait_queue_t *wait)
+void add_wait_queue(wait_queue_head_t *q, wait_queue_t *wait)
 {
 	unsigned long flags;
 
@@ -29,7 +29,7 @@ void fastcall add_wait_queue(wait_queue_head_t *q, wait_queue_t *wait)
 }
 EXPORT_SYMBOL(add_wait_queue);
 
-void fastcall add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait)
+void add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait)
 {
 	unsigned long flags;
 
@@ -40,7 +40,7 @@ void fastcall add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait)
 }
 EXPORT_SYMBOL(add_wait_queue_exclusive);
 
-void fastcall remove_wait_queue(wait_queue_head_t *q, wait_queue_t *wait)
+void remove_wait_queue(wait_queue_head_t *q, wait_queue_t *wait)
 {
 	unsigned long flags;
 
@@ -63,7 +63,7 @@ EXPORT_SYMBOL(remove_wait_queue);
 * stops them from bleeding out - it would still allow subsequent
 * loads to move into the critical region).
 */
-void fastcall
+void
 prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state)
 {
 	unsigned long flags;
@@ -82,7 +82,7 @@ prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state)
 }
 EXPORT_SYMBOL(prepare_to_wait);
 
-void fastcall
+void
 prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state)
 {
 	unsigned long flags;
@@ -101,7 +101,7 @@ prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state)
 }
 EXPORT_SYMBOL(prepare_to_wait_exclusive);
 
-void fastcall finish_wait(wait_queue_head_t *q, wait_queue_t *wait)
+void finish_wait(wait_queue_head_t *q, wait_queue_t *wait)
 {
 	unsigned long flags;
 
@@ -157,7 +157,7 @@ EXPORT_SYMBOL(wake_bit_function);
 * waiting, the actions of __wait_on_bit() and __wait_on_bit_lock() are
 * permitted return codes. Nonzero return codes halt waiting and return.
 */
-int __sched fastcall
+int __sched
 __wait_on_bit(wait_queue_head_t *wq, struct wait_bit_queue *q,
 			int (*action)(void *), unsigned mode)
 {
@@ -173,7 +173,7 @@ __wait_on_bit(wait_queue_head_t *wq, struct wait_bit_queue *q,
 }
 EXPORT_SYMBOL(__wait_on_bit);
 
-int __sched fastcall out_of_line_wait_on_bit(void *word, int bit,
+int __sched out_of_line_wait_on_bit(void *word, int bit,
 					int (*action)(void *), unsigned mode)
 {
 	wait_queue_head_t *wq = bit_waitqueue(word, bit);
@@ -183,7 +183,7 @@ int __sched fastcall out_of_line_wait_on_bit(void *word, int bit,
 }
 EXPORT_SYMBOL(out_of_line_wait_on_bit);
 
-int __sched fastcall
+int __sched
 __wait_on_bit_lock(wait_queue_head_t *wq, struct wait_bit_queue *q,
 			int (*action)(void *), unsigned mode)
 {
@@ -201,7 +201,7 @@ __wait_on_bit_lock(wait_queue_head_t *wq, struct wait_bit_queue *q,
 }
 EXPORT_SYMBOL(__wait_on_bit_lock);
 
-int __sched fastcall out_of_line_wait_on_bit_lock(void *word, int bit,
+int __sched out_of_line_wait_on_bit_lock(void *word, int bit,
 					int (*action)(void *), unsigned mode)
 {
 	wait_queue_head_t *wq = bit_waitqueue(word, bit);
@@ -211,7 +211,7 @@ int __sched fastcall out_of_line_wait_on_bit_lock(void *word, int bit,
 }
 EXPORT_SYMBOL(out_of_line_wait_on_bit_lock);
 
-void fastcall __wake_up_bit(wait_queue_head_t *wq, void *word, int bit)
+void __wake_up_bit(wait_queue_head_t *wq, void *word, int bit)
 {
 	struct wait_bit_key key = __WAIT_BIT_KEY_INITIALIZER(word, bit);
 	if (waitqueue_active(wq))
@@ -236,13 +236,13 @@ EXPORT_SYMBOL(__wake_up_bit);
 * may need to use a less regular barrier, such fs/inode.c's smp_mb(),
 * because spin_unlock() does not guarantee a memory barrier.
 */
-void fastcall wake_up_bit(void *word, int bit)
+void wake_up_bit(void *word, int bit)
 {
 	__wake_up_bit(bit_waitqueue(word, bit), word, bit);
 }
 EXPORT_SYMBOL(wake_up_bit);
 
-fastcall wait_queue_head_t *bit_waitqueue(void *word, int bit)
+wait_queue_head_t *bit_waitqueue(void *word, int bit)
 {
 	const int shift = BITS_PER_LONG == 32 ? 5 : 6;
 	const struct zone *zone = page_zone(virt_to_page(word));

diff --git a/kernel/workqueue.c b/kernel/workqueue.c
@@ -161,7 +161,7 @@ static void __queue_work(struct cpu_workqueue_struct *cwq,
 * We queue the work to the CPU it was submitted, but there is no
 * guarantee that it will be processed by that CPU.
 */
-int fastcall queue_work(struct workqueue_struct *wq, struct work_struct *work)
+int queue_work(struct workqueue_struct *wq, struct work_struct *work)
 {
 	int ret = 0;
 
@@ -192,7 +192,7 @@ void delayed_work_timer_fn(unsigned long __data)
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 */
-int fastcall queue_delayed_work(struct workqueue_struct *wq,
+int queue_delayed_work(struct workqueue_struct *wq,
 			struct delayed_work *dwork, unsigned long delay)
 {
 	timer_stats_timer_set_start_info(&dwork->timer);
@@ -388,7 +388,7 @@ static int flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
 * This function used to run the workqueues itself.  Now we just wait for the
 * helper threads to do it.
 */
-void fastcall flush_workqueue(struct workqueue_struct *wq)
+void flush_workqueue(struct workqueue_struct *wq)
 {
 	const cpumask_t *cpu_map = wq_cpu_map(wq);
 	int cpu;
@@ -546,7 +546,7 @@ static struct workqueue_struct *keventd_wq __read_mostly;
 *
 * This puts a job in the kernel-global workqueue.
 */
-int fastcall schedule_work(struct work_struct *work)
+int schedule_work(struct work_struct *work)
 {
 	return queue_work(keventd_wq, work);
 }
@@ -560,7 +560,7 @@ EXPORT_SYMBOL(schedule_work);
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue.
 */
-int fastcall schedule_delayed_work(struct delayed_work *dwork,
+int schedule_delayed_work(struct delayed_work *dwork,
 					unsigned long delay)
 {
 	timer_stats_timer_set_start_info(&dwork->timer);

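The workqueue entry points keep their behavior; only the annotation goes. A minimal usage sketch against the post-2.6.20 work_struct API (the work item and handler names are illustrative only):

	#include <linux/workqueue.h>

	/* hypothetical deferred handler: runs later in process context via
	 * the kernel-global workqueue, so unlike an interrupt handler it
	 * is allowed to sleep */
	static void my_work_fn(struct work_struct *work)
	{
	}

	static DECLARE_WORK(my_work, my_work_fn);

	/* safe to call from atomic context; the handler runs later: */
	/* schedule_work(&my_work); */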