mirror of
				git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
				synced 2025-10-31 08:44:41 +00:00 
			
		
		
		
	block: Use the queue_flag_*() functions instead of open-coding these
Apart from converting the atomic queue-flag manipulations that are protected by the queue lock into non-atomic manipulations, this patch does not change any functionality. Cc: Christoph Hellwig <hch@lst.de> Cc: Hannes Reinecke <hare@suse.de> Cc: Ming Lei <ming.lei@redhat.com> Reviewed-by: Johannes Thumshirn <jthumshirn@suse.de> Reviewed-by: Martin K. Petersen <martin.petersen@oracle.com> Signed-off-by: Bart Van Assche <bart.vanassche@wdc.com> Signed-off-by: Jens Axboe <axboe@kernel.dk>
This commit is contained in:
		
							parent
							
								
									66f91322f3
								
							
						
					
					
						commit
						f78bac2c8e
					
				
					 4 changed files with 7 additions and 7 deletions
				
			
		|  | @ -994,7 +994,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id, | |||
| 	 * registered by blk_register_queue(). | ||||
| 	 */ | ||||
| 	q->bypass_depth = 1; | ||||
| 	__set_bit(QUEUE_FLAG_BYPASS, &q->queue_flags); | ||||
| 	queue_flag_set_unlocked(QUEUE_FLAG_BYPASS, q); | ||||
| 
 | ||||
| 	init_waitqueue_head(&q->mq_freeze_wq); | ||||
| 
 | ||||
|  |  | |||
|  | @ -2678,7 +2678,7 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set, | |||
| 	q->queue_flags |= QUEUE_FLAG_MQ_DEFAULT; | ||||
| 
 | ||||
| 	if (!(set->flags & BLK_MQ_F_SG_MERGE)) | ||||
| 		q->queue_flags |= 1 << QUEUE_FLAG_NO_SG_MERGE; | ||||
| 		queue_flag_set_unlocked(QUEUE_FLAG_NO_SG_MERGE, q); | ||||
| 
 | ||||
| 	q->sg_reserved_size = INT_MAX; | ||||
| 
 | ||||
|  |  | |||
|  | @ -861,9 +861,9 @@ void blk_queue_flush_queueable(struct request_queue *q, bool queueable) | |||
| { | ||||
| 	spin_lock_irq(q->queue_lock); | ||||
| 	if (queueable) | ||||
| 		clear_bit(QUEUE_FLAG_FLUSH_NQ, &q->queue_flags); | ||||
| 		queue_flag_clear(QUEUE_FLAG_FLUSH_NQ, q); | ||||
| 	else | ||||
| 		set_bit(QUEUE_FLAG_FLUSH_NQ, &q->queue_flags); | ||||
| 		queue_flag_set(QUEUE_FLAG_FLUSH_NQ, q); | ||||
| 	spin_unlock_irq(q->queue_lock); | ||||
| } | ||||
| EXPORT_SYMBOL_GPL(blk_queue_flush_queueable); | ||||
|  |  | |||
|  | @ -152,7 +152,7 @@ void blk_stat_add_callback(struct request_queue *q, | |||
| 
 | ||||
| 	spin_lock(&q->stats->lock); | ||||
| 	list_add_tail_rcu(&cb->list, &q->stats->callbacks); | ||||
| 	set_bit(QUEUE_FLAG_STATS, &q->queue_flags); | ||||
| 	queue_flag_set(QUEUE_FLAG_STATS, q); | ||||
| 	spin_unlock(&q->stats->lock); | ||||
| } | ||||
| EXPORT_SYMBOL_GPL(blk_stat_add_callback); | ||||
|  | @ -163,7 +163,7 @@ void blk_stat_remove_callback(struct request_queue *q, | |||
| 	spin_lock(&q->stats->lock); | ||||
| 	list_del_rcu(&cb->list); | ||||
| 	if (list_empty(&q->stats->callbacks) && !q->stats->enable_accounting) | ||||
| 		clear_bit(QUEUE_FLAG_STATS, &q->queue_flags); | ||||
| 		queue_flag_clear(QUEUE_FLAG_STATS, q); | ||||
| 	spin_unlock(&q->stats->lock); | ||||
| 
 | ||||
| 	del_timer_sync(&cb->timer); | ||||
|  | @ -191,7 +191,7 @@ void blk_stat_enable_accounting(struct request_queue *q) | |||
| { | ||||
| 	spin_lock(&q->stats->lock); | ||||
| 	q->stats->enable_accounting = true; | ||||
| 	set_bit(QUEUE_FLAG_STATS, &q->queue_flags); | ||||
| 	queue_flag_set(QUEUE_FLAG_STATS, q); | ||||
| 	spin_unlock(&q->stats->lock); | ||||
| } | ||||
| 
 | ||||
|  |  | |||
		Loading…
	
	Add table
		
		Reference in a new issue
	
	 Bart Van Assche
						Bart Van Assche