	blk-rq-qos: move rq_qos_add and rq_qos_del out of line
These two functions are rather large and not in a fast path, so move
them out of line.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Acked-by: Tejun Heo <tj@kernel.org>
Link: https://lore.kernel.org/r/20230203150400.3199230-13-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 4e1d91ae87
commit b494f9c566

2 changed files with 62 additions and 59 deletions
block/blk-rq-qos.c
@@ -294,3 +294,63 @@ void rq_qos_exit(struct request_queue *q)
 		rqos->ops->exit(rqos);
 	}
 }
+
+int rq_qos_add(struct request_queue *q, struct rq_qos *rqos)
+{
+	/*
+	 * No IO can be in-flight when adding rqos, so freeze queue, which
+	 * is fine since we only support rq_qos for blk-mq queue.
+	 *
+	 * Reuse ->queue_lock for protecting against other concurrent
+	 * rq_qos adding/deleting
+	 */
+	blk_mq_freeze_queue(q);
+
+	spin_lock_irq(&q->queue_lock);
+	if (rq_qos_id(q, rqos->id))
+		goto ebusy;
+	rqos->next = q->rq_qos;
+	q->rq_qos = rqos;
+	spin_unlock_irq(&q->queue_lock);
+
+	blk_mq_unfreeze_queue(q);
+
+	if (rqos->ops->debugfs_attrs) {
+		mutex_lock(&q->debugfs_mutex);
+		blk_mq_debugfs_register_rqos(rqos);
+		mutex_unlock(&q->debugfs_mutex);
+	}
+
+	return 0;
+ebusy:
+	spin_unlock_irq(&q->queue_lock);
+	blk_mq_unfreeze_queue(q);
+	return -EBUSY;
+
+}
+
+void rq_qos_del(struct request_queue *q, struct rq_qos *rqos)
+{
+	struct rq_qos **cur;
+
+	/*
+	 * See comment in rq_qos_add() about freezing queue & using
+	 * ->queue_lock.
+	 */
+	blk_mq_freeze_queue(q);
+
+	spin_lock_irq(&q->queue_lock);
+	for (cur = &q->rq_qos; *cur; cur = &(*cur)->next) {
+		if (*cur == rqos) {
+			*cur = rqos->next;
+			break;
+		}
+	}
+	spin_unlock_irq(&q->queue_lock);
+
+	blk_mq_unfreeze_queue(q);
+
+	mutex_lock(&q->debugfs_mutex);
+	blk_mq_debugfs_unregister_rqos(rqos);
+	mutex_unlock(&q->debugfs_mutex);
+}
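Aside (not part of the commit): the removal loop in rq_qos_del() above uses the pointer-to-pointer list idiom, so unlinking the head of the q->rq_qos chain needs no special case. Below is a minimal, standalone sketch of the same add/remove pattern with made-up type and function names, and without the queue freezing and locking that the real code needs:

/*
 * Standalone illustration (not kernel code): head insertion with a
 * duplicate-id check, plus removal via a pointer-to-pointer walk,
 * mirroring rq_qos_add()/rq_qos_del() minus freezing and locking.
 */
#include <stdio.h>
#include <stddef.h>

struct node {
	int id;
	struct node *next;
};

/* Insert at the head unless a node with the same id is already present. */
static int list_add_unique(struct node **head, struct node *n)
{
	struct node *cur;

	for (cur = *head; cur; cur = cur->next)
		if (cur->id == n->id)
			return -1;	/* mirrors the -EBUSY case */
	n->next = *head;
	*head = n;
	return 0;
}

/*
 * Unlink n, if present, by walking pointers-to-pointers: the head slot
 * and every ->next field are treated uniformly.
 */
static void list_del(struct node **head, struct node *n)
{
	struct node **cur;

	for (cur = head; *cur; cur = &(*cur)->next) {
		if (*cur == n) {
			*cur = n->next;
			break;
		}
	}
}

int main(void)
{
	struct node a = { .id = 1 }, b = { .id = 2 };
	struct node *head = NULL;

	list_add_unique(&head, &a);
	list_add_unique(&head, &b);
	printf("add duplicate: %d\n", list_add_unique(&head, &a)); /* -1 */
	list_del(&head, &b);
	printf("head id after del: %d\n", head ? head->id : 0);    /* 1 */
	return 0;
}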
block/blk-rq-qos.h
@@ -85,65 +85,8 @@ static inline void rq_wait_init(struct rq_wait *rq_wait)
 	init_waitqueue_head(&rq_wait->wait);
 }
 
-static inline int rq_qos_add(struct request_queue *q, struct rq_qos *rqos)
-{
-	/*
-	 * No IO can be in-flight when adding rqos, so freeze queue, which
-	 * is fine since we only support rq_qos for blk-mq queue.
-	 *
-	 * Reuse ->queue_lock for protecting against other concurrent
-	 * rq_qos adding/deleting
-	 */
-	blk_mq_freeze_queue(q);
-
-	spin_lock_irq(&q->queue_lock);
-	if (rq_qos_id(q, rqos->id))
-		goto ebusy;
-	rqos->next = q->rq_qos;
-	q->rq_qos = rqos;
-	spin_unlock_irq(&q->queue_lock);
-
-	blk_mq_unfreeze_queue(q);
-
-	if (rqos->ops->debugfs_attrs) {
-		mutex_lock(&q->debugfs_mutex);
-		blk_mq_debugfs_register_rqos(rqos);
-		mutex_unlock(&q->debugfs_mutex);
-	}
-
-	return 0;
-ebusy:
-	spin_unlock_irq(&q->queue_lock);
-	blk_mq_unfreeze_queue(q);
-	return -EBUSY;
-
-}
-
-static inline void rq_qos_del(struct request_queue *q, struct rq_qos *rqos)
-{
-	struct rq_qos **cur;
-
-	/*
-	 * See comment in rq_qos_add() about freezing queue & using
-	 * ->queue_lock.
-	 */
-	blk_mq_freeze_queue(q);
-
-	spin_lock_irq(&q->queue_lock);
-	for (cur = &q->rq_qos; *cur; cur = &(*cur)->next) {
-		if (*cur == rqos) {
-			*cur = rqos->next;
-			break;
-		}
-	}
-	spin_unlock_irq(&q->queue_lock);
-
-	blk_mq_unfreeze_queue(q);
-
-	mutex_lock(&q->debugfs_mutex);
-	blk_mq_debugfs_unregister_rqos(rqos);
-	mutex_unlock(&q->debugfs_mutex);
-}
+int rq_qos_add(struct request_queue *q, struct rq_qos *rqos);
+void rq_qos_del(struct request_queue *q, struct rq_qos *rqos);
 
 typedef bool (acquire_inflight_cb_t)(struct rq_wait *rqw, void *private_data);
 typedef void (cleanup_cb_t)(struct rq_wait *rqw, void *private_data);
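With the definitions now living in blk-rq-qos.c, the header only exposes the two declarations above. For context, a hypothetical policy would register and unregister itself roughly as below. This is an illustrative sketch, not code from this commit: the "foo" names and ops-table contents are invented, and only rq_qos_add()/rq_qos_del() plus the ->id, ->ops and ops->exit usages visible in the diff are taken from the source.

/*
 * Illustrative sketch only -- "foo" is a made-up rq_qos policy.
 * RQ_QOS_WBT is assumed here purely as an example id slot.
 */
#include "blk-rq-qos.h"

static void foo_rqos_exit(struct rq_qos *rqos)
{
	/* policy-specific teardown would go here */
}

static struct rq_qos_ops foo_rqos_ops = {
	.exit	= foo_rqos_exit,	/* invoked as rqos->ops->exit(rqos) */
};

/* Attach the policy; fails with -EBUSY if the id slot is already taken. */
static int foo_attach(struct request_queue *q, struct rq_qos *rqos)
{
	rqos->id = RQ_QOS_WBT;
	rqos->ops = &foo_rqos_ops;
	return rq_qos_add(q, rqos);
}

/* Detach: unlinks rqos from q->rq_qos and drops its debugfs entries. */
static void foo_detach(struct request_queue *q, struct rq_qos *rqos)
{
	rq_qos_del(q, rqos);
}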