mirror of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
synced 2025-10-31 08:44:41 +00:00
	blk-mq: remove __blk_mq_alloc_request
Move most code into blk_mq_rq_ctx_init, and the rest into blk_mq_get_request.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
commit e4cdf1a1cb
parent 5bbf4e5a8e

2 changed files with 27 additions and 47 deletions
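The net effect is easier to read outside the diff: the tag allocation that __blk_mq_alloc_request did moves up into blk_mq_get_request (its only remaining caller, which is why the EXPORT_SYMBOL_GPL and the header declaration can go), and everything it did with the tag moves down into blk_mq_rq_ctx_init. A condensed sketch of the resulting allocation path, with the same logic as the patched block/blk-mq.c but the elevator limit_depth/prepare_request handling trimmed for clarity:

static struct request *blk_mq_get_request(struct request_queue *q,
		struct bio *bio, unsigned int op,
		struct blk_mq_alloc_data *data)
{
	unsigned int tag;

	blk_queue_enter_live(q);
	data->q = q;

	/* tag allocation now happens directly in this function */
	tag = blk_mq_get_tag(data);
	if (tag == BLK_MQ_TAG_FAIL) {
		blk_queue_exit(q);
		return NULL;
	}

	/* all per-request setup, including tag bookkeeping, is in one helper */
	return blk_mq_rq_ctx_init(data, tag, op);
}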
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -204,15 +204,31 @@ bool blk_mq_can_queue(struct blk_mq_hw_ctx *hctx)
 }
 EXPORT_SYMBOL(blk_mq_can_queue);
 
-static void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx,
-		struct request *rq, unsigned int op)
+static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
+		unsigned int tag, unsigned int op)
 {
+	struct blk_mq_tags *tags = blk_mq_tags_from_data(data);
+	struct request *rq = tags->static_rqs[tag];
+
+	if (data->flags & BLK_MQ_REQ_INTERNAL) {
+		rq->tag = -1;
+		rq->internal_tag = tag;
+	} else {
+		if (blk_mq_tag_busy(data->hctx)) {
+			rq->rq_flags = RQF_MQ_INFLIGHT;
+			atomic_inc(&data->hctx->nr_active);
+		}
+		rq->tag = tag;
+		rq->internal_tag = -1;
+		data->hctx->tags->rqs[rq->tag] = rq;
+	}
+
 	INIT_LIST_HEAD(&rq->queuelist);
 	/* csd/requeue_work/fifo_time is initialized before use */
-	rq->q = q;
-	rq->mq_ctx = ctx;
+	rq->q = data->q;
+	rq->mq_ctx = data->ctx;
 	rq->cmd_flags = op;
-	if (blk_queue_io_stat(q))
+	if (blk_queue_io_stat(data->q))
 		rq->rq_flags |= RQF_IO_STAT;
 	/* do not touch atomic flags, it needs atomic ops against the timer */
 	rq->cpu = -1;
@@ -241,48 +257,17 @@ static void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx,
 	rq->end_io_data = NULL;
 	rq->next_rq = NULL;
 
-	ctx->rq_dispatched[op_is_sync(op)]++;
+	data->ctx->rq_dispatched[op_is_sync(op)]++;
+	return rq;
 }
 
-struct request *__blk_mq_alloc_request(struct blk_mq_alloc_data *data,
-				       unsigned int op)
-{
-	struct request *rq;
-	unsigned int tag;
-
-	tag = blk_mq_get_tag(data);
-	if (tag != BLK_MQ_TAG_FAIL) {
-		struct blk_mq_tags *tags = blk_mq_tags_from_data(data);
-
-		rq = tags->static_rqs[tag];
-
-		if (data->flags & BLK_MQ_REQ_INTERNAL) {
-			rq->tag = -1;
-			rq->internal_tag = tag;
-		} else {
-			if (blk_mq_tag_busy(data->hctx)) {
-				rq->rq_flags = RQF_MQ_INFLIGHT;
-				atomic_inc(&data->hctx->nr_active);
-			}
-			rq->tag = tag;
-			rq->internal_tag = -1;
-			data->hctx->tags->rqs[rq->tag] = rq;
-		}
-
-		blk_mq_rq_ctx_init(data->q, data->ctx, rq, op);
-		return rq;
-	}
-
-	return NULL;
-}
-EXPORT_SYMBOL_GPL(__blk_mq_alloc_request);
-
 static struct request *blk_mq_get_request(struct request_queue *q,
 		struct bio *bio, unsigned int op,
 		struct blk_mq_alloc_data *data)
 {
 	struct elevator_queue *e = q->elevator;
 	struct request *rq;
+	unsigned int tag;
 
 	blk_queue_enter_live(q);
 	data->q = q;
@@ -302,12 +287,13 @@ static struct request *blk_mq_get_request(struct request_queue *q,
 			e->type->ops.mq.limit_depth(op, data);
 	}
 
-	rq = __blk_mq_alloc_request(data, op);
-	if (!rq) {
+	tag = blk_mq_get_tag(data);
+	if (tag == BLK_MQ_TAG_FAIL) {
 		blk_queue_exit(q);
 		return NULL;
 	}
 
+	rq = blk_mq_rq_ctx_init(data, tag, op);
 	if (!op_is_flush(op)) {
 		rq->elv.icq = NULL;
 		if (e && e->type->ops.mq.prepare_request) {
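A note on the tag handling that now lives in blk_mq_rq_ctx_init: with BLK_MQ_REQ_INTERNAL set, the tag came from the I/O scheduler's tag set, so it is stored as internal_tag and a driver tag is acquired later at dispatch time; otherwise the request owns the driver tag immediately and is published in hctx->tags->rqs[] so completion code can map a tag back to its request. The same branch as in the hunk above, with explanatory comments added here:

	if (data->flags & BLK_MQ_REQ_INTERNAL) {
		/* scheduler tag only; a driver tag is acquired at dispatch */
		rq->tag = -1;
		rq->internal_tag = tag;
	} else {
		if (blk_mq_tag_busy(data->hctx)) {
			/* tag set shared between queues; track for fair use */
			rq->rq_flags = RQF_MQ_INFLIGHT;
			atomic_inc(&data->hctx->nr_active);
		}
		rq->tag = tag;
		rq->internal_tag = -1;
		/* publish the request for tag -> rq lookup on completion */
		data->hctx->tags->rqs[rq->tag] = rq;
	}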
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -128,12 +128,6 @@ static inline struct blk_mq_tags *blk_mq_tags_from_data(struct blk_mq_alloc_data
 	return data->hctx->tags;
 }
 
-/*
- * Internal helpers for request allocation/init/free
- */
-struct request *__blk_mq_alloc_request(struct blk_mq_alloc_data *data,
-					unsigned int op);
-
 static inline bool blk_mq_hctx_stopped(struct blk_mq_hw_ctx *hctx)
 {
 	return test_bit(BLK_MQ_S_STOPPED, &hctx->state);
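For context on how this now fully internal path is driven: external users go through blk_mq_alloc_request(), which fills a blk_mq_alloc_data on the stack and calls blk_mq_get_request(). A rough sketch of that caller as it looked around this commit, reconstructed rather than taken from this diff, so details such as ctx release and error handling are simplified:

struct request *blk_mq_alloc_request(struct request_queue *q, int rw,
		unsigned int flags)
{
	struct blk_mq_alloc_data alloc_data = { .flags = flags };
	struct request *rq;
	int ret;

	ret = blk_queue_enter(q, flags & BLK_MQ_REQ_NOWAIT);
	if (ret)
		return ERR_PTR(ret);

	/* allocates a tag and initializes the request in one pass */
	rq = blk_mq_get_request(q, NULL, rw, &alloc_data);

	blk_queue_exit(q);
	if (!rq)
		return ERR_PTR(-EWOULDBLOCK);
	return rq;
}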