block: Add a new helper to attempt to merge a bio
There is a lot of duplicated code for attempting to merge a bio with a request from the plug list and from the software queue. Introduce a new helper to attempt the merge, which simplifies blk_bio_list_merge() and blk_attempt_plug_merge().

Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Baolin Wang <baolin.wang@linux.alibaba.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent bdc6a287bc
commit 7d7ca7c526

3 changed files with 71 additions and 60 deletions
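The new helper added below, blk_attempt_bio_merge(), reports a tri-state result (enum bio_merge_status) so that both callers can drop their per-request switch statements and simply act on the returned status. The following is a minimal, self-contained userspace sketch of that pattern, not kernel code: the fake_rq/fake_bio structures, the attempt_merge()/list_merge() names, and the contiguity/size checks are made-up stand-ins chosen only to illustrate how a caller distinguishes "not a candidate" from "merge attempted but failed".

/*
 * Minimal sketch of the tri-state merge-status pattern this commit
 * introduces.  All types and helpers here are hypothetical stand-ins;
 * only the control flow mirrors the blk_attempt_bio_merge() /
 * blk_bio_list_merge() relationship in the diff below.
 */
#include <stdbool.h>
#include <stdio.h>

enum bio_merge_status {
	BIO_MERGE_OK,		/* bio merged into this request */
	BIO_MERGE_NONE,		/* not a candidate, keep scanning */
	BIO_MERGE_FAILED,	/* candidate found but merge failed, stop */
};

struct fake_rq {		/* stand-in for struct request */
	int sector;
	int size;
};

struct fake_bio {		/* stand-in for struct bio */
	int sector;
	int size;
};

/* Stand-in for blk_attempt_bio_merge(): try one request/bio pair. */
static enum bio_merge_status attempt_merge(struct fake_rq *rq,
					   const struct fake_bio *bio)
{
	if (rq->sector + rq->size != bio->sector)
		return BIO_MERGE_NONE;		/* not contiguous */
	if (rq->size + bio->size > 128)
		return BIO_MERGE_FAILED;	/* placeholder size limit */
	rq->size += bio->size;			/* "back merge" */
	return BIO_MERGE_OK;
}

/* Stand-in for blk_bio_list_merge(): walk a list, act on the status. */
static bool list_merge(struct fake_rq *rqs, int nr, const struct fake_bio *bio)
{
	for (int i = nr - 1; i >= 0; i--) {
		switch (attempt_merge(&rqs[i], bio)) {
		case BIO_MERGE_NONE:
			continue;		/* try the next request */
		case BIO_MERGE_OK:
			return true;
		case BIO_MERGE_FAILED:
			return false;		/* no point scanning further */
		}
	}
	return false;
}

int main(void)
{
	struct fake_rq rqs[] = { { 0, 8 }, { 64, 8 } };
	struct fake_bio bio = { 72, 8 };

	printf("merged: %s\n", list_merge(rqs, 2, &bio) ? "yes" : "no");
	return 0;
}

The point of the tri-state return is that BIO_MERGE_NONE means "keep scanning the list", while BIO_MERGE_FAILED means a suitable candidate was found but the merge could not be completed, so scanning further is pointless.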
@@ -907,13 +907,14 @@ static void blk_account_io_merge_bio(struct request *req)
 	part_stat_unlock();
 }
 
-bool bio_attempt_back_merge(struct request *req, struct bio *bio,
-		unsigned int nr_segs)
+enum bio_merge_status bio_attempt_back_merge(struct request *req,
+		struct bio *bio,
+		unsigned int nr_segs)
 {
 	const int ff = bio->bi_opf & REQ_FAILFAST_MASK;
 
 	if (!ll_back_merge_fn(req, bio, nr_segs))
-		return false;
+		return BIO_MERGE_FAILED;
 
 	trace_block_bio_backmerge(req->q, req, bio);
 	rq_qos_merge(req->q, req, bio);
@@ -928,16 +929,17 @@ bool bio_attempt_back_merge(struct request *req, struct bio *bio,
 	bio_crypt_free_ctx(bio);
 
 	blk_account_io_merge_bio(req);
-	return true;
+	return BIO_MERGE_OK;
 }
 
-bool bio_attempt_front_merge(struct request *req, struct bio *bio,
-		unsigned int nr_segs)
+enum bio_merge_status bio_attempt_front_merge(struct request *req,
+		struct bio *bio,
+		unsigned int nr_segs)
 {
 	const int ff = bio->bi_opf & REQ_FAILFAST_MASK;
 
 	if (!ll_front_merge_fn(req, bio, nr_segs))
-		return false;
+		return BIO_MERGE_FAILED;
 
 	trace_block_bio_frontmerge(req->q, req, bio);
 	rq_qos_merge(req->q, req, bio);
@@ -954,11 +956,12 @@ bool bio_attempt_front_merge(struct request *req, struct bio *bio,
 	bio_crypt_do_front_merge(req, bio);
 
 	blk_account_io_merge_bio(req);
-	return true;
+	return BIO_MERGE_OK;
 }
 
-bool bio_attempt_discard_merge(struct request_queue *q, struct request *req,
-		struct bio *bio)
+enum bio_merge_status bio_attempt_discard_merge(struct request_queue *q,
+		struct request *req,
+		struct bio *bio)
 {
 	unsigned short segments = blk_rq_nr_discard_segments(req);
 
@@ -976,10 +979,39 @@ bool bio_attempt_discard_merge(struct request_queue *q, struct request *req,
 	req->nr_phys_segments = segments + 1;
 
 	blk_account_io_merge_bio(req);
-	return true;
+	return BIO_MERGE_OK;
 no_merge:
 	req_set_nomerge(q, req);
-	return false;
+	return BIO_MERGE_FAILED;
+}
+
+static enum bio_merge_status blk_attempt_bio_merge(struct request_queue *q,
+		struct request *rq,
+		struct bio *bio,
+		unsigned int nr_segs,
+		bool sched_allow_merge)
+{
+	if (!blk_rq_merge_ok(rq, bio))
+		return BIO_MERGE_NONE;
+
+	switch (blk_try_merge(rq, bio)) {
+	case ELEVATOR_BACK_MERGE:
+		if (!sched_allow_merge ||
+		    (sched_allow_merge && blk_mq_sched_allow_merge(q, rq, bio)))
+			return bio_attempt_back_merge(rq, bio, nr_segs);
+		break;
+	case ELEVATOR_FRONT_MERGE:
+		if (!sched_allow_merge ||
+		    (sched_allow_merge && blk_mq_sched_allow_merge(q, rq, bio)))
+			return bio_attempt_front_merge(rq, bio, nr_segs);
+		break;
+	case ELEVATOR_DISCARD_MERGE:
+		return bio_attempt_discard_merge(q, rq, bio);
+	default:
+		return BIO_MERGE_NONE;
+	}
+
+	return BIO_MERGE_FAILED;
 }
 
 /**
@@ -1018,8 +1050,6 @@ bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
 		plug_list = &plug->mq_list;
 
 	list_for_each_entry_reverse(rq, plug_list, queuelist) {
-		bool merged = false;
-
 		if (rq->q == q && same_queue_rq) {
 			/*
 			 * Only blk-mq multiple hardware queues case checks the
@@ -1029,24 +1059,11 @@ bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
 			*same_queue_rq = rq;
 		}
 
-		if (rq->q != q || !blk_rq_merge_ok(rq, bio))
+		if (rq->q != q)
 			continue;
 
-		switch (blk_try_merge(rq, bio)) {
-		case ELEVATOR_BACK_MERGE:
-			merged = bio_attempt_back_merge(rq, bio, nr_segs);
-			break;
-		case ELEVATOR_FRONT_MERGE:
-			merged = bio_attempt_front_merge(rq, bio, nr_segs);
-			break;
-		case ELEVATOR_DISCARD_MERGE:
-			merged = bio_attempt_discard_merge(q, rq, bio);
-			break;
-		default:
-			break;
-		}
-
-		if (merged)
+		if (blk_attempt_bio_merge(q, rq, bio, nr_segs, false) ==
+		    BIO_MERGE_OK)
 			return true;
 	}
 
@@ -1064,33 +1081,18 @@ bool blk_bio_list_merge(struct request_queue *q, struct list_head *list,
 	int checked = 8;
 
 	list_for_each_entry_reverse(rq, list, queuelist) {
-		bool merged = false;
-
 		if (!checked--)
 			break;
 
-		if (!blk_rq_merge_ok(rq, bio))
+		switch (blk_attempt_bio_merge(q, rq, bio, nr_segs, true)) {
+		case BIO_MERGE_NONE:
 			continue;
-
-		switch (blk_try_merge(rq, bio)) {
-		case ELEVATOR_BACK_MERGE:
-			if (blk_mq_sched_allow_merge(q, rq, bio))
-				merged = bio_attempt_back_merge(rq, bio,
-						nr_segs);
-			break;
-		case ELEVATOR_FRONT_MERGE:
-			if (blk_mq_sched_allow_merge(q, rq, bio))
-				merged = bio_attempt_front_merge(rq, bio,
-						nr_segs);
-			break;
-		case ELEVATOR_DISCARD_MERGE:
-			merged = bio_attempt_discard_merge(q, rq, bio);
-			break;
-		default:
-			continue;
+		case BIO_MERGE_OK:
+			return true;
+		case BIO_MERGE_FAILED:
+			return false;
 		}
 
-		return merged;
 	}
 
 	return false;
@@ -368,7 +368,7 @@ bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio,
 	case ELEVATOR_BACK_MERGE:
 		if (!blk_mq_sched_allow_merge(q, rq, bio))
 			return false;
-		if (!bio_attempt_back_merge(rq, bio, nr_segs))
+		if (bio_attempt_back_merge(rq, bio, nr_segs) != BIO_MERGE_OK)
 			return false;
 		*merged_request = attempt_back_merge(q, rq);
 		if (!*merged_request)
@@ -377,14 +377,14 @@ bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio,
 	case ELEVATOR_FRONT_MERGE:
 		if (!blk_mq_sched_allow_merge(q, rq, bio))
 			return false;
-		if (!bio_attempt_front_merge(rq, bio, nr_segs))
+		if (bio_attempt_front_merge(rq, bio, nr_segs) != BIO_MERGE_OK)
 			return false;
 		*merged_request = attempt_front_merge(q, rq);
 		if (!*merged_request)
 			elv_merged_request(q, rq, ELEVATOR_FRONT_MERGE);
 		return true;
 	case ELEVATOR_DISCARD_MERGE:
-		return bio_attempt_discard_merge(q, rq, bio);
+		return bio_attempt_discard_merge(q, rq, bio) == BIO_MERGE_OK;
 	default:
 		return false;
 	}
block/blk.h (21 lines changed)

@@ -29,6 +29,12 @@ struct blk_flush_queue {
 	spinlock_t		mq_flush_lock;
 };
 
+enum bio_merge_status {
+	BIO_MERGE_OK,
+	BIO_MERGE_NONE,
+	BIO_MERGE_FAILED,
+};
+
 extern struct kmem_cache *blk_requestq_cachep;
 extern struct kobj_type blk_queue_ktype;
 extern struct ida blk_queue_ida;
@@ -169,12 +175,15 @@ static inline void blk_integrity_del(struct gendisk *disk)
 unsigned long blk_rq_timeout(unsigned long timeout);
 void blk_add_timer(struct request *req);
 
-bool bio_attempt_front_merge(struct request *req, struct bio *bio,
-		unsigned int nr_segs);
-bool bio_attempt_back_merge(struct request *req, struct bio *bio,
-		unsigned int nr_segs);
-bool bio_attempt_discard_merge(struct request_queue *q, struct request *req,
-		struct bio *bio);
+enum bio_merge_status bio_attempt_front_merge(struct request *req,
+		struct bio *bio,
+		unsigned int nr_segs);
+enum bio_merge_status bio_attempt_back_merge(struct request *req,
+		struct bio *bio,
+		unsigned int nr_segs);
+enum bio_merge_status bio_attempt_discard_merge(struct request_queue *q,
+		struct request *req,
+		struct bio *bio);
 bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
 		unsigned int nr_segs, struct request **same_queue_rq);
 bool blk_bio_list_merge(struct request_queue *q, struct list_head *list,