virtio_blk: reverse request order in virtio_queue_rqs
blk_mq_flush_plug_list submits requests in the reverse of the order in
which they were queued, which leads to a rather suboptimal I/O pattern,
especially on rotational devices. Fix this by rewriting virtio_queue_rqs
so that it always pops the requests from the passed-in request list and
then adds them to the head of a local submit list; that second reversal
restores the original submission order. This actually simplifies the
code a bit, as it removes the complicated list splicing, at the cost of
extra updates of the rq_next pointer. As that pointer should be cache
hot anyway, this is an easy price to pay.
Fixes: 0e9911fa76 ("virtio-blk: support mq_ops->queue_rqs()")
Signed-off-by: Christoph Hellwig <hch@lst.de>
Link: https://lore.kernel.org/r/20241113152050.157179-3-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
commit 7f212e997e
parent beadf00885

1 changed file (drivers/block/virtio_blk.c) with 21 additions and 25 deletions
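
Before the diff, a minimal userspace sketch of the double reversal the
message describes, assuming a toy struct req and hand-rolled pop/push
helpers in place of the kernel's rq_list API: popping from the head of
one singly linked list and pushing onto the head of another reverses
the order, so doing it to an already-reversed plug list restores the
original submission order.

/*
 * Illustrative userspace sketch (toy struct req, not the kernel's
 * rq_list API): plugging pushes requests onto the head of the plug
 * list, so the driver receives them in reverse submission order;
 * popping each one and pushing it onto the head of a local submit
 * list reverses the order a second time, restoring the original.
 */
#include <stdio.h>

struct req {
	int id;
	struct req *next;
};

/* Detach and return the first request, or NULL if the list is empty. */
static struct req *pop(struct req **list)
{
	struct req *r = *list;

	if (r)
		*list = r->next;
	return r;
}

/* Link a request in at the head of a list. */
static void push(struct req **list, struct req *r)
{
	r->next = *list;
	*list = r;
}

int main(void)
{
	struct req reqs[3] = { { 1, NULL }, { 2, NULL }, { 3, NULL } };
	struct req *plug = NULL, *submit = NULL, *r;
	int i;

	/* Plugging in submission order 1, 2, 3 leaves the list as 3, 2, 1. */
	for (i = 0; i < 3; i++)
		push(&plug, &reqs[i]);

	/* Pop from the plug list, push to the submit list: back to 1, 2, 3. */
	while ((r = pop(&plug)))
		push(&submit, r);

	for (r = submit; r; r = r->next)
		printf("req %d\n", r->id);	/* prints 1, 2, 3 */
	return 0;
}

The rewritten virtio_queue_rqs in the diff below applies the same
pop-then-push-to-head pattern to its local submit_list.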
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -471,18 +471,18 @@ static bool virtblk_prep_rq_batch(struct request *req)
 	return virtblk_prep_rq(req->mq_hctx, vblk, req, vbr) == BLK_STS_OK;
 }
 
-static bool virtblk_add_req_batch(struct virtio_blk_vq *vq,
+static void virtblk_add_req_batch(struct virtio_blk_vq *vq,
 					struct request **rqlist)
 {
+	struct request *req;
 	unsigned long flags;
-	int err;
 	bool kick;
 
 	spin_lock_irqsave(&vq->lock, flags);
 
-	while (!rq_list_empty(*rqlist)) {
-		struct request *req = rq_list_pop(rqlist);
+	while ((req = rq_list_pop(rqlist))) {
 		struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
+		int err;
 
 		err = virtblk_add_req(vq->vq, vbr);
 		if (err) {
@@ -495,37 +495,33 @@ static bool virtblk_add_req_batch(struct virtio_blk_vq *vq,
 
 	kick = virtqueue_kick_prepare(vq->vq);
 	spin_unlock_irqrestore(&vq->lock, flags);
 
-	return kick;
+	if (kick)
+		virtqueue_notify(vq->vq);
 }
 
 static void virtio_queue_rqs(struct request **rqlist)
 {
-	struct request *req, *next, *prev = NULL;
+	struct request *submit_list = NULL;
 	struct request *requeue_list = NULL;
+	struct request **requeue_lastp = &requeue_list;
+	struct virtio_blk_vq *vq = NULL;
+	struct request *req;
 
-	rq_list_for_each_safe(rqlist, req, next) {
-		struct virtio_blk_vq *vq = get_virtio_blk_vq(req->mq_hctx);
-		bool kick;
+	while ((req = rq_list_pop(rqlist))) {
+		struct virtio_blk_vq *this_vq = get_virtio_blk_vq(req->mq_hctx);
 
-		if (!virtblk_prep_rq_batch(req)) {
-			rq_list_move(rqlist, &requeue_list, req, prev);
-			req = prev;
-			if (!req)
-				continue;
-		}
+		if (vq && vq != this_vq)
+			virtblk_add_req_batch(vq, &submit_list);
+		vq = this_vq;
 
-		if (!next || req->mq_hctx != next->mq_hctx) {
-			req->rq_next = NULL;
-			kick = virtblk_add_req_batch(vq, rqlist);
-			if (kick)
-				virtqueue_notify(vq->vq);
-
-			*rqlist = next;
-			prev = NULL;
-		} else
-			prev = req;
+		if (virtblk_prep_rq_batch(req))
+			rq_list_add(&submit_list, req); /* reverse order */
+		else
+			rq_list_add_tail(&requeue_lastp, req);
 	}
 
+	if (vq)
+		virtblk_add_req_batch(vq, &submit_list);
 	*rqlist = requeue_list;
 }
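
One detail of the diff worth calling out: the requeue path uses
rq_list_add_tail through requeue_lastp, an indirect tail pointer, so
requeue_list keeps the order in which requests were popped instead of
being reversed like submit_list. A standalone sketch of that idiom,
again with a toy struct req rather than the kernel helpers:

/*
 * Standalone sketch of the indirect tail-pointer append idiom behind
 * requeue_lastp (a userspace toy, not the kernel's rq_list_add_tail):
 * keeping a pointer to the trailing next-field gives O(1) append and
 * works uniformly whether the list is empty or not.
 */
#include <stdio.h>

struct req {
	int id;
	struct req *next;
};

int main(void)
{
	struct req reqs[3] = { { 1, NULL }, { 2, NULL }, { 3, NULL } };
	struct req *requeue = NULL;		/* list head */
	struct req **lastp = &requeue;		/* trailing next-field */
	struct req *r;
	int i;

	for (i = 0; i < 3; i++) {
		/* Append: store through lastp, then advance it. */
		reqs[i].next = NULL;
		*lastp = &reqs[i];
		lastp = &reqs[i].next;
	}

	for (r = requeue; r; r = r->next)
		printf("req %d\n", r->id);	/* order preserved: 1, 2, 3 */
	return 0;
}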