mirror of
git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
synced 2025-04-13 09:59:31 +00:00
block: check BLK_FEAT_POLL under q_usage_count
Otherwise feature reconfiguration can race with I/O submission. Also drop the bio_clear_polled in the error path, as the flag does not matter for instant error completions; it is a leftover from when we allowed polled I/O to proceed unpolled in this case. Signed-off-by: Christoph Hellwig <hch@lst.de> Reviewed-by: Ming Lei <ming.lei@redhat.com> Reviewed-by: Nilay Shroff <nilay@linux.ibm.com> Reviewed-by: Martin K. Petersen <martin.petersen@oracle.com> Link: https://lore.kernel.org/r/20250110054726.1499538-4-hch@lst.de Signed-off-by: Jens Axboe <axboe@kernel.dk>
This commit is contained in:
parent
aa427d7b73
commit
958148a6ac
2 changed files with 22 additions and 12 deletions
|
@@ -629,8 +629,14 @@ static void __submit_bio(struct bio *bio)
|
|||
blk_mq_submit_bio(bio);
|
||||
} else if (likely(bio_queue_enter(bio) == 0)) {
|
||||
struct gendisk *disk = bio->bi_bdev->bd_disk;
|
||||
|
||||
disk->fops->submit_bio(bio);
|
||||
|
||||
if ((bio->bi_opf & REQ_POLLED) &&
|
||||
!(disk->queue->limits.features & BLK_FEAT_POLL)) {
|
||||
bio->bi_status = BLK_STS_NOTSUPP;
|
||||
bio_endio(bio);
|
||||
} else {
|
||||
disk->fops->submit_bio(bio);
|
||||
}
|
||||
blk_queue_exit(disk->queue);
|
||||
}
|
||||
|
||||
|
@@ -805,12 +811,6 @@ void submit_bio_noacct(struct bio *bio)
|
|||
}
|
||||
}
|
||||
|
||||
if (!(q->limits.features & BLK_FEAT_POLL) &&
|
||||
(bio->bi_opf & REQ_POLLED)) {
|
||||
bio_clear_polled(bio);
|
||||
goto not_supported;
|
||||
}
|
||||
|
||||
switch (bio_op(bio)) {
|
||||
case REQ_OP_READ:
|
||||
break;
|
||||
|
@@ -935,7 +935,7 @@ int bio_poll(struct bio *bio, struct io_comp_batch *iob, unsigned int flags)
|
|||
return 0;
|
||||
|
||||
q = bdev_get_queue(bdev);
|
||||
if (cookie == BLK_QC_T_NONE || !(q->limits.features & BLK_FEAT_POLL))
|
||||
if (cookie == BLK_QC_T_NONE)
|
||||
return 0;
|
||||
|
||||
blk_flush_plug(current->plug, false);
|
||||
|
@@ -951,7 +951,9 @@ int bio_poll(struct bio *bio, struct io_comp_batch *iob, unsigned int flags)
|
|||
*/
|
||||
if (!percpu_ref_tryget(&q->q_usage_counter))
|
||||
return 0;
|
||||
if (queue_is_mq(q)) {
|
||||
if (!(q->limits.features & BLK_FEAT_POLL)) {
|
||||
ret = 0;
|
||||
} else if (queue_is_mq(q)) {
|
||||
ret = blk_mq_poll(q, cookie, iob, flags);
|
||||
} else {
|
||||
struct gendisk *disk = q->disk;
|
||||
|
|
|
@@ -3096,14 +3096,22 @@ void blk_mq_submit_bio(struct bio *bio)
|
|||
}
|
||||
|
||||
/*
|
||||
* Device reconfiguration may change logical block size, so alignment
|
||||
* check has to be done with queue usage counter held
|
||||
* Device reconfiguration may change logical block size or reduce the
|
||||
* number of poll queues, so the checks for alignment and poll support
|
||||
* have to be done with queue usage counter held.
|
||||
*/
|
||||
if (unlikely(bio_unaligned(bio, q))) {
|
||||
bio_io_error(bio);
|
||||
goto queue_exit;
|
||||
}
|
||||
|
||||
if ((bio->bi_opf & REQ_POLLED) &&
|
||||
!(q->limits.features & BLK_FEAT_POLL)) {
|
||||
bio->bi_status = BLK_STS_NOTSUPP;
|
||||
bio_endio(bio);
|
||||
goto queue_exit;
|
||||
}
|
||||
|
||||
bio = __bio_split_to_limits(bio, &q->limits, &nr_segs);
|
||||
if (!bio)
|
||||
goto queue_exit;
|
||||
|
|
Loading…
Add table
Reference in a new issue