mirror of
git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
synced 2025-11-01 01:03:52 +00:00
dm mpath: fix bio-based multipath queue_if_no_path handling
Commit ca5beb76 ("dm mpath: micro-optimize the hot path relative to MPATHF_QUEUE_IF_NO_PATH") caused bio-based DM-multipath to fail mptest's "test_02_sdev_delete". Restoring the logic that existed prior to commit ca5beb76 fixes this bio-based DM-multipath regression. Also verified all mptest tests pass with request-based DM-multipath. This commit effectively reverts commit ca5beb76 -- but it does so without reintroducing the need to take the m->lock spinlock in must_push_back_{rq,bio}. Fixes: ca5beb76 ("dm mpath: micro-optimize the hot path relative to MPATHF_QUEUE_IF_NO_PATH") Cc: stable@vger.kernel.org # 4.12+ Signed-off-by: Mike Snitzer <snitzer@redhat.com>
This commit is contained in:
parent
7e6358d244
commit
c1fd0abee0
1 changed file with 42 additions and 7 deletions
|
|
@ -457,6 +457,38 @@ do { \
|
||||||
dm_noflush_suspending((m)->ti)); \
|
dm_noflush_suspending((m)->ti)); \
|
||||||
} while (0)
|
} while (0)
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Check whether bios must be queued in the device-mapper core rather
|
||||||
|
* than here in the target.
|
||||||
|
*
|
||||||
|
* If MPATHF_QUEUE_IF_NO_PATH and MPATHF_SAVED_QUEUE_IF_NO_PATH hold
|
||||||
|
* the same value then we are not between multipath_presuspend()
|
||||||
|
* and multipath_resume() calls and we have no need to check
|
||||||
|
* for the DMF_NOFLUSH_SUSPENDING flag.
|
||||||
|
*/
|
||||||
|
static bool __must_push_back(struct multipath *m, unsigned long flags)
|
||||||
|
{
|
||||||
|
return ((test_bit(MPATHF_QUEUE_IF_NO_PATH, &flags) !=
|
||||||
|
test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &flags)) &&
|
||||||
|
dm_noflush_suspending(m->ti));
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Following functions use READ_ONCE to get atomic access to
|
||||||
|
* all m->flags to avoid taking spinlock
|
||||||
|
*/
|
||||||
|
static bool must_push_back_rq(struct multipath *m)
|
||||||
|
{
|
||||||
|
unsigned long flags = READ_ONCE(m->flags);
|
||||||
|
return test_bit(MPATHF_QUEUE_IF_NO_PATH, &flags) || __must_push_back(m, flags);
|
||||||
|
}
|
||||||
|
|
||||||
|
static bool must_push_back_bio(struct multipath *m)
|
||||||
|
{
|
||||||
|
unsigned long flags = READ_ONCE(m->flags);
|
||||||
|
return __must_push_back(m, flags);
|
||||||
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Map cloned requests (request-based multipath)
|
* Map cloned requests (request-based multipath)
|
||||||
*/
|
*/
|
||||||
|
|
@ -478,7 +510,7 @@ static int multipath_clone_and_map(struct dm_target *ti, struct request *rq,
|
||||||
pgpath = choose_pgpath(m, nr_bytes);
|
pgpath = choose_pgpath(m, nr_bytes);
|
||||||
|
|
||||||
if (!pgpath) {
|
if (!pgpath) {
|
||||||
if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
|
if (must_push_back_rq(m))
|
||||||
return DM_MAPIO_DELAY_REQUEUE;
|
return DM_MAPIO_DELAY_REQUEUE;
|
||||||
dm_report_EIO(m); /* Failed */
|
dm_report_EIO(m); /* Failed */
|
||||||
return DM_MAPIO_KILL;
|
return DM_MAPIO_KILL;
|
||||||
|
|
@ -553,7 +585,7 @@ static int __multipath_map_bio(struct multipath *m, struct bio *bio, struct dm_m
|
||||||
}
|
}
|
||||||
|
|
||||||
if (!pgpath) {
|
if (!pgpath) {
|
||||||
if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
|
if (must_push_back_bio(m))
|
||||||
return DM_MAPIO_REQUEUE;
|
return DM_MAPIO_REQUEUE;
|
||||||
dm_report_EIO(m);
|
dm_report_EIO(m);
|
||||||
return DM_MAPIO_KILL;
|
return DM_MAPIO_KILL;
|
||||||
|
|
@ -651,8 +683,7 @@ static int queue_if_no_path(struct multipath *m, bool queue_if_no_path,
|
||||||
assign_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags,
|
assign_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags,
|
||||||
(save_old_value && test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) ||
|
(save_old_value && test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) ||
|
||||||
(!save_old_value && queue_if_no_path));
|
(!save_old_value && queue_if_no_path));
|
||||||
assign_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags,
|
assign_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags, queue_if_no_path);
|
||||||
queue_if_no_path || dm_noflush_suspending(m->ti));
|
|
||||||
spin_unlock_irqrestore(&m->lock, flags);
|
spin_unlock_irqrestore(&m->lock, flags);
|
||||||
|
|
||||||
if (!queue_if_no_path) {
|
if (!queue_if_no_path) {
|
||||||
|
|
@ -1486,7 +1517,7 @@ static int multipath_end_io(struct dm_target *ti, struct request *clone,
|
||||||
fail_path(pgpath);
|
fail_path(pgpath);
|
||||||
|
|
||||||
if (atomic_read(&m->nr_valid_paths) == 0 &&
|
if (atomic_read(&m->nr_valid_paths) == 0 &&
|
||||||
!test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) {
|
!must_push_back_rq(m)) {
|
||||||
if (error == BLK_STS_IOERR)
|
if (error == BLK_STS_IOERR)
|
||||||
dm_report_EIO(m);
|
dm_report_EIO(m);
|
||||||
/* complete with the original error */
|
/* complete with the original error */
|
||||||
|
|
@ -1521,8 +1552,12 @@ static int multipath_end_io_bio(struct dm_target *ti, struct bio *clone,
|
||||||
|
|
||||||
if (atomic_read(&m->nr_valid_paths) == 0 &&
|
if (atomic_read(&m->nr_valid_paths) == 0 &&
|
||||||
!test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) {
|
!test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) {
|
||||||
dm_report_EIO(m);
|
if (must_push_back_bio(m)) {
|
||||||
*error = BLK_STS_IOERR;
|
r = DM_ENDIO_REQUEUE;
|
||||||
|
} else {
|
||||||
|
dm_report_EIO(m);
|
||||||
|
*error = BLK_STS_IOERR;
|
||||||
|
}
|
||||||
goto done;
|
goto done;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
||||||
Loading…
Add table
Reference in a new issue