Mirror of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git (synced 2025-04-13 09:59:31 +00:00)
nvme: split nvme_kill_queues
nvme_kill_queues does two things:

 1) mark the gendisk of all namespaces dead
 2) unquiesce all I/O queues

These used to be intertwined due to block layer issues, but aren't any more. So move the unquiescing of the I/O queues into the callers, and rename the rest of the function to the now more descriptive nvme_mark_namespaces_dead.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
Reviewed-by: Chaitanya Kulkarni <kch@nvidia.com>
Link: https://lore.kernel.org/r/20221101150050.3510-8-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Parent: 6bcd5089ee
Commit: cd50f9b247
4 changed files with 15 additions and 33 deletions
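Every caller hunk below makes the same substitution: the single nvme_kill_queues() call becomes an explicit nvme_mark_namespaces_dead() followed by nvme_start_queues(). The standalone C program below is only a sketch of that split; its mock types and names (mock_ns, mock_ctrl, mock_*) are illustrative, not kernel code, and it models a namespace's gendisk and request queue as two booleans.

/*
 * Illustrative userspace mock of the split made by this patch -- not
 * kernel code.  The old nvme_kill_queues() did both steps internally;
 * after the patch the callers perform them explicitly, one after the other.
 */
#include <stdbool.h>
#include <stdio.h>

struct mock_ns {
        const char *name;
        bool disk_dead;   /* stands in for blk_mark_disk_dead(ns->disk) */
        bool quiesced;    /* stands in for a quiesced I/O queue */
};

struct mock_ctrl {
        struct mock_ns *ns;
        int nr_ns;
};

/* step 1: mark every namespace's disk dead so new I/O fails */
static void mock_mark_namespaces_dead(struct mock_ctrl *ctrl)
{
        for (int i = 0; i < ctrl->nr_ns; i++)
                ctrl->ns[i].disk_dead = true;
}

/* step 2: unquiesce the queues so already-blocked submitters can fail out */
static void mock_start_queues(struct mock_ctrl *ctrl)
{
        for (int i = 0; i < ctrl->nr_ns; i++)
                ctrl->ns[i].quiesced = false;
}

int main(void)
{
        struct mock_ns ns[] = {
                { "nvme0n1", false, true },
                { "nvme0n2", false, true },
        };
        struct mock_ctrl ctrl = { ns, 2 };

        /* what used to be a single nvme_kill_queues() call: */
        mock_mark_namespaces_dead(&ctrl);
        mock_start_queues(&ctrl);

        for (int i = 0; i < ctrl.nr_ns; i++)
                printf("%s: disk_dead=%d quiesced=%d\n",
                       ns[i].name, ns[i].disk_dead, ns[i].quiesced);
        return 0;
}

In the kernel the two steps are nvme_mark_namespaces_dead(ctrl) and nvme_start_queues(ctrl), exactly as the caller hunks below show.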
drivers/nvme/host/apple.c
@@ -1153,7 +1153,8 @@ out:
         nvme_change_ctrl_state(&anv->ctrl, NVME_CTRL_DELETING);
         nvme_get_ctrl(&anv->ctrl);
         apple_nvme_disable(anv, false);
-        nvme_kill_queues(&anv->ctrl);
+        nvme_mark_namespaces_dead(&anv->ctrl);
+        nvme_start_queues(&anv->ctrl);
         if (!queue_work(nvme_wq, &anv->remove_work))
                 nvme_put_ctrl(&anv->ctrl);
 }
drivers/nvme/host/core.c
@@ -4561,8 +4561,10 @@ void nvme_remove_namespaces(struct nvme_ctrl *ctrl)
          * removing the namespaces' disks; fail all the queues now to avoid
          * potentially having to clean up the failed sync later.
          */
-        if (ctrl->state == NVME_CTRL_DEAD)
-                nvme_kill_queues(ctrl);
+        if (ctrl->state == NVME_CTRL_DEAD) {
+                nvme_mark_namespaces_dead(ctrl);
+                nvme_start_queues(ctrl);
+        }
 
         /* this is a no-op when called from the controller reset handler */
         nvme_change_ctrl_state(ctrl, NVME_CTRL_DELETING_NOIO);
@@ -5108,39 +5110,17 @@ static void nvme_stop_ns_queue(struct nvme_ns *ns)
                 blk_mq_wait_quiesce_done(ns->queue);
 }
 
-/*
- * Prepare a queue for teardown.
- *
- * This must forcibly unquiesce queues to avoid blocking dispatch.
- */
-static void nvme_set_queue_dying(struct nvme_ns *ns)
-{
-        if (test_and_set_bit(NVME_NS_DEAD, &ns->flags))
-                return;
-
-        blk_mark_disk_dead(ns->disk);
-        nvme_start_ns_queue(ns);
-}
-
-/**
- * nvme_kill_queues(): Ends all namespace queues
- * @ctrl: the dead controller that needs to end
- *
- * Call this function when the driver determines it is unable to get the
- * controller in a state capable of servicing IO.
- */
-void nvme_kill_queues(struct nvme_ctrl *ctrl)
+/* let I/O to all namespaces fail in preparation for surprise removal */
+void nvme_mark_namespaces_dead(struct nvme_ctrl *ctrl)
 {
         struct nvme_ns *ns;
 
         down_read(&ctrl->namespaces_rwsem);
-
         list_for_each_entry(ns, &ctrl->namespaces, list)
-                nvme_set_queue_dying(ns);
-
+                blk_mark_disk_dead(ns->disk);
         up_read(&ctrl->namespaces_rwsem);
 }
-EXPORT_SYMBOL_GPL(nvme_kill_queues);
+EXPORT_SYMBOL_GPL(nvme_mark_namespaces_dead);
 
 void nvme_unfreeze(struct nvme_ctrl *ctrl)
 {
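Assembled from the added and context lines of the hunk above (shown here only for readability), the replacement helper reads as follows once the patch is applied:

/* let I/O to all namespaces fail in preparation for surprise removal */
void nvme_mark_namespaces_dead(struct nvme_ctrl *ctrl)
{
        struct nvme_ns *ns;

        down_read(&ctrl->namespaces_rwsem);
        list_for_each_entry(ns, &ctrl->namespaces, list)
                blk_mark_disk_dead(ns->disk);
        up_read(&ctrl->namespaces_rwsem);
}
EXPORT_SYMBOL_GPL(nvme_mark_namespaces_dead);

Compared with the old nvme_kill_queues(), the per-namespace NVME_NS_DEAD guard and the nvme_start_ns_queue() unquiesce are gone; callers now unquiesce with a single nvme_start_queues() call, as the remaining hunks show.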
drivers/nvme/host/nvme.h
@@ -483,7 +483,6 @@ struct nvme_ns {
         unsigned long features;
         unsigned long flags;
 #define NVME_NS_REMOVING 0
-#define NVME_NS_DEAD 1
 #define NVME_NS_ANA_PENDING 2
 #define NVME_NS_FORCE_RO 3
 #define NVME_NS_READY 4
@@ -758,7 +757,7 @@ void nvme_stop_queues(struct nvme_ctrl *ctrl);
 void nvme_start_queues(struct nvme_ctrl *ctrl);
 void nvme_stop_admin_queue(struct nvme_ctrl *ctrl);
 void nvme_start_admin_queue(struct nvme_ctrl *ctrl);
-void nvme_kill_queues(struct nvme_ctrl *ctrl);
+void nvme_mark_namespaces_dead(struct nvme_ctrl *ctrl);
 void nvme_sync_queues(struct nvme_ctrl *ctrl);
 void nvme_sync_io_queues(struct nvme_ctrl *ctrl);
 void nvme_unfreeze(struct nvme_ctrl *ctrl);
drivers/nvme/host/pci.c
@@ -2788,7 +2788,8 @@ static void nvme_remove_dead_ctrl(struct nvme_dev *dev)
         nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING);
         nvme_get_ctrl(&dev->ctrl);
         nvme_dev_disable(dev, false);
-        nvme_kill_queues(&dev->ctrl);
+        nvme_mark_namespaces_dead(&dev->ctrl);
+        nvme_start_queues(&dev->ctrl);
         if (!queue_work(nvme_wq, &dev->remove_work))
                 nvme_put_ctrl(&dev->ctrl);
 }
@@ -2913,7 +2914,8 @@ static void nvme_reset_work(struct work_struct *work)
                 nvme_unfreeze(&dev->ctrl);
         } else {
                 dev_warn(dev->ctrl.device, "IO queues lost\n");
-                nvme_kill_queues(&dev->ctrl);
+                nvme_mark_namespaces_dead(&dev->ctrl);
+                nvme_start_queues(&dev->ctrl);
                 nvme_remove_namespaces(&dev->ctrl);
                 nvme_free_tagset(dev);
         }