block: force noio scope in blk_mq_freeze_queue

When block drivers or the core block code perform allocations with a
frozen queue, the allocation can recurse into the block device to
reclaim memory and deadlock.  Thus all allocations done by a process
that froze a queue need to be done without __GFP_IO and __GFP_FS.
Instead of trying to track all of them down, force a noio scope as
part of freezing the queue.

Note that nvme is a bit of a mess here due to the non-owner freezes,
and they will be addressed separately.
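
As an illustration, the caller pattern after this change looks like
the sketch below (the helper is hypothetical; only the freeze /
unfreeze interface is what this series introduces):

        static void example_disable_merges(struct request_queue *q)
        {
                unsigned int memflags;

                /* freezing now also enters a memalloc_noio scope */
                memflags = blk_mq_freeze_queue(q);
                /* allocations here implicitly lack __GFP_IO/__GFP_FS */
                blk_queue_flag_set(QUEUE_FLAG_NOMERGES, q);
                /* unfreezing restores the previous allocation scope */
                blk_mq_unfreeze_queue(q, memflags);
        }

Callers that freeze several queues in one go instead do a single
memalloc_noio_save() and use the new _nomemsave/_nomemrestore
variants per queue, as __blk_mq_update_nr_hw_queues() does below.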

Signed-off-by: Christoph Hellwig <hch@lst.de>
Link: https://lore.kernel.org/r/20250131120352.1315351-2-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Author:    Christoph Hellwig <hch@lst.de>
Date:      2025-01-31 13:03:47 +01:00
Committer: Jens Axboe
commit 1e1a9cecfa, parent 14ef49657f
26 changed files with 136 additions and 84 deletions

--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c

@@ -1545,6 +1545,7 @@ int blkcg_activate_policy(struct gendisk *disk, const struct blkcg_policy *pol)
         struct request_queue *q = disk->queue;
         struct blkg_policy_data *pd_prealloc = NULL;
         struct blkcg_gq *blkg, *pinned_blkg = NULL;
+        unsigned int memflags;
         int ret;
 
         if (blkcg_policy_enabled(q, pol))
@@ -1559,7 +1560,7 @@ int blkcg_activate_policy(struct gendisk *disk, const struct blkcg_policy *pol)
                 return -EINVAL;
 
         if (queue_is_mq(q))
-                blk_mq_freeze_queue(q);
+                memflags = blk_mq_freeze_queue(q);
 retry:
         spin_lock_irq(&q->queue_lock);
@@ -1623,7 +1624,7 @@ retry:
         spin_unlock_irq(&q->queue_lock);
 out:
         if (queue_is_mq(q))
-                blk_mq_unfreeze_queue(q);
+                blk_mq_unfreeze_queue(q, memflags);
         if (pinned_blkg)
                 blkg_put(pinned_blkg);
         if (pd_prealloc)
@@ -1667,12 +1668,13 @@ void blkcg_deactivate_policy(struct gendisk *disk,
 {
         struct request_queue *q = disk->queue;
         struct blkcg_gq *blkg;
+        unsigned int memflags;
 
         if (!blkcg_policy_enabled(q, pol))
                 return;
 
         if (queue_is_mq(q))
-                blk_mq_freeze_queue(q);
+                memflags = blk_mq_freeze_queue(q);
 
         mutex_lock(&q->blkcg_mutex);
         spin_lock_irq(&q->queue_lock);
@@ -1696,7 +1698,7 @@ void blkcg_deactivate_policy(struct gendisk *disk,
         mutex_unlock(&q->blkcg_mutex);
 
         if (queue_is_mq(q))
-                blk_mq_unfreeze_queue(q);
+                blk_mq_unfreeze_queue(q, memflags);
 }
 EXPORT_SYMBOL_GPL(blkcg_deactivate_policy);

--- a/block/blk-iocost.c
+++ b/block/blk-iocost.c

@@ -3224,6 +3224,7 @@ static ssize_t ioc_qos_write(struct kernfs_open_file *of, char *input,
         u32 qos[NR_QOS_PARAMS];
         bool enable, user;
         char *body, *p;
+        unsigned int memflags;
         int ret;
 
         blkg_conf_init(&ctx, input);
@@ -3247,7 +3248,7 @@ static ssize_t ioc_qos_write(struct kernfs_open_file *of, char *input,
                 ioc = q_to_ioc(disk->queue);
         }
 
-        blk_mq_freeze_queue(disk->queue);
+        memflags = blk_mq_freeze_queue(disk->queue);
         blk_mq_quiesce_queue(disk->queue);
 
         spin_lock_irq(&ioc->lock);
@@ -3347,7 +3348,7 @@ static ssize_t ioc_qos_write(struct kernfs_open_file *of, char *input,
                 wbt_enable_default(disk);
 
         blk_mq_unquiesce_queue(disk->queue);
-        blk_mq_unfreeze_queue(disk->queue);
+        blk_mq_unfreeze_queue(disk->queue, memflags);
 
         blkg_conf_exit(&ctx);
         return nbytes;
@@ -3355,7 +3356,7 @@ einval:
         spin_unlock_irq(&ioc->lock);
 
         blk_mq_unquiesce_queue(disk->queue);
-        blk_mq_unfreeze_queue(disk->queue);
+        blk_mq_unfreeze_queue(disk->queue, memflags);
 
         ret = -EINVAL;
 err:
@@ -3414,6 +3415,7 @@ static ssize_t ioc_cost_model_write(struct kernfs_open_file *of, char *input,
 {
         struct blkg_conf_ctx ctx;
         struct request_queue *q;
+        unsigned int memflags;
         struct ioc *ioc;
         u64 u[NR_I_LCOEFS];
         bool user;
@@ -3441,7 +3443,7 @@ static ssize_t ioc_cost_model_write(struct kernfs_open_file *of, char *input,
                 ioc = q_to_ioc(q);
         }
 
-        blk_mq_freeze_queue(q);
+        memflags = blk_mq_freeze_queue(q);
         blk_mq_quiesce_queue(q);
 
         spin_lock_irq(&ioc->lock);
@@ -3493,7 +3495,7 @@ static ssize_t ioc_cost_model_write(struct kernfs_open_file *of, char *input,
         spin_unlock_irq(&ioc->lock);
 
         blk_mq_unquiesce_queue(q);
-        blk_mq_unfreeze_queue(q);
+        blk_mq_unfreeze_queue(q, memflags);
 
         blkg_conf_exit(&ctx);
         return nbytes;
@@ -3502,7 +3504,7 @@ einval:
         spin_unlock_irq(&ioc->lock);
 
         blk_mq_unquiesce_queue(q);
-        blk_mq_unfreeze_queue(q);
+        blk_mq_unfreeze_queue(q, memflags);
 
         ret = -EINVAL;
 err:

--- a/block/blk-iolatency.c
+++ b/block/blk-iolatency.c

@@ -749,9 +749,11 @@ static void blkiolatency_enable_work_fn(struct work_struct *work)
          */
         enabled = atomic_read(&blkiolat->enable_cnt);
         if (enabled != blkiolat->enabled) {
-                blk_mq_freeze_queue(blkiolat->rqos.disk->queue);
+                unsigned int memflags;
+
+                memflags = blk_mq_freeze_queue(blkiolat->rqos.disk->queue);
                 blkiolat->enabled = enabled;
-                blk_mq_unfreeze_queue(blkiolat->rqos.disk->queue);
+                blk_mq_unfreeze_queue(blkiolat->rqos.disk->queue, memflags);
         }
 }

--- a/block/blk-mq.c
+++ b/block/blk-mq.c

@@ -210,12 +210,12 @@ int blk_mq_freeze_queue_wait_timeout(struct request_queue *q,
 }
 EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_wait_timeout);
 
-void blk_mq_freeze_queue(struct request_queue *q)
+void blk_mq_freeze_queue_nomemsave(struct request_queue *q)
 {
         blk_freeze_queue_start(q);
         blk_mq_freeze_queue_wait(q);
 }
-EXPORT_SYMBOL_GPL(blk_mq_freeze_queue);
+EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_nomemsave);
 
 bool __blk_mq_unfreeze_queue(struct request_queue *q, bool force_atomic)
 {
@@ -236,12 +236,12 @@ bool __blk_mq_unfreeze_queue(struct request_queue *q, bool force_atomic)
         return unfreeze;
 }
 
-void blk_mq_unfreeze_queue(struct request_queue *q)
+void blk_mq_unfreeze_queue_nomemrestore(struct request_queue *q)
 {
         if (__blk_mq_unfreeze_queue(q, false))
                 blk_unfreeze_release_lock(q);
 }
-EXPORT_SYMBOL_GPL(blk_mq_unfreeze_queue);
+EXPORT_SYMBOL_GPL(blk_mq_unfreeze_queue_nomemrestore);
 
 /*
  * non_owner variant of blk_freeze_queue_start
@@ -4223,13 +4223,14 @@ static void blk_mq_update_tag_set_shared(struct blk_mq_tag_set *set,
                                          bool shared)
 {
         struct request_queue *q;
+        unsigned int memflags;
 
         lockdep_assert_held(&set->tag_list_lock);
 
         list_for_each_entry(q, &set->tag_list, tag_set_list) {
-                blk_mq_freeze_queue(q);
+                memflags = blk_mq_freeze_queue(q);
                 queue_set_hctx_shared(q, shared);
-                blk_mq_unfreeze_queue(q);
+                blk_mq_unfreeze_queue(q, memflags);
         }
 }
 
@@ -4992,6 +4993,7 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
         struct request_queue *q;
         LIST_HEAD(head);
         int prev_nr_hw_queues = set->nr_hw_queues;
+        unsigned int memflags;
         int i;
 
         lockdep_assert_held(&set->tag_list_lock);
@@ -5003,8 +5005,10 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
         if (set->nr_maps == 1 && nr_hw_queues == set->nr_hw_queues)
                 return;
 
+        memflags = memalloc_noio_save();
         list_for_each_entry(q, &set->tag_list, tag_set_list)
-                blk_mq_freeze_queue(q);
+                blk_mq_freeze_queue_nomemsave(q);
+
         /*
          * Switch IO scheduler to 'none', cleaning up the data associated
          * with the previous scheduler. We will switch back once we are done
@@ -5052,7 +5056,8 @@ switch_back:
                 blk_mq_elv_switch_back(&head, q);
 
         list_for_each_entry(q, &set->tag_list, tag_set_list)
-                blk_mq_unfreeze_queue(q);
+                blk_mq_unfreeze_queue_nomemrestore(q);
+        memalloc_noio_restore(memflags);
 
         /* Free the excess tags when nr_hw_queues shrink. */
         for (i = set->nr_hw_queues; i < prev_nr_hw_queues; i++)

--- a/block/blk-pm.c
+++ b/block/blk-pm.c

@@ -89,7 +89,7 @@ int blk_pre_runtime_suspend(struct request_queue *q)
         if (percpu_ref_is_zero(&q->q_usage_counter))
                 ret = 0;
         /* Switch q_usage_counter back to per-cpu mode. */
-        blk_mq_unfreeze_queue(q);
+        blk_mq_unfreeze_queue_nomemrestore(q);
 
         if (ret < 0) {
                 spin_lock_irq(&q->queue_lock);

--- a/block/blk-rq-qos.c
+++ b/block/blk-rq-qos.c

@@ -299,6 +299,7 @@ int rq_qos_add(struct rq_qos *rqos, struct gendisk *disk, enum rq_qos_id id,
                 const struct rq_qos_ops *ops)
 {
         struct request_queue *q = disk->queue;
+        unsigned int memflags;
 
         lockdep_assert_held(&q->rq_qos_mutex);
 
@@ -310,14 +311,14 @@ int rq_qos_add(struct rq_qos *rqos, struct gendisk *disk, enum rq_qos_id id,
          * No IO can be in-flight when adding rqos, so freeze queue, which
          * is fine since we only support rq_qos for blk-mq queue.
          */
-        blk_mq_freeze_queue(q);
+        memflags = blk_mq_freeze_queue(q);
 
         if (rq_qos_id(q, rqos->id))
                 goto ebusy;
 
         rqos->next = q->rq_qos;
         q->rq_qos = rqos;
-        blk_mq_unfreeze_queue(q);
+        blk_mq_unfreeze_queue(q, memflags);
 
         if (rqos->ops->debugfs_attrs) {
                 mutex_lock(&q->debugfs_mutex);
@@ -327,7 +328,7 @@ int rq_qos_add(struct rq_qos *rqos, struct gendisk *disk, enum rq_qos_id id,
         return 0;
 
 ebusy:
-        blk_mq_unfreeze_queue(q);
+        blk_mq_unfreeze_queue(q, memflags);
         return -EBUSY;
 }
 
@@ -335,17 +336,18 @@ void rq_qos_del(struct rq_qos *rqos)
 {
         struct request_queue *q = rqos->disk->queue;
         struct rq_qos **cur;
+        unsigned int memflags;
 
         lockdep_assert_held(&q->rq_qos_mutex);
 
-        blk_mq_freeze_queue(q);
+        memflags = blk_mq_freeze_queue(q);
         for (cur = &q->rq_qos; *cur; cur = &(*cur)->next) {
                 if (*cur == rqos) {
                         *cur = rqos->next;
                         break;
                 }
         }
-        blk_mq_unfreeze_queue(q);
+        blk_mq_unfreeze_queue(q, memflags);
 
         mutex_lock(&q->debugfs_mutex);
         blk_mq_debugfs_unregister_rqos(rqos);

--- a/block/blk-settings.c
+++ b/block/blk-settings.c

@@ -461,11 +461,12 @@ EXPORT_SYMBOL_GPL(queue_limits_commit_update);
 int queue_limits_commit_update_frozen(struct request_queue *q,
                 struct queue_limits *lim)
 {
+        unsigned int memflags;
         int ret;
 
-        blk_mq_freeze_queue(q);
+        memflags = blk_mq_freeze_queue(q);
         ret = queue_limits_commit_update(q, lim);
-        blk_mq_unfreeze_queue(q);
+        blk_mq_unfreeze_queue(q, memflags);
 
         return ret;
 }

--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c

@@ -681,7 +681,7 @@ queue_attr_store(struct kobject *kobj, struct attribute *attr,
         struct queue_sysfs_entry *entry = to_queue(attr);
         struct gendisk *disk = container_of(kobj, struct gendisk, queue_kobj);
         struct request_queue *q = disk->queue;
-        unsigned int noio_flag;
+        unsigned int memflags;
         ssize_t res;
 
         if (!entry->store_limit && !entry->store)
@@ -711,11 +711,9 @@ queue_attr_store(struct kobject *kobj, struct attribute *attr,
         }
 
         mutex_lock(&q->sysfs_lock);
-        blk_mq_freeze_queue(q);
-        noio_flag = memalloc_noio_save();
+        memflags = blk_mq_freeze_queue(q);
         res = entry->store(disk, page, length);
-        memalloc_noio_restore(noio_flag);
-        blk_mq_unfreeze_queue(q);
+        blk_mq_unfreeze_queue(q, memflags);
         mutex_unlock(&q->sysfs_lock);
         return res;
 }

--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c

@@ -1202,6 +1202,7 @@ static int blk_throtl_init(struct gendisk *disk)
 {
         struct request_queue *q = disk->queue;
         struct throtl_data *td;
+        unsigned int memflags;
         int ret;
 
         td = kzalloc_node(sizeof(*td), GFP_KERNEL, q->node);
@@ -1215,7 +1216,7 @@ static int blk_throtl_init(struct gendisk *disk)
          * Freeze queue before activating policy, to synchronize with IO path,
          * which is protected by 'q_usage_counter'.
          */
-        blk_mq_freeze_queue(disk->queue);
+        memflags = blk_mq_freeze_queue(disk->queue);
         blk_mq_quiesce_queue(disk->queue);
 
         q->td = td;
@@ -1239,7 +1240,7 @@ static int blk_throtl_init(struct gendisk *disk)
 out:
         blk_mq_unquiesce_queue(disk->queue);
-        blk_mq_unfreeze_queue(disk->queue);
+        blk_mq_unfreeze_queue(disk->queue, memflags);
 
         return ret;
 }

--- a/block/blk-zoned.c
+++ b/block/blk-zoned.c

@@ -1717,9 +1717,10 @@ int blk_revalidate_disk_zones(struct gendisk *disk)
         else
                 pr_warn("%s: failed to revalidate zones\n", disk->disk_name);
         if (ret) {
-                blk_mq_freeze_queue(q);
+                unsigned int memflags = blk_mq_freeze_queue(q);
+
                 disk_free_zone_resources(disk);
-                blk_mq_unfreeze_queue(q);
+                blk_mq_unfreeze_queue(q, memflags);
         }
 
         return ret;

--- a/block/elevator.c
+++ b/block/elevator.c

@@ -570,6 +570,7 @@ static struct elevator_type *elevator_get_default(struct request_queue *q)
 void elevator_init_mq(struct request_queue *q)
 {
         struct elevator_type *e;
+        unsigned int memflags;
         int err;
 
         WARN_ON_ONCE(blk_queue_registered(q));
@@ -590,13 +591,13 @@ void elevator_init_mq(struct request_queue *q)
          *
          * Disk isn't added yet, so verifying queue lock only manually.
          */
-        blk_mq_freeze_queue(q);
+        memflags = blk_mq_freeze_queue(q);
 
         blk_mq_cancel_work_sync(q);
 
         err = blk_mq_init_sched(q, e);
 
-        blk_mq_unfreeze_queue(q);
+        blk_mq_unfreeze_queue(q, memflags);
 
         if (err) {
                 pr_warn("\"%s\" elevator initialization failed, "
@@ -614,11 +615,12 @@ void elevator_init_mq(struct request_queue *q)
  */
 int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
 {
+        unsigned int memflags;
         int ret;
 
         lockdep_assert_held(&q->sysfs_lock);
 
-        blk_mq_freeze_queue(q);
+        memflags = blk_mq_freeze_queue(q);
         blk_mq_quiesce_queue(q);
 
         if (q->elevator) {
@@ -639,7 +641,7 @@ int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
 
 out_unfreeze:
         blk_mq_unquiesce_queue(q);
-        blk_mq_unfreeze_queue(q);
+        blk_mq_unfreeze_queue(q, memflags);
 
         if (ret) {
                 pr_warn("elv: switch to \"%s\" failed, falling back to \"none\"\n",
@@ -651,9 +653,11 @@ out_unfreeze:
 
 void elevator_disable(struct request_queue *q)
 {
+        unsigned int memflags;
+
         lockdep_assert_held(&q->sysfs_lock);
 
-        blk_mq_freeze_queue(q);
+        memflags = blk_mq_freeze_queue(q);
         blk_mq_quiesce_queue(q);
 
         elv_unregister_queue(q);
@@ -664,7 +668,7 @@ void elevator_disable(struct request_queue *q)
         blk_add_trace_msg(q, "elv switch: none");
 
         blk_mq_unquiesce_queue(q);
-        blk_mq_unfreeze_queue(q);
+        blk_mq_unfreeze_queue(q, memflags);
 }
 
 /*

--- a/drivers/block/aoe/aoedev.c
+++ b/drivers/block/aoe/aoedev.c

@@ -226,10 +226,11 @@ aoedev_downdev(struct aoedev *d)
         /* fast fail all pending I/O */
         if (d->blkq) {
                 /* UP is cleared, freeze+quiesce to insure all are errored */
-                blk_mq_freeze_queue(d->blkq);
+                unsigned int memflags = blk_mq_freeze_queue(d->blkq);
+
                 blk_mq_quiesce_queue(d->blkq);
                 blk_mq_unquiesce_queue(d->blkq);
-                blk_mq_unfreeze_queue(d->blkq);
+                blk_mq_unfreeze_queue(d->blkq, memflags);
         }
 
         if (d->gd)

--- a/drivers/block/ataflop.c
+++ b/drivers/block/ataflop.c

@@ -746,6 +746,7 @@ static int do_format(int drive, int type, struct atari_format_descr *desc)
         unsigned char *p;
         int sect, nsect;
         unsigned long flags;
+        unsigned int memflags;
         int ret;
 
         if (type) {
@@ -758,7 +759,7 @@ static int do_format(int drive, int type, struct atari_format_descr *desc)
         }
 
         q = unit[drive].disk[type]->queue;
-        blk_mq_freeze_queue(q);
+        memflags = blk_mq_freeze_queue(q);
         blk_mq_quiesce_queue(q);
 
         local_irq_save(flags);
@@ -817,7 +818,7 @@ static int do_format(int drive, int type, struct atari_format_descr *desc)
         ret = FormatError ? -EIO : 0;
 out:
         blk_mq_unquiesce_queue(q);
-        blk_mq_unfreeze_queue(q);
+        blk_mq_unfreeze_queue(q, memflags);
         return ret;
 }

--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c

@@ -586,6 +586,7 @@ static int loop_change_fd(struct loop_device *lo, struct block_device *bdev,
 {
         struct file *file = fget(arg);
         struct file *old_file;
+        unsigned int memflags;
         int error;
         bool partscan;
         bool is_loop;
@@ -623,14 +624,14 @@ static int loop_change_fd(struct loop_device *lo, struct block_device *bdev,
 
         /* and ... switch */
         disk_force_media_change(lo->lo_disk);
-        blk_mq_freeze_queue(lo->lo_queue);
+        memflags = blk_mq_freeze_queue(lo->lo_queue);
         mapping_set_gfp_mask(old_file->f_mapping, lo->old_gfp_mask);
         lo->lo_backing_file = file;
         lo->old_gfp_mask = mapping_gfp_mask(file->f_mapping);
         mapping_set_gfp_mask(file->f_mapping,
                         lo->old_gfp_mask & ~(__GFP_IO|__GFP_FS));
         loop_update_dio(lo);
-        blk_mq_unfreeze_queue(lo->lo_queue);
+        blk_mq_unfreeze_queue(lo->lo_queue, memflags);
 
         partscan = lo->lo_flags & LO_FLAGS_PARTSCAN;
         loop_global_unlock(lo, is_loop);
@@ -1255,6 +1256,7 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
         int err;
         bool partscan = false;
         bool size_changed = false;
+        unsigned int memflags;
 
         err = mutex_lock_killable(&lo->lo_mutex);
         if (err)
@@ -1272,7 +1274,7 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
         }
 
         /* I/O needs to be drained before changing lo_offset or lo_sizelimit */
-        blk_mq_freeze_queue(lo->lo_queue);
+        memflags = blk_mq_freeze_queue(lo->lo_queue);
 
         err = loop_set_status_from_info(lo, info);
         if (err)
@@ -1294,7 +1296,7 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
                 loop_update_dio(lo);
 
 out_unfreeze:
-        blk_mq_unfreeze_queue(lo->lo_queue);
+        blk_mq_unfreeze_queue(lo->lo_queue, memflags);
         if (partscan)
                 clear_bit(GD_SUPPRESS_PART_SCAN, &lo->lo_disk->state);
 out_unlock:
@@ -1446,6 +1448,7 @@ static int loop_set_capacity(struct loop_device *lo)
 static int loop_set_dio(struct loop_device *lo, unsigned long arg)
 {
         bool use_dio = !!arg;
+        unsigned int memflags;
 
         if (lo->lo_state != Lo_bound)
                 return -ENXIO;
@@ -1459,18 +1462,19 @@ static int loop_set_dio(struct loop_device *lo, unsigned long arg)
                 vfs_fsync(lo->lo_backing_file, 0);
         }
 
-        blk_mq_freeze_queue(lo->lo_queue);
+        memflags = blk_mq_freeze_queue(lo->lo_queue);
         if (use_dio)
                 lo->lo_flags |= LO_FLAGS_DIRECT_IO;
         else
                 lo->lo_flags &= ~LO_FLAGS_DIRECT_IO;
-        blk_mq_unfreeze_queue(lo->lo_queue);
+        blk_mq_unfreeze_queue(lo->lo_queue, memflags);
         return 0;
 }
 
 static int loop_set_block_size(struct loop_device *lo, unsigned long arg)
 {
         struct queue_limits lim;
+        unsigned int memflags;
         int err = 0;
 
         if (lo->lo_state != Lo_bound)
@@ -1485,10 +1489,10 @@ static int loop_set_block_size(struct loop_device *lo, unsigned long arg)
         lim = queue_limits_start_update(lo->lo_queue);
         loop_update_limits(lo, &lim, arg);
 
-        blk_mq_freeze_queue(lo->lo_queue);
+        memflags = blk_mq_freeze_queue(lo->lo_queue);
         err = queue_limits_commit_update(lo->lo_queue, &lim);
         loop_update_dio(lo);
-        blk_mq_unfreeze_queue(lo->lo_queue);
+        blk_mq_unfreeze_queue(lo->lo_queue, memflags);
 
         return err;
 }

--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c

@@ -1234,6 +1234,7 @@ static int nbd_add_socket(struct nbd_device *nbd, unsigned long arg,
         struct socket *sock;
         struct nbd_sock **socks;
         struct nbd_sock *nsock;
+        unsigned int memflags;
         int err;
 
         /* Arg will be cast to int, check it to avoid overflow */
@@ -1247,7 +1248,7 @@ static int nbd_add_socket(struct nbd_device *nbd, unsigned long arg,
          * We need to make sure we don't get any errant requests while we're
          * reallocating the ->socks array.
          */
-        blk_mq_freeze_queue(nbd->disk->queue);
+        memflags = blk_mq_freeze_queue(nbd->disk->queue);
 
         if (!netlink && !nbd->task_setup &&
             !test_bit(NBD_RT_BOUND, &config->runtime_flags))
@@ -1288,12 +1289,12 @@ static int nbd_add_socket(struct nbd_device *nbd, unsigned long arg,
         INIT_WORK(&nsock->work, nbd_pending_cmd_work);
         socks[config->num_connections++] = nsock;
         atomic_inc(&config->live_connections);
-        blk_mq_unfreeze_queue(nbd->disk->queue);
+        blk_mq_unfreeze_queue(nbd->disk->queue, memflags);
 
         return 0;
 
 put_socket:
-        blk_mq_unfreeze_queue(nbd->disk->queue);
+        blk_mq_unfreeze_queue(nbd->disk->queue, memflags);
         sockfd_put(sock);
         return err;
 }

--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c

@@ -7281,9 +7281,10 @@ static ssize_t do_rbd_remove(const char *buf, size_t count)
                  * Prevent new IO from being queued and wait for existing
                  * IO to complete/fail.
                  */
-                blk_mq_freeze_queue(rbd_dev->disk->queue);
+                unsigned int memflags = blk_mq_freeze_queue(rbd_dev->disk->queue);
+
                 blk_mark_disk_dead(rbd_dev->disk);
-                blk_mq_unfreeze_queue(rbd_dev->disk->queue);
+                blk_mq_unfreeze_queue(rbd_dev->disk->queue, memflags);
         }
 
         del_gendisk(rbd_dev->disk);

--- a/drivers/block/sunvdc.c
+++ b/drivers/block/sunvdc.c

@@ -1113,6 +1113,7 @@ static void vdc_requeue_inflight(struct vdc_port *port)
 static void vdc_queue_drain(struct vdc_port *port)
 {
         struct request_queue *q = port->disk->queue;
+        unsigned int memflags;
 
         /*
          * Mark the queue as draining, then freeze/quiesce to ensure
@@ -1121,12 +1122,12 @@ static void vdc_queue_drain(struct vdc_port *port)
         port->drain = 1;
         spin_unlock_irq(&port->vio.lock);
 
-        blk_mq_freeze_queue(q);
+        memflags = blk_mq_freeze_queue(q);
         blk_mq_quiesce_queue(q);
 
         spin_lock_irq(&port->vio.lock);
         port->drain = 0;
         blk_mq_unquiesce_queue(q);
-        blk_mq_unfreeze_queue(q);
+        blk_mq_unfreeze_queue(q, memflags);
 }

--- a/drivers/block/swim3.c
+++ b/drivers/block/swim3.c

@@ -840,6 +840,7 @@ static int grab_drive(struct floppy_state *fs, enum swim_state state,
 static void release_drive(struct floppy_state *fs)
 {
         struct request_queue *q = disks[fs->index]->queue;
+        unsigned int memflags;
         unsigned long flags;
 
         swim3_dbg("%s", "-> release drive\n");
@@ -848,10 +849,10 @@ static void release_drive(struct floppy_state *fs)
         fs->state = idle;
         spin_unlock_irqrestore(&swim3_lock, flags);
 
-        blk_mq_freeze_queue(q);
+        memflags = blk_mq_freeze_queue(q);
         blk_mq_quiesce_queue(q);
         blk_mq_unquiesce_queue(q);
-        blk_mq_unfreeze_queue(q);
+        blk_mq_unfreeze_queue(q, memflags);
 }
 
 static int fd_eject(struct floppy_state *fs)

--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c

@@ -1584,11 +1584,12 @@ static int virtblk_freeze(struct virtio_device *vdev)
 {
         struct virtio_blk *vblk = vdev->priv;
         struct request_queue *q = vblk->disk->queue;
+        unsigned int memflags;
 
         /* Ensure no requests in virtqueues before deleting vqs. */
-        blk_mq_freeze_queue(q);
+        memflags = blk_mq_freeze_queue(q);
         blk_mq_quiesce_queue_nowait(q);
-        blk_mq_unfreeze_queue(q);
+        blk_mq_unfreeze_queue(q, memflags);
 
         /* Ensure we don't receive any more interrupts */
         virtio_reset_device(vdev);

--- a/drivers/mtd/mtd_blkdevs.c
+++ b/drivers/mtd/mtd_blkdevs.c

@@ -404,6 +404,7 @@ out_list_del:
 int del_mtd_blktrans_dev(struct mtd_blktrans_dev *old)
 {
         unsigned long flags;
+        unsigned int memflags;
 
         lockdep_assert_held(&mtd_table_mutex);
 
@@ -420,10 +421,10 @@ int del_mtd_blktrans_dev(struct mtd_blktrans_dev *old)
         spin_unlock_irqrestore(&old->queue_lock, flags);
 
         /* freeze+quiesce queue to ensure all requests are flushed */
-        blk_mq_freeze_queue(old->rq);
+        memflags = blk_mq_freeze_queue(old->rq);
         blk_mq_quiesce_queue(old->rq);
         blk_mq_unquiesce_queue(old->rq);
-        blk_mq_unfreeze_queue(old->rq);
+        blk_mq_unfreeze_queue(old->rq, memflags);
 
         /* If the device is currently open, tell trans driver to close it,
            then put mtd device, and don't touch it again */

--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c

@@ -2132,15 +2132,16 @@ static int nvme_update_ns_info_generic(struct nvme_ns *ns,
                 struct nvme_ns_info *info)
 {
         struct queue_limits lim;
+        unsigned int memflags;
         int ret;
 
         lim = queue_limits_start_update(ns->disk->queue);
         nvme_set_ctrl_limits(ns->ctrl, &lim);
 
-        blk_mq_freeze_queue(ns->disk->queue);
+        memflags = blk_mq_freeze_queue(ns->disk->queue);
         ret = queue_limits_commit_update(ns->disk->queue, &lim);
         set_disk_ro(ns->disk, nvme_ns_is_readonly(ns, info));
-        blk_mq_unfreeze_queue(ns->disk->queue);
+        blk_mq_unfreeze_queue(ns->disk->queue, memflags);
 
         /* Hide the block-interface for these devices */
         if (!ret)
@@ -2155,6 +2156,7 @@ static int nvme_update_ns_info_block(struct nvme_ns *ns,
         struct nvme_id_ns_nvm *nvm = NULL;
         struct nvme_zone_info zi = {};
         struct nvme_id_ns *id;
+        unsigned int memflags;
         sector_t capacity;
         unsigned lbaf;
         int ret;
@@ -2186,7 +2188,7 @@ static int nvme_update_ns_info_block(struct nvme_ns *ns,
 
         lim = queue_limits_start_update(ns->disk->queue);
 
-        blk_mq_freeze_queue(ns->disk->queue);
+        memflags = blk_mq_freeze_queue(ns->disk->queue);
         ns->head->lba_shift = id->lbaf[lbaf].ds;
         ns->head->nuse = le64_to_cpu(id->nuse);
         capacity = nvme_lba_to_sect(ns->head, le64_to_cpu(id->nsze));
@@ -2219,7 +2221,7 @@ static int nvme_update_ns_info_block(struct nvme_ns *ns,
 
         ret = queue_limits_commit_update(ns->disk->queue, &lim);
         if (ret) {
-                blk_mq_unfreeze_queue(ns->disk->queue);
+                blk_mq_unfreeze_queue(ns->disk->queue, memflags);
                 goto out;
         }
 
@@ -2235,7 +2237,7 @@ static int nvme_update_ns_info_block(struct nvme_ns *ns,
                 ns->head->features |= NVME_NS_DEAC;
         set_disk_ro(ns->disk, nvme_ns_is_readonly(ns, info));
         set_bit(NVME_NS_READY, &ns->flags);
-        blk_mq_unfreeze_queue(ns->disk->queue);
+        blk_mq_unfreeze_queue(ns->disk->queue, memflags);
 
         if (blk_queue_is_zoned(ns->queue)) {
                 ret = blk_revalidate_disk_zones(ns->disk);
@@ -2291,9 +2293,10 @@ static int nvme_update_ns_info(struct nvme_ns *ns, struct nvme_ns_info *info)
         if (!ret && nvme_ns_head_multipath(ns->head)) {
                 struct queue_limits *ns_lim = &ns->disk->queue->limits;
                 struct queue_limits lim;
+                unsigned int memflags;
 
                 lim = queue_limits_start_update(ns->head->disk->queue);
-                blk_mq_freeze_queue(ns->head->disk->queue);
+                memflags = blk_mq_freeze_queue(ns->head->disk->queue);
                 /*
                  * queue_limits mixes values that are the hardware limitations
                  * for bio splitting with what is the device configuration.
@@ -2325,7 +2328,7 @@ static int nvme_update_ns_info(struct nvme_ns *ns, struct nvme_ns_info *info)
                 set_disk_ro(ns->head->disk, nvme_ns_is_readonly(ns, info));
                 nvme_mpath_revalidate_paths(ns);
 
-                blk_mq_unfreeze_queue(ns->head->disk->queue);
+                blk_mq_unfreeze_queue(ns->head->disk->queue, memflags);
         }
 
         return ret;

--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c

@@ -60,7 +60,7 @@ void nvme_mpath_unfreeze(struct nvme_subsystem *subsys)
         lockdep_assert_held(&subsys->lock);
         list_for_each_entry(h, &subsys->nsheads, entry)
                 if (h->disk)
-                        blk_mq_unfreeze_queue(h->disk->queue);
+                        blk_mq_unfreeze_queue_nomemrestore(h->disk->queue);
 }
 
 void nvme_mpath_wait_freeze(struct nvme_subsystem *subsys)

--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c

@@ -2723,6 +2723,7 @@ int
 scsi_device_quiesce(struct scsi_device *sdev)
 {
         struct request_queue *q = sdev->request_queue;
+        unsigned int memflags;
         int err;
 
         /*
@@ -2737,7 +2738,7 @@ scsi_device_quiesce(struct scsi_device *sdev)
 
         blk_set_pm_only(q);
 
-        blk_mq_freeze_queue(q);
+        memflags = blk_mq_freeze_queue(q);
         /*
          * Ensure that the effect of blk_set_pm_only() will be visible
          * for percpu_ref_tryget() callers that occur after the queue
@@ -2745,7 +2746,7 @@ scsi_device_quiesce(struct scsi_device *sdev)
          * was called. See also https://lwn.net/Articles/573497/.
          */
         synchronize_rcu();
-        blk_mq_unfreeze_queue(q);
+        blk_mq_unfreeze_queue(q, memflags);
 
         mutex_lock(&sdev->state_mutex);
         err = scsi_device_set_state(sdev, SDEV_QUIESCE);

--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c

@@ -220,6 +220,7 @@ static int scsi_realloc_sdev_budget_map(struct scsi_device *sdev,
         int new_shift = sbitmap_calculate_shift(depth);
         bool need_alloc = !sdev->budget_map.map;
         bool need_free = false;
+        unsigned int memflags;
         int ret;
         struct sbitmap sb_backup;
 
@@ -240,7 +241,7 @@ static int scsi_realloc_sdev_budget_map(struct scsi_device *sdev,
          * and here disk isn't added yet, so freezing is pretty fast
          */
         if (need_free) {
-                blk_mq_freeze_queue(sdev->request_queue);
+                memflags = blk_mq_freeze_queue(sdev->request_queue);
                 sb_backup = sdev->budget_map;
         }
         ret = sbitmap_init_node(&sdev->budget_map,
@@ -256,7 +257,7 @@ static int scsi_realloc_sdev_budget_map(struct scsi_device *sdev,
                 else
                         sbitmap_free(&sb_backup);
                 ret = 0;
-                blk_mq_unfreeze_queue(sdev->request_queue);
+                blk_mq_unfreeze_queue(sdev->request_queue, memflags);
         }
         return ret;
 }

--- a/drivers/ufs/core/ufs-sysfs.c
+++ b/drivers/ufs/core/ufs-sysfs.c

@@ -1439,6 +1439,7 @@ static ssize_t max_number_of_rtt_store(struct device *dev,
         struct ufs_hba *hba = dev_get_drvdata(dev);
         struct ufs_dev_info *dev_info = &hba->dev_info;
         struct scsi_device *sdev;
+        unsigned int memflags;
         unsigned int rtt;
         int ret;
 
@@ -1458,14 +1459,16 @@ static ssize_t max_number_of_rtt_store(struct device *dev,
 
         ufshcd_rpm_get_sync(hba);
 
+        memflags = memalloc_noio_save();
         shost_for_each_device(sdev, hba->host)
-                blk_mq_freeze_queue(sdev->request_queue);
+                blk_mq_freeze_queue_nomemsave(sdev->request_queue);
 
         ret = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
                 QUERY_ATTR_IDN_MAX_NUM_OF_RTT, 0, 0, &rtt);
 
         shost_for_each_device(sdev, hba->host)
-                blk_mq_unfreeze_queue(sdev->request_queue);
+                blk_mq_unfreeze_queue_nomemrestore(sdev->request_queue);
+        memalloc_noio_restore(memflags);
 
         ufshcd_rpm_put_sync(hba);

--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h

@@ -900,8 +900,22 @@ void blk_mq_delay_run_hw_queues(struct request_queue *q, unsigned long msecs);
 void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
                 busy_tag_iter_fn *fn, void *priv);
 void blk_mq_tagset_wait_completed_request(struct blk_mq_tag_set *tagset);
-void blk_mq_freeze_queue(struct request_queue *q);
-void blk_mq_unfreeze_queue(struct request_queue *q);
+void blk_mq_freeze_queue_nomemsave(struct request_queue *q);
+void blk_mq_unfreeze_queue_nomemrestore(struct request_queue *q);
+static inline unsigned int __must_check
+blk_mq_freeze_queue(struct request_queue *q)
+{
+        unsigned int memflags = memalloc_noio_save();
+
+        blk_mq_freeze_queue_nomemsave(q);
+        return memflags;
+}
+static inline void
+blk_mq_unfreeze_queue(struct request_queue *q, unsigned int memflags)
+{
+        blk_mq_unfreeze_queue_nomemrestore(q);
+        memalloc_noio_restore(memflags);
+}
 void blk_freeze_queue_start(struct request_queue *q);
 void blk_mq_freeze_queue_wait(struct request_queue *q);
 int blk_mq_freeze_queue_wait_timeout(struct request_queue *q,