Mirror of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git (synced 2025-09-18 22:14:16 +00:00)
block-6.17-20250828

Merge tag 'block-6.17-20250828' of git://git.kernel.dk/linux

Pull block fixes from Jens Axboe:

 - Fix a lockdep-spotted recursive-locking issue for zoned writes in the error path
 - Update Coly's address in the bcache MAINTAINERS entry
 - Fix a ublk release issue, with selftests
 - Fix a regression introduced in this cycle, where q->rq_qos was assumed to always be set whenever a bio's QoS flags indicated it
 - Fix a regression introduced in this cycle that broke loop's retrieval of block device sizes

* tag 'block-6.17-20250828' of git://git.kernel.dk/linux:
  bcache: change maintainer's email address
  ublk selftests: add --no_ublk_fixed_fd for not using registered ublk char device
  ublk: avoid ublk_io_release() called after ublk char dev is closed
  block: validate QoS before calling __rq_qos_done_bio()
  blk-zoned: Fix a lockdep complaint about recursive locking
  loop: fix zero sized loop for block special file
commit d1cf752d58
11 changed files with 175 additions and 56 deletions
MAINTAINERS

@@ -4205,7 +4205,7 @@ W:	http://www.baycom.org/~tom/ham/ham.html
 F:	drivers/net/hamradio/baycom*

 BCACHE (BLOCK LAYER CACHE)
-M:	Coly Li <colyli@kernel.org>
+M:	Coly Li <colyli@fnnas.com>
 M:	Kent Overstreet <kent.overstreet@linux.dev>
 L:	linux-bcache@vger.kernel.org
 S:	Maintained

block/blk-rq-qos.h

@@ -149,12 +149,15 @@ static inline void rq_qos_done_bio(struct bio *bio)
 	q = bdev_get_queue(bio->bi_bdev);

 	/*
-	 * If a bio has BIO_QOS_xxx set, it implicitly implies that
-	 * q->rq_qos is present. So, we skip re-checking q->rq_qos
-	 * here as an extra optimization and directly call
-	 * __rq_qos_done_bio().
+	 * A BIO may carry BIO_QOS_* flags even if the associated request_queue
+	 * does not have rq_qos enabled. This can happen with stacked block
+	 * devices — for example, NVMe multipath, where it's possible that the
+	 * bottom device has QoS enabled but the top device does not. Therefore,
+	 * always verify that q->rq_qos is present and QoS is enabled before
+	 * calling __rq_qos_done_bio().
 	 */
-	__rq_qos_done_bio(q->rq_qos, bio);
+	if (test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags) && q->rq_qos)
+		__rq_qos_done_bio(q->rq_qos, bio);
 }

 static inline void rq_qos_throttle(struct request_queue *q, struct bio *bio)

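For readability, here is roughly how the helper reads with the hunk above applied; the declaration of q and the early-return checks on the bio's QoS flags sit above the hunk and are elided, so treat this as a sketch rather than the verbatim file contents:

	static inline void rq_qos_done_bio(struct bio *bio)
	{
		struct request_queue *q;

		/* ... early exits for bios without BIO_QOS_* flags are elided ... */

		q = bdev_get_queue(bio->bi_bdev);

		/*
		 * The BIO_QOS_* flags are only a hint: on a stacked setup the top
		 * device may have no rq_qos at all, so re-validate before the call.
		 */
		if (test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags) && q->rq_qos)
			__rq_qos_done_bio(q->rq_qos, bio);
	}
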
block/blk-zoned.c

@@ -1286,14 +1286,14 @@ static void blk_zone_wplug_bio_work(struct work_struct *work)
 	struct block_device *bdev;
 	unsigned long flags;
 	struct bio *bio;
+	bool prepared;

 	/*
 	 * Submit the next plugged BIO. If we do not have any, clear
 	 * the plugged flag.
 	 */
-	spin_lock_irqsave(&zwplug->lock, flags);
-
 again:
+	spin_lock_irqsave(&zwplug->lock, flags);
 	bio = bio_list_pop(&zwplug->bio_list);
 	if (!bio) {
 		zwplug->flags &= ~BLK_ZONE_WPLUG_PLUGGED;

@@ -1304,13 +1304,14 @@ again:
 	trace_blk_zone_wplug_bio(zwplug->disk->queue, zwplug->zone_no,
 				 bio->bi_iter.bi_sector, bio_sectors(bio));

-	if (!blk_zone_wplug_prepare_bio(zwplug, bio)) {
+	prepared = blk_zone_wplug_prepare_bio(zwplug, bio);
+	spin_unlock_irqrestore(&zwplug->lock, flags);
+
+	if (!prepared) {
 		blk_zone_wplug_bio_io_error(zwplug, bio);
 		goto again;
 	}

-	spin_unlock_irqrestore(&zwplug->lock, flags);
-
 	bdev = bio->bi_bdev;

 	/*

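Taken together, the two hunks above rework the submission loop in blk_zone_wplug_bio_work() so that the zone write plug lock is dropped before a failed BIO is completed. A condensed sketch of the resulting flow, using only code visible in the hunks (the unplug/exit path and the trace point are elided):

	again:
		spin_lock_irqsave(&zwplug->lock, flags);
		bio = bio_list_pop(&zwplug->bio_list);
		if (!bio) {
			zwplug->flags &= ~BLK_ZONE_WPLUG_PLUGGED;
			/* ... unlock and exit path, not shown in the hunks ... */
		}

		prepared = blk_zone_wplug_prepare_bio(zwplug, bio);
		spin_unlock_irqrestore(&zwplug->lock, flags);

		if (!prepared) {
			/* completing the BIO outside the lock avoids the recursive-lock report */
			blk_zone_wplug_bio_io_error(zwplug, bio);
			goto again;
		}

		bdev = bio->bi_bdev;
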
drivers/block/loop.c

@@ -139,20 +139,26 @@ static int part_shift;

 static loff_t lo_calculate_size(struct loop_device *lo, struct file *file)
 {
-	struct kstat stat;
 	loff_t loopsize;
 	int ret;

-	/*
-	 * Get the accurate file size. This provides better results than
-	 * cached inode data, particularly for network filesystems where
-	 * metadata may be stale.
-	 */
-	ret = vfs_getattr_nosec(&file->f_path, &stat, STATX_SIZE, 0);
-	if (ret)
-		return 0;
+	if (S_ISBLK(file_inode(file)->i_mode)) {
+		loopsize = i_size_read(file->f_mapping->host);
+	} else {
+		struct kstat stat;
+
+		/*
+		 * Get the accurate file size. This provides better results than
+		 * cached inode data, particularly for network filesystems where
+		 * metadata may be stale.
+		 */
+		ret = vfs_getattr_nosec(&file->f_path, &stat, STATX_SIZE, 0);
+		if (ret)
+			return 0;
+
+		loopsize = stat.size;
+	}

-	loopsize = stat.size;
 	if (lo->lo_offset > 0)
 		loopsize -= lo->lo_offset;
 	/* offset is beyond i_size, weird but possible */

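The hunk above makes lo_calculate_size() branch on the backing file type instead of always relying on vfs_getattr_nosec(), which returned a zero size for block special files. Reassembled from the new side of the hunk (the trailing clamping and sector rounding are outside the hunk), the helper now reads roughly as follows:

	static loff_t lo_calculate_size(struct loop_device *lo, struct file *file)
	{
		loff_t loopsize;
		int ret;

		if (S_ISBLK(file_inode(file)->i_mode)) {
			/* block special files report their size via the inode */
			loopsize = i_size_read(file->f_mapping->host);
		} else {
			struct kstat stat;

			/* regular files: ask the filesystem for an accurate size */
			ret = vfs_getattr_nosec(&file->f_path, &stat, STATX_SIZE, 0);
			if (ret)
				return 0;
			loopsize = stat.size;
		}

		if (lo->lo_offset > 0)
			loopsize -= lo->lo_offset;
		/* ... offset clamping, sector-size rounding and return elided ... */
	}
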
drivers/block/ublk_drv.c

@@ -239,6 +239,7 @@ struct ublk_device {
 	struct mutex cancel_mutex;
 	bool canceling;
 	pid_t ublksrv_tgid;
+	struct delayed_work exit_work;
 };

 /* header of ublk_params */

@@ -1595,12 +1596,62 @@ static void ublk_set_canceling(struct ublk_device *ub, bool canceling)
 		ublk_get_queue(ub, i)->canceling = canceling;
 }

-static int ublk_ch_release(struct inode *inode, struct file *filp)
+static bool ublk_check_and_reset_active_ref(struct ublk_device *ub)
 {
-	struct ublk_device *ub = filp->private_data;
+	int i, j;
+
+	if (!(ub->dev_info.flags & (UBLK_F_SUPPORT_ZERO_COPY |
+					UBLK_F_AUTO_BUF_REG)))
+		return false;
+
+	for (i = 0; i < ub->dev_info.nr_hw_queues; i++) {
+		struct ublk_queue *ubq = ublk_get_queue(ub, i);
+
+		for (j = 0; j < ubq->q_depth; j++) {
+			struct ublk_io *io = &ubq->ios[j];
+			unsigned int refs = refcount_read(&io->ref) +
+				io->task_registered_buffers;
+
+			/*
+			 * UBLK_REFCOUNT_INIT or zero means no active
+			 * reference
+			 */
+			if (refs != UBLK_REFCOUNT_INIT && refs != 0)
+				return true;
+
+			/* reset to zero if the io hasn't active references */
+			refcount_set(&io->ref, 0);
+			io->task_registered_buffers = 0;
+		}
+	}
+	return false;
+}
+
+static void ublk_ch_release_work_fn(struct work_struct *work)
+{
+	struct ublk_device *ub =
+		container_of(work, struct ublk_device, exit_work.work);
 	struct gendisk *disk;
 	int i;

 	/*
+	 * For zero-copy and auto buffer register modes, I/O references
+	 * might not be dropped naturally when the daemon is killed, but
+	 * io_uring guarantees that registered bvec kernel buffers are
+	 * unregistered finally when freeing io_uring context, then the
+	 * active references are dropped.
+	 *
+	 * Wait until active references are dropped for avoiding use-after-free
+	 *
+	 * registered buffer may be unregistered in io_ring's release hander,
+	 * so have to wait by scheduling work function for avoiding the two
+	 * file release dependency.
+	 */
+	if (ublk_check_and_reset_active_ref(ub)) {
+		schedule_delayed_work(&ub->exit_work, 1);
+		return;
+	}
+
+	/*
 	 * disk isn't attached yet, either device isn't live, or it has
 	 * been removed already, so we needn't to do anything

@@ -1673,6 +1724,23 @@ unlock:
 	ublk_reset_ch_dev(ub);
 out:
 	clear_bit(UB_STATE_OPEN, &ub->state);
+
+	/* put the reference grabbed in ublk_ch_release() */
+	ublk_put_device(ub);
+}
+
+static int ublk_ch_release(struct inode *inode, struct file *filp)
+{
+	struct ublk_device *ub = filp->private_data;
+
+	/*
+	 * Grab ublk device reference, so it won't be gone until we are
+	 * really released from work function.
+	 */
+	ublk_get_device(ub);
+
+	INIT_DELAYED_WORK(&ub->exit_work, ublk_ch_release_work_fn);
+	schedule_delayed_work(&ub->exit_work, 0);
 	return 0;
 }

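The three hunks above split the old release path in two: ublk_ch_release() now only pins the device and schedules exit_work, while ublk_ch_release_work_fn() re-arms itself until ublk_check_and_reset_active_ref() sees no in-flight references, and only then runs the original teardown and drops the reference. A condensed sketch of the resulting control flow, using only code shown in the hunks:

	static int ublk_ch_release(struct inode *inode, struct file *filp)
	{
		struct ublk_device *ub = filp->private_data;

		/* hold a reference until the deferred work has really released us */
		ublk_get_device(ub);
		INIT_DELAYED_WORK(&ub->exit_work, ublk_ch_release_work_fn);
		schedule_delayed_work(&ub->exit_work, 0);
		return 0;
	}

	static void ublk_ch_release_work_fn(struct work_struct *work)
	{
		struct ublk_device *ub =
			container_of(work, struct ublk_device, exit_work.work);

		/* buffers may still be registered with io_uring; try again later */
		if (ublk_check_and_reset_active_ref(ub)) {
			schedule_delayed_work(&ub->exit_work, 1);
			return;
		}

		/* ... original release/teardown logic ... */

		/* put the reference grabbed in ublk_ch_release() */
		ublk_put_device(ub);
	}
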
tools/testing/selftests/ublk/file_backed.c

@@ -20,7 +20,7 @@ static int loop_queue_flush_io(struct ublk_thread *t, struct ublk_queue *q,
 	struct io_uring_sqe *sqe[1];

 	ublk_io_alloc_sqes(t, sqe, 1);
-	io_uring_prep_fsync(sqe[0], 1 /*fds[1]*/, IORING_FSYNC_DATASYNC);
+	io_uring_prep_fsync(sqe[0], ublk_get_registered_fd(q, 1) /*fds[1]*/, IORING_FSYNC_DATASYNC);
 	io_uring_sqe_set_flags(sqe[0], IOSQE_FIXED_FILE);
 	/* bit63 marks us as tgt io */
 	sqe[0]->user_data = build_user_data(tag, ublk_op, 0, q->q_id, 1);

@@ -42,7 +42,7 @@ static int loop_queue_tgt_rw_io(struct ublk_thread *t, struct ublk_queue *q,
 	if (!sqe[0])
 		return -ENOMEM;

-	io_uring_prep_rw(op, sqe[0], 1 /*fds[1]*/,
+	io_uring_prep_rw(op, sqe[0], ublk_get_registered_fd(q, 1) /*fds[1]*/,
			addr,
			iod->nr_sectors << 9,
			iod->start_sector << 9);

@@ -56,19 +56,19 @@ static int loop_queue_tgt_rw_io(struct ublk_thread *t, struct ublk_queue *q,

 	ublk_io_alloc_sqes(t, sqe, 3);

-	io_uring_prep_buf_register(sqe[0], 0, tag, q->q_id, ublk_get_io(q, tag)->buf_index);
+	io_uring_prep_buf_register(sqe[0], q, tag, q->q_id, ublk_get_io(q, tag)->buf_index);
 	sqe[0]->flags |= IOSQE_CQE_SKIP_SUCCESS | IOSQE_IO_HARDLINK;
 	sqe[0]->user_data = build_user_data(tag,
			ublk_cmd_op_nr(sqe[0]->cmd_op), 0, q->q_id, 1);

-	io_uring_prep_rw(op, sqe[1], 1 /*fds[1]*/, 0,
+	io_uring_prep_rw(op, sqe[1], ublk_get_registered_fd(q, 1) /*fds[1]*/, 0,
		iod->nr_sectors << 9,
		iod->start_sector << 9);
 	sqe[1]->buf_index = tag;
 	sqe[1]->flags |= IOSQE_FIXED_FILE | IOSQE_IO_HARDLINK;
 	sqe[1]->user_data = build_user_data(tag, ublk_op, 0, q->q_id, 1);

-	io_uring_prep_buf_unregister(sqe[2], 0, tag, q->q_id, ublk_get_io(q, tag)->buf_index);
+	io_uring_prep_buf_unregister(sqe[2], q, tag, q->q_id, ublk_get_io(q, tag)->buf_index);
 	sqe[2]->user_data = build_user_data(tag, ublk_cmd_op_nr(sqe[2]->cmd_op), 0, q->q_id, 1);

 	return 2;

tools/testing/selftests/ublk/kublk.c

@@ -432,7 +432,7 @@ static void ublk_thread_deinit(struct ublk_thread *t)
 	}
 }

-static int ublk_queue_init(struct ublk_queue *q, unsigned extra_flags)
+static int ublk_queue_init(struct ublk_queue *q, unsigned long long extra_flags)
 {
 	struct ublk_dev *dev = q->dev;
 	int depth = dev->dev_info.queue_depth;

@@ -446,6 +446,9 @@ static int ublk_queue_init(struct ublk_queue *q, unsigned extra_flags)
 	q->flags = dev->dev_info.flags;
 	q->flags |= extra_flags;

+	/* Cache fd in queue for fast path access */
+	q->ublk_fd = dev->fds[0];
+
 	cmd_buf_size = ublk_queue_cmd_buf_sz(q);
 	off = UBLKSRV_CMD_BUF_OFFSET + q->q_id * ublk_queue_max_cmd_buf_sz();
 	q->io_cmd_buf = mmap(0, cmd_buf_size, PROT_READ,

@@ -481,9 +484,10 @@ static int ublk_queue_init(struct ublk_queue *q, unsigned extra_flags)
 	return -ENOMEM;
 }

-static int ublk_thread_init(struct ublk_thread *t)
+static int ublk_thread_init(struct ublk_thread *t, unsigned long long extra_flags)
 {
 	struct ublk_dev *dev = t->dev;
+	unsigned long long flags = dev->dev_info.flags | extra_flags;
 	int ring_depth = dev->tgt.sq_depth, cq_depth = dev->tgt.cq_depth;
 	int ret;

@@ -512,7 +516,17 @@ static int ublk_thread_init(struct ublk_thread *t)

 	io_uring_register_ring_fd(&t->ring);

-	ret = io_uring_register_files(&t->ring, dev->fds, dev->nr_fds);
+	if (flags & UBLKS_Q_NO_UBLK_FIXED_FD) {
+		/* Register only backing files starting from index 1, exclude ublk control device */
+		if (dev->nr_fds > 1) {
+			ret = io_uring_register_files(&t->ring, &dev->fds[1], dev->nr_fds - 1);
+		} else {
+			/* No backing files to register, skip file registration */
+			ret = 0;
+		}
+	} else {
+		ret = io_uring_register_files(&t->ring, dev->fds, dev->nr_fds);
+	}
 	if (ret) {
 		ublk_err("ublk dev %d thread %d register files failed %d\n",
			t->dev->dev_info.dev_id, t->idx, ret);

@@ -626,9 +640,12 @@ int ublk_queue_io_cmd(struct ublk_thread *t, struct ublk_io *io)

 	/* These fields should be written once, never change */
 	ublk_set_sqe_cmd_op(sqe[0], cmd_op);
-	sqe[0]->fd = 0;	/* dev->fds[0] */
+	sqe[0]->fd = ublk_get_registered_fd(q, 0);	/* dev->fds[0] */
 	sqe[0]->opcode = IORING_OP_URING_CMD;
-	sqe[0]->flags = IOSQE_FIXED_FILE;
+	if (q->flags & UBLKS_Q_NO_UBLK_FIXED_FD)
+		sqe[0]->flags = 0;	/* Use raw FD, not fixed file */
+	else
+		sqe[0]->flags = IOSQE_FIXED_FILE;
 	sqe[0]->rw_flags = 0;
 	cmd->tag = io->tag;
 	cmd->q_id = q->q_id;

@@ -832,6 +849,7 @@ struct ublk_thread_info {
 	unsigned		idx;
 	sem_t			*ready;
 	cpu_set_t		*affinity;
+	unsigned long long	extra_flags;
 };

 static void *ublk_io_handler_fn(void *data)

@@ -844,7 +862,7 @@ static void *ublk_io_handler_fn(void *data)
 	t->dev = info->dev;
 	t->idx = info->idx;

-	ret = ublk_thread_init(t);
+	ret = ublk_thread_init(t, info->extra_flags);
 	if (ret) {
 		ublk_err("ublk dev %d thread %u init failed\n",
			dev_id, t->idx);

@@ -934,6 +952,8 @@ static int ublk_start_daemon(const struct dev_ctx *ctx, struct ublk_dev *dev)

 	if (ctx->auto_zc_fallback)
 		extra_flags = UBLKS_Q_AUTO_BUF_REG_FALLBACK;
+	if (ctx->no_ublk_fixed_fd)
+		extra_flags |= UBLKS_Q_NO_UBLK_FIXED_FD;

 	for (i = 0; i < dinfo->nr_hw_queues; i++) {
 		dev->q[i].dev = dev;

@@ -951,6 +971,7 @@ static int ublk_start_daemon(const struct dev_ctx *ctx, struct ublk_dev *dev)
 		tinfo[i].dev = dev;
 		tinfo[i].idx = i;
 		tinfo[i].ready = &ready;
+		tinfo[i].extra_flags = extra_flags;

 		/*
 		 * If threads are not tied 1:1 to queues, setting thread

@@ -1471,7 +1492,7 @@ static void __cmd_create_help(char *exe, bool recovery)
 	printf("%s %s -t [null|loop|stripe|fault_inject] [-q nr_queues] [-d depth] [-n dev_id]\n",
			exe, recovery ? "recover" : "add");
 	printf("\t[--foreground] [--quiet] [-z] [--auto_zc] [--auto_zc_fallback] [--debug_mask mask] [-r 0|1 ] [-g]\n");
-	printf("\t[-e 0|1 ] [-i 0|1]\n");
+	printf("\t[-e 0|1 ] [-i 0|1] [--no_ublk_fixed_fd]\n");
 	printf("\t[--nthreads threads] [--per_io_tasks]\n");
 	printf("\t[target options] [backfile1] [backfile2] ...\n");
 	printf("\tdefault: nr_queues=2(max 32), depth=128(max 1024), dev_id=-1(auto allocation)\n");

@@ -1534,6 +1555,7 @@ int main(int argc, char *argv[])
 		{ "size",		1,	NULL,  's'},
 		{ "nthreads",		1,	NULL,   0 },
 		{ "per_io_tasks",	0,	NULL,   0 },
+		{ "no_ublk_fixed_fd",	0,	NULL,   0 },
 		{ 0, 0, 0, 0 }
 	};
 	const struct ublk_tgt_ops *ops = NULL;

@@ -1613,6 +1635,8 @@ int main(int argc, char *argv[])
 				ctx.nthreads = strtol(optarg, NULL, 10);
 			if (!strcmp(longopts[option_idx].name, "per_io_tasks"))
 				ctx.per_io_tasks = 1;
+			if (!strcmp(longopts[option_idx].name, "no_ublk_fixed_fd"))
+				ctx.no_ublk_fixed_fd = 1;
 			break;
 		case '?':
 			/*

tools/testing/selftests/ublk/kublk.h

@@ -77,6 +77,7 @@ struct dev_ctx {
 	unsigned int	recovery:1;
 	unsigned int	auto_zc_fallback:1;
 	unsigned int	per_io_tasks:1;
+	unsigned int	no_ublk_fixed_fd:1;

 	int _evtfd;
 	int _shmid;

@@ -166,7 +167,9 @@ struct ublk_queue {

 	/* borrow one bit of ublk uapi flags, which may never be used */
 #define UBLKS_Q_AUTO_BUF_REG_FALLBACK	(1ULL << 63)
+#define UBLKS_Q_NO_UBLK_FIXED_FD	(1ULL << 62)
 	__u64 flags;
+	int ublk_fd;	/* cached ublk char device fd */
 	struct ublk_io ios[UBLK_QUEUE_DEPTH];
 };

@@ -273,34 +276,48 @@ static inline int ublk_io_alloc_sqes(struct ublk_thread *t,
 	return nr_sqes;
 }

-static inline void io_uring_prep_buf_register(struct io_uring_sqe *sqe,
-		int dev_fd, int tag, int q_id, __u64 index)
+static inline int ublk_get_registered_fd(struct ublk_queue *q, int fd_index)
 {
+	if (q->flags & UBLKS_Q_NO_UBLK_FIXED_FD) {
+		if (fd_index == 0)
+			/* Return the raw ublk FD for index 0 */
+			return q->ublk_fd;
+		/* Adjust index for backing files (index 1 becomes 0, etc.) */
+		return fd_index - 1;
+	}
+	return fd_index;
+}
+
+static inline void __io_uring_prep_buf_reg_unreg(struct io_uring_sqe *sqe,
+		struct ublk_queue *q, int tag, int q_id, __u64 index)
+{
 	struct ublksrv_io_cmd *cmd = (struct ublksrv_io_cmd *)sqe->cmd;
+	int dev_fd = ublk_get_registered_fd(q, 0);

 	io_uring_prep_read(sqe, dev_fd, 0, 0, 0);
 	sqe->opcode = IORING_OP_URING_CMD;
-	sqe->flags |= IOSQE_FIXED_FILE;
-	sqe->cmd_op = UBLK_U_IO_REGISTER_IO_BUF;
+	if (q->flags & UBLKS_Q_NO_UBLK_FIXED_FD)
+		sqe->flags &= ~IOSQE_FIXED_FILE;
+	else
+		sqe->flags |= IOSQE_FIXED_FILE;

 	cmd->tag = tag;
 	cmd->addr = index;
 	cmd->q_id = q_id;
 }

-static inline void io_uring_prep_buf_unregister(struct io_uring_sqe *sqe,
-		int dev_fd, int tag, int q_id, __u64 index)
+static inline void io_uring_prep_buf_register(struct io_uring_sqe *sqe,
+		struct ublk_queue *q, int tag, int q_id, __u64 index)
 {
-	struct ublksrv_io_cmd *cmd = (struct ublksrv_io_cmd *)sqe->cmd;
+	__io_uring_prep_buf_reg_unreg(sqe, q, tag, q_id, index);
+	sqe->cmd_op = UBLK_U_IO_REGISTER_IO_BUF;
+}

-	io_uring_prep_read(sqe, dev_fd, 0, 0, 0);
-	sqe->opcode = IORING_OP_URING_CMD;
-	sqe->flags |= IOSQE_FIXED_FILE;
+static inline void io_uring_prep_buf_unregister(struct io_uring_sqe *sqe,
+		struct ublk_queue *q, int tag, int q_id, __u64 index)
+{
+	__io_uring_prep_buf_reg_unreg(sqe, q, tag, q_id, index);
 	sqe->cmd_op = UBLK_U_IO_UNREGISTER_IO_BUF;
-
-	cmd->tag = tag;
-	cmd->addr = index;
-	cmd->q_id = q_id;
 }

 static inline void *ublk_get_sqe_cmd(const struct io_uring_sqe *sqe)

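To make the new fd indexing concrete, here is a small hypothetical illustration (not part of the patch) of how ublk_get_registered_fd() maps logical indexes in the two modes, assuming dev->fds[0] is the ublk char device and dev->fds[1] is the first backing file:

	#include <assert.h>
	/* assumes the selftests' kublk.h is included for the types and the helper */

	static void fd_mapping_example(void)
	{
		struct ublk_queue q = { .flags = UBLKS_Q_NO_UBLK_FIXED_FD, .ublk_fd = 7 };

		/* index 0 (the ublk char device) falls back to the raw, unregistered fd */
		assert(ublk_get_registered_fd(&q, 0) == 7);
		/* backing files shift down by one fixed-file slot */
		assert(ublk_get_registered_fd(&q, 1) == 0);

		q.flags = 0;	/* default mode: identity mapping into the fixed-file table */
		assert(ublk_get_registered_fd(&q, 0) == 0);
		assert(ublk_get_registered_fd(&q, 1) == 1);
	}
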
tools/testing/selftests/ublk/null.c

@@ -63,7 +63,7 @@ static int null_queue_zc_io(struct ublk_thread *t, struct ublk_queue *q,

 	ublk_io_alloc_sqes(t, sqe, 3);

-	io_uring_prep_buf_register(sqe[0], 0, tag, q->q_id, ublk_get_io(q, tag)->buf_index);
+	io_uring_prep_buf_register(sqe[0], q, tag, q->q_id, ublk_get_io(q, tag)->buf_index);
 	sqe[0]->user_data = build_user_data(tag,
			ublk_cmd_op_nr(sqe[0]->cmd_op), 0, q->q_id, 1);
 	sqe[0]->flags |= IOSQE_CQE_SKIP_SUCCESS | IOSQE_IO_HARDLINK;

@@ -71,7 +71,7 @@ static int null_queue_zc_io(struct ublk_thread *t, struct ublk_queue *q,
 	__setup_nop_io(tag, iod, sqe[1], q->q_id);
 	sqe[1]->flags |= IOSQE_IO_HARDLINK;

-	io_uring_prep_buf_unregister(sqe[2], 0, tag, q->q_id, ublk_get_io(q, tag)->buf_index);
+	io_uring_prep_buf_unregister(sqe[2], q, tag, q->q_id, ublk_get_io(q, tag)->buf_index);
 	sqe[2]->user_data = build_user_data(tag, ublk_cmd_op_nr(sqe[2]->cmd_op), 0, q->q_id, 1);

 	// buf register is marked as IOSQE_CQE_SKIP_SUCCESS

tools/testing/selftests/ublk/stripe.c

@@ -142,7 +142,7 @@ static int stripe_queue_tgt_rw_io(struct ublk_thread *t, struct ublk_queue *q,
 	ublk_io_alloc_sqes(t, sqe, s->nr + extra);

 	if (zc) {
-		io_uring_prep_buf_register(sqe[0], 0, tag, q->q_id, io->buf_index);
+		io_uring_prep_buf_register(sqe[0], q, tag, q->q_id, io->buf_index);
 		sqe[0]->flags |= IOSQE_CQE_SKIP_SUCCESS | IOSQE_IO_HARDLINK;
 		sqe[0]->user_data = build_user_data(tag,
				ublk_cmd_op_nr(sqe[0]->cmd_op), 0, q->q_id, 1);

@@ -168,7 +168,7 @@ static int stripe_queue_tgt_rw_io(struct ublk_thread *t, struct ublk_queue *q,
 	if (zc) {
 		struct io_uring_sqe *unreg = sqe[s->nr + 1];

-		io_uring_prep_buf_unregister(unreg, 0, tag, q->q_id, io->buf_index);
+		io_uring_prep_buf_unregister(unreg, q, tag, q->q_id, io->buf_index);
 		unreg->user_data = build_user_data(
			tag, ublk_cmd_op_nr(unreg->cmd_op), 0, q->q_id, 1);
 	}

tools/testing/selftests/ublk/ (stress test script)

@@ -28,14 +28,14 @@ _create_backfile 0 256M
 _create_backfile 1 128M
 _create_backfile 2 128M

-ublk_io_and_kill_daemon 8G -t null -q 4 -z &
-ublk_io_and_kill_daemon 256M -t loop -q 4 -z "${UBLK_BACKFILES[0]}" &
+ublk_io_and_kill_daemon 8G -t null -q 4 -z --no_ublk_fixed_fd &
+ublk_io_and_kill_daemon 256M -t loop -q 4 -z --no_ublk_fixed_fd "${UBLK_BACKFILES[0]}" &
 ublk_io_and_kill_daemon 256M -t stripe -q 4 -z "${UBLK_BACKFILES[1]}" "${UBLK_BACKFILES[2]}" &

 if _have_feature "AUTO_BUF_REG"; then
 	ublk_io_and_kill_daemon 8G -t null -q 4 --auto_zc &
 	ublk_io_and_kill_daemon 256M -t loop -q 4 --auto_zc "${UBLK_BACKFILES[0]}" &
-	ublk_io_and_kill_daemon 256M -t stripe -q 4 --auto_zc "${UBLK_BACKFILES[1]}" "${UBLK_BACKFILES[2]}" &
+	ublk_io_and_kill_daemon 256M -t stripe -q 4 --auto_zc --no_ublk_fixed_fd "${UBLK_BACKFILES[1]}" "${UBLK_BACKFILES[2]}" &
 	ublk_io_and_kill_daemon 8G -t null -q 4 -z --auto_zc --auto_zc_fallback &
 fi
