/* SPDX-License-Identifier: GPL-2.0 */
#ifndef KUBLK_INTERNAL_H
#define KUBLK_INTERNAL_H

#include <unistd.h>
#include <stdlib.h>
#include <assert.h>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <pthread.h>
#include <getopt.h>
#include <limits.h>
#include <poll.h>
#include <fcntl.h>
#include <sys/syscall.h>
#include <sys/mman.h>
#include <sys/ioctl.h>
#include <sys/inotify.h>
#include <sys/wait.h>
#include <sys/eventfd.h>
#include <sys/ipc.h>
#include <sys/shm.h>
#include <linux/io_uring.h>
#include <liburing.h>
#include <semaphore.h>

/* allow ublk_dep.h to override ublk_cmd.h */
#include "ublk_dep.h"
#include <linux/ublk_cmd.h>

#include "utils.h"

#define MAX_BACK_FILES 4

/****************** part 1: libublk ********************/

#define CTRL_DEV "/dev/ublk-control"
#define UBLKC_DEV "/dev/ublkc"
#define UBLKB_DEV "/dev/ublkb"
#define UBLK_CTRL_RING_DEPTH 32
#define ERROR_EVTFD_DEVID -2

#define UBLK_IO_MAX_BYTES (1 << 20)
#define UBLK_MAX_QUEUES_SHIFT 5
#define UBLK_MAX_QUEUES (1 << UBLK_MAX_QUEUES_SHIFT)
#define UBLK_MAX_THREADS_SHIFT 5
#define UBLK_MAX_THREADS (1 << UBLK_MAX_THREADS_SHIFT)
#define UBLK_QUEUE_DEPTH 1024

struct ublk_dev;
struct ublk_queue;
struct ublk_thread;

struct stripe_ctx {
	/* stripe */
	unsigned int chunk_size;
};

struct fault_inject_ctx {
	/* fault_inject */
	unsigned long delay_us;
};

struct dev_ctx {
	char tgt_type[16];
	unsigned long flags;
	unsigned nr_hw_queues;
	unsigned short nthreads;
	unsigned queue_depth;
	int dev_id;
	int nr_files;
	char *files[MAX_BACK_FILES];
	unsigned int logging:1;
	unsigned int all:1;
	unsigned int fg:1;
	unsigned int recovery:1;
	unsigned int auto_zc_fallback:1;
	unsigned int per_io_tasks:1;

	int _evtfd;
	int _shmid;

	/* built from shmem, only for ublk_dump_dev() */
	struct ublk_dev *shadow_dev;

	/* for 'update_size' command */
	unsigned long long size;

	union {
		struct stripe_ctx stripe;
		struct fault_inject_ctx fault_inject;
	};
};

struct ublk_ctrl_cmd_data {
	__u32 cmd_op;
#define CTRL_CMD_HAS_DATA 1
#define CTRL_CMD_HAS_BUF 2
	__u32 flags;

	__u64 data[2];
	__u64 addr;
	__u32 len;
};
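
/*
 * Example (sketch, not part of this header): a caller might describe a
 * "get device info" control command like this, with CTRL_CMD_HAS_BUF
 * telling the submission path that addr/len carry a result buffer:
 *
 *	struct ublksrv_ctrl_dev_info info;
 *	struct ublk_ctrl_cmd_data data = {
 *		.cmd_op	= UBLK_U_CMD_GET_DEV_INFO,
 *		.flags	= CTRL_CMD_HAS_BUF,
 *		.addr	= (__u64)(uintptr_t)&info,
 *		.len	= sizeof(info),
 *	};
 */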

struct ublk_io {
	char *buf_addr;

#define UBLKS_IO_NEED_FETCH_RQ (1UL << 0)
#define UBLKS_IO_NEED_COMMIT_RQ_COMP (1UL << 1)
#define UBLKS_IO_FREE (1UL << 2)
#define UBLKS_IO_NEED_GET_DATA (1UL << 3)
#define UBLKS_IO_NEED_REG_BUF (1UL << 4)
	unsigned short flags;
	unsigned short refs;	/* used by target code only */

	int tag;

	int result;

	unsigned short buf_index;
	unsigned short tgt_ios;
	void *private_data;
};
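
/*
 * Informal lifecycle sketch, inferred from the flag names and from
 * ublk_mark_io_done() below (not a strict contract): an io starts out
 * fetchable and free (UBLKS_IO_NEED_FETCH_RQ | UBLKS_IO_FREE), fetches
 * a request from the kernel, is handled by the target, and on
 * completion gains UBLKS_IO_NEED_COMMIT_RQ_COMP | UBLKS_IO_FREE so the
 * next pass commits the result and re-fetches.
 */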

struct ublk_tgt_ops {
	const char *name;
	int (*init_tgt)(const struct dev_ctx *ctx, struct ublk_dev *);
	void (*deinit_tgt)(struct ublk_dev *);

	int (*queue_io)(struct ublk_thread *, struct ublk_queue *, int tag);
	void (*tgt_io_done)(struct ublk_thread *, struct ublk_queue *,
			const struct io_uring_cqe *);

	/*
	 * Target-specific command line handling
	 *
	 * Each option requires an argument on the target command line.
	 */
	void (*parse_cmd_line)(struct dev_ctx *ctx, int argc, char *argv[]);
	void (*usage)(const struct ublk_tgt_ops *ops);

	/* return buffer index for UBLK_F_AUTO_BUF_REG */
	unsigned short (*buf_index)(const struct ublk_queue *, int tag);
};
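
/*
 * Minimal target sketch (hypothetical names): a trivial target only has
 * to size the device in ->init_tgt() and answer each request from
 * ->queue_io(), e.g.:
 *
 *	static int demo_init_tgt(const struct dev_ctx *ctx, struct ublk_dev *dev)
 *	{
 *		dev->tgt.dev_size = 250UL << 30;
 *		dev->tgt.params.basic.dev_sectors = dev->tgt.dev_size >> 9;
 *		return 0;
 *	}
 *
 *	static int demo_queue_io(struct ublk_thread *t, struct ublk_queue *q, int tag)
 *	{
 *		unsigned len = ublk_get_iod(q, tag)->nr_sectors << 9;
 *
 *		ublk_complete_io(t, q, tag, len);
 *		return 0;
 *	}
 *
 *	const struct ublk_tgt_ops demo_tgt_ops = {
 *		.name = "demo",
 *		.init_tgt = demo_init_tgt,
 *		.queue_io = demo_queue_io,
 *	};
 */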

struct ublk_tgt {
	unsigned long dev_size;
	unsigned int sq_depth;
	unsigned int cq_depth;
	const struct ublk_tgt_ops *ops;
	struct ublk_params params;

	int nr_backing_files;
	unsigned long backing_file_size[MAX_BACK_FILES];
	char backing_file[MAX_BACK_FILES][PATH_MAX];
};

struct ublk_queue {
	int q_id;
	int q_depth;
	struct ublk_dev *dev;
	const struct ublk_tgt_ops *tgt_ops;
	struct ublksrv_io_desc *io_cmd_buf;

/* borrow one bit of the ublk uapi flags that may never be used */
#define UBLKS_Q_AUTO_BUF_REG_FALLBACK (1ULL << 63)
	__u64 flags;
	struct ublk_io ios[UBLK_QUEUE_DEPTH];
};

struct ublk_thread {
	struct ublk_dev *dev;
	struct io_uring ring;
	unsigned int cmd_inflight;
	unsigned int io_inflight;

	pthread_t thread;
	unsigned idx;

#define UBLKS_T_STOPPING (1U << 0)
#define UBLKS_T_IDLE (1U << 1)
	unsigned state;
};

struct ublk_dev {
	struct ublk_tgt tgt;
	struct ublksrv_ctrl_dev_info dev_info;
	struct ublk_queue q[UBLK_MAX_QUEUES];
	struct ublk_thread threads[UBLK_MAX_THREADS];
	unsigned nthreads;
	unsigned per_io_tasks;

	int fds[MAX_BACK_FILES + 1];	/* fds[0] points to /dev/ublkcN */
	int nr_fds;
	int ctrl_fd;
	struct io_uring ring;

	void *private_data;
};
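
/*
 * Queues and threads are sized independently here: judging by the
 * separate nthreads/per_io_tasks fields, nthreads need not match
 * dev_info.nr_hw_queues, which is why the I/O helpers below take both
 * a ublk_thread and a ublk_queue.
 */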

extern int ublk_queue_io_cmd(struct ublk_thread *t, struct ublk_io *io);

static inline int ublk_io_auto_zc_fallback(const struct ublksrv_io_desc *iod)
{
	return !!(iod->op_flags & UBLK_IO_F_NEED_REG_BUF);
}

static inline int is_target_io(__u64 user_data)
{
	return (user_data & (1ULL << 63)) != 0;
}

static inline __u64 build_user_data(unsigned tag, unsigned op,
		unsigned tgt_data, unsigned q_id, unsigned is_target_io)
{
	/* we only have 7 bits to encode q_id */
	_Static_assert(UBLK_MAX_QUEUES_SHIFT <= 7);
	assert(!(tag >> 16) && !(op >> 8) && !(tgt_data >> 16) && !(q_id >> 7));

	/* widen tgt_data before shifting: a 16-bit value shifted by 24 in
	 * 32-bit arithmetic would lose its upper bits
	 */
	return tag | (op << 16) | ((__u64)tgt_data << 24) |
		(__u64)q_id << 56 | (__u64)is_target_io << 63;
}
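
/*
 * Resulting user_data layout, per build_user_data() above and the
 * decoders below:
 *
 *	bits  0..15	tag
 *	bits 16..23	op
 *	bits 24..39	tgt_data
 *	bits 56..62	q_id
 *	bit  63		target-io flag
 *
 * Round-trip sketch:
 *
 *	__u64 data = build_user_data(tag, op, 0, q_id, 0);
 *
 *	assert(user_data_to_tag(data) == tag);
 *	assert(user_data_to_op(data) == op);
 *	assert(user_data_to_q_id(data) == q_id);
 */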

static inline unsigned int user_data_to_tag(__u64 user_data)
{
	return user_data & 0xffff;
}

static inline unsigned int user_data_to_op(__u64 user_data)
{
	return (user_data >> 16) & 0xff;
}

static inline unsigned int user_data_to_tgt_data(__u64 user_data)
{
	return (user_data >> 24) & 0xffff;
}

static inline unsigned int user_data_to_q_id(__u64 user_data)
{
	return (user_data >> 56) & 0x7f;
}

static inline unsigned short ublk_cmd_op_nr(unsigned int op)
{
	return _IOC_NR(op);
}

static inline struct ublk_queue *ublk_io_to_queue(const struct ublk_io *io)
{
	return container_of(io, struct ublk_queue, ios[io->tag]);
}

static inline int ublk_io_alloc_sqes(struct ublk_thread *t,
		struct io_uring_sqe *sqes[], int nr_sqes)
{
	struct io_uring *ring = &t->ring;
	unsigned left = io_uring_sq_space_left(ring);
	int i;

	if (left < nr_sqes)
		io_uring_submit(ring);

	for (i = 0; i < nr_sqes; i++) {
		sqes[i] = io_uring_get_sqe(ring);
		if (!sqes[i])
			return i;
	}

	return nr_sqes;
}
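
/*
 * Usage sketch (hypothetical caller): reserve two SQEs at once, e.g.
 * for a buffer-register plus target-I/O pair, and treat a short count
 * as ring exhaustion:
 *
 *	struct io_uring_sqe *sqe[2];
 *
 *	if (ublk_io_alloc_sqes(t, sqe, 2) < 2)
 *		return -ENOMEM;
 */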

static inline void io_uring_prep_buf_register(struct io_uring_sqe *sqe,
		int dev_fd, int tag, int q_id, __u64 index)
{
	struct ublksrv_io_cmd *cmd = (struct ublksrv_io_cmd *)sqe->cmd;

	io_uring_prep_read(sqe, dev_fd, 0, 0, 0);
	sqe->opcode = IORING_OP_URING_CMD;
	sqe->flags |= IOSQE_FIXED_FILE;
	sqe->cmd_op = UBLK_U_IO_REGISTER_IO_BUF;

	cmd->tag = tag;
	cmd->addr = index;
	cmd->q_id = q_id;
}

static inline void io_uring_prep_buf_unregister(struct io_uring_sqe *sqe,
		int dev_fd, int tag, int q_id, __u64 index)
{
	struct ublksrv_io_cmd *cmd = (struct ublksrv_io_cmd *)sqe->cmd;

	io_uring_prep_read(sqe, dev_fd, 0, 0, 0);
	sqe->opcode = IORING_OP_URING_CMD;
	sqe->flags |= IOSQE_FIXED_FILE;
	sqe->cmd_op = UBLK_U_IO_UNREGISTER_IO_BUF;

	cmd->tag = tag;
	cmd->addr = index;
	cmd->q_id = q_id;
}
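
/*
 * The two helpers above bracket a zero-copy target I/O. Sketch of the
 * assumed flow (dev_fd is 0 because fds[0] is the fixed-file slot of
 * /dev/ublkcN, per struct ublk_dev):
 *
 *	io_uring_prep_buf_register(sqe[0], 0, tag, q->q_id, io->buf_index);
 *	... issue the target I/O against fixed buffer io->buf_index ...
 *	io_uring_prep_buf_unregister(sqe[1], 0, tag, q->q_id, io->buf_index);
 */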

static inline void *ublk_get_sqe_cmd(const struct io_uring_sqe *sqe)
{
	return (void *)&sqe->cmd;
}

static inline void ublk_set_io_res(struct ublk_queue *q, int tag, int res)
{
	q->ios[tag].result = res;
}

static inline int ublk_get_io_res(const struct ublk_queue *q, unsigned tag)
{
	return q->ios[tag].result;
}

static inline void ublk_mark_io_done(struct ublk_io *io, int res)
{
	io->flags |= (UBLKS_IO_NEED_COMMIT_RQ_COMP | UBLKS_IO_FREE);
	io->result = res;
}

static inline const struct ublksrv_io_desc *ublk_get_iod(const struct ublk_queue *q, int tag)
{
	return &q->io_cmd_buf[tag];
}

static inline void ublk_set_sqe_cmd_op(struct io_uring_sqe *sqe, __u32 cmd_op)
{
	__u32 *addr = (__u32 *)&sqe->off;

	addr[0] = cmd_op;
	addr[1] = 0;
}

static inline struct ublk_io *ublk_get_io(struct ublk_queue *q, unsigned tag)
{
	return &q->ios[tag];
}

static inline int ublk_complete_io(struct ublk_thread *t, struct ublk_queue *q,
		unsigned tag, int res)
{
	struct ublk_io *io = &q->ios[tag];

	ublk_mark_io_done(io, res);

	return ublk_queue_io_cmd(t, io);
}

static inline void ublk_queued_tgt_io(struct ublk_thread *t, struct ublk_queue *q,
		unsigned tag, int queued)
{
	if (queued < 0)
		ublk_complete_io(t, q, tag, queued);
	else {
		struct ublk_io *io = ublk_get_io(q, tag);

		t->io_inflight += queued;
		io->tgt_ios = queued;
		io->result = 0;
	}
}

static inline int ublk_completed_tgt_io(struct ublk_thread *t,
		struct ublk_queue *q, unsigned tag)
{
	struct ublk_io *io = ublk_get_io(q, tag);

	t->io_inflight--;

	return --io->tgt_ios == 0;
}
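
/*
 * Typical accounting pattern (sketch): ->queue_io() records how many
 * target SQEs back one ublk request, and ->tgt_io_done() completes the
 * request once the last of them finishes:
 *
 *	in ->queue_io(), after submitting 'queued' SQEs:
 *		ublk_queued_tgt_io(t, q, tag, queued);
 *
 *	in ->tgt_io_done(), once per target CQE:
 *		if (ublk_completed_tgt_io(t, q, tag))
 *			ublk_complete_io(t, q, tag, res);
 */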

static inline int ublk_queue_use_zc(const struct ublk_queue *q)
{
	return q->flags & UBLK_F_SUPPORT_ZERO_COPY;
}

static inline int ublk_queue_use_auto_zc(const struct ublk_queue *q)
{
	return q->flags & UBLK_F_AUTO_BUF_REG;
}

static inline int ublk_queue_auto_zc_fallback(const struct ublk_queue *q)
{
	/* !! is required: bit 63 would be lost when truncating to int */
	return !!(q->flags & UBLKS_Q_AUTO_BUF_REG_FALLBACK);
}

static inline int ublk_queue_no_buf(const struct ublk_queue *q)
{
	return ublk_queue_use_zc(q) || ublk_queue_use_auto_zc(q);
}

extern const struct ublk_tgt_ops null_tgt_ops;
extern const struct ublk_tgt_ops loop_tgt_ops;
extern const struct ublk_tgt_ops stripe_tgt_ops;
extern const struct ublk_tgt_ops fault_inject_tgt_ops;

void backing_file_tgt_deinit(struct ublk_dev *dev);
int backing_file_tgt_init(struct ublk_dev *dev);
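
/*
 * Assumed usage of the two helpers above (sketch): file-backed targets
 * such as loop and stripe call backing_file_tgt_init() from
 * ->init_tgt() to open ctx->files[] into dev->fds[1..] and record each
 * size in dev->tgt.backing_file_size[], and backing_file_tgt_deinit()
 * from ->deinit_tgt() to close them again.
 */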

#endif