/* SPDX-License-Identifier: GPL-2.0
 *
 * FUSE: Filesystem in Userspace
 * Copyright (c) 2023-2024 DataDirect Networks.
 */

#ifndef _FS_FUSE_DEV_URING_I_H
#define _FS_FUSE_DEV_URING_I_H

#include "fuse_i.h"

#ifdef CONFIG_FUSE_IO_URING

#define FUSE_URING_TEARDOWN_TIMEOUT (5 * HZ)
#define FUSE_URING_TEARDOWN_INTERVAL (HZ/20)

enum fuse_ring_req_state {
	FRRS_INVALID = 0,

	/* The ring entry was received from userspace and is being processed */
	FRRS_COMMIT,

	/* The ring entry is waiting for new fuse requests */
	FRRS_AVAILABLE,

	/* The ring entry got assigned a fuse req */
	FRRS_FUSE_REQ,

	/* The ring entry is in or on the way to user space */
	FRRS_USERSPACE,

	/* The ring entry is in teardown */
	FRRS_TEARDOWN,

	/* The ring entry is released, but not freed yet */
	FRRS_RELEASED,
};
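
/*
 * Illustrative sketch, not part of this header: as the comments above
 * suggest, an entry roughly cycles through
 *   FRRS_COMMIT -> FRRS_AVAILABLE -> FRRS_FUSE_REQ -> FRRS_USERSPACE
 * and back to FRRS_COMMIT once userspace commits the result, while
 * FRRS_TEARDOWN/FRRS_RELEASED are only used on shutdown. A hypothetical
 * helper checking whether an entry may take a new fuse request:
 */
static inline bool fuse_ring_ent_example_idle(enum fuse_ring_req_state state)
{
	return state == FRRS_AVAILABLE;
}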

/** A fuse ring entry, part of the ring queue */
struct fuse_ring_ent {
	/* userspace buffer */
	struct fuse_uring_req_header __user *headers;
	void __user *payload;

	/* the ring queue that owns the request */
	struct fuse_ring_queue *queue;

	/* fields below are protected by queue->lock */

	struct io_uring_cmd *cmd;

	struct list_head list;

	enum fuse_ring_req_state state;

	struct fuse_req *fuse_req;
};

struct fuse_ring_queue {
	/*
	 * back pointer to the main fuse uring structure that holds this
	 * queue
	 */
	struct fuse_ring *ring;

	/* queue id, corresponds to the cpu core */
	unsigned int qid;

	/*
	 * queue lock, taken when any value in the queue changes _and_ also
	 * when a ring entry state changes.
	 */
	spinlock_t lock;

	/* available ring entries (struct fuse_ring_ent) */
	struct list_head ent_avail_queue;

	/*
	 * entries that are being committed or are about to be sent to
	 * userspace
	 */
	struct list_head ent_w_req_queue;
	struct list_head ent_commit_queue;

	/* entries in userspace */
	struct list_head ent_in_userspace;

	/* entries that are released */
	struct list_head ent_released;

	/* fuse requests waiting for an entry slot */
	struct list_head fuse_req_queue;

	/* background fuse requests */
	struct list_head fuse_req_bg_queue;

	struct fuse_pqueue fpq;

	unsigned int active_background;

	bool stopped;
};
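
/*
 * Illustrative sketch, not part of this header: list membership and the
 * entry state are changed together under queue->lock. A hypothetical
 * helper returning an entry to the available list could look like this
 * (the name below is made up for illustration):
 */
static inline void
fuse_ring_ent_example_make_available(struct fuse_ring_ent *ent)
{
	struct fuse_ring_queue *queue = ent->queue;

	spin_lock(&queue->lock);
	ent->state = FRRS_AVAILABLE;
	list_move(&ent->list, &queue->ent_avail_queue);
	spin_unlock(&queue->lock);
}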

/**
 * Holds all the data needed for uring communication and indicates
 * whether uring is used for this connection.
 */
struct fuse_ring {
	/* back pointer */
	struct fuse_conn *fc;

	/* number of ring queues */
	size_t nr_queues;

	/* maximum payload/arg size */
	size_t max_payload_sz;

	struct fuse_ring_queue **queues;

	/*
	 * Log ring entry states on stop when entries cannot be released
	 */
	unsigned int stop_debug_log : 1;

	wait_queue_head_t stop_waitq;

	/* async tear down */
	struct delayed_work async_teardown_work;

	/* log */
	unsigned long teardown_time;

	atomic_t queue_refs;

	bool ready;
};
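
/*
 * Illustrative sketch, not part of this header: there is one queue per
 * CPU core (qid corresponds to the core), so a bounds-checked lookup of
 * a queue by qid might look like the hypothetical helper below.
 */
static inline struct fuse_ring_queue *
fuse_ring_example_get_queue(struct fuse_ring *ring, unsigned int qid)
{
	if (qid >= ring->nr_queues)
		return NULL;

	return ring->queues[qid];
}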

bool fuse_uring_enabled(void);
void fuse_uring_destruct(struct fuse_conn *fc);
void fuse_uring_stop_queues(struct fuse_ring *ring);
void fuse_uring_abort_end_requests(struct fuse_ring *ring);
int fuse_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags);
void fuse_uring_queue_fuse_req(struct fuse_iqueue *fiq, struct fuse_req *req);
bool fuse_uring_queue_bq_req(struct fuse_req *req);
bool fuse_uring_remove_pending_req(struct fuse_req *req);
bool fuse_uring_request_expired(struct fuse_conn *fc);

static inline void fuse_uring_abort(struct fuse_conn *fc)
{
	struct fuse_ring *ring = fc->ring;

	if (ring == NULL)
		return;

	if (atomic_read(&ring->queue_refs) > 0) {
		fuse_uring_abort_end_requests(ring);
		fuse_uring_stop_queues(ring);
	}
}

static inline void fuse_uring_wait_stopped_queues(struct fuse_conn *fc)
{
	struct fuse_ring *ring = fc->ring;

	if (ring)
		wait_event(ring->stop_waitq,
			   atomic_read(&ring->queue_refs) == 0);
}

static inline bool fuse_uring_ready(struct fuse_conn *fc)
{
	return fc->ring && fc->ring->ready;
}
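
/*
 * Illustrative sketch, not part of this header: a caller that already
 * holds a prepared request might prefer the io_uring path when it is
 * ready and otherwise fall back to the classic /dev/fuse queue. The
 * helper name below is made up for illustration.
 */
static inline void fuse_uring_example_dispatch(struct fuse_conn *fc,
					       struct fuse_iqueue *fiq,
					       struct fuse_req *req)
{
	if (fuse_uring_ready(fc))
		fuse_uring_queue_fuse_req(fiq, req);	/* io_uring path */
	/* else: hand the request to the classic /dev/fuse queue */
}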

#else /* CONFIG_FUSE_IO_URING */

static inline void fuse_uring_destruct(struct fuse_conn *fc)
{
}

static inline bool fuse_uring_enabled(void)
{
	return false;
}

static inline void fuse_uring_abort(struct fuse_conn *fc)
{
}

static inline void fuse_uring_wait_stopped_queues(struct fuse_conn *fc)
{
}

static inline bool fuse_uring_ready(struct fuse_conn *fc)
{
	return false;
}

static inline bool fuse_uring_remove_pending_req(struct fuse_req *req)
{
	return false;
}

static inline bool fuse_uring_request_expired(struct fuse_conn *fc)
{
	return false;
}

#endif /* CONFIG_FUSE_IO_URING */

#endif /* _FS_FUSE_DEV_URING_I_H */