Merge branch 'io_uring-6.14' into for-6.15/io_uring
Merge mainline fixes into the 6.15 branch, as upcoming patches depend on
fixes that went into the 6.14 mainline branch.

* io_uring-6.14:
  io_uring/net: save msg_control for compat
  io_uring/rw: clean up mshot forced sync mode
  io_uring/rw: move ki_complete init into prep
  io_uring/rw: don't directly use ki_complete
  io_uring/rw: forbid multishot async reads
  io_uring/rsrc: remove unused constants
  io_uring: fix spelling error in uapi io_uring.h
  io_uring: prevent opcode speculation
  io-wq: backoff when retrying worker creation
commit c0d8c0362b

6 changed files with 45 additions and 22 deletions
include/uapi/linux/io_uring.h
@@ -380,7 +380,7 @@ enum io_uring_op {
  * result will be the number of buffers send, with
  * the starting buffer ID in cqe->flags as per
  * usual for provided buffer usage. The buffers
- * will be contigious from the starting buffer ID.
+ * will be contiguous from the starting buffer ID.
  */
 #define IORING_RECVSEND_POLL_FIRST   (1U << 0)
 #define IORING_RECV_MULTISHOT        (1U << 1)

io_uring/io-wq.c
@@ -63,7 +63,7 @@ struct io_worker {

        union {
                struct rcu_head rcu;
-               struct work_struct work;
+               struct delayed_work work;
        };
 };

@@ -784,6 +784,18 @@ static inline bool io_should_retry_thread(struct io_worker *worker, long err)
        }
 }

+static void queue_create_worker_retry(struct io_worker *worker)
+{
+       /*
+        * We only bother retrying because there's a chance that the
+        * failure to create a worker is due to some temporary condition
+        * in the forking task (e.g. outstanding signal); give the task
+        * some time to clear that condition.
+        */
+       schedule_delayed_work(&worker->work,
+                             msecs_to_jiffies(worker->init_retries * 5));
+}
+
 static void create_worker_cont(struct callback_head *cb)
 {
        struct io_worker *worker;
@@ -823,12 +835,13 @@ static void create_worker_cont(struct callback_head *cb)

        /* re-create attempts grab a new worker ref, drop the existing one */
        io_worker_release(worker);
-       schedule_work(&worker->work);
+       queue_create_worker_retry(worker);
 }

 static void io_workqueue_create(struct work_struct *work)
 {
-       struct io_worker *worker = container_of(work, struct io_worker, work);
+       struct io_worker *worker = container_of(work, struct io_worker,
+                                               work.work);
        struct io_wq_acct *acct = io_wq_get_acct(worker);

        if (!io_queue_worker_create(worker, acct, create_worker_cont))
@@ -866,8 +879,8 @@ fail:
                kfree(worker);
                goto fail;
        } else {
-               INIT_WORK(&worker->work, io_workqueue_create);
-               schedule_work(&worker->work);
+               INIT_DELAYED_WORK(&worker->work, io_workqueue_create);
+               queue_create_worker_retry(worker);
        }

        return true;

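The io-wq hunks above are the "backoff when retrying worker creation" fix: instead of re-queueing a failed worker creation immediately via schedule_work(), the worker's work item becomes a delayed_work and attempt N is scheduled N * 5 ms out. A minimal userspace sketch of that schedule follows; the retry cap of 3 is an assumption for illustration, not taken from this diff.

/*
 * Model of the linear backoff added above: attempt N is retried after
 * N * 5 ms, mirroring msecs_to_jiffies(worker->init_retries * 5).
 */
#include <stdio.h>

#define RETRY_LIMIT 3   /* assumed cap, for illustration only */

int main(void)
{
        for (int retries = 1; retries <= RETRY_LIMIT; retries++)
                printf("attempt %d: retry scheduled %d ms out\n",
                       retries, retries * 5);
        return 0;
}
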
io_uring/io_uring.c
@@ -2071,6 +2071,8 @@ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
                req->opcode = 0;
                return io_init_fail_req(req, -EINVAL);
        }
+       opcode = array_index_nospec(opcode, IORING_OP_LAST);
+
        def = &io_issue_defs[opcode];
        if (unlikely(sqe_flags & ~SQE_COMMON_FLAGS)) {
                /* enforce forwards compatibility on users */

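The single added line above is the whole "prevent opcode speculation" fix: even after the bounds check, the opcode is clamped with array_index_nospec() before it indexes io_issue_defs[], closing the Spectre-v1 window where a mispredicted branch could speculatively load out of bounds. Below is a hedged userspace sketch of the pattern; clamp_index() is a simplified stand-in for the kernel macro, not its real definition.

#include <stddef.h>
#include <stdio.h>

#define NR_OPS 4

static const int handlers[NR_OPS] = { 10, 20, 30, 40 };

/* Branchless clamp: returns index when index < size, 0 otherwise */
static size_t clamp_index(size_t index, size_t size)
{
        size_t mask = (size_t)0 - (index < size);  /* all-ones or zero */
        return index & mask;
}

int main(void)
{
        size_t opcode = 2;

        if (opcode >= NR_OPS)                   /* architectural bounds check */
                return 1;
        opcode = clamp_index(opcode, NR_OPS);   /* speculation-safe clamp */
        printf("handler value: %d\n", handlers[opcode]);
        return 0;
}
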
io_uring/net.c
@@ -323,7 +323,9 @@ static int io_sendmsg_copy_hdr(struct io_kiocb *req,
        if (unlikely(ret))
                return ret;

-       return __get_compat_msghdr(&iomsg->msg, &cmsg, NULL);
+       ret = __get_compat_msghdr(&iomsg->msg, &cmsg, NULL);
+       sr->msg_control = iomsg->msg.msg_control_user;
+       return ret;
 }
 #endif

io_uring/rsrc.h
@@ -4,12 +4,6 @@

 #include <linux/lockdep.h>

-#define IO_NODE_ALLOC_CACHE_MAX 32
-
-#define IO_RSRC_TAG_TABLE_SHIFT (PAGE_SHIFT - 3)
-#define IO_RSRC_TAG_TABLE_MAX   (1U << IO_RSRC_TAG_TABLE_SHIFT)
-#define IO_RSRC_TAG_TABLE_MASK  (IO_RSRC_TAG_TABLE_MAX - 1)
-
 enum {
        IORING_RSRC_FILE                = 0,
        IORING_RSRC_BUFFER              = 1,

io_uring/rw.c
@@ -23,6 +23,9 @@
 #include "poll.h"
 #include "rw.h"

+static void io_complete_rw(struct kiocb *kiocb, long res);
+static void io_complete_rw_iopoll(struct kiocb *kiocb, long res);
+
 struct io_rw {
        /* NOTE: kiocb has the file as the first member, so don't do it here */
        struct kiocb                    kiocb;
@@ -273,6 +276,11 @@ static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe,
        rw->kiocb.dio_complete = NULL;
        rw->kiocb.ki_flags = 0;

+       if (req->ctx->flags & IORING_SETUP_IOPOLL)
+               rw->kiocb.ki_complete = io_complete_rw_iopoll;
+       else
+               rw->kiocb.ki_complete = io_complete_rw;
+
        rw->addr = READ_ONCE(sqe->addr);
        rw->len = READ_ONCE(sqe->len);
        rw->flags = READ_ONCE(sqe->rw_flags);
@@ -544,8 +552,10 @@ static void io_complete_rw_iopoll(struct kiocb *kiocb, long res)
        smp_store_release(&req->iopoll_completed, 1);
 }

-static inline void io_rw_done(struct kiocb *kiocb, ssize_t ret)
+static inline void io_rw_done(struct io_kiocb *req, ssize_t ret)
 {
+       struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
+
        /* IO was queued async, completion will happen later */
        if (ret == -EIOCBQUEUED)
                return;
@@ -567,8 +577,10 @@ static inline void io_rw_done(struct kiocb *kiocb, ssize_t ret)
                }
        }

-       INDIRECT_CALL_2(kiocb->ki_complete, io_complete_rw_iopoll,
-                       io_complete_rw, kiocb, ret);
+       if (req->ctx->flags & IORING_SETUP_IOPOLL)
+               io_complete_rw_iopoll(&rw->kiocb, ret);
+       else
+               io_complete_rw(&rw->kiocb, ret);
 }

 static int kiocb_done(struct io_kiocb *req, ssize_t ret,
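The hunk above removes the last indirect invocation of ki_complete on this path: completion is now routed by testing IORING_SETUP_IOPOLL directly. For reference, a simplified userspace model of what INDIRECT_CALL_2() was doing at the old call site (turning a retpoline-penalized indirect call into pointer comparisons plus direct calls); this macro body is a sketch, not the kernel's definition.

#include <stdio.h>

static void complete_iopoll(long res) { printf("iopoll: %ld\n", res); }
static void complete_irq(long res)    { printf("irq: %ld\n", res); }

/* Sketch of the devirtualization idea behind INDIRECT_CALL_2() */
#define CALL_2(f, f2, f1, ...)                  \
        do {                                    \
                if ((f) == (f2))                \
                        f2(__VA_ARGS__);        \
                else if ((f) == (f1))           \
                        f1(__VA_ARGS__);        \
                else                            \
                        (f)(__VA_ARGS__);       \
        } while (0)

int main(void)
{
        void (*ki_complete)(long) = complete_irq;

        CALL_2(ki_complete, complete_iopoll, complete_irq, 42);
        return 0;
}

With ki_complete no longer consulted here, io_read_mshot() (last hunk below) is free to clear the pointer so lower layers treat the kiocb as synchronous: is_sync_kiocb() is simply a ki_complete == NULL check.
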
@@ -579,7 +591,7 @@ static int kiocb_done(struct io_kiocb *req, ssize_t ret,

        if (ret >= 0 && req->flags & REQ_F_CUR_POS)
                req->file->f_pos = rw->kiocb.ki_pos;
-       if (ret >= 0 && (rw->kiocb.ki_complete == io_complete_rw)) {
+       if (ret >= 0 && !(req->ctx->flags & IORING_SETUP_IOPOLL)) {
                __io_complete_rw_common(req, ret);
                /*
                 * Safe to call io_end from here as we're inline
@@ -590,7 +602,7 @@ static int kiocb_done(struct io_kiocb *req, ssize_t ret,
                io_req_rw_cleanup(req, issue_flags);
                return IOU_OK;
        } else {
-               io_rw_done(&rw->kiocb, ret);
+               io_rw_done(req, ret);
        }

        return IOU_ISSUE_SKIP_COMPLETE;
@@ -794,10 +806,8 @@ static int io_rw_init_file(struct io_kiocb *req, fmode_t mode, int rw_type)
        if (ctx->flags & IORING_SETUP_IOPOLL) {
                if (!(kiocb->ki_flags & IOCB_DIRECT) || !file->f_op->iopoll)
                        return -EOPNOTSUPP;
-
                kiocb->private = NULL;
                kiocb->ki_flags |= IOCB_HIPRI;
-               kiocb->ki_complete = io_complete_rw_iopoll;
                req->iopoll_completed = 0;
                if (ctx->flags & IORING_SETUP_HYBRID_IOPOLL) {
                        /* make sure every req only blocks once*/
@@ -807,7 +817,6 @@ static int io_rw_init_file(struct io_kiocb *req, fmode_t mode, int rw_type)
        } else {
                if (kiocb->ki_flags & IOCB_HIPRI)
                        return -EINVAL;
-               kiocb->ki_complete = io_complete_rw;
        }

        if (req->flags & REQ_F_HAS_METADATA) {
@@ -885,7 +894,8 @@ static int __io_read(struct io_kiocb *req, unsigned int issue_flags)
        } else if (ret == -EIOCBQUEUED) {
                return IOU_ISSUE_SKIP_COMPLETE;
        } else if (ret == req->cqe.res || ret <= 0 || !force_nonblock ||
-                  (req->flags & REQ_F_NOWAIT) || !need_complete_io(req)) {
+                  (req->flags & REQ_F_NOWAIT) || !need_complete_io(req) ||
+                  (issue_flags & IO_URING_F_MULTISHOT)) {
                /* read all, failed, already did sync or don't want to retry */
                goto done;
        }
@@ -958,6 +968,8 @@ int io_read_mshot(struct io_kiocb *req, unsigned int issue_flags)
        if (!io_file_can_poll(req))
                return -EBADFD;

+       /* make it sync, multishot doesn't support async execution */
+       rw->kiocb.ki_complete = NULL;
        ret = __io_read(req, issue_flags);

        /*
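The last two rw.c hunks pin down multishot reads: __io_read() no longer queues them for async retry (IO_URING_F_MULTISHOT now takes the done path), and io_read_mshot() forces sync execution by clearing ki_complete. For orientation, a hedged sketch of arming such a read from userspace with liburing; assumes liburing 2.5+ (io_uring_prep_read_multishot() and the buffer-ring helpers) and a pollable fd, with error handling trimmed.

#include <liburing.h>
#include <stdio.h>
#include <unistd.h>

#define BGID     0
#define NBUFS    4
#define BUF_SIZE 4096

int main(void)
{
        struct io_uring ring;
        struct io_uring_buf_ring *br;
        struct io_uring_sqe *sqe;
        struct io_uring_cqe *cqe;
        static char bufs[NBUFS][BUF_SIZE];
        int i, ret;

        if (io_uring_queue_init(8, &ring, 0) < 0)
                return 1;

        /* multishot reads consume from a provided-buffer ring */
        br = io_uring_setup_buf_ring(&ring, NBUFS, BGID, 0, &ret);
        if (!br)
                return 1;
        for (i = 0; i < NBUFS; i++)
                io_uring_buf_ring_add(br, bufs[i], BUF_SIZE, i,
                                      io_uring_buf_ring_mask(NBUFS), i);
        io_uring_buf_ring_advance(br, NBUFS);

        /* one SQE, many CQEs: re-posts until cancelled or out of buffers */
        sqe = io_uring_get_sqe(&ring);
        io_uring_prep_read_multishot(sqe, STDIN_FILENO, 0, 0, BGID);
        io_uring_submit(&ring);

        if (io_uring_wait_cqe(&ring, &cqe) == 0) {
                printf("read %d bytes into buffer %u\n", cqe->res,
                       cqe->flags >> IORING_CQE_BUFFER_SHIFT);
                io_uring_cqe_seen(&ring, cqe);
        }

        io_uring_queue_exit(&ring);
        return 0;
}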