mirror of
git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
synced 2025-09-18 22:14:16 +00:00
io_uring/net: fix io_req_post_cqe abuse by send bundle
[ 114.987980][ T5313] WARNING: CPU: 6 PID: 5313 at io_uring/io_uring.c:872 io_req_post_cqe+0x12e/0x4f0
[ 114.991597][ T5313] RIP: 0010:io_req_post_cqe+0x12e/0x4f0
[ 115.001880][ T5313] Call Trace:
[ 115.002222][ T5313] <TASK>
[ 115.007813][ T5313] io_send+0x4fe/0x10f0
[ 115.009317][ T5313] io_issue_sqe+0x1a6/0x1740
[ 115.012094][ T5313] io_wq_submit_work+0x38b/0xed0
[ 115.013223][ T5313] io_worker_handle_work+0x62a/0x1600
[ 115.013876][ T5313] io_wq_worker+0x34f/0xdf0
As the comment states, io_req_post_cqe() should only be used by
multishot requests, i.e. REQ_F_APOLL_MULTISHOT, which bundled sends are
not. Add a flag signifying whether a request wants to post multiple
CQEs. Eventually REQ_F_APOLL_MULTISHOT should imply the new flag, but
that's left out for simplicity.
Cc: stable@vger.kernel.org
Fixes: a05d1f625c ("io_uring/net: support bundles for send")
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/8b611dbb54d1cd47a88681f5d38c84d0c02bc563.1743067183.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
This commit is contained in: commit 6889ae1b4d (parent 73b6dacb1c) — 3 changed files with 6 additions and 2 deletions
|
@ -490,6 +490,7 @@ enum {
|
||||||
REQ_F_SKIP_LINK_CQES_BIT,
|
REQ_F_SKIP_LINK_CQES_BIT,
|
||||||
REQ_F_SINGLE_POLL_BIT,
|
REQ_F_SINGLE_POLL_BIT,
|
||||||
REQ_F_DOUBLE_POLL_BIT,
|
REQ_F_DOUBLE_POLL_BIT,
|
||||||
|
REQ_F_MULTISHOT_BIT,
|
||||||
REQ_F_APOLL_MULTISHOT_BIT,
|
REQ_F_APOLL_MULTISHOT_BIT,
|
||||||
REQ_F_CLEAR_POLLIN_BIT,
|
REQ_F_CLEAR_POLLIN_BIT,
|
||||||
/* keep async read/write and isreg together and in order */
|
/* keep async read/write and isreg together and in order */
|
||||||
|
@ -567,6 +568,8 @@ enum {
|
||||||
REQ_F_SINGLE_POLL = IO_REQ_FLAG(REQ_F_SINGLE_POLL_BIT),
|
REQ_F_SINGLE_POLL = IO_REQ_FLAG(REQ_F_SINGLE_POLL_BIT),
|
||||||
/* double poll may active */
|
/* double poll may active */
|
||||||
REQ_F_DOUBLE_POLL = IO_REQ_FLAG(REQ_F_DOUBLE_POLL_BIT),
|
REQ_F_DOUBLE_POLL = IO_REQ_FLAG(REQ_F_DOUBLE_POLL_BIT),
|
||||||
|
/* request posts multiple completions, should be set at prep time */
|
||||||
|
REQ_F_MULTISHOT = IO_REQ_FLAG(REQ_F_MULTISHOT_BIT),
|
||||||
/* fast poll multishot mode */
|
/* fast poll multishot mode */
|
||||||
REQ_F_APOLL_MULTISHOT = IO_REQ_FLAG(REQ_F_APOLL_MULTISHOT_BIT),
|
REQ_F_APOLL_MULTISHOT = IO_REQ_FLAG(REQ_F_APOLL_MULTISHOT_BIT),
|
||||||
/* recvmsg special flag, clear EPOLLIN */
|
/* recvmsg special flag, clear EPOLLIN */
|
||||||
|
|
|
@ -1840,7 +1840,7 @@ fail:
|
||||||
* Don't allow any multishot execution from io-wq. It's more restrictive
|
* Don't allow any multishot execution from io-wq. It's more restrictive
|
||||||
* than necessary and also cleaner.
|
* than necessary and also cleaner.
|
||||||
*/
|
*/
|
||||||
if (req->flags & REQ_F_APOLL_MULTISHOT) {
|
if (req->flags & (REQ_F_MULTISHOT|REQ_F_APOLL_MULTISHOT)) {
|
||||||
err = -EBADFD;
|
err = -EBADFD;
|
||||||
if (!io_file_can_poll(req))
|
if (!io_file_can_poll(req))
|
||||||
goto fail;
|
goto fail;
|
||||||
|
@ -1851,7 +1851,7 @@ fail:
|
||||||
goto fail;
|
goto fail;
|
||||||
return;
|
return;
|
||||||
} else {
|
} else {
|
||||||
req->flags &= ~REQ_F_APOLL_MULTISHOT;
|
req->flags &= ~(REQ_F_APOLL_MULTISHOT|REQ_F_MULTISHOT);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -448,6 +448,7 @@ int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
|
||||||
sr->msg_flags |= MSG_WAITALL;
|
sr->msg_flags |= MSG_WAITALL;
|
||||||
sr->buf_group = req->buf_index;
|
sr->buf_group = req->buf_index;
|
||||||
req->buf_list = NULL;
|
req->buf_list = NULL;
|
||||||
|
req->flags |= REQ_F_MULTISHOT;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (io_is_compat(req->ctx))
|
if (io_is_compat(req->ctx))
|
||||||
|
|
Loading…
Add table
Reference in a new issue