mirror of
git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
synced 2025-11-01 09:13:37 +00:00
mptcp: don't always assume copied data in mptcp_cleanup_rbuf()
Under some corner cases the MPTCP protocol can end up invoking
mptcp_cleanup_rbuf() when no data has been copied, but such helper
assumes the opposite condition.
Explicitly drop such assumption and perform the costly call only
when strictly needed - before releasing the msk socket lock.
Fixes: fd8976790a ("mptcp: be careful on MPTCP-level ack.")
Cc: stable@vger.kernel.org
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
Reviewed-by: Mat Martineau <martineau@kernel.org>
Signed-off-by: Matthieu Baerts (NGI0) <matttbe@kernel.org>
Link: https://patch.msgid.link/20241230-net-mptcp-rbuf-fixes-v1-2-8608af434ceb@kernel.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
This commit is contained in:
parent
449e6912a2
commit
551844f26d
1 changed file with 9 additions and 9 deletions
|
|
@ -528,13 +528,13 @@ static void mptcp_send_ack(struct mptcp_sock *msk)
|
|||
mptcp_subflow_send_ack(mptcp_subflow_tcp_sock(subflow));
|
||||
}
|
||||
|
||||
static void mptcp_subflow_cleanup_rbuf(struct sock *ssk)
|
||||
static void mptcp_subflow_cleanup_rbuf(struct sock *ssk, int copied)
|
||||
{
|
||||
bool slow;
|
||||
|
||||
slow = lock_sock_fast(ssk);
|
||||
if (tcp_can_send_ack(ssk))
|
||||
tcp_cleanup_rbuf(ssk, 1);
|
||||
tcp_cleanup_rbuf(ssk, copied);
|
||||
unlock_sock_fast(ssk, slow);
|
||||
}
|
||||
|
||||
|
|
@ -551,7 +551,7 @@ static bool mptcp_subflow_could_cleanup(const struct sock *ssk, bool rx_empty)
|
|||
(ICSK_ACK_PUSHED2 | ICSK_ACK_PUSHED)));
|
||||
}
|
||||
|
||||
static void mptcp_cleanup_rbuf(struct mptcp_sock *msk)
|
||||
static void mptcp_cleanup_rbuf(struct mptcp_sock *msk, int copied)
|
||||
{
|
||||
int old_space = READ_ONCE(msk->old_wspace);
|
||||
struct mptcp_subflow_context *subflow;
|
||||
|
|
@ -559,14 +559,14 @@ static void mptcp_cleanup_rbuf(struct mptcp_sock *msk)
|
|||
int space = __mptcp_space(sk);
|
||||
bool cleanup, rx_empty;
|
||||
|
||||
cleanup = (space > 0) && (space >= (old_space << 1));
|
||||
rx_empty = !__mptcp_rmem(sk);
|
||||
cleanup = (space > 0) && (space >= (old_space << 1)) && copied;
|
||||
rx_empty = !__mptcp_rmem(sk) && copied;
|
||||
|
||||
mptcp_for_each_subflow(msk, subflow) {
|
||||
struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
|
||||
|
||||
if (cleanup || mptcp_subflow_could_cleanup(ssk, rx_empty))
|
||||
mptcp_subflow_cleanup_rbuf(ssk);
|
||||
mptcp_subflow_cleanup_rbuf(ssk, copied);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -2220,9 +2220,6 @@ static int mptcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
|
|||
|
||||
copied += bytes_read;
|
||||
|
||||
/* be sure to advertise window change */
|
||||
mptcp_cleanup_rbuf(msk);
|
||||
|
||||
if (skb_queue_empty(&msk->receive_queue) && __mptcp_move_skbs(msk))
|
||||
continue;
|
||||
|
||||
|
|
@ -2271,6 +2268,7 @@ static int mptcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
|
|||
}
|
||||
|
||||
pr_debug("block timeout %ld\n", timeo);
|
||||
mptcp_cleanup_rbuf(msk, copied);
|
||||
err = sk_wait_data(sk, &timeo, NULL);
|
||||
if (err < 0) {
|
||||
err = copied ? : err;
|
||||
|
|
@ -2278,6 +2276,8 @@ static int mptcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
|
|||
}
|
||||
}
|
||||
|
||||
mptcp_cleanup_rbuf(msk, copied);
|
||||
|
||||
out_err:
|
||||
if (cmsg_flags && copied >= 0) {
|
||||
if (cmsg_flags & MPTCP_CMSG_TS)
|
||||
|
|
|
|||
Loading…
Add table
Reference in a new issue