// SPDX-License-Identifier: GPL-2.0-only
/*
 * common code for virtio vsock
 *
 * Copyright (C) 2013-2015 Red Hat, Inc.
 * Author: Asias He <asias@redhat.com>
 *         Stefan Hajnoczi <stefanha@redhat.com>
 */
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/sched/signal.h>
#include <linux/ctype.h>
#include <linux/list.h>
#include <linux/virtio_vsock.h>
#include <uapi/linux/vsockmon.h>

#include <net/sock.h>
#include <net/af_vsock.h>

#define CREATE_TRACE_POINTS
#include <trace/events/vsock_virtio_transport_common.h>

/* How long to wait for graceful shutdown of a connection */
#define VSOCK_CLOSE_TIMEOUT (8 * HZ)

/* Threshold for detecting small packets to copy */
#define GOOD_COPY_LEN  128

static void virtio_transport_cancel_close_work(struct vsock_sock *vsk,
					       bool cancel_timeout);

static const struct virtio_transport *
virtio_transport_get_ops(struct vsock_sock *vsk)
{
	const struct vsock_transport *t = vsock_core_get_transport(vsk);

	if (WARN_ON(!t))
		return NULL;

	return container_of(t, struct virtio_transport, transport);
}

static bool virtio_transport_can_zcopy(const struct virtio_transport *t_ops,
				       struct virtio_vsock_pkt_info *info,
				       size_t pkt_len)
{
	struct iov_iter *iov_iter;

	if (!info->msg)
		return false;

	iov_iter = &info->msg->msg_iter;

	if (iov_iter->iov_offset)
		return false;

	/* We can't send whole iov. */
	if (iov_iter->count > pkt_len)
		return false;

	/* Check that transport can send data in zerocopy mode. */
	t_ops = virtio_transport_get_ops(info->vsk);

	if (t_ops->can_msgzerocopy) {
		int pages_to_send = iov_iter_npages(iov_iter, MAX_SKB_FRAGS);

		/* +1 is for packet header. */
		return t_ops->can_msgzerocopy(pages_to_send + 1);
	}

	return true;
}
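
/* Note on the checks above: zerocopy is attempted only when the whole
 * user buffer can go out as-is, meaning no offset into the first iov
 * segment and no more iterator data than the credit-limited packet
 * length.  For example, a 64 KiB sendmsg(MSG_ZEROCOPY) with full peer
 * credit passes all three checks, while the same buffer mid-iteration
 * (iov_offset != 0) falls back to the copy path.
 */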

static int virtio_transport_init_zcopy_skb(struct vsock_sock *vsk,
					   struct sk_buff *skb,
					   struct msghdr *msg,
					   bool zerocopy)
{
	struct ubuf_info *uarg;

	if (msg->msg_ubuf) {
		uarg = msg->msg_ubuf;
		net_zcopy_get(uarg);
	} else {
		struct iov_iter *iter = &msg->msg_iter;
		struct ubuf_info_msgzc *uarg_zc;

		uarg = msg_zerocopy_realloc(sk_vsock(vsk),
					    iter->count,
					    NULL, false);
		if (!uarg)
			return -1;

		uarg_zc = uarg_to_msgzc(uarg);
		uarg_zc->zerocopy = zerocopy ? 1 : 0;
	}

	skb_zcopy_init(skb, uarg);

	return 0;
}
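
/* The ubuf_info attached above drives MSG_ZEROCOPY completion
 * notification, broadly the same error-queue mechanism TCP and UDP
 * use (SO_EE_ORIGIN_ZEROCOPY).  Illustrative userspace sketch:
 *
 *	send(fd, buf, len, MSG_ZEROCOPY);
 *	recvmsg(fd, &msg, MSG_ERRQUEUE);	// reap the completion
 */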

static int virtio_transport_fill_skb(struct sk_buff *skb,
				     struct virtio_vsock_pkt_info *info,
				     size_t len,
				     bool zcopy)
{
	if (zcopy)
		return __zerocopy_sg_from_iter(info->msg, NULL, skb,
					       &info->msg->msg_iter, len, NULL);

	virtio_vsock_skb_put(skb, len);

	return skb_copy_datagram_from_iter(skb, 0, &info->msg->msg_iter, len);
}

static void virtio_transport_init_hdr(struct sk_buff *skb,
				      struct virtio_vsock_pkt_info *info,
				      size_t payload_len,
				      u32 src_cid,
				      u32 src_port,
				      u32 dst_cid,
				      u32 dst_port)
{
	struct virtio_vsock_hdr *hdr;

	hdr = virtio_vsock_hdr(skb);
	hdr->type	= cpu_to_le16(info->type);
	hdr->op		= cpu_to_le16(info->op);
	hdr->src_cid	= cpu_to_le64(src_cid);
	hdr->dst_cid	= cpu_to_le64(dst_cid);
	hdr->src_port	= cpu_to_le32(src_port);
	hdr->dst_port	= cpu_to_le32(dst_port);
	hdr->flags	= cpu_to_le32(info->flags);
	hdr->len	= cpu_to_le32(payload_len);
	hdr->buf_alloc	= cpu_to_le32(0);
	hdr->fwd_cnt	= cpu_to_le32(0);
}
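
/* All fields written above are little-endian on the wire, as required
 * by the virtio specification.  buf_alloc and fwd_cnt are deliberately
 * left zero here: virtio_transport_inc_tx_pkt() fills in the live
 * credit values right before the packet is handed to the transport.
 */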

static void virtio_transport_copy_nonlinear_skb(const struct sk_buff *skb,
						void *dst,
						size_t len)
{
	struct iov_iter iov_iter = { 0 };
	struct kvec kvec;
	size_t to_copy;

	kvec.iov_base = dst;
	kvec.iov_len = len;

	iov_iter.iter_type = ITER_KVEC;
	iov_iter.kvec = &kvec;
	iov_iter.nr_segs = 1;

	to_copy = min_t(size_t, len, skb->len);

	skb_copy_datagram_iter(skb, VIRTIO_VSOCK_SKB_CB(skb)->offset,
			       &iov_iter, to_copy);
}

/* Packet capture */
static struct sk_buff *virtio_transport_build_skb(void *opaque)
{
	struct virtio_vsock_hdr *pkt_hdr;
	struct sk_buff *pkt = opaque;
	struct af_vsockmon_hdr *hdr;
	struct sk_buff *skb;
	size_t payload_len;

	/* A packet could be split to fit the RX buffer, so we can retrieve
	 * the payload length from the header and the buffer pointer taking
	 * care of the offset in the original packet.
	 */
	pkt_hdr = virtio_vsock_hdr(pkt);
	payload_len = pkt->len;

	skb = alloc_skb(sizeof(*hdr) + sizeof(*pkt_hdr) + payload_len,
			GFP_ATOMIC);
	if (!skb)
		return NULL;

	hdr = skb_put(skb, sizeof(*hdr));

	/* pkt->hdr is little-endian so no need to byteswap here */
	hdr->src_cid = pkt_hdr->src_cid;
	hdr->src_port = pkt_hdr->src_port;
	hdr->dst_cid = pkt_hdr->dst_cid;
	hdr->dst_port = pkt_hdr->dst_port;

	hdr->transport = cpu_to_le16(AF_VSOCK_TRANSPORT_VIRTIO);
	hdr->len = cpu_to_le16(sizeof(*pkt_hdr));
	memset(hdr->reserved, 0, sizeof(hdr->reserved));

	switch (le16_to_cpu(pkt_hdr->op)) {
	case VIRTIO_VSOCK_OP_REQUEST:
	case VIRTIO_VSOCK_OP_RESPONSE:
		hdr->op = cpu_to_le16(AF_VSOCK_OP_CONNECT);
		break;
	case VIRTIO_VSOCK_OP_RST:
	case VIRTIO_VSOCK_OP_SHUTDOWN:
		hdr->op = cpu_to_le16(AF_VSOCK_OP_DISCONNECT);
		break;
	case VIRTIO_VSOCK_OP_RW:
		hdr->op = cpu_to_le16(AF_VSOCK_OP_PAYLOAD);
		break;
	case VIRTIO_VSOCK_OP_CREDIT_UPDATE:
	case VIRTIO_VSOCK_OP_CREDIT_REQUEST:
		hdr->op = cpu_to_le16(AF_VSOCK_OP_CONTROL);
		break;
	default:
		hdr->op = cpu_to_le16(AF_VSOCK_OP_UNKNOWN);
		break;
	}

	skb_put_data(skb, pkt_hdr, sizeof(*pkt_hdr));

	if (payload_len) {
		if (skb_is_nonlinear(pkt)) {
			void *data = skb_put(skb, payload_len);

			virtio_transport_copy_nonlinear_skb(pkt, data, payload_len);
		} else {
			skb_put_data(skb, pkt->data, payload_len);
		}
	}

	return skb;
}
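
/* The skb built above follows the vsockmon capture layout: an
 * af_vsockmon_hdr, then the raw little-endian virtio_vsock_hdr, then
 * the payload.  Capture tools reading a vsockmon device are expected
 * to decode exactly this sequence.
 */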

void virtio_transport_deliver_tap_pkt(struct sk_buff *skb)
{
	if (virtio_vsock_skb_tap_delivered(skb))
		return;

	vsock_deliver_tap(virtio_transport_build_skb, skb);
	virtio_vsock_skb_set_tap_delivered(skb);
}
EXPORT_SYMBOL_GPL(virtio_transport_deliver_tap_pkt);

static u16 virtio_transport_get_type(struct sock *sk)
{
	if (sk->sk_type == SOCK_STREAM)
		return VIRTIO_VSOCK_TYPE_STREAM;
	else
		return VIRTIO_VSOCK_TYPE_SEQPACKET;
}

/* Returns new sk_buff on success, otherwise returns NULL. */
static struct sk_buff *virtio_transport_alloc_skb(struct virtio_vsock_pkt_info *info,
						  size_t payload_len,
						  bool zcopy,
						  u32 src_cid,
						  u32 src_port,
						  u32 dst_cid,
						  u32 dst_port)
{
	struct vsock_sock *vsk;
	struct sk_buff *skb;
	size_t skb_len;

	skb_len = VIRTIO_VSOCK_SKB_HEADROOM;

	if (!zcopy)
		skb_len += payload_len;

	skb = virtio_vsock_alloc_skb(skb_len, GFP_KERNEL);
	if (!skb)
		return NULL;

	virtio_transport_init_hdr(skb, info, payload_len, src_cid, src_port,
				  dst_cid, dst_port);

	vsk = info->vsk;

	/* If 'vsk' != NULL then payload is always present, so we
	 * will never call '__zerocopy_sg_from_iter()' below without
	 * setting skb owner in 'skb_set_owner_w()'. The only case
	 * when 'vsk' == NULL is VIRTIO_VSOCK_OP_RST control message
	 * without payload.
	 */
	WARN_ON_ONCE(!(vsk && (info->msg && payload_len)) && zcopy);

	/* Set owner here, because '__zerocopy_sg_from_iter()' uses
	 * owner of skb without check to update 'sk_wmem_alloc'.
	 */
	if (vsk)
		skb_set_owner_w(skb, sk_vsock(vsk));

	if (info->msg && payload_len > 0) {
		int err;

		err = virtio_transport_fill_skb(skb, info, payload_len, zcopy);
		if (err)
			goto out;

		if (msg_data_left(info->msg) == 0 &&
		    info->type == VIRTIO_VSOCK_TYPE_SEQPACKET) {
			struct virtio_vsock_hdr *hdr = virtio_vsock_hdr(skb);

			hdr->flags |= cpu_to_le32(VIRTIO_VSOCK_SEQ_EOM);

			if (info->msg->msg_flags & MSG_EOR)
				hdr->flags |= cpu_to_le32(VIRTIO_VSOCK_SEQ_EOR);
		}
	}

	if (info->reply)
		virtio_vsock_skb_set_reply(skb);

	trace_virtio_transport_alloc_pkt(src_cid, src_port,
					 dst_cid, dst_port,
					 payload_len,
					 info->type,
					 info->op,
					 info->flags,
					 zcopy);

	return skb;

out:
	kfree_skb(skb);
	return NULL;
}
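
/* Allocation summary: in copy mode the skb carries headroom plus a
 * linear payload area; in zerocopy mode only VIRTIO_VSOCK_SKB_HEADROOM
 * is allocated and the user pages are attached as fragments by
 * __zerocopy_sg_from_iter(), so payload bytes are never copied into
 * kernel memory on the TX path.
 */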

/* This function can only be used on connecting/connected sockets,
 * since a socket assigned to a transport is required.
 *
 * Do not use on listener sockets!
 */
static int virtio_transport_send_pkt_info(struct vsock_sock *vsk,
					  struct virtio_vsock_pkt_info *info)
{
	u32 max_skb_len = VIRTIO_VSOCK_MAX_PKT_BUF_SIZE;
	u32 src_cid, src_port, dst_cid, dst_port;
	const struct virtio_transport *t_ops;
	struct virtio_vsock_sock *vvs;
	u32 pkt_len = info->pkt_len;
	bool can_zcopy = false;
	u32 rest_len;
	int ret;

	info->type = virtio_transport_get_type(sk_vsock(vsk));

	t_ops = virtio_transport_get_ops(vsk);
	if (unlikely(!t_ops))
		return -EFAULT;

	src_cid = t_ops->transport.get_local_cid();
	src_port = vsk->local_addr.svm_port;
	if (!info->remote_cid) {
		dst_cid	= vsk->remote_addr.svm_cid;
		dst_port = vsk->remote_addr.svm_port;
	} else {
		dst_cid = info->remote_cid;
		dst_port = info->remote_port;
	}

	vvs = vsk->trans;

	/* virtio_transport_get_credit might return less than pkt_len credit */
	pkt_len = virtio_transport_get_credit(vvs, pkt_len);

	/* Do not send zero length OP_RW pkt */
	if (pkt_len == 0 && info->op == VIRTIO_VSOCK_OP_RW)
		return pkt_len;

	if (info->msg) {
		/* If zerocopy is not enabled by 'setsockopt()', we behave as
		 * there is no MSG_ZEROCOPY flag set.
		 */
		if (!sock_flag(sk_vsock(vsk), SOCK_ZEROCOPY))
			info->msg->msg_flags &= ~MSG_ZEROCOPY;

		if (info->msg->msg_flags & MSG_ZEROCOPY)
			can_zcopy = virtio_transport_can_zcopy(t_ops, info, pkt_len);

		if (can_zcopy)
			max_skb_len = min_t(u32, VIRTIO_VSOCK_MAX_PKT_BUF_SIZE,
					    (MAX_SKB_FRAGS * PAGE_SIZE));
	}

	rest_len = pkt_len;

	do {
		struct sk_buff *skb;
		size_t skb_len;

		skb_len = min(max_skb_len, rest_len);

		skb = virtio_transport_alloc_skb(info, skb_len, can_zcopy,
						 src_cid, src_port,
						 dst_cid, dst_port);
		if (!skb) {
			ret = -ENOMEM;
			break;
		}

		/* We process buffer part by part, allocating skb on
		 * each iteration. If this is last skb for this buffer
		 * and MSG_ZEROCOPY mode is in use - we must allocate
		 * completion for the current syscall.
		 */
		if (info->msg && info->msg->msg_flags & MSG_ZEROCOPY &&
		    skb_len == rest_len && info->op == VIRTIO_VSOCK_OP_RW) {
			if (virtio_transport_init_zcopy_skb(vsk, skb,
							    info->msg,
							    can_zcopy)) {
				kfree_skb(skb);
				ret = -ENOMEM;
				break;
			}
		}

		virtio_transport_inc_tx_pkt(vvs, skb);

		ret = t_ops->send_pkt(skb);
		if (ret < 0)
			break;

		/* Both virtio and vhost 'send_pkt()' returns 'skb_len',
		 * but for reliability use 'ret' instead of 'skb_len'.
		 * Also if partial send happens (e.g. 'ret' != 'skb_len')
		 * somehow, we break this loop, but account such returned
		 * value in 'virtio_transport_put_credit()'.
		 */
		rest_len -= ret;

		if (WARN_ONCE(ret != skb_len,
			      "'send_pkt()' returns %i, but %zu expected\n",
			      ret, skb_len))
			break;
	} while (rest_len);

	virtio_transport_put_credit(vvs, rest_len);

	/* Return number of bytes, if any data has been sent. */
	if (rest_len != pkt_len)
		ret = pkt_len - rest_len;

	return ret;
}
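
/* Worked example of the loop above (assuming the usual 64 KiB
 * VIRTIO_VSOCK_MAX_PKT_BUF_SIZE): a 128 KiB stream send with full
 * credit is split into two 64 KiB skbs.  If the peer granted only
 * 80 KiB of credit, pkt_len is clamped first, the loop sends
 * 64 KiB + 16 KiB, and 81920 is returned.
 */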

static bool virtio_transport_inc_rx_pkt(struct virtio_vsock_sock *vvs,
					u32 len)
{
	if (vvs->buf_used + len > vvs->buf_alloc)
		return false;

	vvs->rx_bytes += len;
	vvs->buf_used += len;
	return true;
}

static void virtio_transport_dec_rx_pkt(struct virtio_vsock_sock *vvs,
					u32 bytes_read, u32 bytes_dequeued)
{
	vvs->rx_bytes -= bytes_read;
	vvs->buf_used -= bytes_dequeued;
	vvs->fwd_cnt += bytes_dequeued;
}
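
/* Three RX counters cooperate here:
 *  - rx_bytes: unread payload, backing .stream_has_data() and the
 *    SO_RCVLOWAT check; decremented on every read, even partial ones.
 *  - buf_used: space charged against our advertised receive buffer;
 *    released only once a packet is fully consumed.
 *  - fwd_cnt: cumulative bytes freed, advertised back to the peer so
 *    it can compute how much credit it still has.
 */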

void virtio_transport_inc_tx_pkt(struct virtio_vsock_sock *vvs, struct sk_buff *skb)
{
	struct virtio_vsock_hdr *hdr = virtio_vsock_hdr(skb);

	spin_lock_bh(&vvs->rx_lock);
	vvs->last_fwd_cnt = vvs->fwd_cnt;
	hdr->fwd_cnt = cpu_to_le32(vvs->fwd_cnt);
	hdr->buf_alloc = cpu_to_le32(vvs->buf_alloc);
	spin_unlock_bh(&vvs->rx_lock);
}
EXPORT_SYMBOL_GPL(virtio_transport_inc_tx_pkt);

void virtio_transport_consume_skb_sent(struct sk_buff *skb, bool consume)
{
	struct sock *s = skb->sk;

	if (s && skb->len) {
		struct vsock_sock *vs = vsock_sk(s);
		struct virtio_vsock_sock *vvs;

		vvs = vs->trans;

		spin_lock_bh(&vvs->tx_lock);
		vvs->bytes_unsent -= skb->len;
		spin_unlock_bh(&vvs->tx_lock);
	}

	if (consume)
		consume_skb(skb);
}
EXPORT_SYMBOL_GPL(virtio_transport_consume_skb_sent);

u32 virtio_transport_get_credit(struct virtio_vsock_sock *vvs, u32 credit)
{
	u32 ret;

	if (!credit)
		return 0;

	spin_lock_bh(&vvs->tx_lock);
	ret = vvs->peer_buf_alloc - (vvs->tx_cnt - vvs->peer_fwd_cnt);
	if (ret > credit)
		ret = credit;
	vvs->tx_cnt += ret;
	vvs->bytes_unsent += ret;
	spin_unlock_bh(&vvs->tx_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(virtio_transport_get_credit);

void virtio_transport_put_credit(struct virtio_vsock_sock *vvs, u32 credit)
{
	if (!credit)
		return;

	spin_lock_bh(&vvs->tx_lock);
	vvs->tx_cnt -= credit;
	vvs->bytes_unsent -= credit;
	spin_unlock_bh(&vvs->tx_lock);
}
EXPORT_SYMBOL_GPL(virtio_transport_put_credit);
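
/* Credit arithmetic sketch: the peer advertised peer_buf_alloc bytes
 * of receive buffer and has acknowledged peer_fwd_cnt bytes, so after
 * sending tx_cnt bytes the remaining credit is
 *
 *	peer_buf_alloc - (tx_cnt - peer_fwd_cnt)
 *
 * e.g. 256 KiB advertised, 300 KiB sent, 100 KiB acknowledged leaves
 * 56 KiB.  The u32 counters may wrap, but the subtraction remains
 * correct in modular arithmetic.
 */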

static int virtio_transport_send_credit_update(struct vsock_sock *vsk)
{
	struct virtio_vsock_pkt_info info = {
		.op = VIRTIO_VSOCK_OP_CREDIT_UPDATE,
		.vsk = vsk,
	};

	return virtio_transport_send_pkt_info(vsk, &info);
}

static ssize_t
virtio_transport_stream_do_peek(struct vsock_sock *vsk,
				struct msghdr *msg,
				size_t len)
{
	struct virtio_vsock_sock *vvs = vsk->trans;
	struct sk_buff *skb;
	size_t total = 0;
	int err;

	spin_lock_bh(&vvs->rx_lock);

	skb_queue_walk(&vvs->rx_queue, skb) {
		size_t bytes;

		bytes = len - total;
		if (bytes > skb->len)
			bytes = skb->len;

		spin_unlock_bh(&vvs->rx_lock);

		/* sk_lock is held by caller so no one else can dequeue.
		 * Unlock rx_lock since skb_copy_datagram_iter() may sleep.
		 */
		err = skb_copy_datagram_iter(skb, VIRTIO_VSOCK_SKB_CB(skb)->offset,
					     &msg->msg_iter, bytes);
		if (err)
			goto out;

		total += bytes;

		spin_lock_bh(&vvs->rx_lock);

		if (total == len)
			break;
	}

	spin_unlock_bh(&vvs->rx_lock);

	return total;

out:
	if (total)
		err = total;
	return err;
}

static ssize_t
virtio_transport_stream_do_dequeue(struct vsock_sock *vsk,
				   struct msghdr *msg,
				   size_t len)
{
	struct virtio_vsock_sock *vvs = vsk->trans;
	struct sk_buff *skb;
	u32 fwd_cnt_delta;
	bool low_rx_bytes;
	int err = -EFAULT;
	size_t total = 0;
	u32 free_space;

	spin_lock_bh(&vvs->rx_lock);

	if (WARN_ONCE(skb_queue_empty(&vvs->rx_queue) && vvs->rx_bytes,
		      "rx_queue is empty, but rx_bytes is non-zero\n")) {
		spin_unlock_bh(&vvs->rx_lock);
		return err;
	}

	while (total < len && !skb_queue_empty(&vvs->rx_queue)) {
		size_t bytes, dequeued = 0;

		skb = skb_peek(&vvs->rx_queue);

		bytes = min_t(size_t, len - total,
			      skb->len - VIRTIO_VSOCK_SKB_CB(skb)->offset);

		/* sk_lock is held by caller so no one else can dequeue.
		 * Unlock rx_lock since skb_copy_datagram_iter() may sleep.
		 */
		spin_unlock_bh(&vvs->rx_lock);

		err = skb_copy_datagram_iter(skb,
					     VIRTIO_VSOCK_SKB_CB(skb)->offset,
					     &msg->msg_iter, bytes);
		if (err)
			goto out;

		spin_lock_bh(&vvs->rx_lock);

		total += bytes;

		VIRTIO_VSOCK_SKB_CB(skb)->offset += bytes;

		if (skb->len == VIRTIO_VSOCK_SKB_CB(skb)->offset) {
			dequeued = le32_to_cpu(virtio_vsock_hdr(skb)->len);
			__skb_unlink(skb, &vvs->rx_queue);
			consume_skb(skb);
		}

		virtio_transport_dec_rx_pkt(vvs, bytes, dequeued);
	}

	fwd_cnt_delta = vvs->fwd_cnt - vvs->last_fwd_cnt;
	free_space = vvs->buf_alloc - fwd_cnt_delta;
	low_rx_bytes = (vvs->rx_bytes <
			sock_rcvlowat(sk_vsock(vsk), 0, INT_MAX));

	spin_unlock_bh(&vvs->rx_lock);

	/* To reduce the number of credit update messages,
	 * don't update credits as long as lots of space is available.
	 * Note: the limit chosen here is arbitrary. Setting the limit
	 * too high causes extra messages. Too low causes transmitter
	 * stalls. As stalls are in theory more expensive than extra
	 * messages, we set the limit to a high value. TODO: experiment
	 * with different values. Also send credit update message when
	 * number of bytes in rx queue is not enough to wake up reader.
	 */
	if (fwd_cnt_delta &&
	    (free_space < VIRTIO_VSOCK_MAX_PKT_BUF_SIZE || low_rx_bytes))
		virtio_transport_send_credit_update(vsk);

	return total;

out:
	if (total)
		err = total;
	return err;
}
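
/* Example of the heuristic above: with a 256 KiB buf_alloc and the
 * usual 64 KiB VIRTIO_VSOCK_MAX_PKT_BUF_SIZE, no credit update goes
 * out until more than 192 KiB has been consumed since the last
 * advertised fwd_cnt, unless rx_bytes drops below SO_RCVLOWAT and
 * the remote writer could otherwise stall the local reader.
 */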

static ssize_t
virtio_transport_seqpacket_do_peek(struct vsock_sock *vsk,
				   struct msghdr *msg)
{
	struct virtio_vsock_sock *vvs = vsk->trans;
	struct sk_buff *skb;
	size_t total, len;

	spin_lock_bh(&vvs->rx_lock);

	if (!vvs->msg_count) {
		spin_unlock_bh(&vvs->rx_lock);
		return 0;
	}

	total = 0;
	len = msg_data_left(msg);

	skb_queue_walk(&vvs->rx_queue, skb) {
		struct virtio_vsock_hdr *hdr;

		if (total < len) {
			size_t bytes;
			int err;

			bytes = len - total;
			if (bytes > skb->len)
				bytes = skb->len;

			spin_unlock_bh(&vvs->rx_lock);

			/* sk_lock is held by caller so no one else can dequeue.
			 * Unlock rx_lock since skb_copy_datagram_iter() may sleep.
			 */
			err = skb_copy_datagram_iter(skb, VIRTIO_VSOCK_SKB_CB(skb)->offset,
						     &msg->msg_iter, bytes);
			if (err)
				return err;

			spin_lock_bh(&vvs->rx_lock);
		}

		total += skb->len;
		hdr = virtio_vsock_hdr(skb);

		if (le32_to_cpu(hdr->flags) & VIRTIO_VSOCK_SEQ_EOM) {
			if (le32_to_cpu(hdr->flags) & VIRTIO_VSOCK_SEQ_EOR)
				msg->msg_flags |= MSG_EOR;

			break;
		}
	}

	spin_unlock_bh(&vvs->rx_lock);

	return total;
}

static int virtio_transport_seqpacket_do_dequeue(struct vsock_sock *vsk,
						 struct msghdr *msg,
						 int flags)
{
	struct virtio_vsock_sock *vvs = vsk->trans;
	int dequeued_len = 0;
	size_t user_buf_len = msg_data_left(msg);
	bool msg_ready = false;
	struct sk_buff *skb;

	spin_lock_bh(&vvs->rx_lock);

	if (vvs->msg_count == 0) {
		spin_unlock_bh(&vvs->rx_lock);
		return 0;
	}

	while (!msg_ready) {
		struct virtio_vsock_hdr *hdr;
		size_t pkt_len;

		skb = __skb_dequeue(&vvs->rx_queue);
		if (!skb)
			break;
		hdr = virtio_vsock_hdr(skb);
		pkt_len = (size_t)le32_to_cpu(hdr->len);

		if (dequeued_len >= 0) {
			size_t bytes_to_copy;

			bytes_to_copy = min(user_buf_len, pkt_len);

			if (bytes_to_copy) {
				int err;

				/* sk_lock is held by caller so no one else can dequeue.
				 * Unlock rx_lock since skb_copy_datagram_iter() may sleep.
				 */
				spin_unlock_bh(&vvs->rx_lock);

				err = skb_copy_datagram_iter(skb, 0,
							     &msg->msg_iter,
							     bytes_to_copy);
				if (err) {
					/* Copy of message failed. Rest of
					 * fragments will be freed without copy.
					 */
					dequeued_len = err;
				} else {
					user_buf_len -= bytes_to_copy;
				}

				spin_lock_bh(&vvs->rx_lock);
			}

			if (dequeued_len >= 0)
				dequeued_len += pkt_len;
		}

		if (le32_to_cpu(hdr->flags) & VIRTIO_VSOCK_SEQ_EOM) {
			msg_ready = true;
			vvs->msg_count--;

			if (le32_to_cpu(hdr->flags) & VIRTIO_VSOCK_SEQ_EOR)
				msg->msg_flags |= MSG_EOR;
		}

		virtio_transport_dec_rx_pkt(vvs, pkt_len, pkt_len);
		kfree_skb(skb);
	}

	spin_unlock_bh(&vvs->rx_lock);

	virtio_transport_send_credit_update(vsk);

	return dequeued_len;
}
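
/* A SEQPACKET message may span several skbs; the loop above keeps
 * dequeueing fragments until one carries VIRTIO_VSOCK_SEQ_EOM.  When
 * the user buffer is smaller than the message, the tail fragments are
 * freed uncopied but the full message length is still returned, which
 * is what lets the socket layer flag truncation (MSG_TRUNC).
 */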

ssize_t
virtio_transport_stream_dequeue(struct vsock_sock *vsk,
				struct msghdr *msg,
				size_t len, int flags)
{
	if (flags & MSG_PEEK)
		return virtio_transport_stream_do_peek(vsk, msg, len);
	else
		return virtio_transport_stream_do_dequeue(vsk, msg, len);
}
EXPORT_SYMBOL_GPL(virtio_transport_stream_dequeue);

ssize_t
virtio_transport_seqpacket_dequeue(struct vsock_sock *vsk,
				   struct msghdr *msg,
				   int flags)
{
	if (flags & MSG_PEEK)
		return virtio_transport_seqpacket_do_peek(vsk, msg);
	else
		return virtio_transport_seqpacket_do_dequeue(vsk, msg, flags);
}
EXPORT_SYMBOL_GPL(virtio_transport_seqpacket_dequeue);

int
virtio_transport_seqpacket_enqueue(struct vsock_sock *vsk,
				   struct msghdr *msg,
				   size_t len)
{
	struct virtio_vsock_sock *vvs = vsk->trans;

	spin_lock_bh(&vvs->tx_lock);

	if (len > vvs->peer_buf_alloc) {
		spin_unlock_bh(&vvs->tx_lock);
		return -EMSGSIZE;
	}

	spin_unlock_bh(&vvs->tx_lock);

	return virtio_transport_stream_enqueue(vsk, msg, len);
}
EXPORT_SYMBOL_GPL(virtio_transport_seqpacket_enqueue);
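
/* Unlike stream sends, a seqpacket message must fit the peer's whole
 * receive buffer up front: a message larger than peer_buf_alloc could
 * never be delivered atomically, so it is rejected with -EMSGSIZE
 * instead of blocking on credit.
 */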

int
virtio_transport_dgram_dequeue(struct vsock_sock *vsk,
			       struct msghdr *msg,
			       size_t len, int flags)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL_GPL(virtio_transport_dgram_dequeue);

s64 virtio_transport_stream_has_data(struct vsock_sock *vsk)
{
	struct virtio_vsock_sock *vvs = vsk->trans;
	s64 bytes;

	spin_lock_bh(&vvs->rx_lock);
	bytes = vvs->rx_bytes;
	spin_unlock_bh(&vvs->rx_lock);

	return bytes;
}
EXPORT_SYMBOL_GPL(virtio_transport_stream_has_data);

u32 virtio_transport_seqpacket_has_data(struct vsock_sock *vsk)
{
	struct virtio_vsock_sock *vvs = vsk->trans;
	u32 msg_count;

	spin_lock_bh(&vvs->rx_lock);
	msg_count = vvs->msg_count;
	spin_unlock_bh(&vvs->rx_lock);

	return msg_count;
}
EXPORT_SYMBOL_GPL(virtio_transport_seqpacket_has_data);

static s64 virtio_transport_has_space(struct vsock_sock *vsk)
{
	struct virtio_vsock_sock *vvs = vsk->trans;
	s64 bytes;

	bytes = (s64)vvs->peer_buf_alloc - (vvs->tx_cnt - vvs->peer_fwd_cnt);
	if (bytes < 0)
		bytes = 0;

	return bytes;
}

s64 virtio_transport_stream_has_space(struct vsock_sock *vsk)
{
	struct virtio_vsock_sock *vvs = vsk->trans;
	s64 bytes;

	spin_lock_bh(&vvs->tx_lock);
	bytes = virtio_transport_has_space(vsk);
	spin_unlock_bh(&vvs->tx_lock);

	return bytes;
}
EXPORT_SYMBOL_GPL(virtio_transport_stream_has_space);

int virtio_transport_do_socket_init(struct vsock_sock *vsk,
				    struct vsock_sock *psk)
{
	struct virtio_vsock_sock *vvs;

	vvs = kzalloc(sizeof(*vvs), GFP_KERNEL);
	if (!vvs)
		return -ENOMEM;

	vsk->trans = vvs;
	vvs->vsk = vsk;
	if (psk && psk->trans) {
		struct virtio_vsock_sock *ptrans = psk->trans;

		vvs->peer_buf_alloc = ptrans->peer_buf_alloc;
	}

	if (vsk->buffer_size > VIRTIO_VSOCK_MAX_BUF_SIZE)
		vsk->buffer_size = VIRTIO_VSOCK_MAX_BUF_SIZE;

	vvs->buf_alloc = vsk->buffer_size;

	spin_lock_init(&vvs->rx_lock);
	spin_lock_init(&vvs->tx_lock);
	skb_queue_head_init(&vvs->rx_queue);

	return 0;
}
EXPORT_SYMBOL_GPL(virtio_transport_do_socket_init);
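
/* A socket accepted from a listener inherits peer_buf_alloc from the
 * parent (psk), so credit learned during the handshake is not lost.
 * buf_alloc itself always starts from the socket's configured
 * buffer_size, capped at VIRTIO_VSOCK_MAX_BUF_SIZE.
 */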

/* sk_lock held by the caller */
void virtio_transport_notify_buffer_size(struct vsock_sock *vsk, u64 *val)
{
	struct virtio_vsock_sock *vvs = vsk->trans;

	if (*val > VIRTIO_VSOCK_MAX_BUF_SIZE)
		*val = VIRTIO_VSOCK_MAX_BUF_SIZE;

	vvs->buf_alloc = *val;

	virtio_transport_send_credit_update(vsk);
}
EXPORT_SYMBOL_GPL(virtio_transport_notify_buffer_size);

int
virtio_transport_notify_poll_in(struct vsock_sock *vsk,
				size_t target,
				bool *data_ready_now)
{
	*data_ready_now = vsock_stream_has_data(vsk) >= target;

	return 0;
}
EXPORT_SYMBOL_GPL(virtio_transport_notify_poll_in);

int
virtio_transport_notify_poll_out(struct vsock_sock *vsk,
				 size_t target,
				 bool *space_avail_now)
{
	s64 free_space;

	free_space = vsock_stream_has_space(vsk);
	if (free_space > 0)
		*space_avail_now = true;
	else if (free_space == 0)
		*space_avail_now = false;

	return 0;
}
EXPORT_SYMBOL_GPL(virtio_transport_notify_poll_out);

int virtio_transport_notify_recv_init(struct vsock_sock *vsk,
	size_t target, struct vsock_transport_recv_notify_data *data)
{
	return 0;
}
EXPORT_SYMBOL_GPL(virtio_transport_notify_recv_init);

int virtio_transport_notify_recv_pre_block(struct vsock_sock *vsk,
	size_t target, struct vsock_transport_recv_notify_data *data)
{
	return 0;
}
EXPORT_SYMBOL_GPL(virtio_transport_notify_recv_pre_block);

int virtio_transport_notify_recv_pre_dequeue(struct vsock_sock *vsk,
	size_t target, struct vsock_transport_recv_notify_data *data)
{
	return 0;
}
EXPORT_SYMBOL_GPL(virtio_transport_notify_recv_pre_dequeue);

int virtio_transport_notify_recv_post_dequeue(struct vsock_sock *vsk,
	size_t target, ssize_t copied, bool data_read,
	struct vsock_transport_recv_notify_data *data)
{
	return 0;
}
EXPORT_SYMBOL_GPL(virtio_transport_notify_recv_post_dequeue);

int virtio_transport_notify_send_init(struct vsock_sock *vsk,
	struct vsock_transport_send_notify_data *data)
{
	return 0;
}
EXPORT_SYMBOL_GPL(virtio_transport_notify_send_init);

int virtio_transport_notify_send_pre_block(struct vsock_sock *vsk,
	struct vsock_transport_send_notify_data *data)
{
	return 0;
}
EXPORT_SYMBOL_GPL(virtio_transport_notify_send_pre_block);

int virtio_transport_notify_send_pre_enqueue(struct vsock_sock *vsk,
	struct vsock_transport_send_notify_data *data)
{
	return 0;
}
EXPORT_SYMBOL_GPL(virtio_transport_notify_send_pre_enqueue);

int virtio_transport_notify_send_post_enqueue(struct vsock_sock *vsk,
	ssize_t written, struct vsock_transport_send_notify_data *data)
{
	return 0;
}
EXPORT_SYMBOL_GPL(virtio_transport_notify_send_post_enqueue);
|
|
|
|
|
|
|
|
u64 virtio_transport_stream_rcvhiwat(struct vsock_sock *vsk)
{
        return vsk->buffer_size;
}
EXPORT_SYMBOL_GPL(virtio_transport_stream_rcvhiwat);

bool virtio_transport_stream_is_active(struct vsock_sock *vsk)
{
        return true;
}
EXPORT_SYMBOL_GPL(virtio_transport_stream_is_active);

bool virtio_transport_stream_allow(u32 cid, u32 port)
{
        return true;
}
EXPORT_SYMBOL_GPL(virtio_transport_stream_allow);

int virtio_transport_dgram_bind(struct vsock_sock *vsk,
                                struct sockaddr_vm *addr)
{
        return -EOPNOTSUPP;
}
EXPORT_SYMBOL_GPL(virtio_transport_dgram_bind);

bool virtio_transport_dgram_allow(u32 cid, u32 port)
{
        return false;
}
EXPORT_SYMBOL_GPL(virtio_transport_dgram_allow);

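/* Start the handshake: send VIRTIO_VSOCK_OP_REQUEST; the peer's
 * VIRTIO_VSOCK_OP_RESPONSE is handled in virtio_transport_recv_connecting().
 */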
int virtio_transport_connect(struct vsock_sock *vsk)
{
        struct virtio_vsock_pkt_info info = {
                .op = VIRTIO_VSOCK_OP_REQUEST,
                .vsk = vsk,
        };

        return virtio_transport_send_pkt_info(vsk, &info);
}
EXPORT_SYMBOL_GPL(virtio_transport_connect);

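/* Map the socket layer's RCV_SHUTDOWN/SEND_SHUTDOWN mode bits onto the
 * VIRTIO_VSOCK_SHUTDOWN_RCV/SEND flags of an OP_SHUTDOWN packet.
 */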
int virtio_transport_shutdown(struct vsock_sock *vsk, int mode)
{
        struct virtio_vsock_pkt_info info = {
                .op = VIRTIO_VSOCK_OP_SHUTDOWN,
                .flags = (mode & RCV_SHUTDOWN ?
                          VIRTIO_VSOCK_SHUTDOWN_RCV : 0) |
                         (mode & SEND_SHUTDOWN ?
                          VIRTIO_VSOCK_SHUTDOWN_SEND : 0),
                .vsk = vsk,
        };

        return virtio_transport_send_pkt_info(vsk, &info);
}
EXPORT_SYMBOL_GPL(virtio_transport_shutdown);

int
virtio_transport_dgram_enqueue(struct vsock_sock *vsk,
                               struct sockaddr_vm *remote_addr,
                               struct msghdr *msg,
                               size_t dgram_len)
{
        return -EOPNOTSUPP;
}
EXPORT_SYMBOL_GPL(virtio_transport_dgram_enqueue);

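/* Queue up to 'len' bytes from 'msg' as OP_RW packets.  Note that the
 * return value can be smaller than 'len', since
 * virtio_transport_send_pkt_info() sends no more than the peer's available
 * credit allows.
 */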
ssize_t
virtio_transport_stream_enqueue(struct vsock_sock *vsk,
                                struct msghdr *msg,
                                size_t len)
{
        struct virtio_vsock_pkt_info info = {
                .op = VIRTIO_VSOCK_OP_RW,
                .msg = msg,
                .pkt_len = len,
                .vsk = vsk,
        };

        return virtio_transport_send_pkt_info(vsk, &info);
}
EXPORT_SYMBOL_GPL(virtio_transport_stream_enqueue);

void virtio_transport_destruct(struct vsock_sock *vsk)
{
        struct virtio_vsock_sock *vvs = vsk->trans;

        virtio_transport_cancel_close_work(vsk, true);

        kfree(vvs);
        vsk->trans = NULL;
}
EXPORT_SYMBOL_GPL(virtio_transport_destruct);

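/* Snapshot, under tx_lock, of 'bytes_unsent': data queued on this socket
 * that the transport has not finished sending (e.g. for SIOCOUTQ-style
 * queries).
 */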
ssize_t virtio_transport_unsent_bytes(struct vsock_sock *vsk)
{
        struct virtio_vsock_sock *vvs = vsk->trans;
        size_t ret;

        spin_lock_bh(&vvs->tx_lock);
        ret = vvs->bytes_unsent;
        spin_unlock_bh(&vvs->tx_lock);

        return ret;
}
EXPORT_SYMBOL_GPL(virtio_transport_unsent_bytes);

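/* Send an OP_RST on behalf of an existing socket.  The "no_sock" variant
 * below covers packets that cannot be matched to any socket.
 */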
static int virtio_transport_reset(struct vsock_sock *vsk,
                                  struct sk_buff *skb)
{
        struct virtio_vsock_pkt_info info = {
                .op = VIRTIO_VSOCK_OP_RST,
                .reply = !!skb,
                .vsk = vsk,
        };

        /* Send RST only if the original pkt is not a RST pkt */
        if (skb && le16_to_cpu(virtio_vsock_hdr(skb)->op) == VIRTIO_VSOCK_OP_RST)
                return 0;

        return virtio_transport_send_pkt_info(vsk, &info);
}

/* Normally packets are associated with a socket.  There may be no socket if an
 * attempt was made to connect to a socket that does not exist.
 */
static int virtio_transport_reset_no_sock(const struct virtio_transport *t,
                                          struct sk_buff *skb)
{
        struct virtio_vsock_hdr *hdr = virtio_vsock_hdr(skb);
        struct virtio_vsock_pkt_info info = {
                .op = VIRTIO_VSOCK_OP_RST,
                .type = le16_to_cpu(hdr->type),
                .reply = true,
        };
        struct sk_buff *reply;

        /* Send RST only if the original pkt is not a RST pkt */
        if (le16_to_cpu(hdr->op) == VIRTIO_VSOCK_OP_RST)
                return 0;

        if (!t)
                return -ENOTCONN;

        reply = virtio_transport_alloc_skb(&info, 0, false,
                                           le64_to_cpu(hdr->dst_cid),
                                           le32_to_cpu(hdr->dst_port),
                                           le64_to_cpu(hdr->src_cid),
                                           le32_to_cpu(hdr->src_port));
        if (!reply)
                return -ENOMEM;

        return t->send_pkt(reply);
}

/* This function should be called with sk_lock held and SOCK_DONE set */
static void virtio_transport_remove_sock(struct vsock_sock *vsk)
{
        struct virtio_vsock_sock *vvs = vsk->trans;

        /* We don't need to take rx_lock, as the socket is closing and we are
         * removing it.
         */
        __skb_queue_purge(&vvs->rx_queue);
        vsock_remove_sock(vsk);
}

static void virtio_transport_cancel_close_work(struct vsock_sock *vsk,
                                               bool cancel_timeout)
{
        struct sock *sk = sk_vsock(vsk);

        if (vsk->close_work_scheduled &&
            (!cancel_timeout || cancel_delayed_work(&vsk->close_work))) {
                vsk->close_work_scheduled = false;

                virtio_transport_remove_sock(vsk);

                /* Release refcnt obtained when we scheduled the timeout */
                sock_put(sk);
        }
}

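/* Tear down an established connection: mark the socket done, wake up
 * anyone blocked on it, and cancel the delayed close work if requested.
 */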
static void virtio_transport_do_close(struct vsock_sock *vsk,
                                      bool cancel_timeout)
{
        struct sock *sk = sk_vsock(vsk);

        sock_set_flag(sk, SOCK_DONE);
        vsk->peer_shutdown = SHUTDOWN_MASK;
        if (vsock_stream_has_data(vsk) <= 0)
                sk->sk_state = TCP_CLOSING;
        sk->sk_state_change(sk);

        virtio_transport_cancel_close_work(vsk, cancel_timeout);
}

static void virtio_transport_close_timeout(struct work_struct *work)
{
        struct vsock_sock *vsk =
                container_of(work, struct vsock_sock, close_work.work);
        struct sock *sk = sk_vsock(vsk);

        sock_hold(sk);
        lock_sock(sk);

        if (!sock_flag(sk, SOCK_DONE)) {
                (void)virtio_transport_reset(vsk, NULL);

                virtio_transport_do_close(vsk, false);
        }

        vsk->close_work_scheduled = false;

        release_sock(sk);
        sock_put(sk);
}

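/* Graceful shutdown: send OP_SHUTDOWN and give the peer VSOCK_CLOSE_TIMEOUT
 * to answer with an OP_RST; virtio_transport_close_timeout() above forces
 * the close if it never does.  Returns true when the socket can be removed
 * immediately.
 */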
/* User context, vsk->sk is locked */
static bool virtio_transport_close(struct vsock_sock *vsk)
{
        struct sock *sk = &vsk->sk;

        if (!(sk->sk_state == TCP_ESTABLISHED ||
              sk->sk_state == TCP_CLOSING))
                return true;

        /* Already received SHUTDOWN from peer, reply with RST */
        if ((vsk->peer_shutdown & SHUTDOWN_MASK) == SHUTDOWN_MASK) {
                (void)virtio_transport_reset(vsk, NULL);
                return true;
        }

        if ((sk->sk_shutdown & SHUTDOWN_MASK) != SHUTDOWN_MASK)
                (void)virtio_transport_shutdown(vsk, SHUTDOWN_MASK);

        if (!(current->flags & PF_EXITING))
                vsock_linger(sk);

        if (sock_flag(sk, SOCK_DONE))
                return true;

        sock_hold(sk);
        INIT_DELAYED_WORK(&vsk->close_work,
                          virtio_transport_close_timeout);
        vsk->close_work_scheduled = true;
        schedule_delayed_work(&vsk->close_work, VSOCK_CLOSE_TIMEOUT);
        return false;
}

void virtio_transport_release(struct vsock_sock *vsk)
{
        struct sock *sk = &vsk->sk;
        bool remove_sock = true;

        if (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET)
                remove_sock = virtio_transport_close(vsk);

        if (remove_sock) {
                sock_set_flag(sk, SOCK_DONE);
                virtio_transport_remove_sock(vsk);
        }
}
EXPORT_SYMBOL_GPL(virtio_transport_release);

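/* Client side of the handshake: the socket is in TCP_SYN_SENT, waiting for
 * the peer's answer to our OP_REQUEST.
 */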
static int
virtio_transport_recv_connecting(struct sock *sk,
                                 struct sk_buff *skb)
{
        struct virtio_vsock_hdr *hdr = virtio_vsock_hdr(skb);
        struct vsock_sock *vsk = vsock_sk(sk);
        int skerr;
        int err;

        switch (le16_to_cpu(hdr->op)) {
        case VIRTIO_VSOCK_OP_RESPONSE:
                sk->sk_state = TCP_ESTABLISHED;
                sk->sk_socket->state = SS_CONNECTED;
                vsock_insert_connected(vsk);
                sk->sk_state_change(sk);
                break;
        case VIRTIO_VSOCK_OP_INVALID:
                break;
        case VIRTIO_VSOCK_OP_RST:
                skerr = ECONNRESET;
                err = 0;
                goto destroy;
        default:
                skerr = EPROTO;
                err = -EINVAL;
                goto destroy;
        }
        return 0;

destroy:
        virtio_transport_reset(vsk, skb);
        sk->sk_state = TCP_CLOSE;
        sk->sk_err = skerr;
        sk_error_report(sk);
        return err;
}

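/* Enqueue a received packet on the socket's rx_queue, charging it against
 * our receive credit.  Small payloads may be merged into the tail skb (see
 * GOOD_COPY_LEN) so that a stream of tiny packets does not pin a full skb
 * each.
 */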
static void
virtio_transport_recv_enqueue(struct vsock_sock *vsk,
                              struct sk_buff *skb)
{
        struct virtio_vsock_sock *vvs = vsk->trans;
        bool can_enqueue, free_pkt = false;
        struct virtio_vsock_hdr *hdr;
        u32 len;

        hdr = virtio_vsock_hdr(skb);
        len = le32_to_cpu(hdr->len);

        spin_lock_bh(&vvs->rx_lock);

        can_enqueue = virtio_transport_inc_rx_pkt(vvs, len);
        if (!can_enqueue) {
                free_pkt = true;
                goto out;
        }

        if (le32_to_cpu(hdr->flags) & VIRTIO_VSOCK_SEQ_EOM)
                vvs->msg_count++;

        /* Try to copy small packets into the buffer of the last packet queued,
         * to avoid wasting memory queueing the entire buffer with a small
         * payload.
         */
        if (len <= GOOD_COPY_LEN && !skb_queue_empty(&vvs->rx_queue)) {
                struct virtio_vsock_hdr *last_hdr;
                struct sk_buff *last_skb;

                last_skb = skb_peek_tail(&vvs->rx_queue);
                last_hdr = virtio_vsock_hdr(last_skb);

                /* If there is space in the last packet queued, we copy the
                 * new packet into its buffer.  We avoid this if the last
                 * packet queued has VIRTIO_VSOCK_SEQ_EOM set, because that
                 * flag delimits a SEQPACKET message, so the new packet is the
                 * first packet of a new message.
                 */
                if (skb->len < skb_tailroom(last_skb) &&
                    !(le32_to_cpu(last_hdr->flags) & VIRTIO_VSOCK_SEQ_EOM)) {
                        memcpy(skb_put(last_skb, skb->len), skb->data, skb->len);
                        free_pkt = true;
                        last_hdr->flags |= hdr->flags;
                        le32_add_cpu(&last_hdr->len, len);
                        goto out;
                }
        }

        __skb_queue_tail(&vvs->rx_queue, skb);

out:
        spin_unlock_bh(&vvs->rx_lock);
        if (free_pkt)
                kfree_skb(skb);
}

static int
virtio_transport_recv_connected(struct sock *sk,
                                struct sk_buff *skb)
{
        struct virtio_vsock_hdr *hdr = virtio_vsock_hdr(skb);
        struct vsock_sock *vsk = vsock_sk(sk);
        int err = 0;

        switch (le16_to_cpu(hdr->op)) {
        case VIRTIO_VSOCK_OP_RW:
                virtio_transport_recv_enqueue(vsk, skb);
                vsock_data_ready(sk);
                return err;
        case VIRTIO_VSOCK_OP_CREDIT_REQUEST:
                virtio_transport_send_credit_update(vsk);
                break;
        case VIRTIO_VSOCK_OP_CREDIT_UPDATE:
                sk->sk_write_space(sk);
                break;
        case VIRTIO_VSOCK_OP_SHUTDOWN:
                if (le32_to_cpu(hdr->flags) & VIRTIO_VSOCK_SHUTDOWN_RCV)
                        vsk->peer_shutdown |= RCV_SHUTDOWN;
                if (le32_to_cpu(hdr->flags) & VIRTIO_VSOCK_SHUTDOWN_SEND)
                        vsk->peer_shutdown |= SEND_SHUTDOWN;
                if (vsk->peer_shutdown == SHUTDOWN_MASK) {
                        if (vsock_stream_has_data(vsk) <= 0 && !sock_flag(sk, SOCK_DONE)) {
                                (void)virtio_transport_reset(vsk, NULL);
                                virtio_transport_do_close(vsk, true);
                        }
                        /* Remove this socket anyway because the remote peer sent
                         * the shutdown.  This way a new connection will succeed
                         * if the remote peer uses the same source port,
                         * even if the old socket is still unreleased, but now disconnected.
                         */
                        vsock_remove_sock(vsk);
                }
                if (le32_to_cpu(virtio_vsock_hdr(skb)->flags))
                        sk->sk_state_change(sk);
                break;
        case VIRTIO_VSOCK_OP_RST:
                virtio_transport_do_close(vsk, true);
                break;
        default:
                err = -EINVAL;
                break;
        }

        kfree_skb(skb);
        return err;
}

static void
virtio_transport_recv_disconnecting(struct sock *sk,
                                    struct sk_buff *skb)
{
        struct virtio_vsock_hdr *hdr = virtio_vsock_hdr(skb);
        struct vsock_sock *vsk = vsock_sk(sk);

        if (le16_to_cpu(hdr->op) == VIRTIO_VSOCK_OP_RST)
                virtio_transport_do_close(vsk, true);
}

static int
virtio_transport_send_response(struct vsock_sock *vsk,
                               struct sk_buff *skb)
{
        struct virtio_vsock_hdr *hdr = virtio_vsock_hdr(skb);
        struct virtio_vsock_pkt_info info = {
                .op = VIRTIO_VSOCK_OP_RESPONSE,
                .remote_cid = le64_to_cpu(hdr->src_cid),
                .remote_port = le32_to_cpu(hdr->src_port),
                .reply = true,
                .vsk = vsk,
        };

        return virtio_transport_send_pkt_info(vsk, &info);
}

static bool virtio_transport_space_update(struct sock *sk,
                                          struct sk_buff *skb)
{
        struct virtio_vsock_hdr *hdr = virtio_vsock_hdr(skb);
        struct vsock_sock *vsk = vsock_sk(sk);
        struct virtio_vsock_sock *vvs = vsk->trans;
        bool space_available;

        /* Listener sockets are not associated with any transport, so we
         * cannot inspect their state to see if there is space available in
         * the remote peer, but since they are only used to receive requests,
         * we can assume that there is always space available in the other
         * peer.
         */
        if (!vvs)
                return true;

        /* buf_alloc and fwd_cnt are always included in the hdr */
        spin_lock_bh(&vvs->tx_lock);
        vvs->peer_buf_alloc = le32_to_cpu(hdr->buf_alloc);
        vvs->peer_fwd_cnt = le32_to_cpu(hdr->fwd_cnt);
        space_available = virtio_transport_has_space(vsk);
        spin_unlock_bh(&vvs->tx_lock);
        return space_available;
}

/* Handle a connection request arriving on a listening (server) socket */
static int
virtio_transport_recv_listen(struct sock *sk, struct sk_buff *skb,
                             struct virtio_transport *t)
{
        struct virtio_vsock_hdr *hdr = virtio_vsock_hdr(skb);
        struct vsock_sock *vsk = vsock_sk(sk);
        struct vsock_sock *vchild;
        struct sock *child;
        int ret;

        if (le16_to_cpu(hdr->op) != VIRTIO_VSOCK_OP_REQUEST) {
                virtio_transport_reset_no_sock(t, skb);
                return -EINVAL;
        }

        if (sk_acceptq_is_full(sk)) {
                virtio_transport_reset_no_sock(t, skb);
                return -ENOMEM;
        }

        /* __vsock_release() might have already flushed accept_queue.
         * Subsequent enqueues would lead to a memory leak.
         */
        if (sk->sk_shutdown == SHUTDOWN_MASK) {
                virtio_transport_reset_no_sock(t, skb);
                return -ESHUTDOWN;
        }

        child = vsock_create_connected(sk);
        if (!child) {
                virtio_transport_reset_no_sock(t, skb);
                return -ENOMEM;
        }

        sk_acceptq_added(sk);

        lock_sock_nested(child, SINGLE_DEPTH_NESTING);

        child->sk_state = TCP_ESTABLISHED;

        vchild = vsock_sk(child);
        vsock_addr_init(&vchild->local_addr, le64_to_cpu(hdr->dst_cid),
                        le32_to_cpu(hdr->dst_port));
        vsock_addr_init(&vchild->remote_addr, le64_to_cpu(hdr->src_cid),
                        le32_to_cpu(hdr->src_port));

        ret = vsock_assign_transport(vchild, vsk);
        /* The transport assigned (based on remote_addr) must be the same one
         * on which we received the request.
         */
        if (ret || vchild->transport != &t->transport) {
                release_sock(child);
                virtio_transport_reset_no_sock(t, skb);
                sock_put(child);
                return ret;
        }

        if (virtio_transport_space_update(child, skb))
                child->sk_write_space(child);

        vsock_insert_connected(vchild);
        vsock_enqueue_accept(sk, child);
        virtio_transport_send_response(vchild, skb);

        release_sock(child);

        sk->sk_data_ready(sk);
        return 0;
}

static bool virtio_transport_valid_type(u16 type)
{
        return (type == VIRTIO_VSOCK_TYPE_STREAM) ||
               (type == VIRTIO_VSOCK_TYPE_SEQPACKET);
}

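/* Entry point for every packet delivered by a device implementation
 * (virtio-vsock or vhost-vsock): validate the type, look the socket up in
 * the connected/bound tables, and dispatch on its TCP-style state.
 */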
/* We are under the virtio-vsock's vsock->rx_lock or vhost-vsock's vq->mutex
 * lock.
 */
void virtio_transport_recv_pkt(struct virtio_transport *t,
                               struct sk_buff *skb)
{
        struct virtio_vsock_hdr *hdr = virtio_vsock_hdr(skb);
        struct sockaddr_vm src, dst;
        struct vsock_sock *vsk;
        struct sock *sk;
        bool space_available;

        vsock_addr_init(&src, le64_to_cpu(hdr->src_cid),
                        le32_to_cpu(hdr->src_port));
        vsock_addr_init(&dst, le64_to_cpu(hdr->dst_cid),
                        le32_to_cpu(hdr->dst_port));

        trace_virtio_transport_recv_pkt(src.svm_cid, src.svm_port,
                                        dst.svm_cid, dst.svm_port,
                                        le32_to_cpu(hdr->len),
                                        le16_to_cpu(hdr->type),
                                        le16_to_cpu(hdr->op),
                                        le32_to_cpu(hdr->flags),
                                        le32_to_cpu(hdr->buf_alloc),
                                        le32_to_cpu(hdr->fwd_cnt));

        if (!virtio_transport_valid_type(le16_to_cpu(hdr->type))) {
                (void)virtio_transport_reset_no_sock(t, skb);
                goto free_pkt;
        }

        /* The socket must be in the connected or bound table,
         * otherwise send a reset back.
         */
        sk = vsock_find_connected_socket(&src, &dst);
        if (!sk) {
                sk = vsock_find_bound_socket(&dst);
                if (!sk) {
                        (void)virtio_transport_reset_no_sock(t, skb);
                        goto free_pkt;
                }
        }

        if (virtio_transport_get_type(sk) != le16_to_cpu(hdr->type)) {
                (void)virtio_transport_reset_no_sock(t, skb);
                sock_put(sk);
                goto free_pkt;
        }

        if (!skb_set_owner_sk_safe(skb, sk)) {
                WARN_ONCE(1, "receiving vsock socket has sk_refcnt == 0\n");
                goto free_pkt;
        }

        vsk = vsock_sk(sk);

        lock_sock(sk);

        /* Check if sk has been closed or assigned to another transport before
         * lock_sock (note: listener sockets are not assigned to any transport)
         */
        if (sock_flag(sk, SOCK_DONE) ||
            (sk->sk_state != TCP_LISTEN && vsk->transport != &t->transport)) {
                (void)virtio_transport_reset_no_sock(t, skb);
                release_sock(sk);
                sock_put(sk);
                goto free_pkt;
        }

        space_available = virtio_transport_space_update(sk, skb);

        /* Update CID in case it has changed after a transport reset event */
        if (vsk->local_addr.svm_cid != VMADDR_CID_ANY)
                vsk->local_addr.svm_cid = dst.svm_cid;

        if (space_available)
                sk->sk_write_space(sk);

        switch (sk->sk_state) {
        case TCP_LISTEN:
                virtio_transport_recv_listen(sk, skb, t);
                kfree_skb(skb);
                break;
        case TCP_SYN_SENT:
                virtio_transport_recv_connecting(sk, skb);
                kfree_skb(skb);
                break;
        case TCP_ESTABLISHED:
                virtio_transport_recv_connected(sk, skb);
                break;
        case TCP_CLOSING:
                virtio_transport_recv_disconnecting(sk, skb);
                kfree_skb(skb);
                break;
        default:
                (void)virtio_transport_reset_no_sock(t, skb);
                kfree_skb(skb);
                break;
        }

        release_sock(sk);

        /* Release refcnt obtained when we fetched this socket out of the
         * bound or connected list.
         */
        sock_put(sk);
        return;

free_pkt:
        kfree_skb(skb);
}
EXPORT_SYMBOL_GPL(virtio_transport_recv_pkt);

/* Remove the skbs in a queue whose vsk matches.
 *
 * Each matching skb is freed.
 *
 * Returns the count of skbs that were reply packets.
 */
int virtio_transport_purge_skbs(void *vsk, struct sk_buff_head *queue)
{
        struct sk_buff_head freeme;
        struct sk_buff *skb, *tmp;
        int cnt = 0;

        skb_queue_head_init(&freeme);

        spin_lock_bh(&queue->lock);
        skb_queue_walk_safe(queue, skb, tmp) {
                if (vsock_sk(skb->sk) != vsk)
                        continue;

                __skb_unlink(skb, queue);
                __skb_queue_tail(&freeme, skb);

                if (virtio_vsock_skb_reply(skb))
                        cnt++;
        }
        spin_unlock_bh(&queue->lock);

        __skb_queue_purge(&freeme);

        return cnt;
}
EXPORT_SYMBOL_GPL(virtio_transport_purge_skbs);

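/* Dequeue one skb under rx_lock, release the credit for the whole packet,
 * and hand the skb to 'recv_actor' (used, for example, by the BPF sockmap
 * path).
 */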
int virtio_transport_read_skb(struct vsock_sock *vsk, skb_read_actor_t recv_actor)
{
        struct virtio_vsock_sock *vvs = vsk->trans;
        struct sock *sk = sk_vsock(vsk);
        struct virtio_vsock_hdr *hdr;
        struct sk_buff *skb;
        u32 pkt_len;
        int off = 0;
        int err;

        spin_lock_bh(&vvs->rx_lock);
        /* Use __skb_recv_datagram() for race-free handling of the receive. It
         * works for types other than dgrams.
         */
        skb = __skb_recv_datagram(sk, &vvs->rx_queue, MSG_DONTWAIT, &off, &err);
        if (!skb) {
                spin_unlock_bh(&vvs->rx_lock);
                return err;
        }

        hdr = virtio_vsock_hdr(skb);
        if (le32_to_cpu(hdr->flags) & VIRTIO_VSOCK_SEQ_EOM)
                vvs->msg_count--;

        pkt_len = le32_to_cpu(hdr->len);
        virtio_transport_dec_rx_pkt(vvs, pkt_len, pkt_len);
        spin_unlock_bh(&vvs->rx_lock);

        virtio_transport_send_credit_update(vsk);

        return recv_actor(sk, skb);
}
EXPORT_SYMBOL_GPL(virtio_transport_read_skb);

int virtio_transport_notify_set_rcvlowat(struct vsock_sock *vsk, int val)
{
        struct virtio_vsock_sock *vvs = vsk->trans;
        bool send_update;

        spin_lock_bh(&vvs->rx_lock);

        /* If the number of available bytes is less than the new SO_RCVLOWAT
         * value, kick the sender to send more data, because the sender may
         * sleep in its 'send()' syscall waiting for enough space at our side.
         * Also don't send a credit update when the peer already knows the
         * actual value - such a transmission would be useless.
         */
        send_update = (vvs->rx_bytes < val) &&
                      (vvs->fwd_cnt != vvs->last_fwd_cnt);

        spin_unlock_bh(&vvs->rx_lock);

        if (send_update) {
                int err;

                err = virtio_transport_send_credit_update(vsk);
                if (err < 0)
                        return err;
        }

        return 0;
}
EXPORT_SYMBOL_GPL(virtio_transport_notify_set_rcvlowat);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Asias He");
MODULE_DESCRIPTION("common code for virtio vsock");