ovpn: implement basic TX path (UDP)

Packets sent over the ovpn interface are processed and transmitted to the
connected peer, if any.

Implementation is UDP only. TCP will be added by a later patch.

Note: no crypto/encapsulation exists yet. Packets are just captured and
sent.

Signed-off-by: Antonio Quartulli <antonio@openvpn.net>
Link: https://patch.msgid.link/20250415-b4-ovpn-v26-7-577f6097b964@openvpn.net
Reviewed-by: Sabrina Dubroca <sd@queasysnail.net>
Tested-by: Oleksandr Natalenko <oleksandr@natalenko.name>
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
commit 08857b5ec5 (parent f6226ae7a0)
Antonio Quartulli, 2025-04-15 13:17:24 +02:00; committed by Paolo Abeni
7 changed files with 464 additions and 2 deletions
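For orientation, the TX call chain wired up by this patch looks as follows (a simplified summary of the code in the hunks below, not part of the patch itself):

ovpn_net_xmit()                   /* ndo_start_xmit: validate proto, GSO-segment, look up peer */
  -> ovpn_send()                  /* walk the (possibly segmented) skb list */
    -> ovpn_encrypt_one()         /* hold a peer reference; no real crypto yet */
      -> ovpn_encrypt_post()      /* pick the transport based on the peer socket */
        -> ovpn_udp_send_skb()
          -> ovpn_udp_output()
            -> ovpn_udp4_output() / ovpn_udp6_output()
              -> udp_tunnel_xmit_skb() / udp_tunnel6_xmit_skb()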


@@ -120,6 +120,7 @@ config OVPN
depends on NET && INET
depends on IPV6 || !IPV6
select DST_CACHE
select NET_UDP_TUNNEL
help
This module enhances the performance of the OpenVPN userspace software
by offloading the data channel processing to kernelspace.


@@ -9,14 +9,149 @@
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <net/gso.h>
#include "io.h"
#include "ovpnpriv.h"
#include "peer.h"
#include "udp.h"
#include "skb.h"
#include "socket.h"
static void ovpn_encrypt_post(struct sk_buff *skb, int ret)
{
struct ovpn_peer *peer = ovpn_skb_cb(skb)->peer;
struct ovpn_socket *sock;
if (unlikely(ret < 0))
goto err;
skb_mark_not_on_list(skb);
rcu_read_lock();
sock = rcu_dereference(peer->sock);
if (unlikely(!sock))
goto err_unlock;
switch (sock->sock->sk->sk_protocol) {
case IPPROTO_UDP:
ovpn_udp_send_skb(peer, sock->sock, skb);
break;
default:
/* no transport configured yet */
goto err_unlock;
}
/* skb passed down the stack - don't free it */
skb = NULL;
err_unlock:
rcu_read_unlock();
err:
if (unlikely(skb))
dev_dstats_tx_dropped(peer->ovpn->dev);
ovpn_peer_put(peer);
kfree_skb(skb);
}
static bool ovpn_encrypt_one(struct ovpn_peer *peer, struct sk_buff *skb)
{
ovpn_skb_cb(skb)->peer = peer;
/* take a reference to the peer because the crypto code may run async.
* ovpn_encrypt_post() will release it upon completion
*/
if (unlikely(!ovpn_peer_hold(peer))) {
DEBUG_NET_WARN_ON_ONCE(1);
return false;
}
ovpn_encrypt_post(skb, 0);
return true;
}
/* send skb to connected peer, if any */
static void ovpn_send(struct ovpn_priv *ovpn, struct sk_buff *skb,
struct ovpn_peer *peer)
{
struct sk_buff *curr, *next;
/* this might be a GSO-segmented skb list: process each skb
* independently
*/
skb_list_walk_safe(skb, curr, next) {
if (unlikely(!ovpn_encrypt_one(peer, curr))) {
dev_dstats_tx_dropped(ovpn->dev);
kfree_skb(curr);
}
}
ovpn_peer_put(peer);
}
/* Send user data to the network
*/
netdev_tx_t ovpn_net_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct ovpn_priv *ovpn = netdev_priv(dev);
struct sk_buff *segments, *curr, *next;
struct sk_buff_head skb_list;
struct ovpn_peer *peer;
__be16 proto;
int ret;
/* reset netfilter state */
nf_reset_ct(skb);
/* verify IP header size in network packet */
proto = ovpn_ip_check_protocol(skb);
if (unlikely(!proto || skb->protocol != proto))
goto drop;
if (skb_is_gso(skb)) {
segments = skb_gso_segment(skb, 0);
if (IS_ERR(segments)) {
ret = PTR_ERR(segments);
net_err_ratelimited("%s: cannot segment payload packet: %d\n",
netdev_name(dev), ret);
goto drop;
}
consume_skb(skb);
skb = segments;
}
/* from this moment on, "skb" might be a list */
__skb_queue_head_init(&skb_list);
skb_list_walk_safe(skb, curr, next) {
skb_mark_not_on_list(curr);
curr = skb_share_check(curr, GFP_ATOMIC);
if (unlikely(!curr)) {
net_err_ratelimited("%s: skb_share_check failed for payload packet\n",
netdev_name(dev));
dev_dstats_tx_dropped(ovpn->dev);
continue;
}
__skb_queue_tail(&skb_list, curr);
}
skb_list.prev->next = NULL;
/* retrieve peer serving the destination IP of this packet */
peer = ovpn_peer_get_by_dst(ovpn, skb);
if (unlikely(!peer)) {
net_dbg_ratelimited("%s: no peer to send data to\n",
netdev_name(ovpn->dev));
goto drop;
}
ovpn_send(ovpn, skb_list.next, peer);
return NETDEV_TX_OK;
drop:
dev_dstats_tx_dropped(ovpn->dev);
skb_tx_error(skb);
-kfree_skb(skb);
+kfree_skb_list(skb);
return NET_XMIT_DROP;
}
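The ovpn_encrypt_one()/ovpn_encrypt_post() pair above follows a strict ownership rule: the submitter takes an extra peer reference because completion may run asynchronously once real crypto is added, and the completion handler always drops that reference and frees the packet unless it was handed down the stack. A minimal userspace sketch of the same rule (all names below, such as struct peer and send_to_transport(), are illustrative and not part of the driver):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct peer {
	int refcount;
};

static void peer_put(struct peer *p)
{
	if (--p->refcount == 0)
		printf("last reference dropped, peer freed\n");
}

static int send_to_transport(const char *pkt)
{
	printf("tx: %s\n", pkt);
	return 0;			/* pretend the transport consumed the packet */
}

/* completion handler: owns both the packet and the extra peer reference */
static void encrypt_post(struct peer *p, char *pkt, int err)
{
	if (err >= 0 && send_to_transport(pkt) == 0)
		pkt = NULL;		/* handed off: must not be freed here */

	free(pkt);			/* frees only on the error path */
	peer_put(p);			/* completion always releases the reference */
}

int main(void)
{
	struct peer peer = { .refcount = 1 };
	char *pkt = malloc(16);

	if (!pkt)
		return 1;
	strcpy(pkt, "payload");

	peer.refcount++;		/* hold across the (possibly async) completion */
	encrypt_post(&peer, pkt, 0);

	peer_put(&peer);		/* drop the caller's own reference */
	return 0;
}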


@@ -294,6 +294,38 @@ static void ovpn_peer_remove(struct ovpn_peer *peer,
llist_add(&peer->release_entry, release_list);
}
/**
* ovpn_peer_get_by_dst - Lookup peer to send skb to
* @ovpn: the private data representing the current VPN session
* @skb: the skb to extract the destination address from
*
* This function takes a tunnel packet and looks up the peer to send it to
* after encapsulation. The skb is expected to be the in-tunnel packet, without
* any OpenVPN related header.
*
* Assume that the IP header is accessible in the skb data.
*
* Return: the peer if found or NULL otherwise.
*/
struct ovpn_peer *ovpn_peer_get_by_dst(struct ovpn_priv *ovpn,
struct sk_buff *skb)
{
struct ovpn_peer *peer = NULL;
/* in P2P mode, no matter the destination, packets are always sent to
* the single peer listening on the other side
*/
if (ovpn->mode == OVPN_MODE_P2P) {
rcu_read_lock();
peer = rcu_dereference(ovpn->peer);
if (unlikely(peer && !ovpn_peer_hold(peer)))
peer = NULL;
rcu_read_unlock();
}
return peer;
}
/**
* ovpn_peer_add_p2p - add peer to related tables in a P2P instance
* @ovpn: the instance to add the peer to

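The caller contract of ovpn_peer_get_by_dst() is worth spelling out: a non-NULL return carries a reference taken via ovpn_peer_hold(), and it is released with ovpn_peer_put() by ovpn_send() once the packets have been queued to the transport. The call site in ovpn_net_xmit() above already follows this shape; restated in isolation (kernel-style sketch, not additional driver code):

	peer = ovpn_peer_get_by_dst(ovpn, skb);
	if (unlikely(!peer))
		goto drop;			/* no connected peer: count the drop, free the skb(s) */

	ovpn_send(ovpn, skb_list.next, peer);	/* consumes the peer reference */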

@@ -80,5 +80,7 @@ void ovpn_peer_release_p2p(struct ovpn_priv *ovpn, struct sock *sk,
struct ovpn_peer *ovpn_peer_get_by_transp_addr(struct ovpn_priv *ovpn,
struct sk_buff *skb);
struct ovpn_peer *ovpn_peer_get_by_id(struct ovpn_priv *ovpn, u32 peer_id);
struct ovpn_peer *ovpn_peer_get_by_dst(struct ovpn_priv *ovpn,
struct sk_buff *skb);
#endif /* _NET_OVPN_OVPNPEER_H_ */

drivers/net/ovpn/skb.h (new file, 55 lines)

@@ -0,0 +1,55 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/* OpenVPN data channel offload
*
* Copyright (C) 2020-2025 OpenVPN, Inc.
*
* Author: Antonio Quartulli <antonio@openvpn.net>
* James Yonan <james@openvpn.net>
*/
#ifndef _NET_OVPN_SKB_H_
#define _NET_OVPN_SKB_H_
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/skbuff.h>
#include <linux/socket.h>
#include <linux/types.h>
struct ovpn_cb {
struct ovpn_peer *peer;
};
static inline struct ovpn_cb *ovpn_skb_cb(struct sk_buff *skb)
{
BUILD_BUG_ON(sizeof(struct ovpn_cb) > sizeof(skb->cb));
return (struct ovpn_cb *)skb->cb;
}
/* Return IP protocol version from skb header.
* Return 0 if protocol is not IPv4/IPv6 or cannot be read.
*/
static inline __be16 ovpn_ip_check_protocol(struct sk_buff *skb)
{
__be16 proto = 0;
/* skb could be non-linear,
* make sure IP header is in non-fragmented part
*/
if (!pskb_network_may_pull(skb, sizeof(struct iphdr)))
return 0;
if (ip_hdr(skb)->version == 4) {
proto = htons(ETH_P_IP);
} else if (ip_hdr(skb)->version == 6) {
if (!pskb_network_may_pull(skb, sizeof(struct ipv6hdr)))
return 0;
proto = htons(ETH_P_IPV6);
}
return proto;
}
#endif /* _NET_OVPN_SKB_H_ */
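Both helpers in skb.h rely on plain C techniques that can be shown outside the kernel: a per-packet scratch area guarded by a compile-time size check (the BUILD_BUG_ON() in ovpn_skb_cb()), and telling IPv4 from IPv6 by the version nibble of the first header byte (ovpn_ip_check_protocol()). A self-contained userspace sketch, with purely illustrative names (fake_skb, my_cb, ip_version):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

struct fake_skb {
	unsigned char cb[48];		/* mimics the sk_buff::cb scratch space */
	const uint8_t *data;
};

struct my_cb {
	void *peer;			/* driver-private state, must fit in cb[] */
};

static struct my_cb *get_cb(struct fake_skb *skb)
{
	/* compile-time check, same idea as BUILD_BUG_ON() above */
	static_assert(sizeof(struct my_cb) <= sizeof(((struct fake_skb *)0)->cb),
		      "my_cb does not fit into the cb area");
	return (struct my_cb *)skb->cb;
}

static int ip_version(const uint8_t *pkt)
{
	return pkt[0] >> 4;		/* 4 = IPv4, 6 = IPv6, anything else is not IP */
}

int main(void)
{
	uint8_t pkt_v4[20] = { 0x45 };	/* version 4, IHL 5 */
	uint8_t pkt_v6[40] = { 0x60 };	/* version 6 */
	struct fake_skb skb = { .data = pkt_v4 };

	get_cb(&skb)->peer = NULL;
	printf("first packet: IPv%d\n", ip_version(skb.data));
	printf("second packet: IPv%d\n", ip_version(pkt_v6));
	return 0;
}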


@@ -7,15 +7,246 @@
*/
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/skbuff.h>
#include <linux/socket.h>
#include <linux/udp.h>
#include <net/addrconf.h>
#include <net/dst_cache.h>
#include <net/route.h>
#include <net/ipv6_stubs.h>
#include <net/udp.h>
#include <net/udp_tunnel.h>
#include "ovpnpriv.h"
#include "main.h"
#include "bind.h"
#include "io.h"
#include "peer.h"
#include "socket.h"
#include "udp.h"
/**
* ovpn_udp4_output - send IPv4 packet over udp socket
* @peer: the destination peer
* @bind: the binding related to the destination peer
* @cache: dst cache
* @sk: the socket to send the packet over
* @skb: the packet to send
*
* Return: 0 on success or a negative error code otherwise
*/
static int ovpn_udp4_output(struct ovpn_peer *peer, struct ovpn_bind *bind,
struct dst_cache *cache, struct sock *sk,
struct sk_buff *skb)
{
struct rtable *rt;
struct flowi4 fl = {
.saddr = bind->local.ipv4.s_addr,
.daddr = bind->remote.in4.sin_addr.s_addr,
.fl4_sport = inet_sk(sk)->inet_sport,
.fl4_dport = bind->remote.in4.sin_port,
.flowi4_proto = sk->sk_protocol,
.flowi4_mark = sk->sk_mark,
};
int ret;
local_bh_disable();
rt = dst_cache_get_ip4(cache, &fl.saddr);
if (rt)
goto transmit;
if (unlikely(!inet_confirm_addr(sock_net(sk), NULL, 0, fl.saddr,
RT_SCOPE_HOST))) {
/* we may end up here when the cached address is not usable
* anymore. In this case we reset address/cache and perform a
* new look up
*/
fl.saddr = 0;
spin_lock_bh(&peer->lock);
bind->local.ipv4.s_addr = 0;
spin_unlock_bh(&peer->lock);
dst_cache_reset(cache);
}
rt = ip_route_output_flow(sock_net(sk), &fl, sk);
if (IS_ERR(rt) && PTR_ERR(rt) == -EINVAL) {
fl.saddr = 0;
spin_lock_bh(&peer->lock);
bind->local.ipv4.s_addr = 0;
spin_unlock_bh(&peer->lock);
dst_cache_reset(cache);
rt = ip_route_output_flow(sock_net(sk), &fl, sk);
}
if (IS_ERR(rt)) {
ret = PTR_ERR(rt);
net_dbg_ratelimited("%s: no route to host %pISpc: %d\n",
netdev_name(peer->ovpn->dev),
&bind->remote.in4,
ret);
goto err;
}
dst_cache_set_ip4(cache, &rt->dst, fl.saddr);
transmit:
udp_tunnel_xmit_skb(rt, sk, skb, fl.saddr, fl.daddr, 0,
ip4_dst_hoplimit(&rt->dst), 0, fl.fl4_sport,
fl.fl4_dport, false, sk->sk_no_check_tx);
ret = 0;
err:
local_bh_enable();
return ret;
}
#if IS_ENABLED(CONFIG_IPV6)
/**
* ovpn_udp6_output - send IPv6 packet over udp socket
* @peer: the destination peer
* @bind: the binding related to the destination peer
* @cache: dst cache
* @sk: the socket to send the packet over
* @skb: the packet to send
*
* Return: 0 on success or a negative error code otherwise
*/
static int ovpn_udp6_output(struct ovpn_peer *peer, struct ovpn_bind *bind,
struct dst_cache *cache, struct sock *sk,
struct sk_buff *skb)
{
struct dst_entry *dst;
int ret;
struct flowi6 fl = {
.saddr = bind->local.ipv6,
.daddr = bind->remote.in6.sin6_addr,
.fl6_sport = inet_sk(sk)->inet_sport,
.fl6_dport = bind->remote.in6.sin6_port,
.flowi6_proto = sk->sk_protocol,
.flowi6_mark = sk->sk_mark,
.flowi6_oif = bind->remote.in6.sin6_scope_id,
};
local_bh_disable();
dst = dst_cache_get_ip6(cache, &fl.saddr);
if (dst)
goto transmit;
if (unlikely(!ipv6_chk_addr(sock_net(sk), &fl.saddr, NULL, 0))) {
/* we may end up here when the cached address is not usable
* anymore. In this case we reset address/cache and perform a
* new look up
*/
fl.saddr = in6addr_any;
spin_lock_bh(&peer->lock);
bind->local.ipv6 = in6addr_any;
spin_unlock_bh(&peer->lock);
dst_cache_reset(cache);
}
dst = ipv6_stub->ipv6_dst_lookup_flow(sock_net(sk), sk, &fl, NULL);
if (IS_ERR(dst)) {
ret = PTR_ERR(dst);
net_dbg_ratelimited("%s: no route to host %pISpc: %d\n",
netdev_name(peer->ovpn->dev),
&bind->remote.in6, ret);
goto err;
}
dst_cache_set_ip6(cache, dst, &fl.saddr);
transmit:
udp_tunnel6_xmit_skb(dst, sk, skb, skb->dev, &fl.saddr, &fl.daddr, 0,
ip6_dst_hoplimit(dst), 0, fl.fl6_sport,
fl.fl6_dport, udp_get_no_check6_tx(sk));
ret = 0;
err:
local_bh_enable();
return ret;
}
#endif
/**
* ovpn_udp_output - transmit skb using udp-tunnel
* @peer: the destination peer
* @cache: dst cache
* @sk: the socket to send the packet over
* @skb: the packet to send
*
* rcu_read_lock should be held on entry.
* On return, the skb is consumed.
*
* Return: 0 on success or a negative error code otherwise
*/
static int ovpn_udp_output(struct ovpn_peer *peer, struct dst_cache *cache,
struct sock *sk, struct sk_buff *skb)
{
struct ovpn_bind *bind;
int ret;
/* set sk to null if skb is already orphaned */
if (!skb->destructor)
skb->sk = NULL;
rcu_read_lock();
bind = rcu_dereference(peer->bind);
if (unlikely(!bind)) {
net_warn_ratelimited("%s: no bind for remote peer %u\n",
netdev_name(peer->ovpn->dev), peer->id);
ret = -ENODEV;
goto out;
}
switch (bind->remote.in4.sin_family) {
case AF_INET:
ret = ovpn_udp4_output(peer, bind, cache, sk, skb);
break;
#if IS_ENABLED(CONFIG_IPV6)
case AF_INET6:
ret = ovpn_udp6_output(peer, bind, cache, sk, skb);
break;
#endif
default:
ret = -EAFNOSUPPORT;
break;
}
out:
rcu_read_unlock();
return ret;
}
/**
* ovpn_udp_send_skb - prepare skb and send it over via UDP
* @peer: the destination peer
* @sock: the RCU protected peer socket
* @skb: the packet to send
*/
void ovpn_udp_send_skb(struct ovpn_peer *peer, struct socket *sock,
struct sk_buff *skb)
{
int ret = -1;
skb->dev = peer->ovpn->dev;
/* no checksum performed at this layer */
skb->ip_summed = CHECKSUM_NONE;
/* get socket info */
if (unlikely(!sock)) {
net_warn_ratelimited("%s: no sock for remote peer %u\n",
netdev_name(peer->ovpn->dev), peer->id);
goto out;
}
/* crypto layer -> transport (UDP) */
ret = ovpn_udp_output(peer, &peer->dst_cache, sock->sk, skb);
out:
if (unlikely(ret < 0)) {
kfree_skb(skb);
return;
}
}
/**
* ovpn_udp_socket_attach - set udp-tunnel CBs on socket and link it to ovpn
* @ovpn_sock: socket to configure
@@ -31,7 +262,7 @@ int ovpn_udp_socket_attach(struct ovpn_socket *ovpn_sock,
{
struct socket *sock = ovpn_sock->sock;
struct ovpn_socket *old_data;
-int ret = 0;
+int ret;
/* make sure no pre-existing encapsulation handler exists */
rcu_read_lock();

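Both ovpn_udp4_output() and ovpn_udp6_output() above implement the same dst-cache pattern: try the cached route first, invalidate the cached local address if it is no longer configured on the host, fall back to a full route lookup, cache the result, and hand the skb to the udp-tunnel xmit helper. Condensed to its IPv4 shape (a simplified restatement of the flow above, error handling elided, not additional driver code):

	rt = dst_cache_get_ip4(cache, &fl.saddr);	/* fast path: cached route */
	if (!rt) {
		/* cached source address gone? clear bind->local and dst_cache_reset() */
		rt = ip_route_output_flow(sock_net(sk), &fl, sk);	/* slow path lookup */
		dst_cache_set_ip4(cache, &rt->dst, fl.saddr);
	}
	udp_tunnel_xmit_skb(rt, sk, skb, ...);		/* encapsulate and transmit */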

@@ -9,6 +9,9 @@
#ifndef _NET_OVPN_UDP_H_
#define _NET_OVPN_UDP_H_
#include <net/sock.h>
struct ovpn_peer;
struct ovpn_priv;
struct socket;
@@ -16,4 +19,7 @@ int ovpn_udp_socket_attach(struct ovpn_socket *ovpn_sock,
struct ovpn_priv *ovpn);
void ovpn_udp_socket_detach(struct ovpn_socket *ovpn_sock);
void ovpn_udp_send_skb(struct ovpn_peer *peer, struct socket *sock,
struct sk_buff *skb);
#endif /* _NET_OVPN_UDP_H_ */