net: netpoll: Individualize the skb pool

The current implementation of the netpoll system uses a global skb
pool, which can lead to inefficient memory usage and waste: buffers
remain allocated even when targets are disabled or no longer in use.

This can result in a significant amount of memory being unnecessarily
allocated and retained, potentially causing performance issues and
limiting the availability of resources for other system components.

Modify the netpoll system to assign a skb pool to each target instead of
using a global one.

This approach allows for more fine-grained control over memory
allocation and deallocation, ensuring that resources are only allocated
and retained as needed.
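As a rough sketch of the resulting shape (a user-space analogue, not the
kernel code itself: the names target, pool_refill, pool_drain, POOL_MAX
and BUF_SIZE are invented for this illustration), moving the pool from a
global into each consumer looks like this:

	#include <stdlib.h>

	#define POOL_MAX 32	/* analogue of MAX_SKBS */
	#define BUF_SIZE 1500	/* analogue of MAX_SKB_SIZE */

	/* Before the change, one global pool is shared by every target;
	 * after it, each target owns its pool, so tearing a target down
	 * can release its buffers instead of leaving them parked in a
	 * global list.
	 */
	struct target {
		void *pool[POOL_MAX];	/* per-target buffer pool */
		size_t count;
	};

	/* Top the pool up to POOL_MAX entries, mirroring refill_skbs(np). */
	static void pool_refill(struct target *t)
	{
		while (t->count < POOL_MAX) {
			void *buf = malloc(BUF_SIZE);

			if (!buf)
				break;
			t->pool[t->count++] = buf;
		}
	}

	/* Free everything this target still holds, which a global pool
	 * could not do on a per-target basis.
	 */
	static void pool_drain(struct target *t)
	{
		while (t->count)
			free(t->pool[--t->count]);
	}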

Signed-off-by: Breno Leitao <leitao@debian.org>
Link: https://patch.msgid.link/20241114-skb_buffers_v2-v3-1-9be9f52a8b69@debian.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
commit 221a9c1df7
parent 11ee317d88
Author:    Breno Leitao, 2024-11-14 03:00:11 -08:00
Committer: Jakub Kicinski
2 changed files with 14 additions and 18 deletions

--- a/include/linux/netpoll.h
+++ b/include/linux/netpoll.h
@@ -32,6 +32,7 @@ struct netpoll {
 	bool ipv6;
 	u16 local_port, remote_port;
 	u8 remote_mac[ETH_ALEN];
+	struct sk_buff_head skb_pool;
 };
 
 struct netpoll_info {

--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -45,9 +45,6 @@
 #define MAX_UDP_CHUNK 1460
 #define MAX_SKBS 32
 
-static struct sk_buff_head skb_pool;
-
 #define USEC_PER_POLL	50
 
 #define MAX_SKB_SIZE \
@@ -234,20 +231,23 @@ void netpoll_poll_enable(struct net_device *dev)
 	up(&ni->dev_lock);
 }
 
-static void refill_skbs(void)
+static void refill_skbs(struct netpoll *np)
 {
+	struct sk_buff_head *skb_pool;
 	struct sk_buff *skb;
 	unsigned long flags;
 
-	spin_lock_irqsave(&skb_pool.lock, flags);
-	while (skb_pool.qlen < MAX_SKBS) {
+	skb_pool = &np->skb_pool;
+
+	spin_lock_irqsave(&skb_pool->lock, flags);
+	while (skb_pool->qlen < MAX_SKBS) {
 		skb = alloc_skb(MAX_SKB_SIZE, GFP_ATOMIC);
 		if (!skb)
 			break;
 
-		__skb_queue_tail(&skb_pool, skb);
+		__skb_queue_tail(skb_pool, skb);
 	}
-	spin_unlock_irqrestore(&skb_pool.lock, flags);
+	spin_unlock_irqrestore(&skb_pool->lock, flags);
 }
 
 static void zap_completion_queue(void)
@@ -284,12 +284,12 @@ static struct sk_buff *find_skb(struct netpoll *np, int len, int reserve)
 	struct sk_buff *skb;
 
 	zap_completion_queue();
-	refill_skbs();
+	refill_skbs(np);
 repeat:
 
 	skb = alloc_skb(len, GFP_ATOMIC);
 	if (!skb)
-		skb = skb_dequeue(&skb_pool);
+		skb = skb_dequeue(&np->skb_pool);
 
 	if (!skb) {
 		if (++count < 10) {
@@ -673,6 +673,8 @@ int netpoll_setup(struct netpoll *np)
 	struct in_device *in_dev;
 	int err;
 
+	skb_queue_head_init(&np->skb_pool);
+
 	rtnl_lock();
 	if (np->dev_name[0]) {
 		struct net *net = current->nsproxy->net_ns;
@@ -773,7 +775,7 @@ put_noaddr:
 	}
 
 	/* fill up the skb queue */
-	refill_skbs();
+	refill_skbs(np);
 
 	err = __netpoll_setup(np, ndev);
 	if (err)
@@ -792,13 +794,6 @@ unlock:
 }
 EXPORT_SYMBOL(netpoll_setup);
 
-static int __init netpoll_init(void)
-{
-	skb_queue_head_init(&skb_pool);
-	return 0;
-}
-core_initcall(netpoll_init);
-
 static void rcu_cleanup_netpoll_info(struct rcu_head *rcu_head)
 {
 	struct netpoll_info *npinfo =