Mirror of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git (synced 2025-08-05 16:54:27 +00:00)
udp: relax atomic operation on sk->sk_rmem_alloc
atomic_add_return() is more expensive than atomic_add() and seems
overkill in the UDP rx fast path.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Link: https://lore.kernel.org/r/20240328144032.1864988-3-edumazet@google.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
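To see why the returning form costs more, here is a hedged userspace C11 sketch (not the kernel code; rmem_alloc, rcvbuf and the charge_* helpers are invented for illustration). A fetch-add must hand a value back, which on x86 typically lowers to LOCK XADD; when the result is discarded, recent GCC/Clang can emit a plain LOCK ADD instead, the same distinction the kernel draws between atomic_add_return() and atomic_add().

/* Userspace sketch of the two charging patterns; every name here is
 * hypothetical, not the kernel's.  Build with: cc -std=c11 charge.c
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int rmem_alloc;        /* stand-in for sk->sk_rmem_alloc */
static const int rcvbuf = 4096;      /* stand-in for sk->sk_rcvbuf */

/* Old pattern: a returning RMW (atomic_add_return() in the kernel).
 * The post-add total decides whether to keep the charge; the test
 * passes unless the queue already held more than rcvbuf, so at least
 * one packet is always admitted.
 */
static bool charge_checked(int size)
{
	int rmem = atomic_fetch_add(&rmem_alloc, size) + size;

	if (rmem > size + rcvbuf) {
		atomic_fetch_sub(&rmem_alloc, size);   /* uncharge */
		return false;                          /* drop */
	}
	return true;
}

/* New pattern: a blind add (atomic_add() in the kernel).  No value
 * comes back, so no result register is needed; the queue-full test
 * happens elsewhere with a plain atomic load.
 */
static void charge_fast(int size)
{
	atomic_fetch_add_explicit(&rmem_alloc, size, memory_order_relaxed);
}

int main(void)
{
	printf("checked charge accepted: %d\n", charge_checked(1500));
	charge_fast(1500);
	printf("rmem_alloc is now %d\n", atomic_load(&rmem_alloc));
	return 0;
}

charge_checked() mirrors the removed lines below: the condition rmem > size + rcvbuf is equivalent to saying the queue already exceeded rcvbuf before this skb was charged.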
parent 6055796995
commit 6a1f12dd85

1 changed file with 1 addition and 6 deletions
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1516,12 +1516,7 @@ int __udp_enqueue_schedule_skb(struct sock *sk, struct sk_buff *skb)
 	size = skb->truesize;
 	udp_set_dev_scratch(skb);
 
-	/* we drop only if the receive buf is full and the receive
-	 * queue contains some other skb
-	 */
-	rmem = atomic_add_return(size, &sk->sk_rmem_alloc);
-	if (rmem > (size + (unsigned int)sk->sk_rcvbuf))
-		goto uncharge_drop;
+	atomic_add(size, &sk->sk_rmem_alloc);
 
 	spin_lock(&list->lock);
 	err = udp_rmem_schedule(sk, size);
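For context, here is a condensed, approximate sketch of how the enqueue path reads once this hunk is applied. It is assembled from the hunk above plus the function's surrounding logic, with the busylock, dropcount bookkeeping and some error handling elided, so treat the exact return values and tail as illustrative rather than verbatim kernel source.

/* Condensed view of __udp_enqueue_schedule_skb() after this change;
 * an approximation, not the verbatim kernel source.
 */
int __udp_enqueue_schedule_skb(struct sock *sk, struct sk_buff *skb)
{
	struct sk_buff_head *list = &sk->sk_receive_queue;
	int size, err = -ENOMEM;

	/* Lockless queue-full test near the top of the function: with
	 * the post-add check gone, this plain read is what bounds the
	 * queue, so sk_rmem_alloc may transiently overshoot sk_rcvbuf
	 * by roughly one skb truesize per concurrent enqueuer.
	 */
	if (atomic_read(&sk->sk_rmem_alloc) > READ_ONCE(sk->sk_rcvbuf))
		goto drop;

	size = skb->truesize;
	udp_set_dev_scratch(skb);

	/* Blind charge: nobody consumes the post-add total any more. */
	atomic_add(size, &sk->sk_rmem_alloc);

	spin_lock(&list->lock);
	err = udp_rmem_schedule(sk, size);	/* sk_mem accounting */
	if (err) {
		spin_unlock(&list->lock);
		atomic_sub(size, &sk->sk_rmem_alloc);	/* uncharge */
		goto drop;
	}
	__skb_queue_tail(list, skb);
	spin_unlock(&list->lock);

	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_data_ready(sk);
	return 0;

drop:
	atomic_inc(&sk->sk_drops);
	return err;
}

The trade-off is that the queue-full test and the memory charge are no longer a single atomic step; the bounded transient overshoot this permits was evidently judged a fair price for removing a returning RMW from the rx fast path.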