inet: frags: add inet_frag_putn() helper

inet_frag_putn() can release multiple references
in one step.

Use it in inet_frags_free_cb().

Replace inet_frag_put(X) with inet_frag_putn(X, 1)

Signed-off-by: Eric Dumazet <edumazet@google.com>
Link: https://patch.msgid.link/20250312082250.1803501-2-edumazet@google.com
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
This commit is contained in:
Eric Dumazet 2025-03-12 08:22:47 +00:00 committed by Paolo Abeni
parent 24faa63bce
commit ae2d90355a
7 changed files with 14 additions and 12 deletions

View file

@@ -145,9 +145,9 @@ struct inet_frag_queue *inet_frag_find(struct fqdir *fqdir, void *key);
 unsigned int inet_frag_rbtree_purge(struct rb_root *root,
 enum skb_drop_reason reason);
-static inline void inet_frag_put(struct inet_frag_queue *q)
+static inline void inet_frag_putn(struct inet_frag_queue *q, int refs)
 {
-if (refcount_dec_and_test(&q->refcnt))
+if (refs && refcount_sub_and_test(refs, &q->refcnt))
 inet_frag_destroy(q);
 }

View file

@@ -66,6 +66,7 @@ ip6frag_expire_frag_queue(struct net *net, struct frag_queue *fq)
 {
 struct net_device *dev = NULL;
 struct sk_buff *head;
+int refs = 1;
 rcu_read_lock();
 /* Paired with the WRITE_ONCE() in fqdir_pre_exit(). */
@@ -109,7 +110,7 @@ out:
 spin_unlock(&fq->q.lock);
 out_rcu_unlock:
 rcu_read_unlock();
-inet_frag_put(&fq->q);
+inet_frag_putn(&fq->q, refs);
 }
/* Check if the upper layer header is truncated in the first fragment. */

View file

@@ -45,6 +45,7 @@ static void lowpan_frag_expire(struct timer_list *t)
 {
 struct inet_frag_queue *frag = from_timer(frag, t, timer);
 struct frag_queue *fq;
+int refs = 1;
 fq = container_of(frag, struct frag_queue, q);
@@ -56,7 +57,7 @@ static void lowpan_frag_expire(struct timer_list *t)
 inet_frag_kill(&fq->q);
 out:
 spin_unlock(&fq->q.lock);
-inet_frag_put(&fq->q);
+inet_frag_putn(&fq->q, refs);
 }
static inline struct lowpan_frag_queue *
@@ -302,13 +303,13 @@ int lowpan_frag_rcv(struct sk_buff *skb, u8 frag_type)
 fq = fq_find(net, cb, &hdr.source, &hdr.dest);
 if (fq != NULL) {
-int ret;
+int ret, refs = 1;
 spin_lock(&fq->q.lock);
 ret = lowpan_frag_queue(fq, skb, frag_type);
 spin_unlock(&fq->q.lock);
-inet_frag_put(&fq->q);
+inet_frag_putn(&fq->q, refs);
 return ret;
 }

View file

@@ -145,8 +145,7 @@ static void inet_frags_free_cb(void *ptr, void *arg)
 }
 spin_unlock_bh(&fq->lock);
-if (refcount_sub_and_test(count, &fq->refcnt))
-inet_frag_destroy(fq);
+inet_frag_putn(fq, count);
 }
static LLIST_HEAD(fqdir_free_list);

View file

@@ -112,7 +112,7 @@ static void ip4_frag_free(struct inet_frag_queue *q)
 static void ipq_put(struct ipq *ipq)
 {
-inet_frag_put(&ipq->q);
+inet_frag_putn(&ipq->q, 1);
 }
/* Kill ipq entry. It is not destroyed immediately,

View file

@@ -447,6 +447,7 @@ int nf_ct_frag6_gather(struct net *net, struct sk_buff *skb, u32 user)
 struct frag_hdr *fhdr;
 struct frag_queue *fq;
 struct ipv6hdr *hdr;
+int refs = 1;
 u8 prevhdr;
/* Jumbo payload inhibits frag. header */
@@ -489,7 +490,7 @@ int nf_ct_frag6_gather(struct net *net, struct sk_buff *skb, u32 user)
 }
 spin_unlock_bh(&fq->q.lock);
-inet_frag_put(&fq->q);
+inet_frag_putn(&fq->q, refs);
 return ret;
 }
EXPORT_SYMBOL_GPL(nf_ct_frag6_gather);

View file

@@ -380,7 +380,7 @@ static int ipv6_frag_rcv(struct sk_buff *skb)
 fq = fq_find(net, fhdr->identification, hdr, iif);
 if (fq) {
 u32 prob_offset = 0;
-int ret;
+int ret, refs = 1;
 spin_lock(&fq->q.lock);
@@ -389,7 +389,7 @@ static int ipv6_frag_rcv(struct sk_buff *skb)
 &prob_offset);
 spin_unlock(&fq->q.lock);
-inet_frag_put(&fq->q);
+inet_frag_putn(&fq->q, refs);
 if (prob_offset) {
 __IP6_INC_STATS(net, __in6_dev_get_safely(skb->dev),
 IPSTATS_MIB_INHDRERRORS);