ipsec-next-2025-07-23

-----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCgAdFiEEH7ZpcWbFyOOp6OJbrB3Eaf9PW7cFAmiAlVEACgkQrB3Eaf9P
 W7cpoA//Rc/toCr82EVJUI9wImhRH0w1BExe52dvjGw4RtS9ioWDW+aWCIJeudFy
 wEAWs9NAlLQCgMYwQpans3VcUJYggi9fi73Xgt/dfWciD9WeRKKZrkS7VI3JDCNO
 mg+dJPbsYmpHuFNfo4eSUJuLaV+NiPXfAa0htHqgiy6iNHLl8CzhgbaUjMoGw2vi
 Ctr6zpQZJLTFhFmVsaQk7b72/rpy7I/nhY6wcV8ODKpgVjkLK6TfZHhQdRA9zzUK
 mE+T4O/T5CvhzXjjxMJhGL4QA0/XpScpwLIqooQrC8saKMlH/Jc4ZIdyXatGi2Y1
 jmdYuLi4uDgxLEYT7LL321+mHN5xnvgTsXn27T7dNJuNRSZHcjapBqfINRZ7GrmB
 86ojpBB0BFljX4zxQOZDD8a9OFhjePCynz6nXqNd+L3ULl09qK6F2lg4lZfugJQE
 yrFpe8JHnSDlAvQ+geKrl5Rgi1bx1gwD30a/E2h0ujgZWK1Ny0gT9SdSSNOhwHSu
 AUNoWyn7OcxYQXtkszXZz2hmtJcKxD8BNuNBXQA2bsFDCKBqX00cyc2EBuWYepks
 oOIupLRhi89OS3HT4NWuh4vfZaciGX5iLhbW3L8lULhtRwZEhRwIyHyN0jIC06Yj
 Hta6Xxvm0LTklj1pvUydIUC/a6hbLMENyZFscKKX3LV5HLYVLw4=
 =5hI2
 -----END PGP SIGNATURE-----

Merge tag 'ipsec-next-2025-07-23' of git://git.kernel.org/pub/scm/linux/kernel/git/klassert/ipsec-next

Steffen Klassert says:

====================
pull request (net-next): ipsec-next 2025-07-23

1) Hold the device reference only for asynchronous decryption,
   where it is actually needed.
   From Jianbo Liu.

2) Align our inbound SA lookup with RFC 4301: only SPI and protocol
   should be used for an inbound SA lookup (a toy illustration of this
   keying follows the quoted message below).
   From Aakash Kumar S.

3) Skip redundant statistics update for xfrm crypto offload.
   From Jianbo Liu.

Please pull or let me know if there are problems.

* tag 'ipsec-next-2025-07-23' of git://git.kernel.org/pub/scm/linux/kernel/git/klassert/ipsec-next:
  xfrm: Skip redundant statistics update for crypto offload
  xfrm: Duplicate SPI Handling
  xfrm: hold device only for the asynchronous decryption
====================
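
To make item 2 concrete, here is a minimal userspace C sketch of an SA table
keyed only by SPI and protocol, as RFC 4301 prescribes for inbound lookups.
The names sa_entry and lookup_inbound_sa are invented for illustration and are
not the kernel API; the kernel-side helper added by this series is
xfrm_state_lookup_spi_proto(), shown in the diff below.

#include <stdint.h>
#include <stddef.h>

/* Toy SA table keyed the RFC 4301 way (illustrative, not kernel code). */
struct sa_entry {
        uint32_t spi;   /* SPI as carried in the ESP/AH header */
        uint8_t  proto; /* IPPROTO_ESP or IPPROTO_AH */
        /* addresses, marks, etc. are deliberately NOT part of the key */
};

/* Return the first SA whose SPI and protocol match; NULL if none. */
struct sa_entry *lookup_inbound_sa(struct sa_entry *tbl, size_t n,
                                   uint32_t spi, uint8_t proto)
{
        for (size_t i = 0; i < n; i++) {
                if (tbl[i].spi == spi && tbl[i].proto == proto)
                        return &tbl[i];
        }
        return NULL;
}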

Link: https://patch.msgid.link/20250723080402.3439619-1-steffen.klassert@secunet.com
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
commit 94619ea2d9
Paolo Abeni <pabeni@redhat.com>, 2025-07-24 15:13:20 +02:00
2 changed files with 59 additions and 39 deletions

net/xfrm/xfrm_input.c

@@ -503,6 +503,7 @@ int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
 	/* An encap_type of -1 indicates async resumption. */
 	if (encap_type == -1) {
 		async = 1;
+		dev_put(skb->dev);
 		seq = XFRM_SKB_CB(skb)->seq.input.low;
 		goto resume;
 	}
@@ -649,18 +650,18 @@ lock:
 	XFRM_SKB_CB(skb)->seq.input.low = seq;
 	XFRM_SKB_CB(skb)->seq.input.hi = seq_hi;
 
-	dev_hold(skb->dev);
-
-	if (crypto_done)
+	if (crypto_done) {
 		nexthdr = x->type_offload->input_tail(x, skb);
-	else
+	} else {
+		dev_hold(skb->dev);
 		nexthdr = x->type->input(x, skb);
-
-	if (nexthdr == -EINPROGRESS)
-		return 0;
+		if (nexthdr == -EINPROGRESS)
+			return 0;
+		dev_put(skb->dev);
+	}
 
 resume:
-	dev_put(skb->dev);
-
 	spin_lock(&x->lock);
 	if (nexthdr < 0) {
 		if (nexthdr == -EBADMSG) {
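
A rough userspace model of the refcount discipline the two hunks above
establish, under the simplifying assumption that only the call which may go
asynchronous needs the device pinned, and that the async resume path releases
the reference taken on entry. struct fake_dev, hold(), put(), input() and
resume() are toy stand-ins, not kernel symbols.

#include <stdbool.h>
#include <stdio.h>

/* Toy stand-in for a net_device reference count. */
struct fake_dev { int refcnt; };

static void hold(struct fake_dev *d) { d->refcnt++; }
static void put(struct fake_dev *d)  { d->refcnt--; }

/* Input path: take the ref only around the call that may go asynchronous,
 * and drop it right away when the call completed synchronously. */
static void input(struct fake_dev *d, bool goes_async)
{
        hold(d);
        if (goes_async)
                return;        /* ref stays held until the async resume */
        put(d);                /* synchronous completion: balance it here */
}

/* Async resumption: release the ref taken in input(). */
static void resume(struct fake_dev *d)
{
        put(d);
}

int main(void)
{
        struct fake_dev d = { 0 };

        input(&d, false);      /* sync: balanced inside input() */
        input(&d, true);       /* async: held across the completion */
        resume(&d);
        printf("refcnt=%d\n", d.refcnt);  /* prints refcnt=0 */
        return 0;
}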

net/xfrm/xfrm_state.c

@@ -1711,6 +1711,26 @@ struct xfrm_state *xfrm_state_lookup_byspi(struct net *net, __be32 spi,
 }
 EXPORT_SYMBOL(xfrm_state_lookup_byspi);
 
+static struct xfrm_state *xfrm_state_lookup_spi_proto(struct net *net, __be32 spi, u8 proto)
+{
+	struct xfrm_state *x;
+	unsigned int i;
+
+	rcu_read_lock();
+	for (i = 0; i <= net->xfrm.state_hmask; i++) {
+		hlist_for_each_entry_rcu(x, &net->xfrm.state_byspi[i], byspi) {
+			if (x->id.spi == spi && x->id.proto == proto) {
+				if (!xfrm_state_hold_rcu(x))
+					continue;
+				rcu_read_unlock();
+				return x;
+			}
+		}
+	}
+	rcu_read_unlock();
+	return NULL;
+}
+
 static void __xfrm_state_insert(struct xfrm_state *x)
 {
 	struct net *net = xs_net(x);
@@ -2262,7 +2282,12 @@ EXPORT_SYMBOL(xfrm_state_update);
 
 int xfrm_state_check_expire(struct xfrm_state *x)
 {
-	xfrm_dev_state_update_stats(x);
+	/* All counters which are needed to decide if state is expired
+	 * are handled by SW for non-packet offload modes. Simply skip
+	 * the following update and save extra boilerplate in drivers.
+	 */
+	if (x->xso.type == XFRM_DEV_OFFLOAD_PACKET)
+		xfrm_dev_state_update_stats(x);
 
 	if (!READ_ONCE(x->curlft.use_time))
 		WRITE_ONCE(x->curlft.use_time, ktime_get_real_seconds());
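
The hunk above makes xfrm_state_check_expire() refresh driver statistics only
for full packet offload, since software counters already cover the
crypto-offload and non-offload cases. A compact sketch of that gating pattern,
with invented names (toy_state, refresh_hw_stats) standing in for the real
xfrm structures and xfrm_dev_state_update_stats():

#include <stdio.h>

enum offload_type { OFFLOAD_NONE, OFFLOAD_CRYPTO, OFFLOAD_PACKET };

struct toy_state {
        enum offload_type offload;
        unsigned long hw_refreshes;   /* how often hardware counters were read */
};

/* Hypothetical stand-in for xfrm_dev_state_update_stats(). */
static void refresh_hw_stats(struct toy_state *s)
{
        s->hw_refreshes++;
}

/* Expiry check only needs hardware counters for full packet offload;
 * for crypto offload the software counters are already authoritative. */
static void check_expire(struct toy_state *s)
{
        if (s->offload == OFFLOAD_PACKET)
                refresh_hw_stats(s);
        /* ... lifetime comparison would follow here ... */
}

int main(void)
{
        struct toy_state crypto = { .offload = OFFLOAD_CRYPTO };
        struct toy_state packet = { .offload = OFFLOAD_PACKET };

        check_expire(&crypto);
        check_expire(&packet);
        printf("crypto refreshes: %lu, packet refreshes: %lu\n",
               crypto.hw_refreshes, packet.hw_refreshes);  /* 0 and 1 */
        return 0;
}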
@@ -2555,10 +2580,8 @@ int xfrm_alloc_spi(struct xfrm_state *x, u32 low, u32 high,
 	unsigned int h;
 	struct xfrm_state *x0;
 	int err = -ENOENT;
-	__be32 minspi = htonl(low);
-	__be32 maxspi = htonl(high);
+	u32 range = high - low + 1;
 	__be32 newspi = 0;
 	u32 mark = x->mark.v & x->mark.m;
 
 	spin_lock_bh(&x->lock);
 	if (x->km.state == XFRM_STATE_DEAD) {
@@ -2572,39 +2595,35 @@ int xfrm_alloc_spi(struct xfrm_state *x, u32 low, u32 high,
 	err = -ENOENT;
 
-	if (minspi == maxspi) {
-		x0 = xfrm_state_lookup(net, mark, &x->id.daddr, minspi, x->id.proto, x->props.family);
-		if (x0) {
-			NL_SET_ERR_MSG(extack, "Requested SPI is already in use");
-			xfrm_state_put(x0);
+	for (h = 0; h < range; h++) {
+		u32 spi = (low == high) ? low : get_random_u32_inclusive(low, high);
+		newspi = htonl(spi);
+
+		spin_lock_bh(&net->xfrm.xfrm_state_lock);
+		x0 = xfrm_state_lookup_spi_proto(net, newspi, x->id.proto);
+		if (!x0) {
+			x->id.spi = newspi;
+			h = xfrm_spi_hash(net, &x->id.daddr, newspi, x->id.proto, x->props.family);
+			XFRM_STATE_INSERT(byspi, &x->byspi, net->xfrm.state_byspi + h, x->xso.type);
+			spin_unlock_bh(&net->xfrm.xfrm_state_lock);
+			err = 0;
 			goto unlock;
 		}
-		newspi = minspi;
-	} else {
-		u32 spi = 0;
-		for (h = 0; h < high-low+1; h++) {
-			spi = get_random_u32_inclusive(low, high);
-			x0 = xfrm_state_lookup(net, mark, &x->id.daddr, htonl(spi), x->id.proto, x->props.family);
-			if (x0 == NULL) {
-				newspi = htonl(spi);
-				break;
-			}
-			xfrm_state_put(x0);
-		}
-	}
-	if (newspi) {
-		spin_lock_bh(&net->xfrm.xfrm_state_lock);
-		x->id.spi = newspi;
-		h = xfrm_spi_hash(net, &x->id.daddr, x->id.spi, x->id.proto, x->props.family);
-		XFRM_STATE_INSERT(byspi, &x->byspi, net->xfrm.state_byspi + h,
-				  x->xso.type);
+		xfrm_state_put(x0);
 		spin_unlock_bh(&net->xfrm.xfrm_state_lock);
-		err = 0;
-	} else {
-		NL_SET_ERR_MSG(extack, "No SPI available in the requested range");
+
+		if (signal_pending(current)) {
+			err = -ERESTARTSYS;
+			goto unlock;
+		}
+
+		if (low == high)
+			break;
 	}
 
+	if (err)
+		NL_SET_ERR_MSG(extack, "No SPI available in the requested range");
+
 unlock:
 	spin_unlock_bh(&x->lock);
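
The rewritten xfrm_alloc_spi() above draws random candidates from [low, high]
and retries on collision, claiming the SPI under the same lock that the
duplicate check runs under. A simplified userspace model of just the retry
loop follows; is_spi_in_use() is an invented predicate standing in for
xfrm_state_lookup_spi_proto(), and locking, marks and netlink error reporting
are omitted.

#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>

/* Invented stand-in for xfrm_state_lookup_spi_proto(): true if taken. */
static bool is_spi_in_use(uint32_t spi)
{
        return spi % 7 == 0;   /* arbitrary toy collision pattern */
}

/* Try up to 'range' random candidates in [low, high]; returns 0 on failure
 * (0 is not a valid SPI, so it can double as "none found"). */
uint32_t alloc_spi(uint32_t low, uint32_t high)
{
        uint32_t range = high - low + 1;

        for (uint32_t i = 0; i < range; i++) {
                uint32_t spi = (low == high)
                        ? low
                        : low + (uint32_t)(rand() % range);

                if (!is_spi_in_use(spi))
                        return spi;    /* claim it here, still under the lock */

                if (low == high)
                        break;         /* the single requested SPI is taken */
        }
        return 0;
}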