Mirror of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git (synced 2025-08-05 16:54:27 +00:00)
ipv6: adopt dst_dev() helper
Use the new helper as a step to deal with potential dst->dev races.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Reviewed-by: Kuniyuki Iwashima <kuniyu@google.com>
Link: https://patch.msgid.link/20250630121934.3399505-9-edumazet@google.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parent a74fc62eec
commit 1caf272972
17 changed files with 60 additions and 47 deletions
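The dst_dev() and skb_dst_dev() accessors used throughout the diff below were added earlier in this series (presumably alongside the dst_entry definition in include/net/dst.h); this commit only converts IPv6 call sites to them. As a rough sketch of the idea, assuming the helpers simply centralize the dst->dev read so it can be annotated in one place (the exact annotations in the real header may differ):

/* Sketch only: the real helpers live with the dst_entry definition
 * (likely include/net/dst.h); annotations there may differ.
 */
static inline struct net_device *dst_dev(const struct dst_entry *dst)
{
	/* One central place to read dst->dev, so the access can carry
	 * READ_ONCE()/RCU-style annotations against concurrent device
	 * changes instead of open-coded dst->dev dereferences.
	 */
	return READ_ONCE(dst->dev);
}

static inline struct net_device *skb_dst_dev(const struct sk_buff *skb)
{
	return dst_dev(skb_dst(skb));
}

Call sites then read the device once through the helper (e.g. dev = dst_dev(dst);) instead of dereferencing dst->dev repeatedly, as the hunks below do.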
@@ -274,7 +274,7 @@ static inline unsigned int ip6_skb_dst_mtu(const struct sk_buff *skb)
 	unsigned int mtu;
 
 	if (np && READ_ONCE(np->pmtudisc) >= IPV6_PMTUDISC_PROBE) {
-		mtu = READ_ONCE(dst->dev->mtu);
+		mtu = READ_ONCE(dst_dev(dst)->mtu);
 		mtu -= lwtunnel_headroom(dst->lwtstate, mtu);
 	} else {
 		mtu = dst_mtu(dst);
@@ -337,7 +337,7 @@ static inline unsigned int ip6_dst_mtu_maybe_forward(const struct dst_entry *dst
 
 	mtu = IPV6_MIN_MTU;
 	rcu_read_lock();
-	idev = __in6_dev_get(dst->dev);
+	idev = __in6_dev_get(dst_dev(dst));
 	if (idev)
 		mtu = READ_ONCE(idev->cnf.mtu6);
 	rcu_read_unlock();
@@ -306,7 +306,7 @@ static int ipv6_destopt_rcv(struct sk_buff *skb)
 	if (!pskb_may_pull(skb, skb_transport_offset(skb) + 8) ||
 	    !pskb_may_pull(skb, (skb_transport_offset(skb) +
 				 ((skb_transport_header(skb)[1] + 1) << 3)))) {
-		__IP6_INC_STATS(dev_net(dst->dev), idev,
+		__IP6_INC_STATS(dev_net(dst_dev(dst)), idev,
 				IPSTATS_MIB_INHDRERRORS);
 fail_and_free:
 		kfree_skb(skb);
@@ -196,6 +196,7 @@ static bool icmpv6_xrlim_allow(struct sock *sk, u8 type,
 			       struct flowi6 *fl6, bool apply_ratelimit)
 {
 	struct net *net = sock_net(sk);
+	struct net_device *dev;
 	struct dst_entry *dst;
 	bool res = false;
 
@@ -208,10 +209,11 @@ static bool icmpv6_xrlim_allow(struct sock *sk, u8 type,
 	 * this lookup should be more aggressive (not longer than timeout).
 	 */
 	dst = ip6_route_output(net, sk, fl6);
+	dev = dst_dev(dst);
 	if (dst->error) {
 		IP6_INC_STATS(net, ip6_dst_idev(dst),
 			      IPSTATS_MIB_OUTNOROUTES);
-	} else if (dst->dev && (dst->dev->flags&IFF_LOOPBACK)) {
+	} else if (dev && (dev->flags & IFF_LOOPBACK)) {
 		res = true;
 	} else {
 		struct rt6_info *rt = dst_rt6_info(dst);
@@ -70,7 +70,7 @@ static int ila_output(struct net *net, struct sock *sk, struct sk_buff *skb)
 	 */
 
 	memset(&fl6, 0, sizeof(fl6));
-	fl6.flowi6_oif = orig_dst->dev->ifindex;
+	fl6.flowi6_oif = dst_dev(orig_dst)->ifindex;
 	fl6.flowi6_iif = LOOPBACK_IFINDEX;
 	fl6.daddr = *rt6_nexthop(dst_rt6_info(orig_dst),
 				 &ip6h->daddr);
@@ -335,7 +335,7 @@ static int ioam6_do_encap(struct net *net, struct sk_buff *skb,
 	if (has_tunsrc)
 		memcpy(&hdr->saddr, tunsrc, sizeof(*tunsrc));
 	else
-		ipv6_dev_get_saddr(net, dst->dev, &hdr->daddr,
+		ipv6_dev_get_saddr(net, dst_dev(dst), &hdr->daddr,
 				   IPV6_PREFER_SRC_PUBLIC, &hdr->saddr);
 
 	skb_postpush_rcsum(skb, hdr, len);
@@ -442,7 +442,7 @@ do_encap:
 		dst_cache_set_ip6(&ilwt->cache, dst, &fl6.saddr);
 		local_bh_enable();
 
-		err = skb_cow_head(skb, LL_RESERVED_SPACE(dst->dev));
+		err = skb_cow_head(skb, LL_RESERVED_SPACE(dst_dev(dst)));
 		if (unlikely(err))
 			goto drop;
 	}
@@ -1085,9 +1085,11 @@ static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb,
 				 htonl(atomic_fetch_inc(&t->o_seqno)));
 
 	/* TooBig packet may have updated dst->dev's mtu */
-	if (!t->parms.collect_md && dst && dst_mtu(dst) > dst->dev->mtu)
-		dst->ops->update_pmtu(dst, NULL, skb, dst->dev->mtu, false);
-
+	if (!t->parms.collect_md && dst) {
+		mtu = READ_ONCE(dst_dev(dst)->mtu);
+		if (dst_mtu(dst) > mtu)
+			dst->ops->update_pmtu(dst, NULL, skb, mtu, false);
+	}
 	err = ip6_tnl_xmit(skb, dev, dsfield, &fl6, encap_limit, &mtu,
 			   NEXTHDR_GRE);
 	if (err != 0) {
@@ -60,7 +60,7 @@
 static int ip6_finish_output2(struct net *net, struct sock *sk, struct sk_buff *skb)
 {
 	struct dst_entry *dst = skb_dst(skb);
-	struct net_device *dev = dst->dev;
+	struct net_device *dev = dst_dev(dst);
 	struct inet6_dev *idev = ip6_dst_idev(dst);
 	unsigned int hh_len = LL_RESERVED_SPACE(dev);
 	const struct in6_addr *daddr, *nexthop;
@@ -271,7 +271,7 @@ int ip6_xmit(const struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
 	const struct ipv6_pinfo *np = inet6_sk(sk);
 	struct in6_addr *first_hop = &fl6->daddr;
 	struct dst_entry *dst = skb_dst(skb);
-	struct net_device *dev = dst->dev;
+	struct net_device *dev = dst_dev(dst);
 	struct inet6_dev *idev = ip6_dst_idev(dst);
 	struct hop_jumbo_hdr *hop_jumbo;
 	int hoplen = sizeof(*hop_jumbo);
@@ -503,7 +503,8 @@ int ip6_forward(struct sk_buff *skb)
 	struct dst_entry *dst = skb_dst(skb);
 	struct ipv6hdr *hdr = ipv6_hdr(skb);
 	struct inet6_skb_parm *opt = IP6CB(skb);
-	struct net *net = dev_net(dst->dev);
+	struct net *net = dev_net(dst_dev(dst));
+	struct net_device *dev;
 	struct inet6_dev *idev;
 	SKB_DR(reason);
 	u32 mtu;
@@ -591,12 +592,12 @@ int ip6_forward(struct sk_buff *skb)
 		goto drop;
 	}
 	dst = skb_dst(skb);
-
+	dev = dst_dev(dst);
 	/* IPv6 specs say nothing about it, but it is clear that we cannot
 	   send redirects to source routed frames.
 	   We don't send redirects to frames decapsulated from IPsec.
 	 */
-	if (IP6CB(skb)->iif == dst->dev->ifindex &&
+	if (IP6CB(skb)->iif == dev->ifindex &&
 	    opt->srcrt == 0 && !skb_sec_path(skb)) {
 		struct in6_addr *target = NULL;
 		struct inet_peer *peer;
@@ -644,7 +645,7 @@ int ip6_forward(struct sk_buff *skb)
 
 	if (ip6_pkt_too_big(skb, mtu)) {
 		/* Again, force OUTPUT device used as source address */
-		skb->dev = dst->dev;
+		skb->dev = dev;
 		icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
 		__IP6_INC_STATS(net, idev, IPSTATS_MIB_INTOOBIGERRORS);
 		__IP6_INC_STATS(net, ip6_dst_idev(dst),
@@ -653,7 +654,7 @@ int ip6_forward(struct sk_buff *skb)
 		return -EMSGSIZE;
 	}
 
-	if (skb_cow(skb, dst->dev->hard_header_len)) {
+	if (skb_cow(skb, dev->hard_header_len)) {
 		__IP6_INC_STATS(net, ip6_dst_idev(dst),
 				IPSTATS_MIB_OUTDISCARDS);
 		goto drop;
@@ -666,7 +667,7 @@ int ip6_forward(struct sk_buff *skb)
 	hdr->hop_limit--;
 
 	return NF_HOOK(NFPROTO_IPV6, NF_INET_FORWARD,
-		       net, NULL, skb, skb->dev, dst->dev,
+		       net, NULL, skb, skb->dev, dev,
 		       ip6_forward_finish);
 
 error:
@@ -1093,7 +1094,7 @@ static struct dst_entry *ip6_sk_dst_check(struct sock *sk,
 #ifdef CONFIG_IPV6_SUBTREES
 	    ip6_rt_check(&rt->rt6i_src, &fl6->saddr, np->saddr_cache) ||
 #endif
-	   (fl6->flowi6_oif && fl6->flowi6_oif != dst->dev->ifindex)) {
+	   (fl6->flowi6_oif && fl6->flowi6_oif != dst_dev(dst)->ifindex)) {
 		dst_release(dst);
 		dst = NULL;
 	}
@@ -1179,7 +1179,7 @@ route_lookup:
 		ndst = dst;
 	}
 
-	tdev = dst->dev;
+	tdev = dst_dev(dst);
 
 	if (tdev == dev) {
 		DEV_STATS_INC(dev, collisions);
@@ -1255,7 +1255,7 @@ route_lookup:
 	/* Calculate max headroom for all the headers and adjust
 	 * needed_headroom if necessary.
 	 */
-	max_headroom = LL_RESERVED_SPACE(dst->dev) + sizeof(struct ipv6hdr)
+	max_headroom = LL_RESERVED_SPACE(tdev) + sizeof(struct ipv6hdr)
 			+ dst->header_len + t->hlen;
 	if (max_headroom > READ_ONCE(dev->needed_headroom))
 		WRITE_ONCE(dev->needed_headroom, max_headroom);
@@ -168,7 +168,7 @@ struct dst_entry *udp_tunnel6_dst_lookup(struct sk_buff *skb,
 		netdev_dbg(dev, "no route to %pI6\n", &fl6.daddr);
 		return ERR_PTR(-ENETUNREACH);
 	}
-	if (dst->dev == dev) { /* is this necessary? */
+	if (dst_dev(dst) == dev) { /* is this necessary? */
 		netdev_dbg(dev, "circular route to %pI6\n", &fl6.daddr);
 		dst_release(dst);
 		return ERR_PTR(-ELOOP);
@@ -497,7 +497,7 @@ vti6_xmit(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
 			    (const struct in6_addr *)&x->id.daddr))
 		goto tx_err_link_failure;
 
-	tdev = dst->dev;
+	tdev = dst_dev(dst);
 
 	if (tdev == dev) {
 		DEV_STATS_INC(dev, collisions);
@@ -473,6 +473,7 @@ void ndisc_send_skb(struct sk_buff *skb, const struct in6_addr *daddr,
 {
 	struct icmp6hdr *icmp6h = icmp6_hdr(skb);
 	struct dst_entry *dst = skb_dst(skb);
+	struct net_device *dev;
 	struct inet6_dev *idev;
 	struct net *net;
 	struct sock *sk;
@@ -507,11 +508,12 @@ void ndisc_send_skb(struct sk_buff *skb, const struct in6_addr *daddr,
 
 	ip6_nd_hdr(skb, saddr, daddr, READ_ONCE(inet6_sk(sk)->hop_limit), skb->len);
 
-	idev = __in6_dev_get(dst->dev);
+	dev = dst_dev(dst);
+	idev = __in6_dev_get(dev);
 	IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTREQUESTS);
 
 	err = NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT,
-		      net, sk, skb, NULL, dst->dev,
+		      net, sk, skb, NULL, dev,
 		      dst_output);
 	if (!err) {
 		ICMP6MSGOUT_INC_STATS(net, idev, type);
@@ -38,7 +38,7 @@ static bool nf_dup_ipv6_route(struct net *net, struct sk_buff *skb,
 	}
 	skb_dst_drop(skb);
 	skb_dst_set(skb, dst);
-	skb->dev = dst->dev;
+	skb->dev = dst_dev(dst);
 	skb->protocol = htons(ETH_P_IPV6);
 
 	return true;
@@ -105,7 +105,7 @@ int ip6_dst_hoplimit(struct dst_entry *dst)
 {
 	int hoplimit = dst_metric_raw(dst, RTAX_HOPLIMIT);
 	if (hoplimit == 0) {
-		struct net_device *dev = dst->dev;
+		struct net_device *dev = dst_dev(dst);
 		struct inet6_dev *idev;
 
 		rcu_read_lock();
@@ -228,13 +228,13 @@ static struct neighbour *ip6_dst_neigh_lookup(const struct dst_entry *dst,
 	const struct rt6_info *rt = dst_rt6_info(dst);
 
 	return ip6_neigh_lookup(rt6_nexthop(rt, &in6addr_any),
-				dst->dev, skb, daddr);
+				dst_dev(dst), skb, daddr);
 }
 
 static void ip6_confirm_neigh(const struct dst_entry *dst, const void *daddr)
 {
 	const struct rt6_info *rt = dst_rt6_info(dst);
-	struct net_device *dev = dst->dev;
+	struct net_device *dev = dst_dev(dst);
 
 	daddr = choose_neigh_daddr(rt6_nexthop(rt, &in6addr_any), NULL, daddr);
 	if (!daddr)
@@ -2943,7 +2943,7 @@ static void __ip6_rt_update_pmtu(struct dst_entry *dst, const struct sock *sk,
 
 	if (res.f6i->nh) {
 		struct fib6_nh_match_arg arg = {
-			.dev = dst->dev,
+			.dev = dst_dev(dst),
 			.gw = &rt6->rt6i_gateway,
 		};
 
@@ -3238,7 +3238,7 @@ EXPORT_SYMBOL_GPL(ip6_sk_redirect);
 
 static unsigned int ip6_default_advmss(const struct dst_entry *dst)
 {
-	struct net_device *dev = dst->dev;
+	struct net_device *dev = dst_dev(dst);
 	unsigned int mtu = dst_mtu(dst);
 	struct net *net;
 
@@ -4301,7 +4301,7 @@ static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_bu
 
 	if (res.f6i->nh) {
 		struct fib6_nh_match_arg arg = {
-			.dev = dst->dev,
+			.dev = dst_dev(dst),
 			.gw = &rt->rt6i_gateway,
 		};
 
@@ -4587,13 +4587,14 @@ int ipv6_route_ioctl(struct net *net, unsigned int cmd, struct in6_rtmsg *rtmsg)
 static int ip6_pkt_drop(struct sk_buff *skb, u8 code, int ipstats_mib_noroutes)
 {
 	struct dst_entry *dst = skb_dst(skb);
-	struct net *net = dev_net(dst->dev);
+	struct net_device *dev = dst_dev(dst);
+	struct net *net = dev_net(dev);
 	struct inet6_dev *idev;
 	SKB_DR(reason);
 	int type;
 
 	if (netif_is_l3_master(skb->dev) ||
-	    dst->dev == net->loopback_dev)
+	    dev == net->loopback_dev)
 		idev = __in6_dev_get_safely(dev_get_by_index_rcu(net, IP6CB(skb)->iif));
 	else
 		idev = ip6_dst_idev(dst);
@@ -5844,11 +5845,14 @@ static int rt6_fill_node(struct net *net, struct sk_buff *skb,
 	 * each as a nexthop within RTA_MULTIPATH.
 	 */
 	if (rt6) {
+		struct net_device *dev;
+
 		if (rt6_flags & RTF_GATEWAY &&
 		    nla_put_in6_addr(skb, RTA_GATEWAY, &rt6->rt6i_gateway))
 			goto nla_put_failure;
 
-		if (dst->dev && nla_put_u32(skb, RTA_OIF, dst->dev->ifindex))
+		dev = dst_dev(dst);
+		if (dev && nla_put_u32(skb, RTA_OIF, dev->ifindex))
 			goto nla_put_failure;
 
 		if (lwtunnel_fill_encap(skb, dst->lwtstate, RTA_ENCAP, RTA_ENCAP_TYPE) < 0)
@@ -242,7 +242,7 @@ static int rpl_output(struct net *net, struct sock *sk, struct sk_buff *skb)
 			local_bh_enable();
 		}
 
-		err = skb_cow_head(skb, LL_RESERVED_SPACE(dst->dev));
+		err = skb_cow_head(skb, LL_RESERVED_SPACE(dst_dev(dst)));
 		if (unlikely(err))
 			goto drop;
 	}
@@ -297,7 +297,7 @@ static int rpl_input(struct sk_buff *skb)
 			local_bh_enable();
 		}
 
-		err = skb_cow_head(skb, LL_RESERVED_SPACE(dst->dev));
+		err = skb_cow_head(skb, LL_RESERVED_SPACE(dst_dev(dst)));
 		if (unlikely(err))
 			goto drop;
 	} else {
@@ -128,7 +128,8 @@ static int __seg6_do_srh_encap(struct sk_buff *skb, struct ipv6_sr_hdr *osrh,
 			       int proto, struct dst_entry *cache_dst)
 {
 	struct dst_entry *dst = skb_dst(skb);
-	struct net *net = dev_net(dst->dev);
+	struct net_device *dev = dst_dev(dst);
+	struct net *net = dev_net(dev);
 	struct ipv6hdr *hdr, *inner_hdr;
 	struct ipv6_sr_hdr *isrh;
 	int hdrlen, tot_len, err;
@@ -181,7 +182,7 @@ static int __seg6_do_srh_encap(struct sk_buff *skb, struct ipv6_sr_hdr *osrh,
 	isrh->nexthdr = proto;
 
 	hdr->daddr = isrh->segments[isrh->first_segment];
-	set_tun_src(net, dst->dev, &hdr->daddr, &hdr->saddr);
+	set_tun_src(net, dev, &hdr->daddr, &hdr->saddr);
 
 #ifdef CONFIG_IPV6_SEG6_HMAC
 	if (sr_has_hmac(isrh)) {
@@ -212,7 +213,8 @@ static int seg6_do_srh_encap_red(struct sk_buff *skb,
 {
 	__u8 first_seg = osrh->first_segment;
 	struct dst_entry *dst = skb_dst(skb);
-	struct net *net = dev_net(dst->dev);
+	struct net_device *dev = dst_dev(dst);
+	struct net *net = dev_net(dev);
 	struct ipv6hdr *hdr, *inner_hdr;
 	int hdrlen = ipv6_optlen(osrh);
 	int red_tlv_offset, tlv_offset;
@@ -270,7 +272,7 @@ static int seg6_do_srh_encap_red(struct sk_buff *skb,
 	if (skip_srh) {
 		hdr->nexthdr = proto;
 
-		set_tun_src(net, dst->dev, &hdr->daddr, &hdr->saddr);
+		set_tun_src(net, dev, &hdr->daddr, &hdr->saddr);
 		goto out;
 	}
 
@@ -306,7 +308,7 @@ static int seg6_do_srh_encap_red(struct sk_buff *skb,
 
 srcaddr:
 	isrh->nexthdr = proto;
-	set_tun_src(net, dst->dev, &hdr->daddr, &hdr->saddr);
+	set_tun_src(net, dev, &hdr->daddr, &hdr->saddr);
 
 #ifdef CONFIG_IPV6_SEG6_HMAC
 	if (unlikely(!skip_srh && sr_has_hmac(isrh))) {
@@ -507,7 +509,7 @@ static int seg6_input_core(struct net *net, struct sock *sk,
 			local_bh_enable();
 		}
 
-		err = skb_cow_head(skb, LL_RESERVED_SPACE(dst->dev));
+		err = skb_cow_head(skb, LL_RESERVED_SPACE(dst_dev(dst)));
 		if (unlikely(err))
 			goto drop;
 	} else {
@@ -518,7 +520,7 @@ static int seg6_input_core(struct net *net, struct sock *sk,
 	if (static_branch_unlikely(&nf_hooks_lwtunnel_enabled))
 		return NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT,
 			       dev_net(skb->dev), NULL, skb, NULL,
-			       skb_dst(skb)->dev, seg6_input_finish);
+			       skb_dst_dev(skb), seg6_input_finish);
 
 	return seg6_input_finish(dev_net(skb->dev), NULL, skb);
 drop:
@@ -593,7 +595,7 @@ static int seg6_output_core(struct net *net, struct sock *sk,
 			local_bh_enable();
 		}
 
-		err = skb_cow_head(skb, LL_RESERVED_SPACE(dst->dev));
+		err = skb_cow_head(skb, LL_RESERVED_SPACE(dst_dev(dst)));
 		if (unlikely(err))
 			goto drop;
 	}
@@ -603,7 +605,7 @@ static int seg6_output_core(struct net *net, struct sock *sk,
 
 	if (static_branch_unlikely(&nf_hooks_lwtunnel_enabled))
 		return NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, net, sk, skb,
-			       NULL, skb_dst(skb)->dev, dst_output);
+			       NULL, dst_dev(dst), dst_output);
 
 	return dst_output(net, sk, skb);
 drop:
@@ -313,7 +313,7 @@ seg6_lookup_any_nexthop(struct sk_buff *skb, struct in6_addr *nhaddr,
 	if (!local_delivery)
 		dev_flags |= IFF_LOOPBACK;
 
-	if (dst && (dst->dev->flags & dev_flags) && !dst->error) {
+	if (dst && (dst_dev(dst)->flags & dev_flags) && !dst->error) {
 		dst_release(dst);
 		dst = NULL;
 	}