linux/net/ax25/ax25_out.c
Eric Dumazet 95fc45d1de ax25: rcu protect dev->ax25_ptr
syzbot found a lockdep issue [1].

We should remove ax25 RTNL dependency in ax25_setsockopt()

This should also fix a variety of possible UAF in ax25.

[1]

WARNING: possible circular locking dependency detected
6.13.0-rc3-syzkaller-00762-g9268abe611b0 #0 Not tainted
------------------------------------------------------
syz.5.1818/12806 is trying to acquire lock:
 ffffffff8fcb3988 (rtnl_mutex){+.+.}-{4:4}, at: ax25_setsockopt+0xa55/0xe90 net/ax25/af_ax25.c:680

but task is already holding lock:
 ffff8880617ac258 (sk_lock-AF_AX25){+.+.}-{0:0}, at: lock_sock include/net/sock.h:1618 [inline]
 ffff8880617ac258 (sk_lock-AF_AX25){+.+.}-{0:0}, at: ax25_setsockopt+0x209/0xe90 net/ax25/af_ax25.c:574

which lock already depends on the new lock.

the existing dependency chain (in reverse order) is:

-> #1 (sk_lock-AF_AX25){+.+.}-{0:0}:
        lock_acquire+0x1ed/0x550 kernel/locking/lockdep.c:5849
        lock_sock_nested+0x48/0x100 net/core/sock.c:3642
        lock_sock include/net/sock.h:1618 [inline]
        ax25_kill_by_device net/ax25/af_ax25.c:101 [inline]
        ax25_device_event+0x24d/0x580 net/ax25/af_ax25.c:146
        notifier_call_chain+0x1a5/0x3f0 kernel/notifier.c:85
       __dev_notify_flags+0x207/0x400
        dev_change_flags+0xf0/0x1a0 net/core/dev.c:9026
        dev_ifsioc+0x7c8/0xe70 net/core/dev_ioctl.c:563
        dev_ioctl+0x719/0x1340 net/core/dev_ioctl.c:820
        sock_do_ioctl+0x240/0x460 net/socket.c:1234
        sock_ioctl+0x626/0x8e0 net/socket.c:1339
        vfs_ioctl fs/ioctl.c:51 [inline]
        __do_sys_ioctl fs/ioctl.c:906 [inline]
        __se_sys_ioctl+0xf5/0x170 fs/ioctl.c:892
        do_syscall_x64 arch/x86/entry/common.c:52 [inline]
        do_syscall_64+0xf3/0x230 arch/x86/entry/common.c:83
       entry_SYSCALL_64_after_hwframe+0x77/0x7f

-> #0 (rtnl_mutex){+.+.}-{4:4}:
        check_prev_add kernel/locking/lockdep.c:3161 [inline]
        check_prevs_add kernel/locking/lockdep.c:3280 [inline]
        validate_chain+0x18ef/0x5920 kernel/locking/lockdep.c:3904
        __lock_acquire+0x1397/0x2100 kernel/locking/lockdep.c:5226
        lock_acquire+0x1ed/0x550 kernel/locking/lockdep.c:5849
        __mutex_lock_common kernel/locking/mutex.c:585 [inline]
        __mutex_lock+0x1ac/0xee0 kernel/locking/mutex.c:735
        ax25_setsockopt+0xa55/0xe90 net/ax25/af_ax25.c:680
        do_sock_setsockopt+0x3af/0x720 net/socket.c:2324
        __sys_setsockopt net/socket.c:2349 [inline]
        __do_sys_setsockopt net/socket.c:2355 [inline]
        __se_sys_setsockopt net/socket.c:2352 [inline]
        __x64_sys_setsockopt+0x1ee/0x280 net/socket.c:2352
        do_syscall_x64 arch/x86/entry/common.c:52 [inline]
        do_syscall_64+0xf3/0x230 arch/x86/entry/common.c:83
       entry_SYSCALL_64_after_hwframe+0x77/0x7f

other info that might help us debug this:

 Possible unsafe locking scenario:

       CPU0                    CPU1
       ----                    ----
  lock(sk_lock-AF_AX25);
                               lock(rtnl_mutex);
                               lock(sk_lock-AF_AX25);
  lock(rtnl_mutex);

 *** DEADLOCK ***

1 lock held by syz.5.1818/12806:
  #0: ffff8880617ac258 (sk_lock-AF_AX25){+.+.}-{0:0}, at: lock_sock include/net/sock.h:1618 [inline]
  #0: ffff8880617ac258 (sk_lock-AF_AX25){+.+.}-{0:0}, at: ax25_setsockopt+0x209/0xe90 net/ax25/af_ax25.c:574

stack backtrace:
CPU: 1 UID: 0 PID: 12806 Comm: syz.5.1818 Not tainted 6.13.0-rc3-syzkaller-00762-g9268abe611b0 #0
Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 09/13/2024
Call Trace:
 <TASK>
  __dump_stack lib/dump_stack.c:94 [inline]
  dump_stack_lvl+0x241/0x360 lib/dump_stack.c:120
  print_circular_bug+0x13a/0x1b0 kernel/locking/lockdep.c:2074
  check_noncircular+0x36a/0x4a0 kernel/locking/lockdep.c:2206
  check_prev_add kernel/locking/lockdep.c:3161 [inline]
  check_prevs_add kernel/locking/lockdep.c:3280 [inline]
  validate_chain+0x18ef/0x5920 kernel/locking/lockdep.c:3904
  __lock_acquire+0x1397/0x2100 kernel/locking/lockdep.c:5226
  lock_acquire+0x1ed/0x550 kernel/locking/lockdep.c:5849
  __mutex_lock_common kernel/locking/mutex.c:585 [inline]
  __mutex_lock+0x1ac/0xee0 kernel/locking/mutex.c:735
  ax25_setsockopt+0xa55/0xe90 net/ax25/af_ax25.c:680
  do_sock_setsockopt+0x3af/0x720 net/socket.c:2324
  __sys_setsockopt net/socket.c:2349 [inline]
  __do_sys_setsockopt net/socket.c:2355 [inline]
  __se_sys_setsockopt net/socket.c:2352 [inline]
  __x64_sys_setsockopt+0x1ee/0x280 net/socket.c:2352
  do_syscall_x64 arch/x86/entry/common.c:52 [inline]
  do_syscall_64+0xf3/0x230 arch/x86/entry/common.c:83
 entry_SYSCALL_64_after_hwframe+0x77/0x7f
RIP: 0033:0x7f7b62385d29

Fixes: c433570458 ("ax25: fix a use-after-free in ax25_fillin_cb()")
Reported-by: syzbot <syzkaller@googlegroups.com>
Signed-off-by: Eric Dumazet <edumazet@google.com>
Reviewed-by: Kuniyuki Iwashima <kuniyu@amazon.com>
Link: https://patch.msgid.link/20250103210514.87290-1-edumazet@google.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
2025-01-06 15:57:01 -08:00

398 lines
8.7 KiB
C

// SPDX-License-Identifier: GPL-2.0-or-later
/*
*
* Copyright (C) Alan Cox GW4PTS (alan@lxorguk.ukuu.org.uk)
* Copyright (C) Jonathan Naylor G4KLX (g4klx@g4klx.demon.co.uk)
* Copyright (C) Joerg Reuter DL1BKE (jreuter@yaina.de)
*/
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/spinlock.h>
#include <linux/net.h>
#include <linux/slab.h>
#include <net/ax25.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <linux/uaccess.h>
#include <linux/fcntl.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
static DEFINE_SPINLOCK(ax25_frag_lock);
/*
 * Send an AX.25 frame from @src to @dest via @dev, reusing an existing
 * connection when one matches or creating (and connecting) a new control
 * block otherwise.
 *
 * @skb:    frame to queue for transmission (ownership passes to the stack)
 * @paclen: maximum packet length; 0 means "use the device default"
 * @src:    source AX.25 address
 * @dest:   destination AX.25 address
 * @digi:   optional digipeater path (copied; may be NULL)
 * @dev:    network device to send on
 *
 * Returns the ax25_cb used (caller holds one reference and must put it),
 * or NULL on failure (no ax25 device, or out of memory).
 */
ax25_cb *ax25_send_frame(struct sk_buff *skb, int paclen, const ax25_address *src, ax25_address *dest, ax25_digi *digi, struct net_device *dev)
{
ax25_dev *ax25_dev;
ax25_cb *ax25;
/*
 * Take the default packet length for the device if zero is
 * specified.
 */
if (paclen == 0) {
/* dev->ax25_ptr is RCU protected; hold the read lock across the lookup */
rcu_read_lock();
ax25_dev = ax25_dev_ax25dev(dev);
if (!ax25_dev) {
rcu_read_unlock();
return NULL;
}
paclen = ax25_dev->values[AX25_VALUES_PACLEN];
rcu_read_unlock();
}
/*
 * Look for an existing connection.
 */
if ((ax25 = ax25_find_cb(src, dest, digi, dev)) != NULL) {
ax25_output(ax25, paclen, skb);
return ax25; /* It already existed */
}
/* No match: re-validate the device under RCU before creating a new cb */
rcu_read_lock();
ax25_dev = ax25_dev_ax25dev(dev);
if (!ax25_dev) {
rcu_read_unlock();
return NULL;
}
if ((ax25 = ax25_create_cb()) == NULL) {
rcu_read_unlock();
return NULL;
}
/* Copy device parameters into the cb while the device is RCU-pinned */
ax25_fillin_cb(ax25, ax25_dev);
rcu_read_unlock();
ax25->source_addr = *src;
ax25->dest_addr = *dest;
if (digi != NULL) {
/* GFP_ATOMIC: may be called from non-sleepable context */
ax25->digipeat = kmemdup(digi, sizeof(*digi), GFP_ATOMIC);
if (ax25->digipeat == NULL) {
ax25_cb_put(ax25);
return NULL;
}
}
/* Start connection establishment per the device's configured protocol */
switch (ax25->ax25_dev->values[AX25_VALUES_PROTOCOL]) {
case AX25_PROTO_STD_SIMPLEX:
case AX25_PROTO_STD_DUPLEX:
ax25_std_establish_data_link(ax25);
break;
#ifdef CONFIG_AX25_DAMA_SLAVE
case AX25_PROTO_DAMA_SLAVE:
if (ax25_dev->dama.slave)
ax25_ds_establish_data_link(ax25);
else
ax25_std_establish_data_link(ax25);
break;
#endif
}
/*
 * There is one ref for the state machine; a caller needs
 * one more to put it back, just like with the existing one.
 */
ax25_cb_hold(ax25);
ax25_cb_add(ax25);
ax25->state = AX25_STATE_1;
ax25_start_heartbeat(ax25);
/* Queue the caller's frame; it will flow once the link comes up */
ax25_output(ax25, paclen, skb);
return ax25; /* We had to create it */
}
EXPORT_SYMBOL(ax25_send_frame);
/*
* All outgoing AX.25 I frames pass via this routine. Therefore this is
* where the fragmentation of frames takes place. If fragment is set to
* zero then we are not allowed to do fragmentation, even if the frame
* is too large.
*/
/*
 * Queue an outgoing I frame on @ax25's write queue, fragmenting it first
 * when it exceeds @paclen, then kick the transmitter.
 *
 * Two fragmentation schemes are used: plain AX.25 text (PID AX25_P_TEXT)
 * is split with each fragment re-tagged AX25_P_TEXT; everything else gets
 * KA9Q-style segmentation (AX25_P_SEGMENT + descending fragment counter,
 * AX25_SEG_FIRST set on the first fragment).
 *
 * Consumes @skb (queued or freed) in all cases except an allocation
 * failure mid-fragmentation, where remaining data is dropped.
 */
void ax25_output(ax25_cb *ax25, int paclen, struct sk_buff *skb)
{
struct sk_buff *skbn;
unsigned char *p;
int frontlen, len, fragno, ka9qfrag, first = 1;
/* Guard against a nonsense packet length (would loop/underflow below) */
if (paclen < 16) {
WARN_ON_ONCE(1);
kfree_skb(skb);
return;
}
/* skb->len includes the 1-byte PID; fragment only if payload > paclen */
if ((skb->len - 1) > paclen) {
if (*skb->data == AX25_P_TEXT) {
skb_pull(skb, 1); /* skip PID */
ka9qfrag = 0;
} else {
paclen -= 2; /* Allow for fragment control info */
ka9qfrag = 1;
}
/* Fragment counter counts down; exact multiples need one fewer */
fragno = skb->len / paclen;
if (skb->len % paclen == 0) fragno--;
frontlen = skb_headroom(skb); /* Address space + CTRL */
while (skb->len > 0) {
/* Lock spans alloc + owner assignment so fragments stay consistent */
spin_lock_bh(&ax25_frag_lock);
if ((skbn = alloc_skb(paclen + 2 + frontlen, GFP_ATOMIC)) == NULL) {
spin_unlock_bh(&ax25_frag_lock);
printk(KERN_CRIT "AX.25: ax25_output - out of memory\n");
return;
}
if (skb->sk != NULL)
skb_set_owner_w(skbn, skb->sk);
spin_unlock_bh(&ax25_frag_lock);
/* Last fragment may be shorter than paclen */
len = (paclen > skb->len) ? skb->len : paclen;
if (ka9qfrag == 1) {
/* Reserve header room + 2 bytes of segmentation control */
skb_reserve(skbn, frontlen + 2);
skb_set_network_header(skbn,
skb_network_offset(skb));
skb_copy_from_linear_data(skb, skb_put(skbn, len), len);
p = skb_push(skbn, 2);
*p++ = AX25_P_SEGMENT;
*p = fragno--;
if (first) {
*p |= AX25_SEG_FIRST;
first = 0;
}
} else {
/* Text mode: each fragment just gets the AX25_P_TEXT PID back */
skb_reserve(skbn, frontlen + 1);
skb_set_network_header(skbn,
skb_network_offset(skb));
skb_copy_from_linear_data(skb, skb_put(skbn, len), len);
p = skb_push(skbn, 1);
*p = AX25_P_TEXT;
}
skb_pull(skb, len);
skb_queue_tail(&ax25->write_queue, skbn); /* Throw it on the queue */
}
/* Original skb fully copied into fragments; release it */
kfree_skb(skb);
} else {
skb_queue_tail(&ax25->write_queue, skb); /* Throw it on the queue */
}
/* Try to push queued frames out now, respecting the link protocol */
switch (ax25->ax25_dev->values[AX25_VALUES_PROTOCOL]) {
case AX25_PROTO_STD_SIMPLEX:
case AX25_PROTO_STD_DUPLEX:
ax25_kick(ax25);
break;
#ifdef CONFIG_AX25_DAMA_SLAVE
/*
 * A DAMA slave is _required_ to work as normal AX.25L2V2
 * if no DAMA master is available.
 */
case AX25_PROTO_DAMA_SLAVE:
if (!ax25->ax25_dev->dama.slave) ax25_kick(ax25);
break;
#endif
}
}
/*
* This procedure is passed a buffer descriptor for an iframe. It builds
* the rest of the control part of the frame and then writes it out.
*/
/*
 * Complete and transmit one I frame: prepend the control field
 * (one octet for modulo-8 operation, two for modulo-128), carrying
 * V(R), V(S) and the poll/final bit, then hand it to the transmitter.
 */
static void ax25_send_iframe(ax25_cb *ax25, struct sk_buff *skb, int poll_bit)
{
	unsigned char *ctrl;

	if (!skb)
		return;

	skb_reset_network_header(skb);

	if (ax25->modulus == AX25_MODULUS) {
		/* Standard operation: single control octet */
		ctrl = skb_push(skb, 1);
		ctrl[0] = AX25_I | (ax25->vr << 5) | (ax25->vs << 1);
		if (poll_bit)
			ctrl[0] |= AX25_PF;
	} else {
		/* Extended (mod 128) operation: two control octets */
		ctrl = skb_push(skb, 2);
		ctrl[0] = AX25_I | (ax25->vs << 1);
		ctrl[1] = ax25->vr << 1;
		if (poll_bit)
			ctrl[1] |= AX25_EPF;
	}

	/* Outgoing traffic resets the idle timer */
	ax25_start_idletimer(ax25);

	ax25_transmit_buffer(ax25, skb, AX25_COMMAND);
}
/*
 * Transmit as many queued I frames as the flow-control window allows.
 *
 * Does nothing unless the link is connected (state 3/4), the peer is not
 * reporting receiver-busy, there is something queued, and the send window
 * is not already full. Each frame is cloned for transmission and the
 * original is moved to the ack queue to await acknowledgement.
 */
void ax25_kick(ax25_cb *ax25)
{
struct sk_buff *skb, *skbn;
int last = 1;
unsigned short start, end, next;
if (ax25->state != AX25_STATE_3 && ax25->state != AX25_STATE_4)
return;
if (ax25->condition & AX25_COND_PEER_RX_BUSY)
return;
if (skb_peek(&ax25->write_queue) == NULL)
return;
/* Resume after unacked frames if any, else from the ack point */
start = (skb_peek(&ax25->ack_queue) == NULL) ? ax25->va : ax25->vs;
end = (ax25->va + ax25->window) % ax25->modulus;
/* start == end means the window is full: nothing may be sent */
if (start == end)
return;
/*
 * Transmit data until either we're out of data to send or
 * the window is full. Send a poll on the final I frame if
 * the window is filled.
 */
/*
 * Dequeue the frame and copy it.
 * Check for race with ax25_clear_queues().
 */
skb = skb_dequeue(&ax25->write_queue);
if (!skb)
return;
ax25->vs = start;
do {
if ((skbn = skb_clone(skb, GFP_ATOMIC)) == NULL) {
/* Clone failed: put the frame back and stop for now */
skb_queue_head(&ax25->write_queue, skb);
break;
}
if (skb->sk != NULL)
skb_set_owner_w(skbn, skb->sk);
next = (ax25->vs + 1) % ax25->modulus;
last = (next == end);
/*
 * Transmit the frame copy.
 * bke 960114: do not set the Poll bit on the last frame
 * in DAMA mode.
 */
switch (ax25->ax25_dev->values[AX25_VALUES_PROTOCOL]) {
case AX25_PROTO_STD_SIMPLEX:
case AX25_PROTO_STD_DUPLEX:
ax25_send_iframe(ax25, skbn, (last) ? AX25_POLLON : AX25_POLLOFF);
break;
#ifdef CONFIG_AX25_DAMA_SLAVE
case AX25_PROTO_DAMA_SLAVE:
ax25_send_iframe(ax25, skbn, AX25_POLLOFF);
break;
#endif
}
ax25->vs = next;
/*
 * Requeue the original data frame.
 */
skb_queue_tail(&ax25->ack_queue, skb);
} while (!last && (skb = skb_dequeue(&ax25->write_queue)) != NULL);
ax25->condition &= ~AX25_COND_ACK_PENDING;
/* (Re)arm T1 for the frames just sent, unless it is already running */
if (!ax25_t1timer_running(ax25)) {
ax25_stop_t3timer(ax25);
ax25_calculate_t1(ax25);
ax25_start_t1timer(ax25);
}
}
/*
 * Prepend the AX.25 address field (source, destination and any
 * digipeater path) to @skb and pass it down to the device layer.
 * Disconnects the session if the cb has lost its device; drops the
 * frame if headroom cannot be grown.
 */
void ax25_transmit_buffer(ax25_cb *ax25, struct sk_buff *skb, int type)
{
	int addrlen;
	unsigned char *addr;

	if (!ax25->ax25_dev) {
		/* Device went away underneath us: tear the link down */
		ax25_disconnect(ax25, ENETUNREACH);
		return;
	}

	addrlen = ax25_addr_size(ax25->digipeat);

	if (unlikely(skb_headroom(skb) < addrlen)) {
		/* skb_expand_head() frees the original skb on failure */
		skb = skb_expand_head(skb, addrlen);
		if (!skb) {
			printk(KERN_CRIT "AX.25: ax25_transmit_buffer - out of memory\n");
			return;
		}
	}

	addr = skb_push(skb, addrlen);
	ax25_addr_build(addr, &ax25->source_addr, &ax25->dest_addr, ax25->digipeat, type, ax25->modulus);

	ax25_queue_xmit(skb, ax25->ax25_dev->dev);
}
/*
* A small shim to dev_queue_xmit to add the KISS control byte, and do
* any packet forwarding in operation.
*/
/*
 * A small shim to dev_queue_xmit to add the KISS control byte, and do
 * any packet forwarding in operation.
 */
void ax25_queue_xmit(struct sk_buff *skb, struct net_device *dev)
{
	unsigned char *kiss;

	/* ax25_fwd_dev() walks RCU-protected device state */
	rcu_read_lock();
	skb->protocol = ax25_type_trans(skb, ax25_fwd_dev(dev));
	rcu_read_unlock();

	kiss = skb_push(skb, 1);
	*kiss = 0x00; /* KISS data-frame command byte */

	dev_queue_xmit(skb);
}
/*
 * Process an acknowledgement N(R) from the peer.
 *
 * Returns 1 if the ack advanced our state (all outstanding frames
 * acknowledged, or a partial acknowledgement), 0 if N(R) equals V(A)
 * and nothing new was acknowledged.
 */
int ax25_check_iframes_acked(ax25_cb *ax25, unsigned short nr)
{
	int advanced = 0;

	if (ax25->vs == nr) {
		/* Everything we sent has been acknowledged */
		ax25_frames_acked(ax25, nr);
		ax25_calculate_rtt(ax25);
		ax25_stop_t1timer(ax25);
		ax25_start_t3timer(ax25);
		advanced = 1;
	} else if (ax25->va != nr) {
		/* Partial ack: release acked frames, restart T1 for the rest */
		ax25_frames_acked(ax25, nr);
		ax25_calculate_t1(ax25);
		ax25_start_t1timer(ax25);
		advanced = 1;
	}

	return advanced;
}