net: usb: Convert tasklet API to new bottom half workqueue mechanism

Migrate the usbnet driver from the tasklet API to the new bottom half
(BH) workqueue mechanism, replacing each tasklet call with its
workqueue equivalent: tasklet_schedule() becomes queue_work() on
system_bh_wq, tasklet_kill() becomes disable_work_sync(), and
tasklet_setup() becomes INIT_WORK(). BH work items still execute in
softirq context, so the deferral semantics are preserved while the
driver moves off the deprecated tasklet interface.

Signed-off-by: Jun Miao <jun.miao@intel.com>
Link: https://patch.msgid.link/20250618173923.950510-1-jun.miao@intel.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Author: Jun Miao <jun.miao@intel.com>
Date: 2025-06-18 13:39:23 -04:00
Committer: Jakub Kicinski <kuba@kernel.org>
commit 2c04d279e8 (parent deb21a6e5b)
2 changed files with 19 additions and 19 deletions
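
For context, the sketch below shows the general conversion pattern the
patch applies, written against a hypothetical "foo" driver (foo_dev,
foo_bh_work and the other names are illustrative, not from this patch).
system_bh_wq executes work items in softirq context, so each old
tasklet call has a direct workqueue counterpart. One behavioral
difference in the shutdown path is noted after the usbnet.c diff below.

#include <linux/workqueue.h>

struct foo_dev {
	struct work_struct bh_work;	/* was: struct tasklet_struct bh; */
};

/* was: static void foo_bh_tasklet(struct tasklet_struct *t) */
static void foo_bh_work(struct work_struct *work)
{
	/* was: struct foo_dev *fd = from_tasklet(fd, t, bh); */
	struct foo_dev *fd = from_work(fd, work, bh_work);

	/* ... bottom-half processing for fd ... */
}

static void foo_init(struct foo_dev *fd)
{
	/* was: tasklet_setup(&fd->bh, foo_bh_tasklet); */
	INIT_WORK(&fd->bh_work, foo_bh_work);
}

static void foo_kick(struct foo_dev *fd)
{
	/* was: tasklet_schedule(&fd->bh); */
	queue_work(system_bh_wq, &fd->bh_work);
}

static void foo_stop(struct foo_dev *fd)
{
	/* was: tasklet_kill(&fd->bh); also disables further queueing */
	disable_work_sync(&fd->bh_work);
}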

drivers/net/usb/usbnet.c

@@ -461,7 +461,7 @@ static enum skb_state defer_bh(struct usbnet *dev, struct sk_buff *skb,
 		__skb_queue_tail(&dev->done, skb);
 		if (dev->done.qlen == 1)
-			tasklet_schedule(&dev->bh);
+			queue_work(system_bh_wq, &dev->bh_work);
 		spin_unlock(&dev->done.lock);
 	spin_unlock_irqrestore(&list->lock, flags);
 	return old_state;
@@ -549,7 +549,7 @@ static int rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags)
 	default:
 		netif_dbg(dev, rx_err, dev->net,
 			  "rx submit, %d\n", retval);
-		tasklet_schedule (&dev->bh);
+		queue_work(system_bh_wq, &dev->bh_work);
 		break;
 	case 0:
 		__usbnet_queue_skb(&dev->rxq, skb, rx_start);
@@ -709,7 +709,7 @@ void usbnet_resume_rx(struct usbnet *dev)
 		num++;
 	}

-	tasklet_schedule(&dev->bh);
+	queue_work(system_bh_wq, &dev->bh_work);

 	netif_dbg(dev, rx_status, dev->net,
 		  "paused rx queue disabled, %d skbs requeued\n", num);
@@ -778,7 +778,7 @@ void usbnet_unlink_rx_urbs(struct usbnet *dev)
 {
 	if (netif_running(dev->net)) {
 		(void) unlink_urbs (dev, &dev->rxq);
-		tasklet_schedule(&dev->bh);
+		queue_work(system_bh_wq, &dev->bh_work);
 	}
 }
 EXPORT_SYMBOL_GPL(usbnet_unlink_rx_urbs);
@@ -861,14 +861,14 @@ int usbnet_stop (struct net_device *net)
 	/* deferred work (timer, softirq, task) must also stop */
 	dev->flags = 0;
 	timer_delete_sync(&dev->delay);
-	tasklet_kill(&dev->bh);
+	disable_work_sync(&dev->bh_work);
 	cancel_work_sync(&dev->kevent);

 	/* We have cyclic dependencies. Those calls are needed
 	 * to break a cycle. We cannot fall into the gaps because
 	 * we have a flag
 	 */
-	tasklet_kill(&dev->bh);
+	disable_work_sync(&dev->bh_work);
 	timer_delete_sync(&dev->delay);
 	cancel_work_sync(&dev->kevent);
@@ -955,7 +955,7 @@ int usbnet_open (struct net_device *net)
 	clear_bit(EVENT_RX_KILL, &dev->flags);

 	// delay posting reads until we're fully open
-	tasklet_schedule (&dev->bh);
+	queue_work(system_bh_wq, &dev->bh_work);
 	if (info->manage_power) {
 		retval = info->manage_power(dev, 1);
 		if (retval < 0) {
@@ -1123,7 +1123,7 @@ static void __handle_link_change(struct usbnet *dev)
 		 */
 	} else {
 		/* submitting URBs for reading packets */
-		tasklet_schedule(&dev->bh);
+		queue_work(system_bh_wq, &dev->bh_work);
 	}

 	/* hard_mtu or rx_urb_size may change during link change */
@@ -1198,11 +1198,11 @@ fail_halt:
 		} else {
 			clear_bit (EVENT_RX_HALT, &dev->flags);
 			if (!usbnet_going_away(dev))
-				tasklet_schedule(&dev->bh);
+				queue_work(system_bh_wq, &dev->bh_work);
 		}
 	}

-	/* tasklet could resubmit itself forever if memory is tight */
+	/* work could resubmit itself forever if memory is tight */
 	if (test_bit (EVENT_RX_MEMORY, &dev->flags)) {
 		struct urb	*urb = NULL;
 		int	resched = 1;
@@ -1224,7 +1224,7 @@ fail_halt:
 fail_lowmem:
 		if (resched)
 			if (!usbnet_going_away(dev))
-				tasklet_schedule(&dev->bh);
+				queue_work(system_bh_wq, &dev->bh_work);
 	}
 }
@@ -1325,7 +1325,7 @@ void usbnet_tx_timeout (struct net_device *net, unsigned int txqueue)
 	struct usbnet		*dev = netdev_priv(net);

 	unlink_urbs (dev, &dev->txq);
-	tasklet_schedule (&dev->bh);
+	queue_work(system_bh_wq, &dev->bh_work);
 	/* this needs to be handled individually because the generic layer
 	 * doesn't know what is sufficient and could not restore private
 	 * information if a remedy of an unconditional reset were used.
@@ -1547,7 +1547,7 @@ static inline void usb_free_skb(struct sk_buff *skb)
 /*-------------------------------------------------------------------------*/

-// tasklet (work deferred from completions, in_irq) or timer
+// work (work deferred from completions, in_irq) or timer

 static void usbnet_bh (struct timer_list *t)
 {
@@ -1601,16 +1601,16 @@ static void usbnet_bh (struct timer_list *t)
 				   "rxqlen %d --> %d\n",
 				   temp, dev->rxq.qlen);
 			if (dev->rxq.qlen < RX_QLEN(dev))
-				tasklet_schedule (&dev->bh);
+				queue_work(system_bh_wq, &dev->bh_work);
 		}
 		if (dev->txq.qlen < TX_QLEN (dev))
 			netif_wake_queue (dev->net);
 	}
 }

-static void usbnet_bh_tasklet(struct tasklet_struct *t)
+static void usbnet_bh_work(struct work_struct *work)
 {
-	struct usbnet *dev = from_tasklet(dev, t, bh);
+	struct usbnet *dev = from_work(dev, work, bh_work);

 	usbnet_bh(&dev->delay);
 }
@@ -1742,7 +1742,7 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
 	skb_queue_head_init (&dev->txq);
 	skb_queue_head_init (&dev->done);
 	skb_queue_head_init(&dev->rxq_pause);
-	tasklet_setup(&dev->bh, usbnet_bh_tasklet);
+	INIT_WORK(&dev->bh_work, usbnet_bh_work);
 	INIT_WORK (&dev->kevent, usbnet_deferred_kevent);
 	init_usb_anchor(&dev->deferred);
 	timer_setup(&dev->delay, usbnet_bh, 0);
@@ -1971,7 +1971,7 @@ int usbnet_resume (struct usb_interface *intf)
 			if (!(dev->txq.qlen >= TX_QLEN(dev)))
 				netif_tx_wake_all_queues(dev->net);

-			tasklet_schedule (&dev->bh);
+			queue_work(system_bh_wq, &dev->bh_work);
 		}
 	}
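
A remark on the usbnet_stop() hunk above (general workqueue API
background, my reading, not stated in the patch): tasklet_kill() only
waits for a scheduled or running tasklet to finish, while
disable_work_sync() additionally increments the work item's disable
count, after which queue_work() returns false and queues nothing until
a matching enable_work(). A generic re-arm pattern under that reading:

	enable_work(&dev->bh_work);		/* undo disable_work_sync() */
	queue_work(system_bh_wq, &dev->bh_work);	/* now queues again */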

include/linux/usbnet.h

@@ -58,7 +58,7 @@ struct usbnet {
 	unsigned		interrupt_count;
 	struct mutex		interrupt_mutex;
 	struct usb_anchor	deferred;
-	struct tasklet_struct	bh;
+	struct work_struct	bh_work;

 	struct work_struct	kevent;
 	unsigned long		flags;