/* Broadcom NetXtreme-C/E network driver.
 *
 * Copyright (c) 2016-2017 Broadcom Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/filter.h>
#include <net/netdev_lock.h>
#include <net/page_pool/helpers.h>
#include <linux/bnxt/hsi.h>
#include "bnxt.h"
#include "bnxt_xdp.h"

DEFINE_STATIC_KEY_FALSE(bnxt_xdp_locking_key);

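/* Fill TX BDs for an XDP buffer: one BD for the head described by
 * @mapping/@len plus one per frag carried in @xdp.  Advances the TX
 * producer and returns the first software tx_buf; ringing the TX
 * doorbell is left to the caller.
 */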
struct bnxt_sw_tx_bd *bnxt_xmit_bd(struct bnxt *bp,
				   struct bnxt_tx_ring_info *txr,
				   dma_addr_t mapping, u32 len,
				   struct xdp_buff *xdp)
{
	struct skb_shared_info *sinfo;
	struct bnxt_sw_tx_bd *tx_buf;
	struct tx_bd *txbd;
	int num_frags = 0;
	u32 flags;
	u16 prod;
	int i;

	if (xdp && xdp_buff_has_frags(xdp)) {
		sinfo = xdp_get_shared_info_from_buff(xdp);
		num_frags = sinfo->nr_frags;
	}

	/* fill up the first buffer */
	prod = txr->tx_prod;
	tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)];
	tx_buf->nr_frags = num_frags;
	if (xdp)
		tx_buf->page = virt_to_head_page(xdp->data);

	txbd = &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)];
	flags = (len << TX_BD_LEN_SHIFT) | TX_BD_CNT(num_frags + 1) |
		bnxt_lhint_arr[len >> 9];
	txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
	txbd->tx_bd_opaque = SET_TX_OPAQUE(bp, txr, prod, 1 + num_frags);
	txbd->tx_bd_haddr = cpu_to_le64(mapping);

	/* now let us fill up the frags into the next buffers */
	for (i = 0; i < num_frags; i++) {
		skb_frag_t *frag = &sinfo->frags[i];
		struct bnxt_sw_tx_bd *frag_tx_buf;
		dma_addr_t frag_mapping;
		int frag_len;

		prod = NEXT_TX(prod);
		WRITE_ONCE(txr->tx_prod, prod);

		/* fill the software ring entry and BD for this frag */
		frag_tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)];
		frag_tx_buf->page = skb_frag_page(frag);

		txbd = &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)];

		frag_len = skb_frag_size(frag);
		flags = frag_len << TX_BD_LEN_SHIFT;
		txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
		frag_mapping = page_pool_get_dma_addr(skb_frag_page(frag)) +
			       skb_frag_off(frag);
		txbd->tx_bd_haddr = cpu_to_le64(frag_mapping);

		len = frag_len;
	}

	flags &= ~TX_BD_LEN;
	txbd->tx_bd_len_flags_type = cpu_to_le32(((len) << TX_BD_LEN_SHIFT) | flags |
						 TX_BD_FLAGS_PACKET_END);
	/* Sync TX BD */
	wmb();
	prod = NEXT_TX(prod);
	WRITE_ONCE(txr->tx_prod, prod);

	return tx_buf;
}

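/* XDP_TX path: queue the received buffer on the XDP TX ring and record
 * the RX producer so the RX doorbell can be written once the TX
 * completion for this buffer is processed.
 */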
static void __bnxt_xmit_xdp(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
			    dma_addr_t mapping, u32 len, u16 rx_prod,
			    struct xdp_buff *xdp)
{
	struct bnxt_sw_tx_bd *tx_buf;

	tx_buf = bnxt_xmit_bd(bp, txr, mapping, len, xdp);
	tx_buf->rx_prod = rx_prod;
	tx_buf->action = XDP_TX;
}

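/* XDP_REDIRECT (ndo_xdp_xmit) path: the frame was DMA-mapped by this
 * driver, so save the mapping and length for unmapping when the TX
 * completion arrives.
 */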
static void __bnxt_xmit_xdp_redirect(struct bnxt *bp,
				     struct bnxt_tx_ring_info *txr,
				     dma_addr_t mapping, u32 len,
				     struct xdp_frame *xdpf)
{
	struct bnxt_sw_tx_bd *tx_buf;

	tx_buf = bnxt_xmit_bd(bp, txr, mapping, len, NULL);
	tx_buf->action = XDP_REDIRECT;
	tx_buf->xdpf = xdpf;
	dma_unmap_addr_set(tx_buf, mapping, mapping);
	dma_unmap_len_set(tx_buf, len, len);
}

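/* Reclaim completed XDP TX descriptors: unmap and free redirected
 * frames, recycle XDP_TX pages to the page pool, and write the RX
 * doorbell for the last completed XDP_TX buffer if needed.
 */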
void bnxt_tx_int_xdp(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
{
	struct bnxt_tx_ring_info *txr = bnapi->tx_ring[0];
	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
	u16 tx_hw_cons = txr->tx_hw_cons;
	bool rx_doorbell_needed = false;
	struct bnxt_sw_tx_bd *tx_buf;
	u16 tx_cons = txr->tx_cons;
	u16 last_tx_cons = tx_cons;
	int j, frags;

	if (!budget)
		return;

	while (RING_TX(bp, tx_cons) != tx_hw_cons) {
		tx_buf = &txr->tx_buf_ring[RING_TX(bp, tx_cons)];

		if (tx_buf->action == XDP_REDIRECT) {
			struct pci_dev *pdev = bp->pdev;

			dma_unmap_single(&pdev->dev,
					 dma_unmap_addr(tx_buf, mapping),
					 dma_unmap_len(tx_buf, len),
					 DMA_TO_DEVICE);
			xdp_return_frame(tx_buf->xdpf);
			tx_buf->action = 0;
			tx_buf->xdpf = NULL;
		} else if (tx_buf->action == XDP_TX) {
			tx_buf->action = 0;
			rx_doorbell_needed = true;
			last_tx_cons = tx_cons;

			frags = tx_buf->nr_frags;
			for (j = 0; j < frags; j++) {
				tx_cons = NEXT_TX(tx_cons);
				tx_buf = &txr->tx_buf_ring[RING_TX(bp, tx_cons)];
				page_pool_recycle_direct(rxr->page_pool, tx_buf->page);
			}
		} else {
			bnxt_sched_reset_txr(bp, txr, tx_cons);
			return;
		}
		tx_cons = NEXT_TX(tx_cons);
	}

	bnapi->events &= ~BNXT_TX_CMP_EVENT;
	WRITE_ONCE(txr->tx_cons, tx_cons);
	if (rx_doorbell_needed) {
		tx_buf = &txr->tx_buf_ring[RING_TX(bp, last_tx_cons)];
		bnxt_db_write(bp, &rxr->rx_db, tx_buf->rx_prod);
	}
}

bool bnxt_xdp_attached(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
{
	struct bpf_prog *xdp_prog = READ_ONCE(rxr->xdp_prog);

	return !!xdp_prog;
}

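/* Sync the received data for CPU access and initialize @xdp to describe
 * the RX page: bp->rx_offset bytes of headroom, @len bytes of data,
 * within a BNXT_RX_PAGE_SIZE buffer.
 */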
void bnxt_xdp_buff_init(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
			u16 cons, u8 *data_ptr, unsigned int len,
			struct xdp_buff *xdp)
{
	u32 buflen = BNXT_RX_PAGE_SIZE;
	struct bnxt_sw_rx_bd *rx_buf;
	struct pci_dev *pdev;
	dma_addr_t mapping;
	u32 offset;

	pdev = bp->pdev;
	rx_buf = &rxr->rx_buf_ring[cons];
	offset = bp->rx_offset;

	mapping = rx_buf->mapping - bp->rx_dma_offset;
	dma_sync_single_for_cpu(&pdev->dev, mapping + offset, len, bp->rx_dir);

	xdp_init_buff(xdp, buflen, &rxr->xdp_rxq);
	xdp_prepare_buff(xdp, data_ptr - offset, offset, len, true);
}

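/* Return all frag pages attached to @xdp to the page pool and clear the
 * frag count; used when a multi-buffer frame is dropped or cannot be
 * transmitted.
 */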
void bnxt_xdp_buff_frags_free(struct bnxt_rx_ring_info *rxr,
			      struct xdp_buff *xdp)
{
	struct skb_shared_info *shinfo;
	int i;

	if (!xdp || !xdp_buff_has_frags(xdp))
		return;
	shinfo = xdp_get_shared_info_from_buff(xdp);
	for (i = 0; i < shinfo->nr_frags; i++) {
		struct page *page = skb_frag_page(&shinfo->frags[i]);

		page_pool_recycle_direct(rxr->page_pool, page);
	}
	shinfo->nr_frags = 0;
}

/* returns the following:
 * true    - packet consumed by XDP and new buffer is allocated.
 * false   - packet should be passed to the stack.
 */
bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
		 struct xdp_buff *xdp, struct page *page, u8 **data_ptr,
		 unsigned int *len, u8 *event)
{
	struct bpf_prog *xdp_prog = READ_ONCE(rxr->xdp_prog);
	struct bnxt_tx_ring_info *txr;
	struct bnxt_sw_rx_bd *rx_buf;
	struct pci_dev *pdev;
	dma_addr_t mapping;
	u32 tx_needed = 1;
	void *orig_data;
	u32 tx_avail;
	u32 offset;
	u32 act;

	if (!xdp_prog)
		return false;

	pdev = bp->pdev;
	offset = bp->rx_offset;

	txr = rxr->bnapi->tx_ring[0];
	/* BNXT_RX_PAGE_MODE(bp) when XDP enabled */
	orig_data = xdp->data;

	act = bpf_prog_run_xdp(xdp_prog, xdp);

	tx_avail = bnxt_tx_avail(bp, txr);
	/* If the tx ring is not full, we must not update the rx producer yet
	 * because we may still be transmitting on some BDs.
	 */
	if (tx_avail != bp->tx_ring_size)
		*event &= ~BNXT_RX_EVENT;

	*len = xdp->data_end - xdp->data;
	if (orig_data != xdp->data) {
		offset = xdp->data - xdp->data_hard_start;
		*data_ptr = xdp->data_hard_start + offset;
	}

	switch (act) {
	case XDP_PASS:
		return false;

	case XDP_TX:
		rx_buf = &rxr->rx_buf_ring[cons];
		mapping = rx_buf->mapping - bp->rx_dma_offset;
		*event &= BNXT_TX_CMP_EVENT;

		if (unlikely(xdp_buff_has_frags(xdp))) {
			struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);

			tx_needed += sinfo->nr_frags;
			*event = BNXT_AGG_EVENT;
		}

		if (tx_avail < tx_needed) {
			trace_xdp_exception(bp->dev, xdp_prog, act);
			bnxt_xdp_buff_frags_free(rxr, xdp);
			bnxt_reuse_rx_data(rxr, cons, page);
			return true;
		}

		dma_sync_single_for_device(&pdev->dev, mapping + offset, *len,
					   bp->rx_dir);

		*event |= BNXT_TX_EVENT;
		__bnxt_xmit_xdp(bp, txr, mapping + offset, *len,
				NEXT_RX(rxr->rx_prod), xdp);
		bnxt_reuse_rx_data(rxr, cons, page);
		return true;
	case XDP_REDIRECT:
		/* if we are calling this here then we know that the
		 * redirect is coming from a frame received by the
		 * bnxt_en driver.
		 */

		/* if we are unable to allocate a new buffer, abort and reuse */
		if (bnxt_alloc_rx_data(bp, rxr, rxr->rx_prod, GFP_ATOMIC)) {
			trace_xdp_exception(bp->dev, xdp_prog, act);
			bnxt_xdp_buff_frags_free(rxr, xdp);
			bnxt_reuse_rx_data(rxr, cons, page);
			return true;
		}

		if (xdp_do_redirect(bp->dev, xdp, xdp_prog)) {
			trace_xdp_exception(bp->dev, xdp_prog, act);
			page_pool_recycle_direct(rxr->page_pool, page);
			return true;
		}

		*event |= BNXT_REDIRECT_EVENT;
		break;
	default:
		bpf_warn_invalid_xdp_action(bp->dev, xdp_prog, act);
		fallthrough;
	case XDP_ABORTED:
		trace_xdp_exception(bp->dev, xdp_prog, act);
		fallthrough;
	case XDP_DROP:
		bnxt_xdp_buff_frags_free(rxr, xdp);
		bnxt_reuse_rx_data(rxr, cons, page);
		break;
	}
	return true;
}

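/* .ndo_xdp_xmit handler: map each frame and queue it on the XDP TX ring
 * selected by the current CPU, then write the doorbell if XDP_XMIT_FLUSH
 * is set.  Returns the number of frames queued.
 */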
int bnxt_xdp_xmit(struct net_device *dev, int num_frames,
		  struct xdp_frame **frames, u32 flags)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bpf_prog *xdp_prog = READ_ONCE(bp->xdp_prog);
	struct pci_dev *pdev = bp->pdev;
	struct bnxt_tx_ring_info *txr;
	dma_addr_t mapping;
	int nxmit = 0;
	int ring;
	int i;

	if (!test_bit(BNXT_STATE_OPEN, &bp->state) ||
	    !bp->tx_nr_rings_xdp ||
	    !xdp_prog)
		return -EINVAL;

	ring = smp_processor_id() % bp->tx_nr_rings_xdp;
	txr = &bp->tx_ring[ring];

	if (READ_ONCE(txr->dev_state) == BNXT_DEV_STATE_CLOSING)
		return -EINVAL;

	if (static_branch_unlikely(&bnxt_xdp_locking_key))
		spin_lock(&txr->xdp_tx_lock);

	for (i = 0; i < num_frames; i++) {
		struct xdp_frame *xdp = frames[i];

		if (!bnxt_tx_avail(bp, txr))
			break;

		mapping = dma_map_single(&pdev->dev, xdp->data, xdp->len,
					 DMA_TO_DEVICE);

		if (dma_mapping_error(&pdev->dev, mapping))
			break;

		__bnxt_xmit_xdp_redirect(bp, txr, mapping, xdp->len, xdp);
		nxmit++;
	}

	if (flags & XDP_XMIT_FLUSH) {
		/* Sync BD data before updating doorbell */
		wmb();
		bnxt_db_write(bp, &txr->tx_db, txr->tx_prod);
	}

	if (static_branch_unlikely(&bnxt_xdp_locking_key))
		spin_unlock(&txr->xdp_tx_lock);

	return nxmit;
}

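/* Attach or detach an XDP program.  Validates MTU, HDS and ring
 * constraints, reserves dedicated XDP TX rings, and reopens the NIC with
 * the new ring layout if it is running.  Runs with the netdev instance
 * lock held.
 */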
static int bnxt_xdp_set(struct bnxt *bp, struct bpf_prog *prog)
{
	struct net_device *dev = bp->dev;
	int tx_xdp = 0, tx_cp, rc, tc;
	struct bpf_prog *old;

	netdev_assert_locked(dev);

	if (prog && !prog->aux->xdp_has_frags &&
	    bp->dev->mtu > BNXT_MAX_PAGE_MODE_MTU) {
		netdev_warn(dev, "MTU %d larger than %d without XDP frag support.\n",
			    bp->dev->mtu, BNXT_MAX_PAGE_MODE_MTU);
		return -EOPNOTSUPP;
	}
	if (prog && bp->flags & BNXT_FLAG_HDS) {
		netdev_warn(dev, "XDP is disallowed when HDS is enabled.\n");
		return -EOPNOTSUPP;
	}
	if (!(bp->flags & BNXT_FLAG_SHARED_RINGS)) {
		netdev_warn(dev, "ethtool rx/tx channels must be combined to support XDP.\n");
		return -EOPNOTSUPP;
	}
	if (prog)
		tx_xdp = bp->rx_nr_rings;

	tc = bp->num_tc;
	if (!tc)
		tc = 1;
	rc = bnxt_check_rings(bp, bp->tx_nr_rings_per_tc, bp->rx_nr_rings,
			      true, tc, tx_xdp);
	if (rc) {
		netdev_warn(dev, "Unable to reserve enough TX rings to support XDP.\n");
		return rc;
	}
	if (netif_running(dev))
		bnxt_close_nic(bp, true, false);

	old = xchg(&bp->xdp_prog, prog);
	if (old)
		bpf_prog_put(old);

	if (prog) {
		bnxt_set_rx_skb_mode(bp, true);
		xdp_features_set_redirect_target_locked(dev, true);
	} else {
		xdp_features_clear_redirect_target_locked(dev);
		bnxt_set_rx_skb_mode(bp, false);
	}
	bp->tx_nr_rings_xdp = tx_xdp;
	bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc + tx_xdp;
	tx_cp = bnxt_num_tx_to_cp(bp, bp->tx_nr_rings);
	bp->cp_nr_rings = max_t(int, tx_cp, bp->rx_nr_rings);
	bnxt_set_tpa_flags(bp);
	bnxt_set_ring_params(bp);

	if (netif_running(dev))
		return bnxt_open_nic(bp, true, false);

	return 0;
}

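/* .ndo_bpf entry point; only XDP_SETUP_PROG is supported. */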
int bnxt_xdp(struct net_device *dev, struct netdev_bpf *xdp)
{
	struct bnxt *bp = netdev_priv(dev);
	int rc;

	switch (xdp->command) {
	case XDP_SETUP_PROG:
		rc = bnxt_xdp_set(bp, xdp->prog);
		break;
	default:
		rc = -EINVAL;
		break;
	}
	return rc;
}

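/* Attach the frags carried in @xdp to @skb (built from the head buffer)
 * by updating the skb shared info, so a multi-buffer frame can be passed
 * up to the stack intact.
 */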
struct sk_buff *
bnxt_xdp_build_skb(struct bnxt *bp, struct sk_buff *skb, u8 num_frags,
		   struct page_pool *pool, struct xdp_buff *xdp)
{
	struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);

	if (!skb)
		return NULL;

	xdp_update_skb_shared_info(skb, num_frags,
				   sinfo->xdp_frags_size,
				   BNXT_RX_PAGE_SIZE * num_frags,
				   xdp_buff_is_frag_pfmemalloc(xdp));
	return skb;
}