net: ti: icssg-prueth: Add XDP support

Add native XDP support. We do not support zero copy yet.

Signed-off-by: Roger Quadros <rogerq@kernel.org>
Signed-off-by: MD Danish Anwar <danishanwar@ti.com>
Signed-off-by: Meghana Malladi <m-malladi@ti.com>
Link: https://patch.msgid.link/20250305101422.1908370-4-m-malladi@ti.com
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
This commit is contained in:
Roger Quadros 2025-03-05 15:44:22 +05:30 committed by Paolo Abeni
parent 73f7f13118
commit 62aa3246f4
3 changed files with 356 additions and 16 deletions

View file

@ -98,11 +98,20 @@ void prueth_xmit_free(struct prueth_tx_chn *tx_chn,
{ {
struct cppi5_host_desc_t *first_desc, *next_desc; struct cppi5_host_desc_t *first_desc, *next_desc;
dma_addr_t buf_dma, next_desc_dma; dma_addr_t buf_dma, next_desc_dma;
struct prueth_swdata *swdata;
struct page *page;
u32 buf_dma_len; u32 buf_dma_len;
first_desc = desc; first_desc = desc;
next_desc = first_desc; next_desc = first_desc;
swdata = cppi5_hdesc_get_swdata(desc);
if (swdata->type == PRUETH_SWDATA_PAGE) {
page = swdata->data.page;
page_pool_recycle_direct(page->pp, swdata->data.page);
goto free_desc;
}
cppi5_hdesc_get_obuf(first_desc, &buf_dma, &buf_dma_len); cppi5_hdesc_get_obuf(first_desc, &buf_dma, &buf_dma_len);
k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &buf_dma); k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &buf_dma);
@ -126,6 +135,7 @@ void prueth_xmit_free(struct prueth_tx_chn *tx_chn,
k3_cppi_desc_pool_free(tx_chn->desc_pool, next_desc); k3_cppi_desc_pool_free(tx_chn->desc_pool, next_desc);
} }
free_desc:
k3_cppi_desc_pool_free(tx_chn->desc_pool, first_desc); k3_cppi_desc_pool_free(tx_chn->desc_pool, first_desc);
} }
EXPORT_SYMBOL_GPL(prueth_xmit_free); EXPORT_SYMBOL_GPL(prueth_xmit_free);
@ -139,6 +149,7 @@ int emac_tx_complete_packets(struct prueth_emac *emac, int chn,
struct prueth_swdata *swdata; struct prueth_swdata *swdata;
struct prueth_tx_chn *tx_chn; struct prueth_tx_chn *tx_chn;
unsigned int total_bytes = 0; unsigned int total_bytes = 0;
struct xdp_frame *xdpf;
struct sk_buff *skb; struct sk_buff *skb;
dma_addr_t desc_dma; dma_addr_t desc_dma;
int res, num_tx = 0; int res, num_tx = 0;
@ -161,16 +172,28 @@ int emac_tx_complete_packets(struct prueth_emac *emac, int chn,
desc_tx = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool, desc_tx = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool,
desc_dma); desc_dma);
swdata = cppi5_hdesc_get_swdata(desc_tx); swdata = cppi5_hdesc_get_swdata(desc_tx);
prueth_xmit_free(tx_chn, desc_tx);
if (swdata->type != PRUETH_SWDATA_SKB)
continue;
skb = swdata->data.skb; switch (swdata->type) {
ndev = skb->dev; case PRUETH_SWDATA_SKB:
ndev->stats.tx_packets++; skb = swdata->data.skb;
ndev->stats.tx_bytes += skb->len; dev_sw_netstats_tx_add(skb->dev, 1, skb->len);
total_bytes += skb->len; total_bytes += skb->len;
napi_consume_skb(skb, budget); napi_consume_skb(skb, budget);
break;
case PRUETH_SWDATA_XDPF:
xdpf = swdata->data.xdpf;
dev_sw_netstats_tx_add(ndev, 1, xdpf->len);
total_bytes += xdpf->len;
xdp_return_frame(xdpf);
break;
default:
netdev_err(ndev, "tx_complete: invalid swdata type %d\n", swdata->type);
prueth_xmit_free(tx_chn, desc_tx);
ndev->stats.tx_dropped++;
continue;
}
prueth_xmit_free(tx_chn, desc_tx);
num_tx++; num_tx++;
} }
@ -529,7 +552,153 @@ void emac_rx_timestamp(struct prueth_emac *emac,
ssh->hwtstamp = ns_to_ktime(ns); ssh->hwtstamp = ns_to_ktime(ns);
} }
static int emac_rx_packet(struct prueth_emac *emac, u32 flow_id) /**
* emac_xmit_xdp_frame - transmits an XDP frame
* @emac: emac device
* @xdpf: data to transmit
* @page: page from page pool if already DMA mapped
* @q_idx: queue id
*
* Return: XDP state
*/
u32 emac_xmit_xdp_frame(struct prueth_emac *emac,
			struct xdp_frame *xdpf,
			struct page *page,
			unsigned int q_idx)
{
	struct cppi5_host_desc_t *first_desc;
	struct net_device *ndev = emac->ndev;
	struct prueth_tx_chn *tx_chn;
	dma_addr_t desc_dma, buf_dma;
	struct prueth_swdata *swdata;
	u32 *epib;
	int ret;

	if (q_idx >= PRUETH_MAX_TX_QUEUES) {
		netdev_err(ndev, "xdp tx: invalid q_id %d\n", q_idx);
		return ICSSG_XDP_CONSUMED;	/* drop */
	}

	tx_chn = &emac->tx_chns[q_idx];

	first_desc = k3_cppi_desc_pool_alloc(tx_chn->desc_pool);
	if (!first_desc) {
		netdev_dbg(ndev, "xdp tx: failed to allocate descriptor\n");
		/* Must not goto drop_free_descs here: first_desc is NULL
		 * and prueth_xmit_free() would dereference it.
		 */
		return ICSSG_XDP_CONSUMED;	/* drop */
	}

	if (page) { /* already DMA mapped by page_pool */
		buf_dma = page_pool_get_dma_addr(page);
		buf_dma += xdpf->headroom + sizeof(struct xdp_frame);
	} else { /* Map the linear buffer */
		buf_dma = dma_map_single(tx_chn->dma_dev, xdpf->data, xdpf->len, DMA_TO_DEVICE);
		if (dma_mapping_error(tx_chn->dma_dev, buf_dma)) {
			netdev_err(ndev, "xdp tx: failed to map data buffer\n");
			goto drop_free_descs;	/* drop */
		}
	}

	cppi5_hdesc_init(first_desc, CPPI5_INFO0_HDESC_EPIB_PRESENT,
			 PRUETH_NAV_PS_DATA_SIZE);
	cppi5_hdesc_set_pkttype(first_desc, 0);
	epib = first_desc->epib;
	epib[0] = 0;
	epib[1] = 0;

	/* set dst tag to indicate internal qid at the firmware which is at
	 * bit8..bit15. bit0..bit7 indicates port num for directed
	 * packets in case of switch mode operation
	 */
	cppi5_desc_set_tags_ids(&first_desc->hdr, 0, (emac->port_id | (q_idx << 8)));
	k3_udma_glue_tx_dma_to_cppi5_addr(tx_chn->tx_chn, &buf_dma);
	cppi5_hdesc_attach_buf(first_desc, buf_dma, xdpf->len, buf_dma, xdpf->len);
	swdata = cppi5_hdesc_get_swdata(first_desc);
	/* Record how the buffer is owned so the TX-completion / cleanup
	 * paths know whether to recycle the page or return the frame.
	 */
	if (page) {
		swdata->type = PRUETH_SWDATA_PAGE;
		swdata->data.page = page;
	} else {
		swdata->type = PRUETH_SWDATA_XDPF;
		swdata->data.xdpf = xdpf;
	}

	cppi5_hdesc_set_pktlen(first_desc, xdpf->len);
	desc_dma = k3_cppi_desc_pool_virt2dma(tx_chn->desc_pool, first_desc);

	ret = k3_udma_glue_push_tx_chn(tx_chn->tx_chn, first_desc, desc_dma);
	if (ret) {
		netdev_err(ndev, "xdp tx: push failed: %d\n", ret);
		goto drop_free_descs;
	}

	return ICSSG_XDP_TX;

drop_free_descs:
	/* frees the descriptor and, for the non-page case, unmaps the buffer */
	prueth_xmit_free(tx_chn, first_desc);
	return ICSSG_XDP_CONSUMED;
}
EXPORT_SYMBOL_GPL(emac_xmit_xdp_frame);
/**
 * emac_run_xdp - run an XDP program on a received buffer
 * @emac: emac device
 * @xdp: XDP buffer containing the frame
 * @page: page with RX data if already DMA mapped
 * @len: Rx descriptor packet length
 *
 * Return: XDP state (ICSSG_XDP_PASS / ICSSG_XDP_TX / ICSSG_XDP_REDIR /
 * ICSSG_XDP_CONSUMED). On ICSSG_XDP_CONSUMED the page has already been
 * recycled back to the page pool.
 */
static u32 emac_run_xdp(struct prueth_emac *emac, struct xdp_buff *xdp,
			struct page *page, u32 *len)
{
	struct net_device *ndev = emac->ndev;
	struct bpf_prog *xdp_prog;
	struct xdp_frame *xdpf;
	u32 pkt_len = *len;
	u32 act, result;
	int q_idx, err;

	/* Program pointer is published by WRITE_ONCE() in the setup path */
	xdp_prog = READ_ONCE(emac->xdp_prog);
	act = bpf_prog_run_xdp(xdp_prog, xdp);
	switch (act) {
	case XDP_PASS:
		/* Caller builds an skb from the (possibly modified) buffer */
		return ICSSG_XDP_PASS;
	case XDP_TX:
		/* Send packet to TX ring for immediate transmission */
		xdpf = xdp_convert_buff_to_frame(xdp);
		if (unlikely(!xdpf)) {
			ndev->stats.tx_dropped++;
			goto drop;
		}
		/* Spread XDP_TX traffic over TX channels by current CPU */
		q_idx = smp_processor_id() % emac->tx_ch_num;
		result = emac_xmit_xdp_frame(emac, xdpf, page, q_idx);
		if (result == ICSSG_XDP_CONSUMED)
			goto drop;
		/* NOTE(review): xdpf may be completed (and freed) by the TX
		 * path before this length read — confirm this is safe.
		 */
		dev_sw_netstats_rx_add(ndev, xdpf->len);
		return result;
	case XDP_REDIRECT:
		err = xdp_do_redirect(emac->ndev, xdp, xdp_prog);
		if (err)
			goto drop;
		dev_sw_netstats_rx_add(ndev, pkt_len);
		/* Caller must batch a later xdp_do_flush() when it sees REDIR */
		return ICSSG_XDP_REDIR;
	default:
		bpf_warn_invalid_xdp_action(emac->ndev, xdp_prog, act);
		fallthrough;
	case XDP_ABORTED:
drop:
		trace_xdp_exception(emac->ndev, xdp_prog, act);
		fallthrough; /* handle aborts by dropping packet */
	case XDP_DROP:
		/* Drop: count it and give the page straight back to the pool */
		ndev->stats.rx_dropped++;
		page_pool_recycle_direct(emac->rx_chns.pg_pool, page);
		return ICSSG_XDP_CONSUMED;
	}
}
static int emac_rx_packet(struct prueth_emac *emac, u32 flow_id, u32 *xdp_state)
{ {
struct prueth_rx_chn *rx_chn = &emac->rx_chns; struct prueth_rx_chn *rx_chn = &emac->rx_chns;
u32 buf_dma_len, pkt_len, port_id = 0; u32 buf_dma_len, pkt_len, port_id = 0;
@ -540,10 +709,12 @@ static int emac_rx_packet(struct prueth_emac *emac, u32 flow_id)
struct page *page, *new_page; struct page *page, *new_page;
struct page_pool *pool; struct page_pool *pool;
struct sk_buff *skb; struct sk_buff *skb;
struct xdp_buff xdp;
u32 *psdata; u32 *psdata;
void *pa; void *pa;
int ret; int ret;
*xdp_state = 0;
pool = rx_chn->pg_pool; pool = rx_chn->pg_pool;
ret = k3_udma_glue_pop_rx_chn(rx_chn->rx_chn, flow_id, &desc_dma); ret = k3_udma_glue_pop_rx_chn(rx_chn->rx_chn, flow_id, &desc_dma);
if (ret) { if (ret) {
@ -584,9 +755,21 @@ static int emac_rx_packet(struct prueth_emac *emac, u32 flow_id)
goto requeue; goto requeue;
} }
/* prepare skb and send to n/w stack */
pa = page_address(page); pa = page_address(page);
skb = napi_build_skb(pa, PAGE_SIZE); if (emac->xdp_prog) {
xdp_init_buff(&xdp, PAGE_SIZE, &rx_chn->xdp_rxq);
xdp_prepare_buff(&xdp, pa, PRUETH_HEADROOM, pkt_len, false);
*xdp_state = emac_run_xdp(emac, &xdp, page, &pkt_len);
if (*xdp_state == ICSSG_XDP_PASS)
skb = xdp_build_skb_from_buff(&xdp);
else
goto requeue;
} else {
/* prepare skb and send to n/w stack */
skb = napi_build_skb(pa, PAGE_SIZE);
}
if (!skb) { if (!skb) {
ndev->stats.rx_dropped++; ndev->stats.rx_dropped++;
page_pool_recycle_direct(pool, page); page_pool_recycle_direct(pool, page);
@ -849,13 +1032,23 @@ static void prueth_tx_cleanup(void *data, dma_addr_t desc_dma)
struct prueth_tx_chn *tx_chn = data; struct prueth_tx_chn *tx_chn = data;
struct cppi5_host_desc_t *desc_tx; struct cppi5_host_desc_t *desc_tx;
struct prueth_swdata *swdata; struct prueth_swdata *swdata;
struct xdp_frame *xdpf;
struct sk_buff *skb; struct sk_buff *skb;
desc_tx = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool, desc_dma); desc_tx = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool, desc_dma);
swdata = cppi5_hdesc_get_swdata(desc_tx); swdata = cppi5_hdesc_get_swdata(desc_tx);
if (swdata->type == PRUETH_SWDATA_SKB) {
switch (swdata->type) {
case PRUETH_SWDATA_SKB:
skb = swdata->data.skb; skb = swdata->data.skb;
dev_kfree_skb_any(skb); dev_kfree_skb_any(skb);
break;
case PRUETH_SWDATA_XDPF:
xdpf = swdata->data.xdpf;
xdp_return_frame(xdpf);
break;
default:
break;
} }
prueth_xmit_free(tx_chn, desc_tx); prueth_xmit_free(tx_chn, desc_tx);
@ -892,15 +1085,18 @@ int icssg_napi_rx_poll(struct napi_struct *napi_rx, int budget)
PRUETH_RX_FLOW_DATA_SR1 : PRUETH_RX_FLOW_DATA; PRUETH_RX_FLOW_DATA_SR1 : PRUETH_RX_FLOW_DATA;
int flow = emac->is_sr1 ? int flow = emac->is_sr1 ?
PRUETH_MAX_RX_FLOWS_SR1 : PRUETH_MAX_RX_FLOWS; PRUETH_MAX_RX_FLOWS_SR1 : PRUETH_MAX_RX_FLOWS;
int xdp_state_or = 0;
int num_rx = 0; int num_rx = 0;
int cur_budget; int cur_budget;
u32 xdp_state;
int ret; int ret;
while (flow--) { while (flow--) {
cur_budget = budget - num_rx; cur_budget = budget - num_rx;
while (cur_budget--) { while (cur_budget--) {
ret = emac_rx_packet(emac, flow); ret = emac_rx_packet(emac, flow, &xdp_state);
xdp_state_or |= xdp_state;
if (ret) if (ret)
break; break;
num_rx++; num_rx++;
@ -910,6 +1106,9 @@ int icssg_napi_rx_poll(struct napi_struct *napi_rx, int budget)
break; break;
} }
if (xdp_state_or & ICSSG_XDP_REDIR)
xdp_do_flush();
if (num_rx < budget && napi_complete_done(napi_rx, num_rx)) { if (num_rx < budget && napi_complete_done(napi_rx, num_rx)) {
if (unlikely(emac->rx_pace_timeout_ns)) { if (unlikely(emac->rx_pace_timeout_ns)) {
hrtimer_start(&emac->rx_hrtimer, hrtimer_start(&emac->rx_hrtimer,

View file

@ -559,6 +559,33 @@ const struct icss_iep_clockops prueth_iep_clockops = {
.perout_enable = prueth_perout_enable, .perout_enable = prueth_perout_enable,
}; };
/* Register the RX queue's XDP info and attach the page-pool memory model.
 * On memory-model registration failure the rxq registration is rolled back.
 */
static int prueth_create_xdp_rxqs(struct prueth_emac *emac)
{
	struct xdp_rxq_info *rxq = &emac->rx_chns.xdp_rxq;
	int err;

	err = xdp_rxq_info_reg(rxq, emac->ndev, 0, emac->napi_rx.napi_id);
	if (err)
		return err;

	err = xdp_rxq_info_reg_mem_model(rxq, MEM_TYPE_PAGE_POOL,
					 emac->rx_chns.pg_pool);
	if (err)
		xdp_rxq_info_unreg(rxq);

	return err;
}
/* Tear down the RX queue's XDP registration; no-op if never registered. */
static void prueth_destroy_xdp_rxqs(struct prueth_emac *emac)
{
	struct xdp_rxq_info *rxq = &emac->rx_chns.xdp_rxq;

	if (xdp_rxq_info_is_reg(rxq))
		xdp_rxq_info_unreg(rxq);
}
static int icssg_prueth_add_mcast(struct net_device *ndev, const u8 *addr) static int icssg_prueth_add_mcast(struct net_device *ndev, const u8 *addr)
{ {
struct net_device *real_dev; struct net_device *real_dev;
@ -780,10 +807,14 @@ static int emac_ndo_open(struct net_device *ndev)
if (ret) if (ret)
goto free_tx_ts_irq; goto free_tx_ts_irq;
ret = k3_udma_glue_enable_rx_chn(emac->rx_chns.rx_chn); ret = prueth_create_xdp_rxqs(emac);
if (ret) if (ret)
goto reset_rx_chn; goto reset_rx_chn;
ret = k3_udma_glue_enable_rx_chn(emac->rx_chns.rx_chn);
if (ret)
goto destroy_xdp_rxqs;
for (i = 0; i < emac->tx_ch_num; i++) { for (i = 0; i < emac->tx_ch_num; i++) {
ret = k3_udma_glue_enable_tx_chn(emac->tx_chns[i].tx_chn); ret = k3_udma_glue_enable_tx_chn(emac->tx_chns[i].tx_chn);
if (ret) if (ret)
@ -809,6 +840,8 @@ reset_tx_chan:
* any SKB for completion. So set false to free_skb * any SKB for completion. So set false to free_skb
*/ */
prueth_reset_tx_chan(emac, i, false); prueth_reset_tx_chan(emac, i, false);
destroy_xdp_rxqs:
prueth_destroy_xdp_rxqs(emac);
reset_rx_chn: reset_rx_chn:
prueth_reset_rx_chan(&emac->rx_chns, max_rx_flows, false); prueth_reset_rx_chan(&emac->rx_chns, max_rx_flows, false);
free_tx_ts_irq: free_tx_ts_irq:
@ -879,7 +912,7 @@ static int emac_ndo_stop(struct net_device *ndev)
k3_udma_glue_tdown_rx_chn(emac->rx_chns.rx_chn, true); k3_udma_glue_tdown_rx_chn(emac->rx_chns.rx_chn, true);
prueth_reset_rx_chan(&emac->rx_chns, max_rx_flows, true); prueth_reset_rx_chan(&emac->rx_chns, max_rx_flows, true);
prueth_destroy_xdp_rxqs(emac);
napi_disable(&emac->napi_rx); napi_disable(&emac->napi_rx);
hrtimer_cancel(&emac->rx_hrtimer); hrtimer_cancel(&emac->rx_hrtimer);
@ -1024,6 +1057,93 @@ static int emac_ndo_vlan_rx_del_vid(struct net_device *ndev,
return 0; return 0;
} }
/**
 * emac_xdp_xmit - Implements ndo_xdp_xmit
 * @dev: netdev
 * @n: number of frames
 * @frames: array of XDP buffer pointers
 * @flags: XDP extra info
 *
 * Return: number of frames successfully sent. Failed frames
 * will be free'ed by XDP core.
 *
 * For error cases, a negative errno code is returned and no-frames
 * are transmitted (caller must handle freeing frames).
 **/
static int emac_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
			 u32 flags)
{
	struct prueth_emac *emac = netdev_priv(dev);
	struct net_device *ndev = emac->ndev;
	struct xdp_frame *xdpf;
	unsigned int q_idx;
	int nxmit = 0;
	u32 err;
	int i;

	/* Validate the request before doing any per-call work */
	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
		return -EINVAL;

	/* Spread xmit traffic over the TX channels by current CPU */
	q_idx = smp_processor_id() % emac->tx_ch_num;

	for (i = 0; i < n; i++) {
		xdpf = frames[i];
		err = emac_xmit_xdp_frame(emac, xdpf, NULL, q_idx);
		if (err != ICSSG_XDP_TX) {
			/* Stop at first failure; XDP core frees the remainder */
			ndev->stats.tx_dropped++;
			break;
		}
		nxmit++;
	}

	return nxmit;
}
/**
 * emac_xdp_setup - add/remove an XDP program
 * @emac: emac device
 * @bpf: XDP program
 *
 * Return: Always 0 (Success)
 **/
static int emac_xdp_setup(struct prueth_emac *emac, struct netdev_bpf *bpf)
{
	struct bpf_prog *prog = bpf->prog;
	xdp_features_t val;

	/* NOTE(review): the feature flags are static but re-asserted on
	 * every setup call; this could likely move to netdev init — confirm.
	 */
	val = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
	      NETDEV_XDP_ACT_NDO_XMIT;
	xdp_set_features_flag(emac->ndev, val);

	/* Nothing to do: no program installed and none requested */
	if (!emac->xdpi.prog && !prog)
		return 0;

	/* Publish the new program for the RX path, which reads it via
	 * READ_ONCE(). NOTE(review): no explicit synchronization with
	 * in-flight NAPI polls is visible here before the old program is
	 * released by the attachment helper — verify this is safe.
	 */
	WRITE_ONCE(emac->xdp_prog, prog);

	xdp_attachment_setup(&emac->xdpi, bpf);

	return 0;
}
/**
 * emac_ndo_bpf - implements ndo_bpf for icssg_prueth
 * @ndev: network adapter device
 * @bpf: XDP program
 *
 * Return: 0 on success, error code on failure.
 **/
static int emac_ndo_bpf(struct net_device *ndev, struct netdev_bpf *bpf)
{
	struct prueth_emac *emac = netdev_priv(ndev);

	/* Only program attach/detach is supported */
	if (bpf->command == XDP_SETUP_PROG)
		return emac_xdp_setup(emac, bpf);

	return -EINVAL;
}
static const struct net_device_ops emac_netdev_ops = { static const struct net_device_ops emac_netdev_ops = {
.ndo_open = emac_ndo_open, .ndo_open = emac_ndo_open,
.ndo_stop = emac_ndo_stop, .ndo_stop = emac_ndo_stop,
@ -1038,6 +1158,8 @@ static const struct net_device_ops emac_netdev_ops = {
.ndo_fix_features = emac_ndo_fix_features, .ndo_fix_features = emac_ndo_fix_features,
.ndo_vlan_rx_add_vid = emac_ndo_vlan_rx_add_vid, .ndo_vlan_rx_add_vid = emac_ndo_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = emac_ndo_vlan_rx_del_vid, .ndo_vlan_rx_kill_vid = emac_ndo_vlan_rx_del_vid,
.ndo_bpf = emac_ndo_bpf,
.ndo_xdp_xmit = emac_xdp_xmit,
}; };
static int prueth_netdev_init(struct prueth *prueth, static int prueth_netdev_init(struct prueth *prueth,
@ -1066,6 +1188,8 @@ static int prueth_netdev_init(struct prueth *prueth,
emac->prueth = prueth; emac->prueth = prueth;
emac->ndev = ndev; emac->ndev = ndev;
emac->port_id = port; emac->port_id = port;
emac->xdp_prog = NULL;
emac->ndev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
emac->cmd_wq = create_singlethread_workqueue("icssg_cmd_wq"); emac->cmd_wq = create_singlethread_workqueue("icssg_cmd_wq");
if (!emac->cmd_wq) { if (!emac->cmd_wq) {
ret = -ENOMEM; ret = -ENOMEM;

View file

@ -8,6 +8,8 @@
#ifndef __NET_TI_ICSSG_PRUETH_H #ifndef __NET_TI_ICSSG_PRUETH_H
#define __NET_TI_ICSSG_PRUETH_H #define __NET_TI_ICSSG_PRUETH_H
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/etherdevice.h> #include <linux/etherdevice.h>
#include <linux/genalloc.h> #include <linux/genalloc.h>
#include <linux/if_vlan.h> #include <linux/if_vlan.h>
@ -134,6 +136,7 @@ struct prueth_rx_chn {
unsigned int irq[ICSSG_MAX_RFLOWS]; /* separate irq per flow */ unsigned int irq[ICSSG_MAX_RFLOWS]; /* separate irq per flow */
char name[32]; char name[32];
struct page_pool *pg_pool; struct page_pool *pg_pool;
struct xdp_rxq_info xdp_rxq;
}; };
enum prueth_swdata_type { enum prueth_swdata_type {
@ -141,6 +144,7 @@ enum prueth_swdata_type {
PRUETH_SWDATA_SKB, PRUETH_SWDATA_SKB,
PRUETH_SWDATA_PAGE, PRUETH_SWDATA_PAGE,
PRUETH_SWDATA_CMD, PRUETH_SWDATA_CMD,
PRUETH_SWDATA_XDPF,
}; };
struct prueth_swdata { struct prueth_swdata {
@ -149,6 +153,7 @@ struct prueth_swdata {
struct sk_buff *skb; struct sk_buff *skb;
struct page *page; struct page *page;
u32 cmd; u32 cmd;
struct xdp_frame *xdpf;
} data; } data;
}; };
@ -159,6 +164,12 @@ struct prueth_swdata {
#define PRUETH_MAX_TX_TS_REQUESTS 50 /* Max simultaneous TX_TS requests */ #define PRUETH_MAX_TX_TS_REQUESTS 50 /* Max simultaneous TX_TS requests */
/* XDP BPF state */
#define ICSSG_XDP_PASS 0
#define ICSSG_XDP_CONSUMED BIT(0)
#define ICSSG_XDP_TX BIT(1)
#define ICSSG_XDP_REDIR BIT(2)
/* Minimum coalesce time in usecs for both Tx and Rx */ /* Minimum coalesce time in usecs for both Tx and Rx */
#define ICSSG_MIN_COALESCE_USECS 20 #define ICSSG_MIN_COALESCE_USECS 20
@ -227,6 +238,8 @@ struct prueth_emac {
unsigned long rx_pace_timeout_ns; unsigned long rx_pace_timeout_ns;
struct netdev_hw_addr_list vlan_mcast_list[MAX_VLAN_ID]; struct netdev_hw_addr_list vlan_mcast_list[MAX_VLAN_ID];
struct bpf_prog *xdp_prog;
struct xdp_attachment_info xdpi;
}; };
/* The buf includes headroom compatible with both skb and xdpf */ /* The buf includes headroom compatible with both skb and xdpf */
@ -465,5 +478,9 @@ void prueth_put_cores(struct prueth *prueth, int slice);
/* Revision specific helper */ /* Revision specific helper */
u64 icssg_ts_to_ns(u32 hi_sw, u32 hi, u32 lo, u32 cycle_time_ns); u64 icssg_ts_to_ns(u32 hi_sw, u32 hi, u32 lo, u32 cycle_time_ns);
u32 emac_xmit_xdp_frame(struct prueth_emac *emac,
struct xdp_frame *xdpf,
struct page *page,
unsigned int q_idx);
#endif /* __NET_TI_ICSSG_PRUETH_H */ #endif /* __NET_TI_ICSSG_PRUETH_H */