// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2021, Intel Corporation. */

#include <net/xdp_sock_drv.h>

#include "stmmac.h"
#include "stmmac_xdp.h"

static int stmmac_xdp_enable_pool(struct stmmac_priv *priv,
				  struct xsk_buff_pool *pool, u16 queue)
{
	struct stmmac_channel *ch = &priv->channel[queue];
	bool need_update;
	u32 frame_size;
	int err;

	if (queue >= priv->plat->rx_queues_to_use ||
	    queue >= priv->plat->tx_queues_to_use)
		return -EINVAL;

	frame_size = xsk_pool_get_rx_frame_size(pool);
	/* XDP ZC buffers do not span multiple frames, so make sure the XSK
	 * pool buffer size can store at least a Q-in-Q frame.
	 */
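	/* That is ETH_FRAME_LEN + 2 * VLAN_HLEN = 1514 + 8 = 1522 bytes. */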
	if (frame_size < ETH_FRAME_LEN + VLAN_HLEN * 2)
		return -EOPNOTSUPP;
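
	/* Map the pool's umem for DMA once up front; RX descriptors can then
	 * be refilled straight from the pool without per-buffer mapping.
	 */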
	err = xsk_pool_dma_map(pool, priv->device, STMMAC_RX_DMA_ATTR);
	if (err) {
		netdev_err(priv->dev, "Failed to map xsk pool\n");
		return err;
	}

	need_update = netif_running(priv->dev) && stmmac_xdp_is_enabled(priv);
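
	/* Only quiesce a live XDP data path: stop NAPI and the RX/TX queues
	 * before switching this queue over to XSK pool buffers.
	 */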
	if (need_update) {
		napi_disable(&ch->rx_napi);
		napi_disable(&ch->tx_napi);
		stmmac_disable_rx_queue(priv, queue);
		stmmac_disable_tx_queue(priv, queue);
	}
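
	/* Mark the queue as XSK zero-copy; ring setup then pulls its RX
	 * buffers from the XSK pool rather than page_pool.
	 */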
	set_bit(queue, priv->af_xdp_zc_qps);

	if (need_update) {
		stmmac_enable_rx_queue(priv, queue);
		stmmac_enable_tx_queue(priv, queue);
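		/* Queues bound to an XSK pool are serviced by the combined
		 * rxtx NAPI rather than the split rx/tx NAPI pair.
		 */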
		napi_enable(&ch->rxtx_napi);
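
		/* Kick RX processing so replenishing from the XSK FILL queue
		 * starts right away.
		 */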
		err = stmmac_xsk_wakeup(priv->dev, queue, XDP_WAKEUP_RX);
		if (err)
			return err;
	}

	return 0;
}

static int stmmac_xdp_disable_pool(struct stmmac_priv *priv, u16 queue)
{
	struct stmmac_channel *ch = &priv->channel[queue];
	struct xsk_buff_pool *pool;
	bool need_update;

	if (queue >= priv->plat->rx_queues_to_use ||
	    queue >= priv->plat->tx_queues_to_use)
		return -EINVAL;

	pool = xsk_get_pool_from_qid(priv->dev, queue);
	if (!pool)
		return -EINVAL;

	need_update = netif_running(priv->dev) && stmmac_xdp_is_enabled(priv);

	if (need_update) {
		napi_disable(&ch->rxtx_napi);
		stmmac_disable_rx_queue(priv, queue);
		stmmac_disable_tx_queue(priv, queue);
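		/* Wait for any in-flight XDP processing to finish before the
		 * pool is unmapped below.
		 */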
		synchronize_rcu();
	}

	xsk_pool_dma_unmap(pool, STMMAC_RX_DMA_ATTR);

	clear_bit(queue, priv->af_xdp_zc_qps);

	if (need_update) {
		stmmac_enable_rx_queue(priv, queue);
		stmmac_enable_tx_queue(priv, queue);
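		/* With the XSK pool gone, fall back to the independent rx/tx
		 * NAPI poll functions.
		 */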
		napi_enable(&ch->rx_napi);
		napi_enable(&ch->tx_napi);
	}

	return 0;
}
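
/* ndo_bpf entry point for the XDP_SETUP_XSK_POOL command: a non-NULL pool
 * binds the XSK pool to the queue, a NULL pool unbinds it.
 */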
int stmmac_xdp_setup_pool(struct stmmac_priv *priv, struct xsk_buff_pool *pool,
			  u16 queue)
{
	return pool ? stmmac_xdp_enable_pool(priv, pool, queue) :
		      stmmac_xdp_disable_pool(priv, queue);
}

int stmmac_xdp_set_prog(struct stmmac_priv *priv, struct bpf_prog *prog,
			struct netlink_ext_ack *extack)
{
	struct net_device *dev = priv->dev;
	struct bpf_prog *old_prog;
	bool need_update;
	bool if_running;

	if_running = netif_running(dev);

	if (prog && dev->mtu > ETH_DATA_LEN) {
		/* For now, the driver doesn't support XDP with jumbo frames,
		 * so return an error.
		 */
		NL_SET_ERR_MSG_MOD(extack, "Jumbo frames not supported");
		return -EOPNOTSUPP;
	}

	if (!prog)
		xdp_features_clear_redirect_target(dev);
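
	/* A full release/open cycle is only needed when XDP goes from off to
	 * on (or back); replacing one program with another is just the
	 * xchg() below.
	 */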
	need_update = !!priv->xdp_prog != !!prog;
	if (if_running && need_update)
		stmmac_xdp_release(dev);

	old_prog = xchg(&priv->xdp_prog, prog);
	if (old_prog)
		bpf_prog_put(old_prog);

	/* Disable RX SPH (split header) for XDP operation: header splitting
	 * would break the 1:1 mapping between descriptor and XDP buffer.
	 */
	priv->sph = priv->sph_cap && !stmmac_xdp_is_enabled(priv);

	if (if_running && need_update)
		stmmac_xdp_open(dev);
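
	/* Advertise the device as an XDP_REDIRECT target only while a
	 * program is installed.
	 */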
	if (prog)
		xdp_features_set_redirect_target(dev, false);

	return 0;
}