net: Take common prefetch code structure into a function
Many device drivers use the same prefetch code structure to deal with small L1 cacheline size. Take this code into a function and call it from the drivers.

Suggested-by: Jakub Kicinski <jakub.kicinski@netronome.com>
Signed-off-by: Tariq Toukan <tariqt@mellanox.com>
Reviewed-by: Saeed Mahameed <saeedm@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 04e006b483
commit f468f21b7a
12 changed files with 39 additions and 72 deletions
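
Every driver hunk below has the same shape. As a minimal before/after sketch (using a generic frame pointer `va` rather than any one driver's variable):

    /* Before: each driver open-codes two prefetches so the first
     * 128 bytes of the frame are warmed even on 64-byte cachelines.
     */
    prefetch(va);
    #if L1_CACHE_BYTES < 128
    prefetch(va + L1_CACHE_BYTES);
    #endif

    /* After: one call to the helper this patch adds to <linux/netdevice.h>. */
    net_prefetch(va);

On CPUs whose L1 cacheline is already 128 bytes or larger, the preprocessor guard compiles away and a single prefetch suffices.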

drivers/net/ethernet/chelsio/cxgb3/sge.c
@@ -2372,10 +2372,7 @@ no_mem:
 	if (fl->use_pages) {
 		void *addr = fl->sdesc[fl->cidx].pg_chunk.va;
 
-		prefetch(addr);
-#if L1_CACHE_BYTES < 128
-		prefetch(addr + L1_CACHE_BYTES);
-#endif
+		net_prefetch(addr);
 		__refill_fl(adap, fl);
 		if (lro > 0) {
 			lro_add_page(adap, qs, fl,

drivers/net/ethernet/hisilicon/hns/hns_enet.c
@@ -557,10 +557,7 @@ static int hns_nic_poll_rx_skb(struct hns_nic_ring_data *ring_data,
 	va = (unsigned char *)desc_cb->buf + desc_cb->page_offset;
 
 	/* prefetch first cache line of first page */
-	prefetch(va);
-#if L1_CACHE_BYTES < 128
-	prefetch(va + L1_CACHE_BYTES);
-#endif
+	net_prefetch(va);
 
 	skb = *out_skb = napi_alloc_skb(&ring_data->napi,
 					HNS_RX_HEAD_SIZE);

drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -3091,10 +3091,7 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring)
 	 * lines. In such a case, single fetch would suffice to cache in the
 	 * relevant part of the header.
 	 */
-	prefetch(ring->va);
-#if L1_CACHE_BYTES < 128
-	prefetch(ring->va + L1_CACHE_BYTES);
-#endif
+	net_prefetch(ring->va);
 
 	if (!skb) {
 		ret = hns3_alloc_skb(ring, length, ring->va);

drivers/net/ethernet/intel/fm10k/fm10k_main.c
@@ -310,10 +310,7 @@ static struct sk_buff *fm10k_fetch_rx_buffer(struct fm10k_ring *rx_ring,
 				  rx_buffer->page_offset;
 
 		/* prefetch first cache line of first page */
-		prefetch(page_addr);
-#if L1_CACHE_BYTES < 128
-		prefetch((void *)((u8 *)page_addr + L1_CACHE_BYTES));
-#endif
+		net_prefetch(page_addr);
 
 		/* allocate a skb to store the frags */
 		skb = napi_alloc_skb(&rx_ring->q_vector->napi,

drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -1992,10 +1992,8 @@ static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring,
 	struct sk_buff *skb;
 
 	/* prefetch first cache line of first page */
-	prefetch(xdp->data);
-#if L1_CACHE_BYTES < 128
-	prefetch(xdp->data + L1_CACHE_BYTES);
-#endif
+	net_prefetch(xdp->data);
+
 	/* Note, we get here by enabling legacy-rx via:
 	 *
 	 *    ethtool --set-priv-flags <dev> legacy-rx on
@@ -2078,10 +2076,8 @@ static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring,
 	 * likely have a consumer accessing first few bytes of meta
 	 * data, and then actual data.
 	 */
-	prefetch(xdp->data_meta);
-#if L1_CACHE_BYTES < 128
-	prefetch(xdp->data_meta + L1_CACHE_BYTES);
-#endif
+	net_prefetch(xdp->data_meta);
+
 	/* build an skb around the page buffer */
 	skb = build_skb(xdp->data_hard_start, truesize);
 	if (unlikely(!skb))

drivers/net/ethernet/intel/iavf/iavf_txrx.c
@@ -1309,10 +1309,7 @@ static struct sk_buff *iavf_construct_skb(struct iavf_ring *rx_ring,
 		return NULL;
 	/* prefetch first cache line of first page */
 	va = page_address(rx_buffer->page) + rx_buffer->page_offset;
-	prefetch(va);
-#if L1_CACHE_BYTES < 128
-	prefetch(va + L1_CACHE_BYTES);
-#endif
+	net_prefetch(va);
 
 	/* allocate a skb to store the frags */
 	skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
@@ -1376,10 +1373,8 @@ static struct sk_buff *iavf_build_skb(struct iavf_ring *rx_ring,
 		return NULL;
 	/* prefetch first cache line of first page */
 	va = page_address(rx_buffer->page) + rx_buffer->page_offset;
-	prefetch(va);
-#if L1_CACHE_BYTES < 128
-	prefetch(va + L1_CACHE_BYTES);
-#endif
+	net_prefetch(va);
+
 	/* build an skb around the page buffer */
 	skb = build_skb(va - IAVF_SKB_PAD, truesize);
 	if (unlikely(!skb))

drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -919,10 +919,7 @@ ice_build_skb(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
 	 * likely have a consumer accessing first few bytes of meta
 	 * data, and then actual data.
 	 */
-	prefetch(xdp->data_meta);
-#if L1_CACHE_BYTES < 128
-	prefetch((void *)(xdp->data + L1_CACHE_BYTES));
-#endif
+	net_prefetch(xdp->data_meta);
 	/* build an skb around the page buffer */
 	skb = build_skb(xdp->data_hard_start, truesize);
 	if (unlikely(!skb))
@@ -964,10 +961,7 @@ ice_construct_skb(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
 	struct sk_buff *skb;
 
 	/* prefetch first cache line of first page */
-	prefetch(xdp->data);
-#if L1_CACHE_BYTES < 128
-	prefetch((void *)(xdp->data + L1_CACHE_BYTES));
-#endif /* L1_CACHE_BYTES */
+	net_prefetch(xdp->data);
 
 	/* allocate a skb to store the frags */
 	skb = __napi_alloc_skb(&rx_ring->q_vector->napi, ICE_RX_HDR_SIZE,

drivers/net/ethernet/intel/igb/igb_main.c
@@ -8047,10 +8047,7 @@ static struct sk_buff *igb_construct_skb(struct igb_ring *rx_ring,
 	struct sk_buff *skb;
 
 	/* prefetch first cache line of first page */
-	prefetch(va);
-#if L1_CACHE_BYTES < 128
-	prefetch(va + L1_CACHE_BYTES);
-#endif
+	net_prefetch(va);
 
 	/* allocate a skb to store the frags */
 	skb = napi_alloc_skb(&rx_ring->q_vector->napi, IGB_RX_HDR_LEN);
@@ -8104,10 +8101,7 @@ static struct sk_buff *igb_build_skb(struct igb_ring *rx_ring,
 	struct sk_buff *skb;
 
 	/* prefetch first cache line of first page */
-	prefetch(va);
-#if L1_CACHE_BYTES < 128
-	prefetch(va + L1_CACHE_BYTES);
-#endif
+	net_prefetch(va);
 
 	/* build an skb around the page buffer */
 	skb = build_skb(va - IGB_SKB_PAD, truesize);

drivers/net/ethernet/intel/igc/igc_main.c
@@ -1550,10 +1550,7 @@ static struct sk_buff *igc_build_skb(struct igc_ring *rx_ring,
 	struct sk_buff *skb;
 
 	/* prefetch first cache line of first page */
-	prefetch(va);
-#if L1_CACHE_BYTES < 128
-	prefetch(va + L1_CACHE_BYTES);
-#endif
+	net_prefetch(va);
 
 	/* build an skb around the page buffer */
 	skb = build_skb(va - IGC_SKB_PAD, truesize);
@@ -1589,10 +1586,7 @@ static struct sk_buff *igc_construct_skb(struct igc_ring *rx_ring,
 	struct sk_buff *skb;
 
 	/* prefetch first cache line of first page */
-	prefetch(va);
-#if L1_CACHE_BYTES < 128
-	prefetch(va + L1_CACHE_BYTES);
-#endif
+	net_prefetch(va);
 
 	/* allocate a skb to store the frags */
 	skb = napi_alloc_skb(&rx_ring->q_vector->napi, IGC_RX_HDR_LEN);

drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -2095,10 +2095,8 @@ static struct sk_buff *ixgbe_construct_skb(struct ixgbe_ring *rx_ring,
 	struct sk_buff *skb;
 
 	/* prefetch first cache line of first page */
-	prefetch(xdp->data);
-#if L1_CACHE_BYTES < 128
-	prefetch(xdp->data + L1_CACHE_BYTES);
-#endif
+	net_prefetch(xdp->data);
+
 	/* Note, we get here by enabling legacy-rx via:
 	 *
 	 *    ethtool --set-priv-flags <dev> legacy-rx on
@@ -2161,10 +2159,7 @@ static struct sk_buff *ixgbe_build_skb(struct ixgbe_ring *rx_ring,
 	 * likely have a consumer accessing first few bytes of meta
 	 * data, and then actual data.
 	 */
-	prefetch(xdp->data_meta);
-#if L1_CACHE_BYTES < 128
-	prefetch(xdp->data_meta + L1_CACHE_BYTES);
-#endif
+	net_prefetch(xdp->data_meta);
 
 	/* build an skb to around the page buffer */
 	skb = build_skb(xdp->data_hard_start, truesize);

drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -866,10 +866,8 @@ struct sk_buff *ixgbevf_construct_skb(struct ixgbevf_ring *rx_ring,
 	struct sk_buff *skb;
 
 	/* prefetch first cache line of first page */
-	prefetch(xdp->data);
-#if L1_CACHE_BYTES < 128
-	prefetch(xdp->data + L1_CACHE_BYTES);
-#endif
+	net_prefetch(xdp->data);
+
 	/* Note, we get here by enabling legacy-rx via:
 	 *
 	 *    ethtool --set-priv-flags <dev> legacy-rx on
@@ -947,10 +945,7 @@ static struct sk_buff *ixgbevf_build_skb(struct ixgbevf_ring *rx_ring,
 	 * have a consumer accessing first few bytes of meta data,
 	 * and then actual data.
 	 */
-	prefetch(xdp->data_meta);
-#if L1_CACHE_BYTES < 128
-	prefetch(xdp->data_meta + L1_CACHE_BYTES);
-#endif
+	net_prefetch(xdp->data_meta);
 
 	/* build an skb around the page buffer */
 	skb = build_skb(xdp->data_hard_start, truesize);

include/linux/netdevice.h
@@ -2193,6 +2193,22 @@ int netdev_get_num_tc(struct net_device *dev)
 	return dev->num_tc;
 }
 
+static inline void net_prefetch(void *p)
+{
+	prefetch(p);
+#if L1_CACHE_BYTES < 128
+	prefetch((u8 *)p + L1_CACHE_BYTES);
+#endif
+}
+
+static inline void net_prefetchw(void *p)
+{
+	prefetchw(p);
+#if L1_CACHE_BYTES < 128
+	prefetchw((u8 *)p + L1_CACHE_BYTES);
+#endif
+}
+
 void netdev_unbind_sb_channel(struct net_device *dev,
 			      struct net_device *sb_dev);
 int netdev_bind_sb_channel_queue(struct net_device *dev,
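
For illustration, a driver RX path would use the new helpers roughly as below. This is a sketch only: `example_ring`, its fields, and `example_rx_prefetch()` are hypothetical names invented for this example, not part of the patch.

    #include <linux/mm.h>		/* page_address() */
    #include <linux/netdevice.h>	/* net_prefetch(), net_prefetchw() */

    /* Hypothetical RX ring state, for illustration only. */
    struct example_ring {
    	struct page *rx_page;
    	unsigned int page_offset;
    	void *desc;
    };

    static void example_rx_prefetch(struct example_ring *ring)
    {
    	void *va = page_address(ring->rx_page) + ring->page_offset;

    	/* Read side: warm the first 128 bytes of the frame before
    	 * header parsing; net_prefetch() issues the second prefetch
    	 * only when L1_CACHE_BYTES < 128.
    	 */
    	net_prefetch(va);

    	/* Write side: a descriptor the driver is about to update. */
    	net_prefetchw(ring->desc);
    }

Keeping the helpers as static inlines in <linux/netdevice.h> means the cacheline-size guard lives in one place instead of being copy-pasted into every driver's hot path.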