// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019, Intel Corporation. */

#include <linux/filter.h>

#include "ice_txrx_lib.h"
#include "ice_eswitch.h"
#include "ice_lib.h"

/**
 * ice_release_rx_desc - Store the new tail and head values
 * @rx_ring: ring to bump
 * @val: new head index
 */
void ice_release_rx_desc(struct ice_rx_ring *rx_ring, u16 val)
{
	u16 prev_ntu = rx_ring->next_to_use & ~0x7;

	rx_ring->next_to_use = val;

	/* update next to alloc since we have filled the ring */
	rx_ring->next_to_alloc = val;

	/* QRX_TAIL will be updated with any tail value, but hardware ignores
	 * the lower 3 bits. This makes it so we only bump tail on meaningful
	 * boundaries. Also, this allows us to bump tail on intervals of 8 up to
	 * the budget depending on the current traffic load.
	 */
	val &= ~0x7;
	if (prev_ntu != val) {
		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch. (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64).
		 */
		wmb();
		writel(val, rx_ring->tail);
	}
}

/**
 * ice_ptype_to_htype - get a hash type
 * @ptype: the ptype value from the descriptor
 *
 * Returns appropriate hash type (such as PKT_HASH_TYPE_L2/L3/L4) to be used by
 * skb_set_hash based on PTYPE as parsed by HW Rx pipeline and is part of
 * Rx desc.
 */
static enum pkt_hash_types ice_ptype_to_htype(u16 ptype)
{
	struct ice_rx_ptype_decoded decoded = ice_decode_rx_desc_ptype(ptype);

	if (!decoded.known)
		return PKT_HASH_TYPE_NONE;
	if (decoded.payload_layer == ICE_RX_PTYPE_PAYLOAD_LAYER_PAY4)
		return PKT_HASH_TYPE_L4;
	if (decoded.payload_layer == ICE_RX_PTYPE_PAYLOAD_LAYER_PAY3)
		return PKT_HASH_TYPE_L3;
	if (decoded.outer_ip == ICE_RX_PTYPE_OUTER_L2)
		return PKT_HASH_TYPE_L2;

	return PKT_HASH_TYPE_NONE;
}

/**
 * ice_rx_hash - set the hash value in the skb
 * @rx_ring: descriptor ring
 * @rx_desc: specific descriptor
 * @skb: pointer to current skb
 * @rx_ptype: the ptype value from the descriptor
 */
static void
ice_rx_hash(struct ice_rx_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc,
	    struct sk_buff *skb, u16 rx_ptype)
{
	struct ice_32b_rx_flex_desc_nic *nic_mdid;
	u32 hash;

	if (!(rx_ring->netdev->features & NETIF_F_RXHASH))
		return;

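	/* the RSS hash is only reported when the NIC flexible descriptor
	 * metadata format (ICE_RXDID_FLEX_NIC) is in use
	 */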
	if (rx_desc->wb.rxdid != ICE_RXDID_FLEX_NIC)
		return;

	nic_mdid = (struct ice_32b_rx_flex_desc_nic *)rx_desc;
	hash = le32_to_cpu(nic_mdid->rss_hash);
	skb_set_hash(skb, hash, ice_ptype_to_htype(rx_ptype));
}

/**
 * ice_rx_csum - Indicate in skb if checksum is good
 * @ring: the ring we care about
 * @skb: skb currently being received and modified
 * @rx_desc: the receive descriptor
 * @ptype: the packet type decoded by hardware
 *
 * skb->protocol must be set before this function is called
 */
static void
ice_rx_csum(struct ice_rx_ring *ring, struct sk_buff *skb,
	    union ice_32b_rx_flex_desc *rx_desc, u16 ptype)
{
	struct ice_rx_ptype_decoded decoded;
	u16 rx_status0, rx_status1;
	bool ipv4, ipv6;

	rx_status0 = le16_to_cpu(rx_desc->wb.status_error0);
	rx_status1 = le16_to_cpu(rx_desc->wb.status_error1);

	decoded = ice_decode_rx_desc_ptype(ptype);

	/* Start with CHECKSUM_NONE and by default csum_level = 0 */
	skb->ip_summed = CHECKSUM_NONE;
	skb_checksum_none_assert(skb);

	/* check if Rx checksum is enabled */
	if (!(ring->netdev->features & NETIF_F_RXCSUM))
		return;

	/* check if HW has decoded the packet and checksum */
	if (!(rx_status0 & BIT(ICE_RX_FLEX_DESC_STATUS0_L3L4P_S)))
		return;

	if (!(decoded.known && decoded.outer_ip))
		return;

	ipv4 = (decoded.outer_ip == ICE_RX_PTYPE_OUTER_IP) &&
	       (decoded.outer_ip_ver == ICE_RX_PTYPE_OUTER_IPV4);
	ipv6 = (decoded.outer_ip == ICE_RX_PTYPE_OUTER_IP) &&
	       (decoded.outer_ip_ver == ICE_RX_PTYPE_OUTER_IPV6);

	if (ipv4 && (rx_status0 & (BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_IPE_S) |
				   BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S))))
		goto checksum_fail;

	if (ipv6 && (rx_status0 & (BIT(ICE_RX_FLEX_DESC_STATUS0_IPV6EXADD_S))))
		goto checksum_fail;

	/* check for L4 errors and handle packets that were not able to be
	 * checksummed due to arrival speed
	 */
	if (rx_status0 & BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_L4E_S))
		goto checksum_fail;

	/* check for outer UDP checksum error in tunneled packets */
	if ((rx_status1 & BIT(ICE_RX_FLEX_DESC_STATUS1_NAT_S)) &&
	    (rx_status0 & BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_EUDPE_S)))
		goto checksum_fail;

	/* If there is an outer header present that might contain a checksum
	 * we need to bump the checksum level by 1 to reflect the fact that
	 * we are indicating we validated the inner checksum.
	 */
	if (decoded.tunnel_type >= ICE_RX_PTYPE_TUNNEL_IP_GRENAT)
		skb->csum_level = 1;

	/* Only report checksum unnecessary for TCP, UDP, or SCTP */
	switch (decoded.inner_prot) {
	case ICE_RX_PTYPE_INNER_PROT_TCP:
	case ICE_RX_PTYPE_INNER_PROT_UDP:
	case ICE_RX_PTYPE_INNER_PROT_SCTP:
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		break;
	default:
		break;
	}
	return;

checksum_fail:
	ring->vsi->back->hw_csum_rx_error++;
}

/**
 * ice_process_skb_fields - Populate skb header fields from Rx descriptor
 * @rx_ring: Rx descriptor ring packet is being transacted on
 * @rx_desc: pointer to the EOP Rx descriptor
 * @skb: pointer to current skb being populated
 * @ptype: the packet type decoded by hardware
 *
 * This function checks the ring, descriptor, and packet information in
 * order to populate the hash, checksum, VLAN, protocol, and
 * other fields within the skb.
 */
void
ice_process_skb_fields(struct ice_rx_ring *rx_ring,
		       union ice_32b_rx_flex_desc *rx_desc,
		       struct sk_buff *skb, u16 ptype)
{
	ice_rx_hash(rx_ring, rx_desc, skb, ptype);

	/* modifies the skb - consumes the enet header */
	skb->protocol = eth_type_trans(skb, rx_ring->netdev);

	ice_rx_csum(rx_ring, skb, rx_desc, ptype);

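	/* the PHY reports only 40 bits of the Rx timestamp in the descriptor;
	 * ice_ptp_rx_hwtstamp() extends it to a full 64-bit nanosecond value
	 * using a periodically cached copy of the PHC time
	 */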
	if (rx_ring->ptp_rx)
		ice_ptp_rx_hwtstamp(rx_ring, rx_desc, skb);
}

/**
 * ice_receive_skb - Send a completed packet up the stack
 * @rx_ring: Rx ring in play
 * @skb: packet to send up
 * @vlan_tag: VLAN tag for packet
 *
 * This function sends the completed packet (via skb) up the stack using
 * gro receive functions (with/without VLAN tag)
 */
void
ice_receive_skb(struct ice_rx_ring *rx_ring, struct sk_buff *skb, u16 vlan_tag)
{
	netdev_features_t features = rx_ring->netdev->features;
	bool non_zero_vlan = !!(vlan_tag & VLAN_VID_MASK);

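	/* ice_fix_features() keeps CTAG_RX and STAG_RX mutually exclusive,
	 * so the stripped tag is reported as either 802.1Q or 802.1ad,
	 * never both
	 */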
	if ((features & NETIF_F_HW_VLAN_CTAG_RX) && non_zero_vlan)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
	else if ((features & NETIF_F_HW_VLAN_STAG_RX) && non_zero_vlan)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021AD), vlan_tag);

	napi_gro_receive(&rx_ring->q_vector->napi, skb);
}

/**
 * ice_clean_xdp_irq - Reclaim resources after transmit completes on XDP ring
 * @xdp_ring: XDP ring to clean
 */
static void ice_clean_xdp_irq(struct ice_tx_ring *xdp_ring)
{
	unsigned int total_bytes = 0, total_pkts = 0;
	u16 tx_thresh = ICE_RING_QUARTER(xdp_ring);
	u16 ntc = xdp_ring->next_to_clean;
	struct ice_tx_desc *next_dd_desc;
	u16 next_dd = xdp_ring->next_dd;
	struct ice_tx_buf *tx_buf;
	int i;

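	/* the Tx side sets the RS bit only on every tx_thresh-th descriptor
	 * (a quarter of the ring), so completion is tracked at the same
	 * granularity: if HW has not written back the DD bit for the
	 * descriptor at next_dd, no batch has finished and there is
	 * nothing to clean yet
	 */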
	next_dd_desc = ICE_TX_DESC(xdp_ring, next_dd);
	if (!(next_dd_desc->cmd_type_offset_bsz &
	      cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE)))
		return;

	for (i = 0; i < tx_thresh; i++) {
		tx_buf = &xdp_ring->tx_buf[ntc];

		total_bytes += tx_buf->bytecount;
		/* normally tx_buf->gso_segs was taken but at this point
		 * it's always 1 for us
		 */
		total_pkts++;

		page_frag_free(tx_buf->raw_buf);
		dma_unmap_single(xdp_ring->dev, dma_unmap_addr(tx_buf, dma),
				 dma_unmap_len(tx_buf, len), DMA_TO_DEVICE);
		dma_unmap_len_set(tx_buf, len, 0);
		tx_buf->raw_buf = NULL;

		ntc++;
		if (ntc >= xdp_ring->count)
			ntc = 0;
	}

	next_dd_desc->cmd_type_offset_bsz = 0;
	xdp_ring->next_dd = xdp_ring->next_dd + tx_thresh;
	if (xdp_ring->next_dd > xdp_ring->count)
		xdp_ring->next_dd = tx_thresh - 1;
	xdp_ring->next_to_clean = ntc;
	ice_update_tx_ring_stats(xdp_ring, total_pkts, total_bytes);
}

/**
 * ice_xmit_xdp_ring - submit single packet to XDP ring for transmission
 * @data: packet data pointer
 * @size: packet data size
 * @xdp_ring: XDP ring for transmission
 */
int ice_xmit_xdp_ring(void *data, u16 size, struct ice_tx_ring *xdp_ring)
{
	u16 tx_thresh = ICE_RING_QUARTER(xdp_ring);
	u16 i = xdp_ring->next_to_use;
	struct ice_tx_desc *tx_desc;
	struct ice_tx_buf *tx_buf;
	dma_addr_t dma;

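	/* cleaning is deferred until free descriptors drop below the
	 * threshold; this is safe because the XDP ring never carries skbs
	 * that would rely on interrupt-driven completion
	 */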
	if (ICE_DESC_UNUSED(xdp_ring) < tx_thresh)
		ice_clean_xdp_irq(xdp_ring);

	if (!unlikely(ICE_DESC_UNUSED(xdp_ring))) {
		xdp_ring->ring_stats->tx_stats.tx_busy++;
		return ICE_XDP_CONSUMED;
	}

	dma = dma_map_single(xdp_ring->dev, data, size, DMA_TO_DEVICE);
	if (dma_mapping_error(xdp_ring->dev, dma))
		return ICE_XDP_CONSUMED;

	tx_buf = &xdp_ring->tx_buf[i];
	tx_buf->bytecount = size;
	tx_buf->gso_segs = 1;
	tx_buf->raw_buf = data;

	/* record length, and DMA address */
	dma_unmap_len_set(tx_buf, len, size);
	dma_unmap_addr_set(tx_buf, dma, dma);

	tx_desc = ICE_TX_DESC(xdp_ring, i);
	tx_desc->buf_addr = cpu_to_le64(dma);
	tx_desc->cmd_type_offset_bsz = ice_build_ctob(ICE_TX_DESC_CMD_EOP, 0,
						      size, 0);

	xdp_ring->xdp_tx_active++;
	i++;
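	/* when the producer index wraps, request a writeback for the
	 * descriptors queued so far and restart next_rs within the first
	 * threshold window
	 */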
	if (i == xdp_ring->count) {
		i = 0;
		tx_desc = ICE_TX_DESC(xdp_ring, xdp_ring->next_rs);
		tx_desc->cmd_type_offset_bsz |=
			cpu_to_le64(ICE_TX_DESC_CMD_RS << ICE_TXD_QW1_CMD_S);
		xdp_ring->next_rs = tx_thresh - 1;
	}
	xdp_ring->next_to_use = i;

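	/* once NTU passes next_rs, ask HW for a writeback on that descriptor
	 * so DD is reported only once per tx_thresh descriptors, then
	 * advance next_rs by the threshold
	 */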
	if (i > xdp_ring->next_rs) {
		tx_desc = ICE_TX_DESC(xdp_ring, xdp_ring->next_rs);
		tx_desc->cmd_type_offset_bsz |=
			cpu_to_le64(ICE_TX_DESC_CMD_RS << ICE_TXD_QW1_CMD_S);
		xdp_ring->next_rs += tx_thresh;
	}

	return ICE_XDP_TX;
}

/**
 * ice_xmit_xdp_buff - convert an XDP buffer to an XDP frame and send it
 * @xdp: XDP buffer
 * @xdp_ring: XDP Tx ring
 *
 * Returns ICE_XDP_TX on success, ICE_XDP_CONSUMED on failure.
 */
int ice_xmit_xdp_buff(struct xdp_buff *xdp, struct ice_tx_ring *xdp_ring)
{
	struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);

	if (unlikely(!xdpf))
		return ICE_XDP_CONSUMED;

	return ice_xmit_xdp_ring(xdpf->data, xdpf->len, xdp_ring);
}

/**
 * ice_finalize_xdp_rx - Bump XDP Tx tail and/or flush redirect map
 * @xdp_ring: XDP ring
 * @xdp_res: Result of the receive batch
 *
 * This function bumps the XDP Tx tail and/or flushes the redirect map, and
 * should be called when a batch of packets has been processed in the
 * napi loop.
 */
void ice_finalize_xdp_rx(struct ice_tx_ring *xdp_ring, unsigned int xdp_res)
{
	if (xdp_res & ICE_XDP_REDIR)
		xdp_do_flush_map();

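	/* ice_xdp_locking_key is presumably enabled when XDP Tx rings are
	 * shared between CPUs (the Tx fallback case); the tail bump is then
	 * serialized with the ring's spinlock
	 */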
	if (xdp_res & ICE_XDP_TX) {
		if (static_branch_unlikely(&ice_xdp_locking_key))
			spin_lock(&xdp_ring->tx_lock);
		ice_xdp_ring_update_tail(xdp_ring);
		if (static_branch_unlikely(&ice_xdp_locking_key))
			spin_unlock(&xdp_ring->tx_lock);
	}
}