enetc: Introduce basic PF and VF ENETC ethernet drivers

ENETC is a multi-port virtualized Ethernet controller supporting GbE
designs and Time-Sensitive Networking (TSN) functionality. It operates
as an SR-IOV multi-PF capable Root Complex Integrated Endpoint (RCIE):
it contains multiple physical (PF) and virtual (VF) PCIe functions,
discoverable through standard PCI Express enumeration.

Introduce basic PF and VF ENETC ethernet drivers. The PF has access to
the ENETC Port registers and resources, and performs the privileged
configuration required by the underlying VF devices. Common functionality
is controlled through so-called System Interface (SI) register blocks;
each PF and VF owns one SI. Although the SI register blocks are almost
identical, a few privileged SI-level controls are accessible only to
PFs, so a distinction is made between PF SIs (PSI) and VF SIs (VSI).
The bulk of the code, including datapath processing, basic h/w offload
support and generic PCI-related configuration, is therefore shared
between the two drivers and factored out into common source files
(i.e. enetc.c).

Major functionality included (for both drivers): MSI-X support for Rx
and Tx processing, assignment of Rx/Tx BD ring pairs to MSI-X entries,
multi-queue support, Rx S/G (Rx frame fragmentation) and jumbo frame
(up to 9600B) support, Rx paged allocation and reuse, Tx S/G support
(NETIF_F_SG), Rx and Tx checksum offload, PF MAC filtering and initial
control ring support, VLAN extraction/insertion, PF Rx VLAN CTAG
filtering, VF MAC address configuration, and VF VLAN isolation support.

Signed-off-by: Claudiu Manoil <claudiu.manoil@nxp.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
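/* Example (not part of the driver): the PSI/VSI distinction described
 * above typically surfaces in the shared code as a simple predicate on
 * the SI. A minimal sketch, assuming a hypothetical SI handle where only
 * a PF SI maps the privileged Port registers; names are illustrative,
 * not the driver's actual API.
 */
struct example_enetc_si {
	void __iomem *port;	/* non-NULL only for a PSI */
};

static inline bool example_si_is_pf(const struct example_enetc_si *si)
{
	return !!si->port;
}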
/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
/* Copyright 2017-2019 NXP */
#include <linux/timer.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/dma-mapping.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/fsl/ntmp.h>
#include <linux/if_vlan.h>
#include <linux/phylink.h>
#include <linux/dim.h>
#include <net/xdp.h>
#include "enetc_hw.h"
#include "enetc4_hw.h"
#define ENETC_MAC_MAXFRM_SIZE 9600
#define ENETC_MAX_MTU (ENETC_MAC_MAXFRM_SIZE - \
(ETH_FCS_LEN + ETH_HLEN + VLAN_HLEN))
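/* Worked example (not driver code), using the standard kernel values
 * ETH_FCS_LEN = 4, ETH_HLEN = 14 and VLAN_HLEN = 4:
 * ENETC_MAX_MTU = 9600 - (4 + 14 + 4) = 9578 bytes, i.e. the 9600-byte
 * maximum MAC frame minus the FCS, the Ethernet header and one VLAN tag.
 */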
#define ENETC_CBD_DATA_MEM_ALIGN 64
#define ENETC_MADDR_HASH_TBL_SZ 64
enum enetc_mac_addr_type {UC, MC, MADDR_TYPE};
struct enetc_mac_filter {
union {
char mac_addr[ETH_ALEN];
DECLARE_BITMAP(mac_hash_table, ENETC_MADDR_HASH_TBL_SZ);
};
int mac_addr_cnt;
};
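/* Example (not part of the driver): adding a multicast address to the
 * 64-entry hash table form of the filter. The byte-fold below is an
 * illustrative stand-in for the hardware's actual hash function.
 */
static void example_mac_ht_add(struct enetc_mac_filter *filter,
			       const u8 *addr)
{
	unsigned int hash = 0;
	int i;

	/* fold the 48-bit address into a log2(64) = 6-bit index */
	for (i = 0; i < ETH_ALEN; i++)
		hash ^= addr[i];
	hash &= ENETC_MADDR_HASH_TBL_SZ - 1;

	__set_bit(hash, filter->mac_hash_table);
	filter->mac_addr_cnt++;
}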
struct enetc_tx_swbd {
union {
struct sk_buff *skb;
struct xdp_frame *xdp_frame;
};
dma_addr_t dma;
struct page *page; /* valid only if is_xdp_tx */
u16 page_offset; /* valid only if is_xdp_tx */
u16 len;
enum dma_data_direction dir;
u8 is_dma_page:1;
u8 check_wb:1;
u8 do_twostep_tstamp:1;
u8 is_eof:1;
u8 is_xdp_tx:1;
u8 is_xdp_redirect:1;
u8 qbv_en:1;
};
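/* Example (not the header's API): is_dma_page and dir record how each
 * buffer was mapped, so Tx completion can undo the mapping correctly.
 * A minimal sketch of the cleanup those fields enable:
 */
static void example_unmap_tx_swbd(struct device *dev,
				  struct enetc_tx_swbd *tx_swbd)
{
	if (!tx_swbd->dma)
		return;

	if (tx_swbd->is_dma_page)
		dma_unmap_page(dev, tx_swbd->dma, tx_swbd->len, tx_swbd->dir);
	else
		dma_unmap_single(dev, tx_swbd->dma, tx_swbd->len,
				 tx_swbd->dir);

	tx_swbd->dma = 0;
}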
/* ENETC rev 4.1 supports large send offload (LSO), segmenting large TCP
 * and UDP transmit units into multiple Ethernet frames. To support LSO,
 * software fills in auxiliary information in the Tx BD, such as the LSO
 * header length, frame length and LSO maximum segment size. At a 1 Gbps
 * link rate, iperf3 TCP segmentation showed hardware LSO consuming far
 * fewer CPU cycles than software TSO (system + softirq CPU load dropping
 * from about 13.8% to 4.9%).
 */
struct enetc_lso_t {
bool ipv6;
bool tcp;
u8 l3_hdr_len;
u8 hdr_len; /* LSO header length */
u8 l3_start;
u16 lso_seg_size;
	int total_len; /* total data length, not including the LSO header */
};
#define ENETC_LSO_MAX_DATA_LEN SZ_256K
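/* Example (assumed helper, not this header's API; needs <linux/tcp.h>
 * for skb_tcp_all_headers()): one plausible way the LSO fields could be
 * derived from a TCP skb before being written into the Tx BD.
 */
static void example_lso_fill(struct sk_buff *skb, struct enetc_lso_t *lso)
{
	lso->l3_start = skb_network_offset(skb);
	lso->l3_hdr_len = skb_network_header_len(skb);
	lso->ipv6 = skb->protocol == htons(ETH_P_IPV6);
	lso->tcp = skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6);
	lso->hdr_len = skb_tcp_all_headers(skb);	/* L2 + L3 + L4 */
	lso->lso_seg_size = skb_shinfo(skb)->gso_size;
	lso->total_len = skb->len - lso->hdr_len;	/* payload only */
}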
#define ENETC_RX_MAXFRM_SIZE ENETC_MAC_MAXFRM_SIZE
#define ENETC_RXB_TRUESIZE 2048 /* PAGE_SIZE >> 1 */
#define ENETC_RXB_PAD NET_SKB_PAD /* add extra space if needed */
#define ENETC_RXB_DMA_SIZE \
(SKB_WITH_OVERHEAD(ENETC_RXB_TRUESIZE) - ENETC_RXB_PAD)
#define ENETC_RXB_DMA_SIZE_XDP \
(SKB_WITH_OVERHEAD(ENETC_RXB_TRUESIZE) - XDP_PACKET_HEADROOM)
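/* Worked example, assuming a 4 KiB PAGE_SIZE and a 64-bit build where
 * SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) is 320 bytes:
 * SKB_WITH_OVERHEAD(2048)  = 2048 - 320 = 1728
 * ENETC_RXB_DMA_SIZE_XDP   = 1728 - 256 (XDP_PACKET_HEADROOM) = 1472
 * 1472 < 1500, which is why the XDP Rx path needs scatter/gather to
 * carry a default-MTU frame.
 */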
struct enetc_rx_swbd {
dma_addr_t dma;
struct page *page;
u16 page_offset;
enum dma_data_direction dir;
u16 len;
};
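/* Example (illustrative, not the driver's API): each Rx buffer is one
 * ENETC_RXB_TRUESIZE half of a page, per the "Rx paged allocation and
 * reuse" scheme from the introduction. Once the stack consumes one half,
 * the software BD can be flipped to the other half instead of allocating
 * a new page.
 */
static void example_flip_rx_half(struct enetc_rx_swbd *rx_swbd)
{
	/* toggle between the two 2048-byte halves of the same page */
	rx_swbd->page_offset ^= ENETC_RXB_TRUESIZE;
}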
/* ENETC overhead: optional extension BD + 1 BD gap */
#define ENETC_TXBDS_NEEDED(val) ((val) + 2)
/* For LS1028A, max # of chained Tx BDs is 15, including head and
* extension BD.
*/
#define ENETC_MAX_SKB_FRAGS 13
/* For ENETC v4 and later versions, max # of chained Tx BDs is 63,
* including head and extension BD, but the range of MAX_SKB_FRAGS
* is 17 ~ 45, so set ENETC4_MAX_SKB_FRAGS to MAX_SKB_FRAGS.
*/
#define ENETC4_MAX_SKB_FRAGS MAX_SKB_FRAGS
#define ENETC_TXBDS_MAX_NEEDED(x) ENETC_TXBDS_NEEDED((x) + 1)
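/* Worked example for the LS1028A limits above:
 * ENETC_TXBDS_MAX_NEEDED(ENETC_MAX_SKB_FRAGS)
 *   = ENETC_TXBDS_NEEDED(13 + 1)	-- head BD + 13 fragment BDs
 *   = (13 + 1) + 2			-- + extension BD + 1 BD ring gap
 *   = 16 ring slots, of which 15 are chained BDs (the hardware limit).
 */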
struct enetc_ring_stats {
unsigned long packets;
unsigned long bytes;
unsigned long rx_alloc_errs;
unsigned long xdp_drops;
unsigned long xdp_tx;
unsigned long xdp_tx_drops;
unsigned long xdp_redirect;
unsigned long xdp_redirect_failures;
unsigned long recycles;
unsigned long recycle_failures;
unsigned long win_drop;
|
net: enetc: add support for XDP_DROP and XDP_PASS
For the RX ring, enetc uses an allocation scheme based on pages split
into two buffers, which is already very efficient in terms of preventing
reallocations / maximizing reuse, so I see no reason why I would change
that.
+--------+--------+--------+--------+--------+--------+--------+
| | | | | | | |
| half B | half B | half B | half B | half B | half B | half B |
| | | | | | | |
+--------+--------+--------+--------+--------+--------+--------+
| | | | | | | |
| half A | half A | half A | half A | half A | half A | half A | RX ring
| | | | | | | |
+--------+--------+--------+--------+--------+--------+--------+
^ ^
| |
next_to_clean next_to_alloc
next_to_use
+--------+--------+--------+--------+--------+
| | | | | |
| half B | half B | half B | half B | half B |
| | | | | |
+--------+--------+--------+--------+--------+--------+--------+
| | | | | | | |
| half B | half B | half A | half A | half A | half A | half A | RX ring
| | | | | | | |
+--------+--------+--------+--------+--------+--------+--------+
| | | ^ ^
| half A | half A | | |
| | | next_to_clean next_to_use
+--------+--------+
^
|
next_to_alloc
then when enetc_refill_rx_ring is called, whose purpose is to advance
next_to_use, it sees that it can take buffers up to next_to_alloc, and
it says "oh, hey, rx_swbd->page isn't NULL, I don't need to allocate
one!".
The only problem is that for default PAGE_SIZE values of 4096, buffer
sizes are 2048 bytes. While this is enough for normal skb allocations at
an MTU of 1500 bytes, for XDP it isn't, because the XDP headroom is 256
bytes, and including skb_shared_info and alignment, we end up being able
to make use of only 1472 bytes, which is insufficient for the default
MTU.
To solve that problem, we implement scatter/gather processing in the
driver, because we would really like to keep the existing allocation
scheme. A packet of 1500 bytes is received in a buffer of 1472 bytes and
another one of 28 bytes.
Because the headroom required by XDP is different (and much larger) than
the one required by the network stack, whenever a BPF program is added
or deleted on the port, we drain the existing RX buffers and seed new
ones with the required headroom. We also keep the required headroom in
rx_ring->buffer_offset.
The simplest way to implement XDP_PASS, where an skb must be created, is
to create an xdp_buff based on the next_to_clean RX BDs, but not clear
those BDs from the RX ring yet, just keep the original index at which
the BDs for this frame started. Then, if the verdict is XDP_PASS,
instead of converting the xdb_buff to an skb, we replay a call to
enetc_build_skb (just as in the normal enetc_clean_rx_ring case),
starting from the original BD index.
We would also like to be minimally invasive to the regular RX data path,
and not check whether there is a BPF program attached to the ring on
every packet. So we create a separate RX ring processing function for
XDP.
Because we only install/remove the BPF program while the interface is
down, we forgo the rcu_read_lock() in enetc_clean_rx_ring, since there
shouldn't be any circumstance in which we are processing packets and
there is a potentially freed BPF program attached to the RX ring.
Signed-off-by: Vladimir Oltean <vladimir.oltean@nxp.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2021-03-31 23:08:54 +03:00
};
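
To make the buffer budget from the XDP_DROP/XDP_PASS notes above concrete,
here is a standalone sketch of the arithmetic. The 320-byte tailroom figure
(skb_shared_info plus alignment) is inferred from the numbers in the text
(2048 - 256 - 1472), not read from the kernel headers.

#include <stdio.h>

#define HALF_PAGE    2048 /* half of a 4096-byte page */
#define XDP_HEADROOM  256 /* headroom required by XDP */
#define TAILROOM      320 /* skb_shared_info + alignment, inferred */

int main(void)
{
	int usable = HALF_PAGE - XDP_HEADROOM - TAILROOM;

	/* 1472 usable bytes: a 1500-byte frame spills 28 bytes into a
	 * second buffer, hence the S/G processing described above.
	 */
	printf("usable bytes per Rx buffer: %d\n", usable);
	return 0;
}
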
struct enetc_xdp_data {
	struct xdp_rxq_info rxq;
	struct bpf_prog *prog;
net: enetc: add support for XDP_TX
For reflecting packets back into the interface they came from, we create
an array of TX software BDs derived from the RX software BDs. Therefore,
we need to extend the TX software BD structure to contain most of the
stuff that's already present in the RX software BD structure, for
reasons that will become evident in a moment.
For a frame with the XDP_TX verdict, we don't reuse any buffer right
away as we do for XDP_DROP (the same page half) or XDP_PASS (the other
page half, same as the skb code path).
Because the buffer transfers ownership from the RX ring to the TX ring,
reusing any page half right away is very dangerous. So what we can do is
we can recycle the same page half as soon as TX is complete.
The code path is:
enetc_poll
-> enetc_clean_rx_ring_xdp
-> enetc_xdp_tx
-> enetc_refill_rx_ring
(time passes, another MSI interrupt is raised)
enetc_poll
-> enetc_clean_tx_ring
-> enetc_recycle_xdp_tx_buff
But that creates a problem, because there is a potentially large time
window between enetc_xdp_tx and enetc_recycle_xdp_tx_buff, a period in
which we'll have fewer and fewer RX buffers.
Basically, when the ship starts sinking, the knee-jerk reaction is to
let enetc_refill_rx_ring do what it does for the standard skb code path
(refill every 16 consumed buffers), but that turns out to be very
inefficient. The problem is that we have no rx_swbd->page at our
disposal from the enetc_reuse_page path, so enetc_refill_rx_ring would
have to call enetc_new_page for every buffer that we refill (if we
choose to refill at this early stage). Very inefficient, it only makes
the problem worse, because page allocation is an expensive process, and
CPU time is exactly what we're lacking.
Additionally, there is an even bigger problem: if we let
enetc_refill_rx_ring top up the ring's buffers again from the RX path,
remember that the buffers sent to transmission haven't disappeared
anywhere. They will be eventually sent, and processed in
enetc_clean_tx_ring, and an attempt will be made to recycle them.
But surprise, the RX ring is already full of new buffers, because we
were premature in deciding that we should refill. So not only did we take
the expensive decision of allocating new pages, but now we must also throw
away perfectly good and reusable buffers.
So what we do is we implement an elastic refill mechanism, which keeps
track of the number of in-flight XDP_TX buffer descriptors. We top up
the RX ring only up to the total ring capacity minus the number of BDs
that are in flight (because we know that those BDs will return to us
eventually).
The enetc driver manages 1 RX ring per CPU, and the default TX ring
management is the same. So we do XDP_TX towards the TX ring of the same
index, because it is affined to the same CPU. This will probably not
produce great results when we have a tc-taprio/tc-mqprio qdisc on the
interface, because in that case, the number of TX rings might be
greater, but I didn't add any checks for that yet (mostly because I
didn't know what checks to add).
It should also be noted that we need to change the DMA mapping direction
for RX buffers, since they may now be reflected into the TX ring of the
same device. We choose to use DMA_BIDIRECTIONAL instead of unmapping and
remapping as DMA_TO_DEVICE, because performance is better this way.
Signed-off-by: Vladimir Oltean <vladimir.oltean@nxp.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2021-03-31 23:08:55 +03:00
	int xdp_tx_in_flight;
};
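
As an illustration of the elastic refill mechanism described in the XDP_TX
notes above, here is a minimal standalone sketch. The field names mirror
the driver's, but the helper and the 'missing' counter are illustrative
assumptions, not the driver's literal code.

/* Refill only up to ring capacity minus the XDP_TX buffers still in
 * flight; those page halves come back to the RX ring on Tx completion.
 */
struct rx_ring_state {
	int bd_count;         /* total ring capacity */
	int missing;          /* BDs currently lacking a buffer */
	int xdp_tx_in_flight; /* page halves loaned to the TX ring */
};

static inline int elastic_refill_budget(const struct rx_ring_state *r)
{
	int budget = r->missing - r->xdp_tx_in_flight;

	return budget > 0 ? budget : 0;
}
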

#define ENETC_RX_RING_DEFAULT_SIZE 2048
#define ENETC_TX_RING_DEFAULT_SIZE 2048
#define ENETC_DEFAULT_TX_WORK (ENETC_TX_RING_DEFAULT_SIZE / 2)

net: enetc: split ring resource allocation from assignment
We have a few instances in the enetc driver where the ring resources
(BD ring iomem, software BD ring, software TSO headers, basically
everything except RX buffers) need to be reallocated. For example, when
RX timestamping is enabled, the RX BD format changes to an extended one
(twice as large).
Currently, this is done using a simplistic enetc_close() -> enetc_open()
procedure. But this is quite crude, since it also invokes phylink_stop()
-> phylink_start(); the link is lost, and a few seconds must pass for
autoneg to complete again.
In fact, it's also bad due to the improper (yolo) error checking. If we
fail to allocate new resources, we've already freed the old ones, so the
interface is more or less stuck.
To avoid that, we need a system where reconfiguration is possible in a
way in which resources are allocated upfront. This means that there will
be a higher memory usage temporarily, but the assignment of resources to
rings can be done when both the old and new resources are still available.
Introduce a struct enetc_bdr_resource which holds the resources for a
ring, be it RX or TX. This structure duplicates a lot of fields from
struct enetc_bdr (and access to the same fields in the ring structure
was left duplicated, to not change cache characteristics in the fast
path).
When enetc_alloc_tx_resources() runs, it returns an array of resource
elements (one per TX ring), in addition to the existing priv->tx_res.
To populate priv->tx_res with that array, one must call
enetc_assign_tx_resources(), and this also frees the old resources.
Signed-off-by: Vladimir Oltean <vladimir.oltean@nxp.com>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
2023-01-18 01:02:29 +02:00
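
A minimal standalone sketch of this allocate-before-assign pattern follows;
the names loosely mirror enetc_alloc_tx_resources() and
enetc_assign_tx_resources(), but the bodies and signatures are illustrative,
not the driver's actual code.

#include <stdlib.h>

struct res { void *bd_base; /* stand-in for the real ring resources */ };

static struct res *alloc_resources(int num_rings)
{
	return calloc(num_rings, sizeof(struct res));
}

static int reconfigure(struct res **cur_res, int num_rings)
{
	struct res *new_res = alloc_resources(num_rings);

	if (!new_res)
		return -1; /* old resources untouched, interface still works */

	/* Only now, with both sets alive, swap and free the old one. */
	free(*cur_res);
	*cur_res = new_res;
	return 0;
}
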

struct enetc_bdr_resource {
	/* Input arguments saved for teardown */
	struct device *dev; /* for DMA mapping */
	size_t bd_count;
	size_t bd_size;

	/* Resource proper */
	void *bd_base; /* points to Rx or Tx BD ring */
	dma_addr_t bd_dma_base;
	union {
		struct enetc_tx_swbd *tx_swbd;
		struct enetc_rx_swbd *rx_swbd;
	};
	char *tso_headers;
	dma_addr_t tso_headers_dma;
};

struct enetc_bdr {
	struct device *dev; /* for DMA mapping */
	struct net_device *ndev;
	void *bd_base; /* points to Rx or Tx BD ring */
	union {
		void __iomem *tpir;
		void __iomem *rcir;
	};
	u16 index;
	u16 prio;
	int bd_count; /* # of BDs */
	int next_to_use;
	int next_to_clean;
	union {
		struct enetc_tx_swbd *tx_swbd;
		struct enetc_rx_swbd *rx_swbd;
	};
	union {
		void __iomem *tcir; /* Tx */
		int next_to_alloc; /* Rx */
	};
	void __iomem *idr; /* Interrupt Detect Register pointer */
	int buffer_offset;
	struct enetc_xdp_data xdp;
	struct enetc_ring_stats stats;

	dma_addr_t bd_dma_base;
	u8 tsd_enable; /* Time specific departure */
	bool ext_en; /* enable h/w descriptor extensions */

	/* DMA buffer for TSO headers */
	char *tso_headers;
	dma_addr_t tso_headers_dma;
} ____cacheline_aligned_in_smp;

static inline void enetc_bdr_idx_inc(struct enetc_bdr *bdr, int *i)
{
	if (unlikely(++*i == bdr->bd_count))
		*i = 0;
}

static inline int enetc_bd_unused(struct enetc_bdr *bdr)
{
	if (bdr->next_to_clean > bdr->next_to_use)
		return bdr->next_to_clean - bdr->next_to_use - 1;

	return bdr->bd_count + bdr->next_to_clean - bdr->next_to_use - 1;
}

static inline int enetc_swbd_unused(struct enetc_bdr *bdr)
{
	if (bdr->next_to_clean > bdr->next_to_alloc)
		return bdr->next_to_clean - bdr->next_to_alloc - 1;

	return bdr->bd_count + bdr->next_to_clean - bdr->next_to_alloc - 1;
}

/* Control BD ring */
#define ENETC_CBDR_DEFAULT_SIZE 64

struct enetc_cbdr {
	void *bd_base; /* points to Rx or Tx BD ring */
	void __iomem *pir;
	void __iomem *cir;
	void __iomem *mr; /* mode register */

	int bd_count; /* # of BDs */
	int next_to_use;
	int next_to_clean;

	dma_addr_t bd_dma_base;
	struct device *dma_dev;
};

#define ENETC_TXBD(BDR, i) (&(((union enetc_tx_bd *)((BDR).bd_base))[i]))

static inline union enetc_rx_bd *enetc_rxbd(struct enetc_bdr *rx_ring, int i)
{
	int hw_idx = i;

	if (IS_ENABLED(CONFIG_FSL_ENETC_PTP_CLOCK) && rx_ring->ext_en)
		hw_idx = 2 * i;

	return &(((union enetc_rx_bd *)rx_ring->bd_base)[hw_idx]);
}

static inline void enetc_rxbd_next(struct enetc_bdr *rx_ring,
				   union enetc_rx_bd **old_rxbd, int *old_index)
{
	union enetc_rx_bd *new_rxbd = *old_rxbd;
	int new_index = *old_index;

	new_rxbd++;

	if (IS_ENABLED(CONFIG_FSL_ENETC_PTP_CLOCK) && rx_ring->ext_en)
		new_rxbd++;

	if (unlikely(++new_index == rx_ring->bd_count)) {
		new_rxbd = rx_ring->bd_base;
		new_index = 0;
	}

	*old_rxbd = new_rxbd;
	*old_index = new_index;
}

static inline union enetc_rx_bd *enetc_rxbd_ext(union enetc_rx_bd *rxbd)
{
	return ++rxbd;
}

struct enetc_msg_swbd {
	void *vaddr;
	dma_addr_t dma;
	int size;
};

#define ENETC_REV1 0x1

enum enetc_errata {
	ENETC_ERR_VLAN_ISOL = BIT(0),
	ENETC_ERR_UCMCSWP = BIT(1),
};

#define ENETC_SI_F_PSFP BIT(0)
#define ENETC_SI_F_QBV BIT(1)
#define ENETC_SI_F_QBU BIT(2)
net: enetc: add LSO support for i.MX95 ENETC PF
ENETC rev 4.1 supports large send offload (LSO), segmenting large TCP
and UDP transmit units into multiple Ethernet frames. To support LSO,
software needs to fill in some auxiliary information in the Tx BD, such
as the LSO header length, frame length, and LSO maximum segment size.
At 1Gbps link rate, TCP segmentation was tested using iperf3, and the
CPU performance before and after applying the patch was compared through
the top command. It can be seen that LSO saves a significant amount of
CPU cycles compared to software TSO.
Before applying the patch:
%Cpu(s): 0.1 us, 4.1 sy, 0.0 ni, 85.7 id, 0.0 wa, 0.5 hi, 9.7 si
After applying the patch:
%Cpu(s): 0.1 us, 2.3 sy, 0.0 ni, 94.5 id, 0.0 wa, 0.4 hi, 2.6 si
Signed-off-by: Wei Fang <wei.fang@nxp.com>
Reviewed-by: Frank Li <Frank.Li@nxp.com>
Reviewed-by: Claudiu Manoil <claudiu.manoil@nxp.com>
Link: https://patch.msgid.link/20241219054755.1615626-4-wei.fang@nxp.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
2024-12-19 13:47:54 +08:00
#define ENETC_SI_F_LSO BIT(3)
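
For a rough sense of what the hardware does here, a standalone sketch of
the segmentation arithmetic: the controller splits a large send unit into
MSS-sized segments and replicates the prepared header in front of each one.
The payload and MSS values below are illustrative, not taken from the driver.

#include <stdio.h>

int main(void)
{
	unsigned int payload = 64000; /* large TCP send unit, in bytes */
	unsigned int mss = 1448;      /* LSO maximum segment size */
	unsigned int segs = (payload + mss - 1) / mss; /* ceiling division */

	printf("%u-byte unit -> %u wire frames\n", payload, segs);
	return 0;
}
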
struct enetc_drvdata {
	u32 pmac_offset; /* Only valid for PSI which supports 802.1Qbu */
	u8 tx_csum:1;
	u8 max_frags;
	u64 sysclk_freq;
	const struct ethtool_ops *eth_ops;
};

struct enetc_platform_info {
	u16 revision;
	u16 dev_id;
	const struct enetc_drvdata *data;
};

struct enetc_si;

/* This structure defines some common hooks for the ENETC PSI and VSI.
 * In addition, since the VSI only uses struct enetc_si as its private
 * driver data, this structure also defines some hooks specifically for
 * the VSI. VSI-specific hooks are named in the 'vf_*()' format.
 */
struct enetc_si_ops {
	int (*get_rss_table)(struct enetc_si *si, u32 *table, int count);
	int (*set_rss_table)(struct enetc_si *si, const u32 *table, int count);
};
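
A hedged usage sketch of these hooks (the wrapper below is hypothetical,
not part of the driver): going through si->ops lets the ENETC 1.0
control-BD-ring backend and the 4.1+ NTMP backend be swapped without
touching callers.

static inline int sketch_get_rss(struct enetc_si *si, u32 *table, int count)
{
	/* Illustrative only; assumes the ops were wired up at probe time. */
	if (!si->ops || !si->ops->get_rss_table)
		return -EOPNOTSUPP;

	return si->ops->get_rss_table(si, table, count);
}
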
/* PCI IEP device data */
struct enetc_si {
	struct pci_dev *pdev;
	struct enetc_hw hw;
	enum enetc_errata errata;

	struct net_device *ndev; /* back ref. */

	union {
		struct enetc_cbdr cbd_ring; /* Only ENETC 1.0 */
		struct ntmp_user ntmp_user; /* ENETC 4.1 and later */
	};

	int num_rx_rings; /* how many rings are available in the SI */
	int num_tx_rings;
	int num_fs_entries;
	int num_rss; /* number of RSS buckets */
	unsigned short pad;
	u16 revision;
	int hw_features;
	const struct enetc_drvdata *drvdata;
	const struct enetc_si_ops *ops;

	struct workqueue_struct *workqueue;
	struct work_struct rx_mode_task;
	struct dentry *debugfs_root;
};

#define ENETC_SI_ALIGN 32

static inline bool is_enetc_rev1(struct enetc_si *si)
{
	return si->pdev->revision == ENETC_REV1;
}

static inline void *enetc_si_priv(const struct enetc_si *si)
{
	return (char *)si + ALIGN(sizeof(struct enetc_si), ENETC_SI_ALIGN);
}

static inline bool enetc_si_is_pf(struct enetc_si *si)
{
	return !!(si->hw.port);
}

static inline int enetc_pf_to_port(struct pci_dev *pf_pdev)
{
	switch (pf_pdev->devfn) {
	case 0:
		return 0;
	case 1:
		return 1;
	case 2:
		return 2;
	case 6:
		return 3;
	default:
		return -1;
	}
}

#define ENETC_MAX_NUM_TXQS 8
#define ENETC_INT_NAME_MAX (IFNAMSIZ + 8)
|
|
|
|
|
|
|
|
|
|
struct enetc_int_vector {
|
|
|
|
|
void __iomem *rbier;
|
|
|
|
|
void __iomem *tbier_base;
|
2020-07-21 10:55:21 +03:00
|
|
|
|
void __iomem *ricr1;
|
enetc: Introduce basic PF and VF ENETC ethernet drivers
ENETC is a multi-port virtualized Ethernet controller supporting GbE
designs and Time-Sensitive Networking (TSN) functionality.
ENETC is operating as an SR-IOV multi-PF capable Root Complex Integrated
Endpoint (RCIE). As such, it contains multiple physical (PF) and
virtual (VF) PCIe functions, discoverable by standard PCI Express.
Introduce basic PF and VF ENETC ethernet drivers. The PF has access to
the ENETC Port registers and resources and makes the required privileged
configurations for the underlying VF devices. Common functionality is
controlled through so called System Interface (SI) register blocks, PFs
and VFs own a SI each. Though SI register blocks are almost identical,
there are a few privileged SI level controls that are accessible only to
PFs, and so the distinction is made between PF SIs (PSI) and VF SIs (VSI).
As such, the bulk of the code, including datapath processing, basic h/w
offload support and generic pci related configuration, is shared between
the 2 drivers and is factored out in common source files (i.e. enetc.c).
Major functionalities included (for both drivers):
MSI-X support for Rx and Tx processing, assignment of Rx/Tx BD ring pairs
to MSI-X entries, multi-queue support, Rx S/G (Rx frame fragmentation) and
jumbo frame (up to 9600B) support, Rx paged allocation and reuse, Tx S/G
support (NETIF_F_SG), Rx and Tx checksum offload, PF MAC filtering and
initial control ring support, VLAN extraction/ insertion, PF Rx VLAN
CTAG filtering, VF mac address config support, VF VLAN isolation support,
etc.
Signed-off-by: Claudiu Manoil <claudiu.manoil@nxp.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2019-01-22 15:29:54 +02:00
|
|
|
|
unsigned long tx_rings_map;
|
|
|
|
|
int count_tx_rings;
|
2020-07-21 10:55:21 +03:00
|
|
|
|
u32 rx_ictt;
|
2020-07-21 10:55:22 +03:00
|
|
|
|
u16 comp_cnt;
|
|
|
|
|
bool rx_dim_en, rx_napi_work;
|
|
|
|
|
struct napi_struct napi ____cacheline_aligned_in_smp;
|
|
|
|
|
struct dim rx_dim ____cacheline_aligned_in_smp;
|
enetc: Introduce basic PF and VF ENETC ethernet drivers
ENETC is a multi-port virtualized Ethernet controller supporting GbE
designs and Time-Sensitive Networking (TSN) functionality.
ENETC is operating as an SR-IOV multi-PF capable Root Complex Integrated
Endpoint (RCIE). As such, it contains multiple physical (PF) and
virtual (VF) PCIe functions, discoverable by standard PCI Express.
Introduce basic PF and VF ENETC ethernet drivers. The PF has access to
the ENETC Port registers and resources and makes the required privileged
configurations for the underlying VF devices. Common functionality is
controlled through so called System Interface (SI) register blocks, PFs
and VFs own a SI each. Though SI register blocks are almost identical,
there are a few privileged SI level controls that are accessible only to
PFs, and so the distinction is made between PF SIs (PSI) and VF SIs (VSI).
As such, the bulk of the code, including datapath processing, basic h/w
offload support and generic pci related configuration, is shared between
the 2 drivers and is factored out in common source files (i.e. enetc.c).
Major functionalities included (for both drivers):
MSI-X support for Rx and Tx processing, assignment of Rx/Tx BD ring pairs
to MSI-X entries, multi-queue support, Rx S/G (Rx frame fragmentation) and
jumbo frame (up to 9600B) support, Rx paged allocation and reuse, Tx S/G
support (NETIF_F_SG), Rx and Tx checksum offload, PF MAC filtering and
initial control ring support, VLAN extraction/ insertion, PF Rx VLAN
CTAG filtering, VF mac address config support, VF VLAN isolation support,
etc.
Signed-off-by: Claudiu Manoil <claudiu.manoil@nxp.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2019-01-22 15:29:54 +02:00
|
|
|
|
char name[ENETC_INT_NAME_MAX];
|
|
|
|
|
|
2020-07-21 10:55:20 +03:00
|
|
|
|
struct enetc_bdr rx_ring;
|
2023-09-22 10:28:47 -07:00
|
|
|
|
struct enetc_bdr tx_ring[] __counted_by(count_tx_rings);
|
2020-07-21 10:55:22 +03:00
|
|
|
|
} ____cacheline_aligned_in_smp;
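
/* A minimal sketch (not the driver's actual allocation path, though
 * enetc_alloc_msix() does something similar) of how a structure ending
 * in a __counted_by() flexible array is allocated: fold the element
 * count into the allocation size with struct_size() and set
 * count_tx_rings before the array is indexed. The function name is
 * illustrative only.
 */
static inline struct enetc_int_vector *
enetc_int_vector_alloc_sketch(int tx_rings)
{
	struct enetc_int_vector *v;

	v = kzalloc(struct_size(v, tx_ring, tx_rings), GFP_KERNEL);
	if (!v)
		return NULL;

	v->count_tx_rings = tx_rings;

	return v;
}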

struct enetc_cls_rule {
	struct ethtool_rx_flow_spec fs;
	int used;
};

#define ENETC_MAX_BDR_INT	6 /* fixed to max # of available cpus */

struct psfp_cap {
	u32 max_streamid;
	u32 max_psfp_filter;
	u32 max_psfp_gate;
	u32 max_psfp_gatelist;
	u32 max_psfp_meter;
};

#define ENETC_F_TX_TSTAMP_MASK	0xff

enum enetc_active_offloads {
	/* 8 bits reserved for TX timestamp types (hwtstamp_tx_types) */
	ENETC_F_TX_TSTAMP = BIT(0),
	ENETC_F_TX_ONESTEP_SYNC_TSTAMP = BIT(1),

	ENETC_F_RX_TSTAMP = BIT(8),
	ENETC_F_QBV = BIT(9),
	ENETC_F_QCI = BIT(10),
	ENETC_F_QBU = BIT(11),
	ENETC_F_TXCSUM = BIT(12),
	/* From commit "net: enetc: add LSO support for i.MX95 ENETC PF":
	 *
	 * ENETC rev 4.1 supports large send offload (LSO), segmenting large
	 * TCP and UDP transmit units into multiple Ethernet frames. To
	 * support LSO, software needs to fill some auxiliary information in
	 * the Tx BD, such as LSO header length, frame length, LSO maximum
	 * segment size, etc.
	 *
	 * At 1Gbps link rate, TCP segmentation was tested using iperf3, and
	 * CPU usage before and after the patch was compared through the top
	 * command. LSO saves a significant amount of CPU cycles compared to
	 * software TSO:
	 *
	 *   before: %Cpu(s): 0.1 us, 4.1 sy, 0.0 ni, 85.7 id, 0.0 wa, 0.5 hi, 9.7 si
	 *   after:  %Cpu(s): 0.1 us, 2.3 sy, 0.0 ni, 94.5 id, 0.0 wa, 0.4 hi, 2.6 si
	 *
	 * Link: https://patch.msgid.link/20241219054755.1615626-4-wei.fang@nxp.com
	 */
	ENETC_F_LSO = BIT(13),
};
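
/* Illustrative helper, not part of the driver API: the low 8 bits of
 * active_offloads mirror hwtstamp_tx_types, so "any TX timestamping
 * mode requested" can be tested against the reserved mask.
 */
static inline bool enetc_tx_tstamp_requested(enum enetc_active_offloads offloads)
{
	return offloads & ENETC_F_TX_TSTAMP_MASK;
}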

enum enetc_flags_bit {
	ENETC_TX_ONESTEP_TSTAMP_IN_PROGRESS = 0,
	/* From commit "net: enetc: block concurrent XDP transmissions
	 * during ring reconfiguration":
	 *
	 * When testing XDP_REDIRECT on the LS1028A platform, a very
	 * reproducible issue was found: Tx frames can no longer be sent out
	 * even after XDP_REDIRECT is turned off. With heavy Rx traffic and
	 * XDP_REDIRECT on, the console may display warnings like "timeout
	 * for tx ring #6 clear", and all redirected frames are dropped:
	 *
	 *   root@ls1028ardb:~# ./xdp-bench redirect eno0 eno2
	 *   Redirecting from eno0 (ifindex 3; driver fsl_enetc) to eno2 (ifindex 4; driver fsl_enetc)
	 *   [203.849809] fsl_enetc 0000:00:00.2 eno2: timeout for tx ring #5 clear
	 *   [204.006051] fsl_enetc 0000:00:00.2 eno2: timeout for tx ring #6 clear
	 *   [204.161944] fsl_enetc 0000:00:00.2 eno2: timeout for tx ring #7 clear
	 *
	 * The driver reconfigures the Tx and Rx BD rings when a bpf program
	 * is installed or uninstalled, but there was no mechanism to block
	 * redirected frames during that reconfiguration, and XDP_TX
	 * verdicts on received frames can likewise enqueue frames in the
	 * Tx rings. Because XDP ignores the state set by the
	 * netif_tx_wake_queue() API, the ENETC_TX_DOWN flag was introduced
	 * to suppress transmission of XDP frames.
	 *
	 * Fixes: c33bfaf91c4c ("net: enetc: set up XDP program under enetc_reconfigure()")
	 */
	ENETC_TX_DOWN,
};
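
/* These enumerators are bit numbers in priv->flags, meant for the
 * atomic bitop helpers. A sketch of the fencing pattern described in
 * the commit message above (assumed shape, not the exact driver code):
 *
 *	set_bit(ENETC_TX_DOWN, &priv->flags);
 *	... reconfigure the BD rings ...
 *	clear_bit(ENETC_TX_DOWN, &priv->flags);
 *
 * with the XDP transmit paths bailing out while
 * test_bit(ENETC_TX_DOWN, &priv->flags) returns true.
 */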

/* interrupt coalescing modes */
enum enetc_ic_mode {
	/* one interrupt per frame */
	ENETC_IC_NONE = 0,
	/* activated when int coalescing time is set to a non-0 value */
	ENETC_IC_RX_MANUAL = BIT(0),
	ENETC_IC_TX_MANUAL = BIT(1),
	/* use dynamic interrupt moderation */
	ENETC_IC_RX_ADAPTIVE = BIT(2),
};

#define ENETC_RXIC_PKTTHR	min_t(u32, 256, ENETC_RX_RING_DEFAULT_SIZE / 2)
#define ENETC_TXIC_PKTTHR	min_t(u32, 128, ENETC_TX_RING_DEFAULT_SIZE / 2)
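
/* Worked example: as long as the RX ring has at least 512 BDs and the
 * TX ring at least 256 BDs (the enetc defaults satisfy both), the
 * constant clamps win and the thresholds evaluate to:
 *
 *	ENETC_RXIC_PKTTHR = min(256, ring_size / 2) = 256 packets
 *	ENETC_TXIC_PKTTHR = min(128, ring_size / 2) = 128 packets
 */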

struct enetc_ndev_priv {
	struct net_device *ndev;
	struct device *dev; /* dma-mapping device */
	struct enetc_si *si;

	int bdr_int_num; /* number of Rx/Tx ring interrupts */
	struct enetc_int_vector *int_vector[ENETC_MAX_BDR_INT];
	u16 num_rx_rings, num_tx_rings;
	u16 rx_bd_count, tx_bd_count;

	u16 msg_enable;

	u8 preemptible_tcs;
	u8 max_frags; /* The maximum number of BDs for fragments */

	enum enetc_active_offloads active_offloads;

	u32 speed; /* store speed for compare update pspeed */

	/* From commit "net: enetc: use dedicated TX rings for XDP":
	 *
	 * One CPU may perform TX hashing (see netdev_pick_tx) between the 8
	 * ENETC TX rings and select TX queue 1 while another CPU is already
	 * using TX ring 1 for XDP (either XDP_TX or XDP_REDIRECT). There is
	 * no mutual exclusion between XDP and the network stack, and the
	 * ENETC TX procedure is not reentrant.
	 *
	 * Taking the network stack's TX queue lock would be straightforward
	 * for XDP_REDIRECT, but not for XDP_TX: the driver batches XDP_TX
	 * frames itself (for performance), so the lock would have to be
	 * held from the first enqueued frame until the doorbell is rung,
	 * and a NAPI loop that mixes XDP_TX and XDP_REDIRECT frames could
	 * deadlock if enetc_xdp_xmit() were called while the lock is held
	 * from the RX NAPI.
	 *
	 * Instead, two rings are reserved for the sole use of XDP: TX rings
	 * 0..ndev->real_num_tx_queues-1 are left to the network stack, and
	 * the XDP rings are picked from the end of the priv->tx_ring array,
	 * preserving the CPU-to-TX-completion mapping chosen by
	 * enetc_alloc_msix() (the XDP TX ring of CPU 0 is TX ring 6, that
	 * of CPU 1 is TX ring 7).
	 */
	struct enetc_bdr **xdp_tx_ring;
	struct enetc_bdr *tx_ring[16];
	struct enetc_bdr *rx_ring[16];

	/* From commit "net: enetc: split ring resource allocation from
	 * assignment":
	 *
	 * There are a few places where the ring resources (BD ring iomem,
	 * software BD ring, software TSO headers - everything except RX
	 * buffers) need to be reallocated, e.g. when RX timestamping is
	 * enabled the RX BD format changes to an extended one, twice as
	 * large. Doing that via enetc_close() -> enetc_open() drops the
	 * link for a few seconds while autoneg completes and, on allocation
	 * failure, leaves the interface without resources since the old
	 * ones were already freed.
	 *
	 * struct enetc_bdr_resource therefore holds the resources for one
	 * ring (RX or TX), duplicating fields from struct enetc_bdr so the
	 * fast path keeps its cache characteristics. New resources are
	 * allocated upfront (enetc_alloc_tx_resources() returns one element
	 * per TX ring, alongside the existing priv->tx_res) and only then
	 * assigned via enetc_assign_tx_resources(), which also frees the
	 * old ones, at the cost of temporarily higher memory usage.
	 */
	const struct enetc_bdr_resource *tx_res;
	const struct enetc_bdr_resource *rx_res;

	struct enetc_cls_rule *cls_rules;

	struct psfp_cap psfp_cap;

	/* From commit "net: enetc: ensure we always have a minimum number
	 * of TXQs for stack":
	 *
	 * An mqprio qdisc installed with num_tc 8 reserves all 8 TXQs for
	 * the network stack; attaching an XDP program afterwards would crop
	 * 2 TXQs and leave only 6 for mqprio, violating what the user
	 * explicitly requested, so that must fail. Without mqprio, the
	 * driver still gives all TXQs to the stack (hashed within a single
	 * traffic class), and cropping 2 for XDP is fine since no TXQ count
	 * was explicitly requested. So: without mqprio, the minimum equals
	 * the number of CPUs (1 or 2 on NXP LS1028A); with mqprio, mqprio
	 * dictates the minimum.
	 */

	/* Minimum number of TX queues required by the network stack */
	unsigned int min_num_stack_tx_queues;

	struct phylink *phylink;
	int ic_mode;
	u32 tx_ictt;

	/* From commit "net: enetc: add support for XDP_DROP and XDP_PASS":
	 *
	 * For the RX ring, enetc uses an allocation scheme based on pages
	 * split into two buffers, which is already very efficient in terms
	 * of preventing reallocations / maximizing reuse, so there is no
	 * reason to change it:
	 *
	 * +--------+--------+--------+--------+--------+--------+--------+
	 * | half B | half B | half B | half B | half B | half B | half B |
	 * +--------+--------+--------+--------+--------+--------+--------+
	 * | half A | half A | half A | half A | half A | half A | half A |  RX ring
	 * +--------+--------+--------+--------+--------+--------+--------+
	 * ^                                            ^
	 * next_to_clean                                next_to_alloc
	 *                                              next_to_use
	 *
	 * and, after part of the ring has been consumed and reused:
	 *
	 * +--------+--------+--------+--------+--------+
	 * | half B | half B | half B | half B | half B |
	 * +--------+--------+--------+--------+--------+--------+--------+
	 * | half B | half B | half A | half A | half A | half A | half A |  RX ring
	 * +--------+--------+--------+--------+--------+--------+--------+
	 * | half A | half A | ^                        ^
	 * +--------+--------+ next_to_clean            next_to_use
	 * ^
	 * next_to_alloc
	 *
	 * so when enetc_refill_rx_ring() is called to advance next_to_use,
	 * it sees it can take buffers up to next_to_alloc: rx_swbd->page
	 * isn't NULL, so no new allocation is needed.
	 *
	 * The only problem: for the default PAGE_SIZE of 4096, buffer sizes
	 * are 2048 bytes. That is enough for normal skb allocations at an
	 * MTU of 1500 bytes, but not for XDP, because the XDP headroom is
	 * 256 bytes, and with skb_shared_info and alignment only 1472 bytes
	 * remain usable - insufficient for the default MTU. The driver
	 * therefore implements scatter/gather processing to keep the
	 * existing allocation scheme: a 1500-byte packet is received in a
	 * 1472-byte buffer plus a 28-byte one.
	 *
	 * Because the headroom required by XDP is much larger than the one
	 * required by the network stack, whenever a BPF program is added or
	 * deleted on the port the existing RX buffers are drained and new
	 * ones are seeded with the required headroom, which is kept in
	 * rx_ring->buffer_offset.
	 *
	 * XDP_PASS is implemented by building an xdp_buff from the
	 * next_to_clean RX BDs without clearing them, keeping the original
	 * index at which the frame's BDs started; on XDP_PASS, instead of
	 * converting the xdp_buff to an skb, enetc_build_skb() is replayed
	 * from that original BD index, as in the normal
	 * enetc_clean_rx_ring() case. A separate RX ring processing
	 * function is used for XDP so the regular RX path does not check
	 * for an attached BPF program on every packet, and since the
	 * program is only installed/removed while the interface is down,
	 * rcu_read_lock() is forgone in enetc_clean_rx_ring().
	 */
	struct bpf_prog *xdp_prog;

	unsigned long flags;

	struct work_struct tx_onestep_tstamp;
	struct sk_buff_head tx_skbs;

	/* Serialize access to MAC Merge state between ethtool requests
	 * and link state updates
	 */
	struct mutex mm_lock;

	struct clk *ref_clk; /* RGMII/RMII reference clock */
	u64 sysclk_freq; /* NETC system clock frequency */
};
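
/* Per the dedicated-XDP-rings scheme quoted above, xdp_tx_ring points
 * into the tail of tx_ring[]; e.g. with 8 TX rings and 2 CPUs (the
 * LS1028A case from the commit message) the assignment amounts to:
 *
 *	priv->xdp_tx_ring = &priv->tx_ring[6];
 *
 * so CPU 0 transmits XDP frames on ring 6 and CPU 1 on ring 7,
 * matching the TX completion affinity chosen by enetc_alloc_msix().
 */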

/* Messaging */

/* VF-PF set primary MAC address message format */
struct enetc_msg_cmd_set_primary_mac {
	struct enetc_msg_cmd_header header;
	struct sockaddr mac;
};
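
/* Sketch of how the VF side might fill this message before posting it
 * to the PF mailbox. The header field names and the ENETC_MSG_CMD_*
 * codes are assumptions for illustration; only the struct layout above
 * is given in this header.
 */
static inline void
enetc_msg_fill_set_primary_mac(struct enetc_msg_cmd_set_primary_mac *msg,
			       const u8 *addr)
{
	msg->header.type = ENETC_MSG_CMD_MNG_MAC;	/* assumed class code */
	msg->header.id = ENETC_MSG_CMD_MNG_ADD;		/* assumed command id */
	memcpy(msg->mac.sa_data, addr, ETH_ALEN);
}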

#define ENETC_CBD(R, i)	(&(((struct enetc_cbd *)((R).bd_base))[i]))

#define ENETC_CBDR_TIMEOUT	1000 /* usecs */
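
/* Usage example: R is a struct enetc_cbdr passed by value, so the i-th
 * control BD of an SI's ring is reached with:
 *
 *	struct enetc_cbd *cbd = ENETC_CBD(si->cbd_ring, i);
 */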

/* PTP driver exports */
extern int enetc_phc_index;

/* SI common */
u32 enetc_port_mac_rd(struct enetc_si *si, u32 reg);
void enetc_port_mac_wr(struct enetc_si *si, u32 reg, u32 val);
int enetc_pci_probe(struct pci_dev *pdev, const char *name, int sizeof_priv);
void enetc_pci_remove(struct pci_dev *pdev);
int enetc_alloc_msix(struct enetc_ndev_priv *priv);
void enetc_free_msix(struct enetc_ndev_priv *priv);
void enetc_get_si_caps(struct enetc_si *si);
void enetc_init_si_rings_params(struct enetc_ndev_priv *priv);
int enetc_alloc_si_resources(struct enetc_ndev_priv *priv);
void enetc_free_si_resources(struct enetc_ndev_priv *priv);
int enetc_configure_si(struct enetc_ndev_priv *priv);
int enetc_get_driver_data(struct enetc_si *si);
void enetc_add_mac_addr_ht_filter(struct enetc_mac_filter *filter,
				  const unsigned char *addr);
void enetc_reset_mac_addr_filter(struct enetc_mac_filter *filter);

int enetc_open(struct net_device *ndev);
int enetc_close(struct net_device *ndev);
void enetc_start(struct net_device *ndev);
void enetc_stop(struct net_device *ndev);
netdev_tx_t enetc_xmit(struct sk_buff *skb, struct net_device *ndev);
struct net_device_stats *enetc_get_stats(struct net_device *ndev);
void enetc_set_features(struct net_device *ndev, netdev_features_t features);
int enetc_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd);
int enetc_setup_tc_mqprio(struct net_device *ndev, void *type_data);
void enetc_reset_tc_mqprio(struct net_device *ndev);
int enetc_setup_bpf(struct net_device *ndev, struct netdev_bpf *bpf);
int enetc_xdp_xmit(struct net_device *ndev, int num_frames,
		   struct xdp_frame **frames, u32 flags);

int enetc_hwtstamp_get(struct net_device *ndev,
		       struct kernel_hwtstamp_config *config);
int enetc_hwtstamp_set(struct net_device *ndev,
		       struct kernel_hwtstamp_config *config,
		       struct netlink_ext_ack *extack);

/* ethtool */
extern const struct ethtool_ops enetc_pf_ethtool_ops;
extern const struct ethtool_ops enetc4_pf_ethtool_ops;
extern const struct ethtool_ops enetc_vf_ethtool_ops;
void enetc_set_ethtool_ops(struct net_device *ndev);
void enetc_mm_link_state_update(struct enetc_ndev_priv *priv, bool link);
void enetc_mm_commit_preemptible_tcs(struct enetc_ndev_priv *priv);

/* control buffer descriptor ring (CBDR) */
int enetc_setup_cbdr(struct device *dev, struct enetc_hw *hw, int bd_count,
		     struct enetc_cbdr *cbdr);
void enetc_teardown_cbdr(struct enetc_cbdr *cbdr);
int enetc4_setup_cbdr(struct enetc_si *si);
void enetc4_teardown_cbdr(struct enetc_si *si);
int enetc_set_mac_flt_entry(struct enetc_si *si, int index,
			    char *mac_addr, int si_map);
int enetc_clear_mac_flt_entry(struct enetc_si *si, int index);
int enetc_set_fs_entry(struct enetc_si *si, struct enetc_cmd_rfse *rfse,
		       int index);
void enetc_set_rss_key(struct enetc_si *si, const u8 *bytes);
int enetc_get_rss_table(struct enetc_si *si, u32 *table, int count);
int enetc_set_rss_table(struct enetc_si *si, const u32 *table, int count);
int enetc_send_cmd(struct enetc_si *si, struct enetc_cbd *cbd);
int enetc4_get_rss_table(struct enetc_si *si, u32 *table, int count);
int enetc4_set_rss_table(struct enetc_si *si, const u32 *table, int count);
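
/* A minimal sketch, not driver code: fill an RSS indirection table
 * round-robin across the RX rings and program it through the API
 * declared above. The caller is assumed to size `table` to the SI's
 * RSS table length; the function name is illustrative only.
 */
static inline int enetc_program_rss_round_robin(struct enetc_si *si,
						u32 *table, int count,
						int num_rx_rings)
{
	int i;

	for (i = 0; i < count; i++)
		table[i] = i % num_rx_rings;

	return enetc_set_rss_table(si, table, count);
}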

static inline void *enetc_cbd_alloc_data_mem(struct enetc_si *si,
					     struct enetc_cbd *cbd,
					     int size, dma_addr_t *dma,
					     void **data_align)
{
	struct enetc_cbdr *ring = &si->cbd_ring;
	dma_addr_t dma_align;
	void *data;

	data = dma_alloc_coherent(ring->dma_dev,
				  size + ENETC_CBD_DATA_MEM_ALIGN,
				  dma, GFP_KERNEL);
	if (!data) {
		dev_err(ring->dma_dev, "CBD alloc data memory failed!\n");
		return NULL;
	}

	dma_align = ALIGN(*dma, ENETC_CBD_DATA_MEM_ALIGN);
	*data_align = PTR_ALIGN(data, ENETC_CBD_DATA_MEM_ALIGN);

	cbd->addr[0] = cpu_to_le32(lower_32_bits(dma_align));
	cbd->addr[1] = cpu_to_le32(upper_32_bits(dma_align));
	cbd->length = cpu_to_le16(size);

	return data;
}

static inline void enetc_cbd_free_data_mem(struct enetc_si *si, int size,
					   void *data, dma_addr_t *dma)
{
	struct enetc_cbdr *ring = &si->cbd_ring;

	dma_free_coherent(ring->dma_dev, size + ENETC_CBD_DATA_MEM_ALIGN,
			  data, *dma);
}
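
/* Typical pairing, sketched with an assumed caller (the function name
 * is illustrative; the command-specific cbd fields are elided):
 * allocate the DMA data buffer, fill the aligned view, send the
 * command, then free with the same size and dma handle.
 */
static inline int enetc_send_cmd_with_data(struct enetc_si *si, int size)
{
	struct enetc_cbd cbd = {};
	void *tmp, *tmp_align;
	dma_addr_t dma;
	int err;

	tmp = enetc_cbd_alloc_data_mem(si, &cbd, size, &dma, &tmp_align);
	if (!tmp)
		return -ENOMEM;

	/* ... fill tmp_align and the remaining cbd fields here ... */

	err = enetc_send_cmd(si, &cbd);

	enetc_cbd_free_data_mem(si, size, tmp, &dma);

	return err;
}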

void enetc_reset_ptcmsdur(struct enetc_hw *hw);
void enetc_set_ptcmsdur(struct enetc_hw *hw, u32 *queue_max_sdu);

#ifdef CONFIG_FSL_ENETC_QOS
int enetc_qos_query_caps(struct net_device *ndev, void *type_data);
int enetc_setup_tc_taprio(struct net_device *ndev, void *type_data);
void enetc_sched_speed_set(struct enetc_ndev_priv *priv, int speed);
int enetc_setup_tc_cbs(struct net_device *ndev, void *type_data);
int enetc_setup_tc_txtime(struct net_device *ndev, void *type_data);
int enetc_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
			    void *cb_priv);
int enetc_setup_tc_psfp(struct net_device *ndev, void *type_data);
int enetc_psfp_init(struct enetc_ndev_priv *priv);
int enetc_psfp_clean(struct enetc_ndev_priv *priv);
int enetc_set_psfp(struct net_device *ndev, bool en);

static inline void enetc_get_max_cap(struct enetc_ndev_priv *priv)
{
	struct enetc_hw *hw = &priv->si->hw;
	u32 reg;

	reg = enetc_port_rd(hw, ENETC_PSIDCAPR);
	priv->psfp_cap.max_streamid = reg & ENETC_PSIDCAPR_MSK;
	/* Port stream filter capability */
	reg = enetc_port_rd(hw, ENETC_PSFCAPR);
	priv->psfp_cap.max_psfp_filter = reg & ENETC_PSFCAPR_MSK;
	/* Port stream gate capability */
	reg = enetc_port_rd(hw, ENETC_PSGCAPR);
	priv->psfp_cap.max_psfp_gate = (reg & ENETC_PSGCAPR_SGIT_MSK);
	priv->psfp_cap.max_psfp_gatelist = (reg & ENETC_PSGCAPR_GCL_MSK) >> 16;
	/* Port flow meter capability */
	reg = enetc_port_rd(hw, ENETC_PFMCAPR);
	priv->psfp_cap.max_psfp_meter = reg & ENETC_PFMCAPR_MSK;
}

static inline int enetc_psfp_enable(struct enetc_ndev_priv *priv)
{
	struct enetc_hw *hw = &priv->si->hw;
	int err;

	enetc_get_max_cap(priv);

	err = enetc_psfp_init(priv);
	if (err)
		return err;

	enetc_wr(hw, ENETC_PPSFPMR, enetc_rd(hw, ENETC_PPSFPMR) |
		 ENETC_PPSFPMR_PSFPEN | ENETC_PPSFPMR_VS |
		 ENETC_PPSFPMR_PVC | ENETC_PPSFPMR_PVZC);

	return 0;
}

static inline int enetc_psfp_disable(struct enetc_ndev_priv *priv)
{
	struct enetc_hw *hw = &priv->si->hw;
	int err;

	err = enetc_psfp_clean(priv);
	if (err)
		return err;

	enetc_wr(hw, ENETC_PPSFPMR, enetc_rd(hw, ENETC_PPSFPMR) &
		 ~ENETC_PPSFPMR_PSFPEN & ~ENETC_PPSFPMR_VS &
		 ~ENETC_PPSFPMR_PVC & ~ENETC_PPSFPMR_PVZC);

	memset(&priv->psfp_cap, 0, sizeof(struct psfp_cap));

	return 0;
}

#else
#define enetc_qos_query_caps(ndev, type_data) -EOPNOTSUPP
#define enetc_setup_tc_taprio(ndev, type_data) -EOPNOTSUPP
#define enetc_sched_speed_set(priv, speed) (void)0
#define enetc_setup_tc_cbs(ndev, type_data) -EOPNOTSUPP
#define enetc_setup_tc_txtime(ndev, type_data) -EOPNOTSUPP
#define enetc_setup_tc_psfp(ndev, type_data) -EOPNOTSUPP
#define enetc_setup_tc_block_cb NULL

#define enetc_get_max_cap(p) \
	memset(&((p)->psfp_cap), 0, sizeof(struct psfp_cap))

static inline int enetc_psfp_enable(struct enetc_ndev_priv *priv)
{
	return 0;
}

static inline int enetc_psfp_disable(struct enetc_ndev_priv *priv)
{
	return 0;
}

static inline int enetc_set_psfp(struct net_device *ndev, bool en)
{
	return 0;
}
#endif
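
/* Usage sketch (assumed caller shape, illustrative name): the PSFP
 * helpers above are meant to be toggled from the driver's
 * feature-change path, e.g.:
 */
static inline int enetc_psfp_set_feature_sketch(struct net_device *ndev,
						bool en)
{
	struct enetc_ndev_priv *priv = netdev_priv(ndev);

	return en ? enetc_psfp_enable(priv) : enetc_psfp_disable(priv);
}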