// SPDX-License-Identifier: (GPL-2.0 OR MIT)
/*
 * Copyright (c) 2018 Synopsys, Inc. and/or its affiliates.
 * stmmac XGMAC support.
 */

#include <linux/bitfield.h>
#include <linux/stmmac.h>
#include "common.h"
#include "dwxgmac2.h"
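
/* Parse the Tx write-back descriptor: the DMA still owns it while
 * TDES3_OWN is set, and only the last-segment descriptor (TDES3_LD)
 * carries the completion status for the frame.
 */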
static int dwxgmac2_get_tx_status(struct stmmac_extra_stats *x,
				  struct dma_desc *p, void __iomem *ioaddr)
{
	unsigned int tdes3 = le32_to_cpu(p->des3);
	int ret = tx_done;

	if (unlikely(tdes3 & XGMAC_TDES3_OWN))
		return tx_dma_own;
	if (likely(!(tdes3 & XGMAC_TDES3_LD)))
		return tx_not_ls;

	return ret;
}
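
/* Parse the Rx write-back descriptor: context descriptors and frames
 * flagged by the error summary bit are discarded; a frame is only
 * reported as good on its last segment (RDES3_LD).
 */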
static int dwxgmac2_get_rx_status(struct stmmac_extra_stats *x,
				  struct dma_desc *p)
{
	unsigned int rdes3 = le32_to_cpu(p->des3);

	if (unlikely(rdes3 & XGMAC_RDES3_OWN))
		return dma_own;
	if (unlikely(rdes3 & XGMAC_RDES3_CTXT))
		return discard_frame;
	if (likely(!(rdes3 & XGMAC_RDES3_LD)))
		return rx_not_ls;
	if (unlikely((rdes3 & XGMAC_RDES3_ES) && (rdes3 & XGMAC_RDES3_LD)))
		return discard_frame;

	return good_frame;
}

static int dwxgmac2_get_tx_len(struct dma_desc *p)
{
	return (le32_to_cpu(p->des2) & XGMAC_TDES2_B1L);
}

static int dwxgmac2_get_tx_owner(struct dma_desc *p)
{
	return (le32_to_cpu(p->des3) & XGMAC_TDES3_OWN) > 0;
}

static void dwxgmac2_set_tx_owner(struct dma_desc *p)
{
	p->des3 |= cpu_to_le32(XGMAC_TDES3_OWN);
}

static void dwxgmac2_set_rx_owner(struct dma_desc *p, int disable_rx_ic)
{
	u32 flags = XGMAC_RDES3_OWN;

	if (!disable_rx_ic)
		flags |= XGMAC_RDES3_IOC;

	p->des3 |= cpu_to_le32(flags);
}

static int dwxgmac2_get_tx_ls(struct dma_desc *p)
{
	return (le32_to_cpu(p->des3) & XGMAC_RDES3_LD) > 0;
}
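
/* Rx VLAN offload helpers: the stripped outer tag is written back in
 * RDES0, and the ET_LT field in RDES3 indicates a single- or
 * double-tagged frame.
 */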
static u16 dwxgmac2_wrback_get_rx_vlan_tci(struct dma_desc *p)
{
	return le32_to_cpu(p->des0) & XGMAC_RDES0_VLAN_TAG_MASK;
}

static bool dwxgmac2_wrback_get_rx_vlan_valid(struct dma_desc *p)
{
	u32 et_lt;

	et_lt = FIELD_GET(XGMAC_RDES3_ET_LT, le32_to_cpu(p->des3));

	return et_lt >= XGMAC_ET_LT_VLAN_STAG &&
	       et_lt <= XGMAC_ET_LT_DVLAN_STAG_CTAG;
}

static int dwxgmac2_get_rx_frame_len(struct dma_desc *p, int rx_coe)
{
	return (le32_to_cpu(p->des3) & XGMAC_RDES3_PL);
}

static void dwxgmac2_enable_tx_timestamp(struct dma_desc *p)
{
	p->des2 |= cpu_to_le32(XGMAC_TDES2_TTSE);
}

static int dwxgmac2_get_tx_timestamp_status(struct dma_desc *p)
{
	return 0; /* Not supported */
}
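
/* Hardware timestamps come back with the seconds in des1 and the
 * nanoseconds in des0; fold them into a single u64 nanosecond value.
 */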
static inline void dwxgmac2_get_timestamp(void *desc, u32 ats, u64 *ts)
{
	struct dma_desc *p = (struct dma_desc *)desc;
	u64 ns = 0;

	ns += le32_to_cpu(p->des1) * 1000000000ULL;
	ns += le32_to_cpu(p->des0);

	*ts = ns;
}
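
/* The Rx timestamp is delivered in a context descriptor that follows the
 * packet descriptor; a des0/des1 pair of all ones is rejected as invalid.
 */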
static int dwxgmac2_rx_check_timestamp(void *desc)
{
	struct dma_desc *p = (struct dma_desc *)desc;
	unsigned int rdes3 = le32_to_cpu(p->des3);
	bool desc_valid, ts_valid;

	dma_rmb();

	desc_valid = !(rdes3 & XGMAC_RDES3_OWN) && (rdes3 & XGMAC_RDES3_CTXT);
	ts_valid = !(rdes3 & XGMAC_RDES3_TSD) && (rdes3 & XGMAC_RDES3_TSA);

	if (likely(desc_valid && ts_valid)) {
		if ((p->des0 == 0xffffffff) && (p->des1 == 0xffffffff))
			return -EINVAL;
		return 0;
	}

	return -EINVAL;
}
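
/* RDES3_CDA signals that a context descriptor follows this one; only
 * then is the next descriptor checked for a valid timestamp.
 */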
static int dwxgmac2_get_rx_timestamp_status(void *desc, void *next_desc,
					    u32 ats)
{
	struct dma_desc *p = (struct dma_desc *)desc;
	unsigned int rdes3 = le32_to_cpu(p->des3);
	int ret = -EBUSY;

	if (likely(rdes3 & XGMAC_RDES3_CDA))
		ret = dwxgmac2_rx_check_timestamp(next_desc);

	return !ret;
}

static void dwxgmac2_init_rx_desc(struct dma_desc *p, int disable_rx_ic,
				  int mode, int end, int bfsize)
{
	dwxgmac2_set_rx_owner(p, disable_rx_ic);
}

static void dwxgmac2_init_tx_desc(struct dma_desc *p, int mode, int end)
{
	p->des0 = 0;
	p->des1 = 0;
	p->des2 = 0;
	p->des3 = 0;
}
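
/* Fill a normal Tx descriptor: buffer length, frame length, first/last
 * segment flags and checksum insertion control (CIC). The OWN bit is
 * written last; for the first descriptor of a frame a dma_wmb() ensures
 * the other descriptors are visible to the DMA before ownership flips.
 */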
static void dwxgmac2_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
				     bool csum_flag, int mode, bool tx_own,
				     bool ls, unsigned int tot_pkt_len)
{
	unsigned int tdes3 = le32_to_cpu(p->des3);

	p->des2 |= cpu_to_le32(len & XGMAC_TDES2_B1L);

	tdes3 |= tot_pkt_len & XGMAC_TDES3_FL;
	if (is_fs)
		tdes3 |= XGMAC_TDES3_FD;
	else
		tdes3 &= ~XGMAC_TDES3_FD;

	if (csum_flag)
		tdes3 |= 0x3 << XGMAC_TDES3_CIC_SHIFT;
	else
		tdes3 &= ~XGMAC_TDES3_CIC;

	if (ls)
		tdes3 |= XGMAC_TDES3_LD;
	else
		tdes3 &= ~XGMAC_TDES3_LD;

	/* Finally set the OWN bit. Later the DMA will start! */
	if (tx_own)
		tdes3 |= XGMAC_TDES3_OWN;

	if (is_fs && tx_own)
		/* When the own bit has to be set for the first descriptor of
		 * a frame, all other descriptors of that frame must be set up
		 * beforehand to avoid a race condition.
		 */
		dma_wmb();

	p->des3 = cpu_to_le32(tdes3);
}
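
/* Fill a TSO Tx descriptor: up to two buffer lengths, plus the TCP
 * header length and TCP payload length on the first segment (TSE set).
 * Ownership is handed over the same way as in the non-TSO path.
 */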
static void dwxgmac2_prepare_tso_tx_desc(struct dma_desc *p, int is_fs,
					 int len1, int len2, bool tx_own,
					 bool ls, unsigned int tcphdrlen,
					 unsigned int tcppayloadlen)
{
	unsigned int tdes3 = le32_to_cpu(p->des3);

	if (len1)
		p->des2 |= cpu_to_le32(len1 & XGMAC_TDES2_B1L);
	if (len2)
		p->des2 |= cpu_to_le32((len2 << XGMAC_TDES2_B2L_SHIFT) &
				       XGMAC_TDES2_B2L);
	if (is_fs) {
		tdes3 |= XGMAC_TDES3_FD | XGMAC_TDES3_TSE;
		tdes3 |= (tcphdrlen << XGMAC_TDES3_THL_SHIFT) &
			 XGMAC_TDES3_THL;
		tdes3 |= tcppayloadlen & XGMAC_TDES3_TPL;
	} else {
		tdes3 &= ~XGMAC_TDES3_FD;
	}

	if (ls)
		tdes3 |= XGMAC_TDES3_LD;
	else
		tdes3 &= ~XGMAC_TDES3_LD;

	/* Finally set the OWN bit. Later the DMA will start! */
	if (tx_own)
		tdes3 |= XGMAC_TDES3_OWN;

	if (is_fs && tx_own)
		/* When the own bit has to be set for the first descriptor of
		 * a frame, all other descriptors of that frame must be set up
		 * beforehand to avoid a race condition.
		 */
		dma_wmb();

	p->des3 = cpu_to_le32(tdes3);
}

static void dwxgmac2_release_tx_desc(struct dma_desc *p, int mode)
{
	p->des0 = 0;
	p->des1 = 0;
	p->des2 = 0;
	p->des3 = 0;
}

static void dwxgmac2_set_tx_ic(struct dma_desc *p)
{
	p->des2 |= cpu_to_le32(XGMAC_TDES2_IOC);
}
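
/* Build a Tx context descriptor carrying the MSS for TSO (TCMSSV). */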
static void dwxgmac2_set_mss(struct dma_desc *p, unsigned int mss)
{
	p->des0 = 0;
	p->des1 = 0;
	p->des2 = cpu_to_le32(mss);
	p->des3 = cpu_to_le32(XGMAC_TDES3_CTXT | XGMAC_TDES3_TCMSSV);
}

static void dwxgmac2_set_addr(struct dma_desc *p, dma_addr_t addr)
{
	p->des0 = cpu_to_le32(lower_32_bits(addr));
	p->des1 = cpu_to_le32(upper_32_bits(addr));
}

static void dwxgmac2_clear(struct dma_desc *p)
{
	p->des0 = 0;
	p->des1 = 0;
	p->des2 = 0;
	p->des3 = 0;
}
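
/* RSS hash from the Rx write-back descriptor: RDES3_RSV flags a valid
 * hash in des1, and the L34T field maps it to an L3 or L4 hash type.
 */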
static int dwxgmac2_get_rx_hash(struct dma_desc *p, u32 *hash,
				enum pkt_hash_types *type)
{
	unsigned int rdes3 = le32_to_cpu(p->des3);
	u32 ptype;

	if (rdes3 & XGMAC_RDES3_RSV) {
		ptype = (rdes3 & XGMAC_RDES3_L34T) >> XGMAC_RDES3_L34T_SHIFT;

		switch (ptype) {
		case XGMAC_L34T_IP4TCP:
		case XGMAC_L34T_IP4UDP:
		case XGMAC_L34T_IP6TCP:
		case XGMAC_L34T_IP6UDP:
			*type = PKT_HASH_TYPE_L4;
			break;
		default:
			*type = PKT_HASH_TYPE_L3;
			break;
		}

		*hash = le32_to_cpu(p->des1);
		return 0;
	}

	return -EINVAL;
}

static void dwxgmac2_get_rx_header_len(struct dma_desc *p, unsigned int *len)
{
	if (le32_to_cpu(p->des3) & XGMAC_RDES3_L34T)
		*len = le32_to_cpu(p->des2) & XGMAC_RDES2_HL;
}

static void dwxgmac2_set_sec_addr(struct dma_desc *p, dma_addr_t addr, bool is_valid)
{
	p->des2 = cpu_to_le32(lower_32_bits(addr));
	p->des3 = cpu_to_le32(upper_32_bits(addr));
}

static void dwxgmac2_set_sarc(struct dma_desc *p, u32 sarc_type)
{
	sarc_type <<= XGMAC_TDES3_SAIC_SHIFT;

	p->des3 |= cpu_to_le32(sarc_type & XGMAC_TDES3_SAIC);
}
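
/* Build a Tx context descriptor carrying the outer VLAN tag (and the
 * inner tag/insertion mode when double VLAN is used).
 */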
static void dwxgmac2_set_vlan_tag(struct dma_desc *p, u16 tag, u16 inner_tag,
				  u32 inner_type)
{
	p->des0 = 0;
	p->des1 = 0;
	p->des2 = 0;
	p->des3 = 0;

	/* Inner VLAN */
	if (inner_type) {
		u32 des = inner_tag << XGMAC_TDES2_IVT_SHIFT;

		des &= XGMAC_TDES2_IVT;
		p->des2 = cpu_to_le32(des);

		des = inner_type << XGMAC_TDES3_IVTIR_SHIFT;
		des &= XGMAC_TDES3_IVTIR;
		p->des3 = cpu_to_le32(des | XGMAC_TDES3_IVLTV);
	}

	/* Outer VLAN */
	p->des3 |= cpu_to_le32(tag & XGMAC_TDES3_VT);
	p->des3 |= cpu_to_le32(XGMAC_TDES3_VLTV);

	p->des3 |= cpu_to_le32(XGMAC_TDES3_CTXT);
}

static void dwxgmac2_set_vlan(struct dma_desc *p, u32 type)
{
	type <<= XGMAC_TDES2_VTIR_SHIFT;
	p->des2 |= cpu_to_le32(type & XGMAC_TDES2_VTIR);
}
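
/* Program the launch time (TBS) into the enhanced descriptor words:
 * seconds in des4, nanoseconds in des5.
 */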
static void dwxgmac2_set_tbs(struct dma_edesc *p, u32 sec, u32 nsec)
{
	p->des4 = cpu_to_le32((sec & XGMAC_TDES0_LT) | XGMAC_TDES0_LTV);
	p->des5 = cpu_to_le32(nsec & XGMAC_TDES1_LT);
	p->des6 = 0;
	p->des7 = 0;
}
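
/* Descriptor callbacks hooked up by the stmmac core for XGMAC. */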
const struct stmmac_desc_ops dwxgmac210_desc_ops = {
	.tx_status = dwxgmac2_get_tx_status,
	.rx_status = dwxgmac2_get_rx_status,
	.get_tx_len = dwxgmac2_get_tx_len,
	.get_tx_owner = dwxgmac2_get_tx_owner,
	.set_tx_owner = dwxgmac2_set_tx_owner,
	.set_rx_owner = dwxgmac2_set_rx_owner,
	.get_tx_ls = dwxgmac2_get_tx_ls,
	.get_rx_vlan_tci = dwxgmac2_wrback_get_rx_vlan_tci,
	.get_rx_vlan_valid = dwxgmac2_wrback_get_rx_vlan_valid,
	.get_rx_frame_len = dwxgmac2_get_rx_frame_len,
	.enable_tx_timestamp = dwxgmac2_enable_tx_timestamp,
	.get_tx_timestamp_status = dwxgmac2_get_tx_timestamp_status,
	.get_rx_timestamp_status = dwxgmac2_get_rx_timestamp_status,
	.get_timestamp = dwxgmac2_get_timestamp,
	.set_tx_ic = dwxgmac2_set_tx_ic,
	.prepare_tx_desc = dwxgmac2_prepare_tx_desc,
	.prepare_tso_tx_desc = dwxgmac2_prepare_tso_tx_desc,
	.release_tx_desc = dwxgmac2_release_tx_desc,
	.init_rx_desc = dwxgmac2_init_rx_desc,
	.init_tx_desc = dwxgmac2_init_tx_desc,
	.set_mss = dwxgmac2_set_mss,
	.set_addr = dwxgmac2_set_addr,
	.clear = dwxgmac2_clear,
	.get_rx_hash = dwxgmac2_get_rx_hash,
	.get_rx_header_len = dwxgmac2_get_rx_header_len,
	.set_sec_addr = dwxgmac2_set_sec_addr,
	.set_sarc = dwxgmac2_set_sarc,
	.set_vlan_tag = dwxgmac2_set_vlan_tag,
	.set_vlan = dwxgmac2_set_vlan,
	.set_tbs = dwxgmac2_set_tbs,
};