/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
// Copyright (c) 2018 Synopsys, Inc. and/or its affiliates.
// stmmac HW Interface Callbacks

#ifndef __STMMAC_HWIF_H__
#define __STMMAC_HWIF_H__

#include <linux/netdevice.h>
#include <linux/stmmac.h>
|
2018-05-04 10:01:38 +01:00
|
|
|
|
2018-04-16 16:08:12 +01:00
|
|
|
/* Invoke an optional void-returning HW callback.
 *
 * Looks up (__priv)->hw->__module->__cname and calls it with __arg0 and
 * any extra arguments. Expands to a GNU statement expression whose value
 * is an int: 0 when the ops table and callback both exist (the callback
 * was invoked), -EINVAL when either is missing. This lets callers treat
 * "callback not implemented" as a soft error without NULL checks.
 */
#define stmmac_do_void_callback(__priv, __module, __cname, __arg0, __args...) \
({ \
	int __result = -EINVAL; \
	if ((__priv)->hw->__module && (__priv)->hw->__module->__cname) { \
		(__priv)->hw->__module->__cname((__arg0), ##__args); \
		__result = 0; \
	} \
	__result; \
})
|
|
|
|
/* Invoke an optional value-returning HW callback.
 *
 * Same dispatch as stmmac_do_void_callback(), but the statement
 * expression yields the callback's own (int) return value when the
 * callback exists, and -EINVAL when the ops table or callback is NULL.
 * Callers therefore cannot distinguish a callback legitimately
 * returning -EINVAL from a missing callback — existing kernel behavior,
 * preserved here.
 */
#define stmmac_do_callback(__priv, __module, __cname, __arg0, __args...) \
({ \
	int __result = -EINVAL; \
	if ((__priv)->hw->__module && (__priv)->hw->__module->__cname) \
		__result = (__priv)->hw->__module->__cname((__arg0), ##__args); \
	__result; \
})
|
|
|
|
|
|
|
|
/* Forward declarations: these types are defined elsewhere in the driver;
 * only pointers to them appear in the callback signatures below. */
struct stmmac_extra_stats;
struct stmmac_priv;
struct stmmac_safety_stats;
struct dma_desc;
struct dma_extended_desc;
struct dma_edesc;
|
2018-04-16 16:08:12 +01:00
|
|
|
|
|
|
|
/* Descriptors helpers */
|
|
|
|
struct stmmac_desc_ops {
|
|
|
|
/* DMA RX descriptor ring initialization */
|
|
|
|
void (*init_rx_desc)(struct dma_desc *p, int disable_rx_ic, int mode,
|
2019-03-27 22:35:35 +02:00
|
|
|
int end, int bfsize);
|
2018-04-16 16:08:12 +01:00
|
|
|
/* DMA TX descriptor ring initialization */
|
|
|
|
void (*init_tx_desc)(struct dma_desc *p, int mode, int end);
|
|
|
|
/* Invoked by the xmit function to prepare the tx descriptor */
|
|
|
|
void (*prepare_tx_desc)(struct dma_desc *p, int is_fs, int len,
|
|
|
|
bool csum_flag, int mode, bool tx_own, bool ls,
|
|
|
|
unsigned int tot_pkt_len);
|
|
|
|
void (*prepare_tso_tx_desc)(struct dma_desc *p, int is_fs, int len1,
|
|
|
|
int len2, bool tx_own, bool ls, unsigned int tcphdrlen,
|
|
|
|
unsigned int tcppayloadlen);
|
|
|
|
/* Set/get the owner of the descriptor */
|
|
|
|
void (*set_tx_owner)(struct dma_desc *p);
|
|
|
|
int (*get_tx_owner)(struct dma_desc *p);
|
|
|
|
/* Clean the tx descriptor as soon as the tx irq is received */
|
|
|
|
void (*release_tx_desc)(struct dma_desc *p, int mode);
|
|
|
|
/* Clear interrupt on tx frame completion. When this bit is
|
|
|
|
* set an interrupt happens as soon as the frame is transmitted */
|
|
|
|
void (*set_tx_ic)(struct dma_desc *p);
|
|
|
|
/* Last tx segment reports the transmit status */
|
|
|
|
int (*get_tx_ls)(struct dma_desc *p);
|
2023-11-21 13:38:42 +08:00
|
|
|
/* Get the tag of the descriptor */
|
|
|
|
u16 (*get_rx_vlan_tci)(struct dma_desc *p);
|
|
|
|
/* Get the valid status of descriptor */
|
|
|
|
bool (*get_rx_vlan_valid)(struct dma_desc *p);
|
2018-04-16 16:08:12 +01:00
|
|
|
/* Return the transmit status looking at the TDES1 */
|
net: stmmac: use per-queue 64 bit statistics where necessary
Currently, there are two major issues with stmmac driver statistics
First of all, statistics in stmmac_extra_stats, stmmac_rxq_stats
and stmmac_txq_stats are 32 bit variables on 32 bit platforms. This
can cause some stats to overflow after several minutes of
high traffic, for example rx_pkt_n, tx_pkt_n and so on.
Secondly, if HW supports multiqueues, there are frequent cacheline
ping pongs on some driver statistic vars, for example, normal_irq_n,
tx_pkt_n and so on. What's more, frequent cacheline ping pongs on
normal_irq_n happens in ISR, this makes the situation worse.
To improve the driver, we convert those statistics to 64 bit, implement
ndo_get_stats64 and update .get_ethtool_stats implementation
accordingly. We also use per-queue statistics where necessary to remove
the cacheline ping pongs as much as possible to make multiqueue
operations faster. Those statistics which are not possible to overflow
and not frequently updated are kept as is.
Signed-off-by: Jisheng Zhang <jszhang@kernel.org>
Link: https://lore.kernel.org/r/20230717160630.1892-3-jszhang@kernel.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
2023-07-18 00:06:30 +08:00
|
|
|
int (*tx_status)(struct stmmac_extra_stats *x,
|
2023-04-11 15:04:04 -05:00
|
|
|
struct dma_desc *p, void __iomem *ioaddr);
|
2018-04-16 16:08:12 +01:00
|
|
|
/* Get the buffer size from the descriptor */
|
|
|
|
int (*get_tx_len)(struct dma_desc *p);
|
|
|
|
/* Handle extra events on specific interrupts hw dependent */
|
2018-05-18 14:56:07 +01:00
|
|
|
void (*set_rx_owner)(struct dma_desc *p, int disable_rx_ic);
|
2018-04-16 16:08:12 +01:00
|
|
|
/* Get the receive frame size */
|
|
|
|
int (*get_rx_frame_len)(struct dma_desc *p, int rx_coe_type);
|
|
|
|
/* Return the reception status looking at the RDES1 */
|
net: stmmac: use per-queue 64 bit statistics where necessary
Currently, there are two major issues with stmmac driver statistics
First of all, statistics in stmmac_extra_stats, stmmac_rxq_stats
and stmmac_txq_stats are 32 bit variables on 32 bit platforms. This
can cause some stats to overflow after several minutes of
high traffic, for example rx_pkt_n, tx_pkt_n and so on.
Secondly, if HW supports multiqueues, there are frequent cacheline
ping pongs on some driver statistic vars, for example, normal_irq_n,
tx_pkt_n and so on. What's more, frequent cacheline ping pongs on
normal_irq_n happens in ISR, this makes the situation worse.
To improve the driver, we convert those statistics to 64 bit, implement
ndo_get_stats64 and update .get_ethtool_stats implementation
accordingly. We also use per-queue statistics where necessary to remove
the cacheline ping pongs as much as possible to make multiqueue
operations faster. Those statistics which are not possible to overflow
and not frequently updated are kept as is.
Signed-off-by: Jisheng Zhang <jszhang@kernel.org>
Link: https://lore.kernel.org/r/20230717160630.1892-3-jszhang@kernel.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
2023-07-18 00:06:30 +08:00
|
|
|
int (*rx_status)(struct stmmac_extra_stats *x,
|
2023-04-11 15:04:04 -05:00
|
|
|
struct dma_desc *p);
|
net: stmmac: use per-queue 64 bit statistics where necessary
Currently, there are two major issues with stmmac driver statistics
First of all, statistics in stmmac_extra_stats, stmmac_rxq_stats
and stmmac_txq_stats are 32 bit variables on 32 bit platforms. This
can cause some stats to overflow after several minutes of
high traffic, for example rx_pkt_n, tx_pkt_n and so on.
Secondly, if HW supports multiqueues, there are frequent cacheline
ping pongs on some driver statistic vars, for example, normal_irq_n,
tx_pkt_n and so on. What's more, frequent cacheline ping pongs on
normal_irq_n happens in ISR, this makes the situation worse.
To improve the driver, we convert those statistics to 64 bit, implement
ndo_get_stats64 and update .get_ethtool_stats implementation
accordingly. We also use per-queue statistics where necessary to remove
the cacheline ping pongs as much as possible to make multiqueue
operations faster. Those statistics which are not possible to overflow
and not frequently updated are kept as is.
Signed-off-by: Jisheng Zhang <jszhang@kernel.org>
Link: https://lore.kernel.org/r/20230717160630.1892-3-jszhang@kernel.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
2023-07-18 00:06:30 +08:00
|
|
|
void (*rx_extended_status)(struct stmmac_extra_stats *x,
|
2023-04-11 15:04:04 -05:00
|
|
|
struct dma_extended_desc *p);
|
2018-04-16 16:08:12 +01:00
|
|
|
/* Set tx timestamp enable bit */
|
|
|
|
void (*enable_tx_timestamp) (struct dma_desc *p);
|
|
|
|
/* get tx timestamp status */
|
|
|
|
int (*get_tx_timestamp_status) (struct dma_desc *p);
|
|
|
|
/* get timestamp value */
|
|
|
|
void (*get_timestamp)(void *desc, u32 ats, u64 *ts);
|
|
|
|
/* get rx timestamp status */
|
|
|
|
int (*get_rx_timestamp_status)(void *desc, void *next_desc, u32 ats);
|
|
|
|
/* Display ring */
|
2021-02-25 17:01:12 +08:00
|
|
|
void (*display_ring)(void *head, unsigned int size, bool rx,
|
|
|
|
dma_addr_t dma_rx_phy, unsigned int desc_size);
|
2018-04-16 16:08:12 +01:00
|
|
|
/* set MSS via context descriptor */
|
|
|
|
void (*set_mss)(struct dma_desc *p, unsigned int mss);
|
2018-05-18 14:56:00 +01:00
|
|
|
/* set descriptor skbuff address */
|
|
|
|
void (*set_addr)(struct dma_desc *p, dma_addr_t addr);
|
2018-05-18 14:56:01 +01:00
|
|
|
/* clear descriptor */
|
|
|
|
void (*clear)(struct dma_desc *p);
|
2019-08-07 10:03:12 +02:00
|
|
|
/* RSS */
|
|
|
|
int (*get_rx_hash)(struct dma_desc *p, u32 *hash,
|
|
|
|
enum pkt_hash_types *type);
|
2020-09-11 11:55:58 +08:00
|
|
|
void (*get_rx_header_len)(struct dma_desc *p, unsigned int *len);
|
2021-02-25 17:01:13 +08:00
|
|
|
void (*set_sec_addr)(struct dma_desc *p, dma_addr_t addr, bool buf2_valid);
|
2019-08-17 20:54:47 +02:00
|
|
|
void (*set_sarc)(struct dma_desc *p, u32 sarc_type);
|
2019-08-17 20:54:50 +02:00
|
|
|
void (*set_vlan_tag)(struct dma_desc *p, u16 tag, u16 inner_tag,
|
|
|
|
u32 inner_type);
|
|
|
|
void (*set_vlan)(struct dma_desc *p, u32 type);
|
2020-01-13 17:24:09 +01:00
|
|
|
void (*set_tbs)(struct dma_edesc *p, u32 sec, u32 nsec);
|
2018-04-16 16:08:12 +01:00
|
|
|
};
|
|
|
|
|
|
|
|
/* Wrappers dispatching into (__priv)->hw->desc; each returns 0/-EINVAL
 * (void callbacks) or the callback's value (value callbacks). */
#define stmmac_init_rx_desc(__priv, __args...) \
	stmmac_do_void_callback(__priv, desc, init_rx_desc, __args)
#define stmmac_init_tx_desc(__priv, __args...) \
	stmmac_do_void_callback(__priv, desc, init_tx_desc, __args)
#define stmmac_prepare_tx_desc(__priv, __args...) \
	stmmac_do_void_callback(__priv, desc, prepare_tx_desc, __args)
#define stmmac_prepare_tso_tx_desc(__priv, __args...) \
	stmmac_do_void_callback(__priv, desc, prepare_tso_tx_desc, __args)
#define stmmac_set_tx_owner(__priv, __args...) \
	stmmac_do_void_callback(__priv, desc, set_tx_owner, __args)
#define stmmac_get_tx_owner(__priv, __args...) \
	stmmac_do_callback(__priv, desc, get_tx_owner, __args)
#define stmmac_release_tx_desc(__priv, __args...) \
	stmmac_do_void_callback(__priv, desc, release_tx_desc, __args)
#define stmmac_set_tx_ic(__priv, __args...) \
	stmmac_do_void_callback(__priv, desc, set_tx_ic, __args)
#define stmmac_get_tx_ls(__priv, __args...) \
	stmmac_do_callback(__priv, desc, get_tx_ls, __args)
#define stmmac_get_rx_vlan_tci(__priv, __args...) \
	stmmac_do_callback(__priv, desc, get_rx_vlan_tci, __args)
#define stmmac_get_rx_vlan_valid(__priv, __args...) \
	stmmac_do_callback(__priv, desc, get_rx_vlan_valid, __args)
#define stmmac_tx_status(__priv, __args...) \
	stmmac_do_callback(__priv, desc, tx_status, __args)
#define stmmac_get_tx_len(__priv, __args...) \
	stmmac_do_callback(__priv, desc, get_tx_len, __args)
#define stmmac_set_rx_owner(__priv, __args...) \
	stmmac_do_void_callback(__priv, desc, set_rx_owner, __args)
#define stmmac_get_rx_frame_len(__priv, __args...) \
	stmmac_do_callback(__priv, desc, get_rx_frame_len, __args)
#define stmmac_rx_status(__priv, __args...) \
	stmmac_do_callback(__priv, desc, rx_status, __args)
#define stmmac_rx_extended_status(__priv, __args...) \
	stmmac_do_void_callback(__priv, desc, rx_extended_status, __args)
#define stmmac_enable_tx_timestamp(__priv, __args...) \
	stmmac_do_void_callback(__priv, desc, enable_tx_timestamp, __args)
#define stmmac_get_tx_timestamp_status(__priv, __args...) \
	stmmac_do_callback(__priv, desc, get_tx_timestamp_status, __args)
#define stmmac_get_timestamp(__priv, __args...) \
	stmmac_do_void_callback(__priv, desc, get_timestamp, __args)
#define stmmac_get_rx_timestamp_status(__priv, __args...) \
	stmmac_do_callback(__priv, desc, get_rx_timestamp_status, __args)
#define stmmac_display_ring(__priv, __args...) \
	stmmac_do_void_callback(__priv, desc, display_ring, __args)
#define stmmac_set_mss(__priv, __args...) \
	stmmac_do_void_callback(__priv, desc, set_mss, __args)
#define stmmac_set_desc_addr(__priv, __args...) \
	stmmac_do_void_callback(__priv, desc, set_addr, __args)
#define stmmac_clear_desc(__priv, __args...) \
	stmmac_do_void_callback(__priv, desc, clear, __args)
#define stmmac_get_rx_hash(__priv, __args...) \
	stmmac_do_callback(__priv, desc, get_rx_hash, __args)
#define stmmac_get_rx_header_len(__priv, __args...) \
	stmmac_do_void_callback(__priv, desc, get_rx_header_len, __args)
#define stmmac_set_desc_sec_addr(__priv, __args...) \
	stmmac_do_void_callback(__priv, desc, set_sec_addr, __args)
#define stmmac_set_desc_sarc(__priv, __args...) \
	stmmac_do_void_callback(__priv, desc, set_sarc, __args)
#define stmmac_set_desc_vlan_tag(__priv, __args...) \
	stmmac_do_void_callback(__priv, desc, set_vlan_tag, __args)
#define stmmac_set_desc_vlan(__priv, __args...) \
	stmmac_do_void_callback(__priv, desc, set_vlan, __args)
#define stmmac_set_desc_tbs(__priv, __args...) \
	stmmac_do_void_callback(__priv, desc, set_tbs, __args)
|
2018-04-16 16:08:12 +01:00
|
|
|
|
2018-04-16 16:08:13 +01:00
|
|
|
/* Forward declarations for the DMA callback signatures below. */
struct stmmac_dma_cfg;
struct dma_features;
|
|
|
|
|
|
|
|
/* Specific DMA helpers */
|
|
|
|
struct stmmac_dma_ops {
|
|
|
|
/* DMA core initialization */
|
|
|
|
int (*reset)(void __iomem *ioaddr);
|
|
|
|
void (*init)(void __iomem *ioaddr, struct stmmac_dma_cfg *dma_cfg,
|
2018-05-18 14:56:05 +01:00
|
|
|
int atds);
|
2023-04-11 15:04:05 -05:00
|
|
|
void (*init_chan)(struct stmmac_priv *priv, void __iomem *ioaddr,
|
2018-04-16 16:08:13 +01:00
|
|
|
struct stmmac_dma_cfg *dma_cfg, u32 chan);
|
2023-04-11 15:04:05 -05:00
|
|
|
void (*init_rx_chan)(struct stmmac_priv *priv, void __iomem *ioaddr,
|
2018-04-16 16:08:13 +01:00
|
|
|
struct stmmac_dma_cfg *dma_cfg,
|
2019-07-09 10:02:59 +02:00
|
|
|
dma_addr_t phy, u32 chan);
|
2023-04-11 15:04:05 -05:00
|
|
|
void (*init_tx_chan)(struct stmmac_priv *priv, void __iomem *ioaddr,
|
2018-04-16 16:08:13 +01:00
|
|
|
struct stmmac_dma_cfg *dma_cfg,
|
2019-07-09 10:02:59 +02:00
|
|
|
dma_addr_t phy, u32 chan);
|
2018-04-16 16:08:13 +01:00
|
|
|
/* Configure the AXI Bus Mode Register */
|
|
|
|
void (*axi)(void __iomem *ioaddr, struct stmmac_axi *axi);
|
|
|
|
/* Dump DMA registers */
|
2023-04-11 15:04:05 -05:00
|
|
|
void (*dump_regs)(struct stmmac_priv *priv, void __iomem *ioaddr,
|
|
|
|
u32 *reg_space);
|
|
|
|
void (*dma_rx_mode)(struct stmmac_priv *priv, void __iomem *ioaddr,
|
|
|
|
int mode, u32 channel,
|
2018-04-16 16:08:13 +01:00
|
|
|
int fifosz, u8 qmode);
|
2023-04-11 15:04:05 -05:00
|
|
|
void (*dma_tx_mode)(struct stmmac_priv *priv, void __iomem *ioaddr,
|
|
|
|
int mode, u32 channel, int fifosz, u8 qmode);
|
2018-04-16 16:08:13 +01:00
|
|
|
/* To track extra statistic (if supported) */
|
net: stmmac: use per-queue 64 bit statistics where necessary
Currently, there are two major issues with stmmac driver statistics
First of all, statistics in stmmac_extra_stats, stmmac_rxq_stats
and stmmac_txq_stats are 32 bit variables on 32 bit platforms. This
can cause some stats to overflow after several minutes of
high traffic, for example rx_pkt_n, tx_pkt_n and so on.
Secondly, if HW supports multiqueues, there are frequent cacheline
ping pongs on some driver statistic vars, for example, normal_irq_n,
tx_pkt_n and so on. What's more, frequent cacheline ping pongs on
normal_irq_n happens in ISR, this makes the situation worse.
To improve the driver, we convert those statistics to 64 bit, implement
ndo_get_stats64 and update .get_ethtool_stats implementation
accordingly. We also use per-queue statistics where necessary to remove
the cacheline ping pongs as much as possible to make multiqueue
operations faster. Those statistics which are not possible to overflow
and not frequently updated are kept as is.
Signed-off-by: Jisheng Zhang <jszhang@kernel.org>
Link: https://lore.kernel.org/r/20230717160630.1892-3-jszhang@kernel.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
2023-07-18 00:06:30 +08:00
|
|
|
void (*dma_diagnostic_fr)(struct stmmac_extra_stats *x,
|
2023-04-11 15:04:04 -05:00
|
|
|
void __iomem *ioaddr);
|
2018-04-16 16:08:13 +01:00
|
|
|
void (*enable_dma_transmission) (void __iomem *ioaddr);
|
2023-04-11 15:04:05 -05:00
|
|
|
void (*enable_dma_irq)(struct stmmac_priv *priv, void __iomem *ioaddr,
|
|
|
|
u32 chan, bool rx, bool tx);
|
|
|
|
void (*disable_dma_irq)(struct stmmac_priv *priv, void __iomem *ioaddr,
|
|
|
|
u32 chan, bool rx, bool tx);
|
|
|
|
void (*start_tx)(struct stmmac_priv *priv, void __iomem *ioaddr,
|
|
|
|
u32 chan);
|
|
|
|
void (*stop_tx)(struct stmmac_priv *priv, void __iomem *ioaddr,
|
|
|
|
u32 chan);
|
|
|
|
void (*start_rx)(struct stmmac_priv *priv, void __iomem *ioaddr,
|
|
|
|
u32 chan);
|
|
|
|
void (*stop_rx)(struct stmmac_priv *priv, void __iomem *ioaddr,
|
|
|
|
u32 chan);
|
|
|
|
int (*dma_interrupt)(struct stmmac_priv *priv, void __iomem *ioaddr,
|
|
|
|
struct stmmac_extra_stats *x, u32 chan, u32 dir);
|
2018-04-16 16:08:13 +01:00
|
|
|
/* If supported then get the optional core features */
|
2021-10-08 12:34:37 +02:00
|
|
|
int (*get_hw_feature)(void __iomem *ioaddr,
|
|
|
|
struct dma_features *dma_cap);
|
2018-04-16 16:08:13 +01:00
|
|
|
/* Program the HW RX Watchdog */
|
2023-04-11 15:04:05 -05:00
|
|
|
void (*rx_watchdog)(struct stmmac_priv *priv, void __iomem *ioaddr,
|
|
|
|
u32 riwt, u32 queue);
|
|
|
|
void (*set_tx_ring_len)(struct stmmac_priv *priv, void __iomem *ioaddr,
|
|
|
|
u32 len, u32 chan);
|
|
|
|
void (*set_rx_ring_len)(struct stmmac_priv *priv, void __iomem *ioaddr,
|
|
|
|
u32 len, u32 chan);
|
|
|
|
void (*set_rx_tail_ptr)(struct stmmac_priv *priv, void __iomem *ioaddr,
|
|
|
|
u32 tail_ptr, u32 chan);
|
|
|
|
void (*set_tx_tail_ptr)(struct stmmac_priv *priv, void __iomem *ioaddr,
|
|
|
|
u32 tail_ptr, u32 chan);
|
|
|
|
void (*enable_tso)(struct stmmac_priv *priv, void __iomem *ioaddr,
|
|
|
|
bool en, u32 chan);
|
|
|
|
void (*qmode)(struct stmmac_priv *priv, void __iomem *ioaddr,
|
|
|
|
u32 channel, u8 qmode);
|
|
|
|
void (*set_bfsize)(struct stmmac_priv *priv, void __iomem *ioaddr,
|
|
|
|
int bfsize, u32 chan);
|
|
|
|
void (*enable_sph)(struct stmmac_priv *priv, void __iomem *ioaddr,
|
|
|
|
bool en, u32 chan);
|
|
|
|
int (*enable_tbs)(struct stmmac_priv *priv, void __iomem *ioaddr,
|
|
|
|
bool en, u32 chan);
|
2018-04-16 16:08:13 +01:00
|
|
|
};
|
|
|
|
|
|
|
|
/* Wrappers dispatching into (__priv)->hw->dma. Callbacks that take a
 * struct stmmac_priv * as first parameter get __priv forwarded
 * explicitly in addition to being used for the ops lookup. */
#define stmmac_dma_init(__priv, __args...) \
	stmmac_do_void_callback(__priv, dma, init, __args)
#define stmmac_init_chan(__priv, __args...) \
	stmmac_do_void_callback(__priv, dma, init_chan, __priv, __args)
#define stmmac_init_rx_chan(__priv, __args...) \
	stmmac_do_void_callback(__priv, dma, init_rx_chan, __priv, __args)
#define stmmac_init_tx_chan(__priv, __args...) \
	stmmac_do_void_callback(__priv, dma, init_tx_chan, __priv, __args)
#define stmmac_axi(__priv, __args...) \
	stmmac_do_void_callback(__priv, dma, axi, __args)
#define stmmac_dump_dma_regs(__priv, __args...) \
	stmmac_do_void_callback(__priv, dma, dump_regs, __priv, __args)
#define stmmac_dma_rx_mode(__priv, __args...) \
	stmmac_do_void_callback(__priv, dma, dma_rx_mode, __priv, __args)
#define stmmac_dma_tx_mode(__priv, __args...) \
	stmmac_do_void_callback(__priv, dma, dma_tx_mode, __priv, __args)
#define stmmac_dma_diagnostic_fr(__priv, __args...) \
	stmmac_do_void_callback(__priv, dma, dma_diagnostic_fr, __args)
#define stmmac_enable_dma_transmission(__priv, __args...) \
	stmmac_do_void_callback(__priv, dma, enable_dma_transmission, __args)
#define stmmac_enable_dma_irq(__priv, __args...) \
	stmmac_do_void_callback(__priv, dma, enable_dma_irq, __priv, __args)
#define stmmac_disable_dma_irq(__priv, __args...) \
	stmmac_do_void_callback(__priv, dma, disable_dma_irq, __priv, __args)
#define stmmac_start_tx(__priv, __args...) \
	stmmac_do_void_callback(__priv, dma, start_tx, __priv, __args)
#define stmmac_stop_tx(__priv, __args...) \
	stmmac_do_void_callback(__priv, dma, stop_tx, __priv, __args)
#define stmmac_start_rx(__priv, __args...) \
	stmmac_do_void_callback(__priv, dma, start_rx, __priv, __args)
#define stmmac_stop_rx(__priv, __args...) \
	stmmac_do_void_callback(__priv, dma, stop_rx, __priv, __args)
#define stmmac_dma_interrupt_status(__priv, __args...) \
	stmmac_do_callback(__priv, dma, dma_interrupt, __priv, __args)
#define stmmac_get_hw_feature(__priv, __args...) \
	stmmac_do_callback(__priv, dma, get_hw_feature, __args)
#define stmmac_rx_watchdog(__priv, __args...) \
	stmmac_do_void_callback(__priv, dma, rx_watchdog, __priv, __args)
#define stmmac_set_tx_ring_len(__priv, __args...) \
	stmmac_do_void_callback(__priv, dma, set_tx_ring_len, __priv, __args)
#define stmmac_set_rx_ring_len(__priv, __args...) \
	stmmac_do_void_callback(__priv, dma, set_rx_ring_len, __priv, __args)
#define stmmac_set_rx_tail_ptr(__priv, __args...) \
	stmmac_do_void_callback(__priv, dma, set_rx_tail_ptr, __priv, __args)
#define stmmac_set_tx_tail_ptr(__priv, __args...) \
	stmmac_do_void_callback(__priv, dma, set_tx_tail_ptr, __priv, __args)
#define stmmac_enable_tso(__priv, __args...) \
	stmmac_do_void_callback(__priv, dma, enable_tso, __priv, __args)
#define stmmac_dma_qmode(__priv, __args...) \
	stmmac_do_void_callback(__priv, dma, qmode, __priv, __args)
#define stmmac_set_dma_bfsize(__priv, __args...) \
	stmmac_do_void_callback(__priv, dma, set_bfsize, __priv, __args)
#define stmmac_enable_sph(__priv, __args...) \
	stmmac_do_void_callback(__priv, dma, enable_sph, __priv, __args)
#define stmmac_enable_tbs(__priv, __args...) \
	stmmac_do_callback(__priv, dma, enable_tbs, __priv, __args)
|
2018-04-16 16:08:13 +01:00
|
|
|
|
2018-04-16 16:08:14 +01:00
|
|
|
/* Forward declarations for the MAC core callback signatures below. */
struct mac_device_info;
struct net_device;
struct rgmii_adv;
struct stmmac_tc_entry;
struct stmmac_pps_cfg;
struct stmmac_rss;
struct stmmac_est;
|
2018-04-16 16:08:14 +01:00
|
|
|
|
|
|
|
/* Helpers to program the MAC core */
|
|
|
|
struct stmmac_ops {
|
|
|
|
/* MAC core initialization */
|
|
|
|
void (*core_init)(struct mac_device_info *hw, struct net_device *dev);
|
2024-04-19 12:03:05 +03:00
|
|
|
/* Update MAC capabilities */
|
|
|
|
void (*update_caps)(struct stmmac_priv *priv);
|
2018-04-16 16:08:14 +01:00
|
|
|
/* Enable the MAC RX/TX */
|
|
|
|
void (*set_mac)(void __iomem *ioaddr, bool enable);
|
|
|
|
/* Enable and verify that the IPC module is supported */
|
|
|
|
int (*rx_ipc)(struct mac_device_info *hw);
|
|
|
|
/* Enable RX Queues */
|
|
|
|
void (*rx_queue_enable)(struct mac_device_info *hw, u8 mode, u32 queue);
|
|
|
|
/* RX Queues Priority */
|
|
|
|
void (*rx_queue_prio)(struct mac_device_info *hw, u32 prio, u32 queue);
|
|
|
|
/* TX Queues Priority */
|
|
|
|
void (*tx_queue_prio)(struct mac_device_info *hw, u32 prio, u32 queue);
|
|
|
|
/* RX Queues Routing */
|
|
|
|
void (*rx_queue_routing)(struct mac_device_info *hw, u8 packet,
|
|
|
|
u32 queue);
|
|
|
|
/* Program RX Algorithms */
|
|
|
|
void (*prog_mtl_rx_algorithms)(struct mac_device_info *hw, u32 rx_alg);
|
|
|
|
/* Program TX Algorithms */
|
|
|
|
void (*prog_mtl_tx_algorithms)(struct mac_device_info *hw, u32 tx_alg);
|
|
|
|
/* Set MTL TX queues weight */
|
2023-04-11 15:04:05 -05:00
|
|
|
void (*set_mtl_tx_queue_weight)(struct stmmac_priv *priv,
|
|
|
|
struct mac_device_info *hw,
|
2018-04-16 16:08:14 +01:00
|
|
|
u32 weight, u32 queue);
|
|
|
|
/* RX MTL queue to RX dma mapping */
|
|
|
|
void (*map_mtl_to_dma)(struct mac_device_info *hw, u32 queue, u32 chan);
|
|
|
|
/* Configure AV Algorithm */
|
2023-04-11 15:04:05 -05:00
|
|
|
void (*config_cbs)(struct stmmac_priv *priv, struct mac_device_info *hw,
|
|
|
|
u32 send_slope, u32 idle_slope, u32 high_credit,
|
|
|
|
u32 low_credit, u32 queue);
|
2018-04-16 16:08:14 +01:00
|
|
|
/* Dump MAC registers */
|
|
|
|
void (*dump_regs)(struct mac_device_info *hw, u32 *reg_space);
|
|
|
|
/* Handle extra events on specific interrupts hw dependent */
|
|
|
|
int (*host_irq_status)(struct mac_device_info *hw,
|
|
|
|
struct stmmac_extra_stats *x);
|
|
|
|
/* Handle MTL interrupts */
|
2023-04-11 15:04:05 -05:00
|
|
|
int (*host_mtl_irq_status)(struct stmmac_priv *priv,
|
|
|
|
struct mac_device_info *hw, u32 chan);
|
2018-04-16 16:08:14 +01:00
|
|
|
/* Multicast filter setting */
|
|
|
|
void (*set_filter)(struct mac_device_info *hw, struct net_device *dev);
|
|
|
|
/* Flow control setting */
|
|
|
|
void (*flow_ctrl)(struct mac_device_info *hw, unsigned int duplex,
|
|
|
|
unsigned int fc, unsigned int pause_time, u32 tx_cnt);
|
|
|
|
/* Set power management mode (e.g. magic frame) */
|
|
|
|
void (*pmt)(struct mac_device_info *hw, unsigned long mode);
|
|
|
|
/* Set/Get Unicast MAC addresses */
|
2021-10-14 07:24:31 -07:00
|
|
|
void (*set_umac_addr)(struct mac_device_info *hw,
|
|
|
|
const unsigned char *addr,
|
2018-04-16 16:08:14 +01:00
|
|
|
unsigned int reg_n);
|
|
|
|
void (*get_umac_addr)(struct mac_device_info *hw, unsigned char *addr,
|
|
|
|
unsigned int reg_n);
|
|
|
|
void (*set_eee_mode)(struct mac_device_info *hw,
|
|
|
|
bool en_tx_lpi_clockgating);
|
|
|
|
void (*reset_eee_mode)(struct mac_device_info *hw);
|
2020-10-28 00:00:51 +08:00
|
|
|
void (*set_eee_lpi_entry_timer)(struct mac_device_info *hw, int et);
|
2018-04-16 16:08:14 +01:00
|
|
|
void (*set_eee_timer)(struct mac_device_info *hw, int ls, int tw);
|
|
|
|
void (*set_eee_pls)(struct mac_device_info *hw, int link);
|
2023-04-11 15:04:05 -05:00
|
|
|
void (*debug)(struct stmmac_priv *priv, void __iomem *ioaddr,
|
|
|
|
struct stmmac_extra_stats *x, u32 rx_queues,
|
|
|
|
u32 tx_queues);
|
2018-04-16 16:08:14 +01:00
|
|
|
/* PCS calls */
|
|
|
|
void (*pcs_ctrl_ane)(void __iomem *ioaddr, bool ane, bool srgmi_ral,
|
|
|
|
bool loopback);
|
|
|
|
void (*pcs_rane)(void __iomem *ioaddr, bool restart);
|
|
|
|
void (*pcs_get_adv_lp)(void __iomem *ioaddr, struct rgmii_adv *adv);
|
|
|
|
/* Safety Features */
|
2021-06-01 21:52:35 +08:00
|
|
|
int (*safety_feat_config)(void __iomem *ioaddr, unsigned int asp,
|
|
|
|
struct stmmac_safety_feature_cfg *safety_cfg);
|
2018-04-16 16:08:14 +01:00
|
|
|
int (*safety_feat_irq_status)(struct net_device *ndev,
|
|
|
|
void __iomem *ioaddr, unsigned int asp,
|
|
|
|
struct stmmac_safety_stats *stats);
|
|
|
|
int (*safety_feat_dump)(struct stmmac_safety_stats *stats,
|
|
|
|
int index, unsigned long *count, const char **desc);
|
2018-05-04 10:01:38 +01:00
|
|
|
/* Flexible RX Parser */
|
|
|
|
int (*rxp_config)(void __iomem *ioaddr, struct stmmac_tc_entry *entries,
|
|
|
|
unsigned int count);
|
2018-05-31 18:01:27 +01:00
|
|
|
/* Flexible PPS */
|
|
|
|
int (*flex_pps_config)(void __iomem *ioaddr, int index,
|
|
|
|
struct stmmac_pps_cfg *cfg, bool enable,
|
|
|
|
u32 sub_second_inc, u32 systime_flags);
|
2019-05-24 10:20:09 +02:00
|
|
|
/* Loopback for selftests */
|
|
|
|
void (*set_mac_loopback)(void __iomem *ioaddr, bool enable);
|
2019-08-07 10:03:12 +02:00
|
|
|
/* RSS */
|
|
|
|
int (*rss_configure)(struct mac_device_info *hw,
|
|
|
|
struct stmmac_rss *cfg, u32 num_rxq);
|
2019-08-07 10:03:14 +02:00
|
|
|
/* VLAN */
|
|
|
|
void (*update_vlan_hash)(struct mac_device_info *hw, u32 hash,
|
2019-11-11 15:42:34 +01:00
|
|
|
__le16 perfect_match, bool is_double);
|
2019-08-17 20:54:50 +02:00
|
|
|
void (*enable_vlan)(struct mac_device_info *hw, u32 type);
|
2023-11-21 13:38:42 +08:00
|
|
|
void (*rx_hw_vlan)(struct mac_device_info *hw, struct dma_desc *rx_desc,
|
|
|
|
struct sk_buff *skb);
|
|
|
|
void (*set_hw_vlan_mode)(struct mac_device_info *hw);
|
2020-03-30 23:53:57 +08:00
|
|
|
int (*add_hw_vlan_rx_fltr)(struct net_device *dev,
|
|
|
|
struct mac_device_info *hw,
|
|
|
|
__be16 proto, u16 vid);
|
|
|
|
int (*del_hw_vlan_rx_fltr)(struct net_device *dev,
|
|
|
|
struct mac_device_info *hw,
|
|
|
|
__be16 proto, u16 vid);
|
|
|
|
void (*restore_hw_vlan_rx_fltr)(struct net_device *dev,
|
|
|
|
struct mac_device_info *hw);
|
2019-08-17 20:54:40 +02:00
|
|
|
/* TX Timestamp */
|
|
|
|
int (*get_mac_tx_timestamp)(struct mac_device_info *hw, u64 *ts);
|
2019-08-17 20:54:47 +02:00
|
|
|
/* Source Address Insertion / Replacement */
|
|
|
|
void (*sarc_configure)(void __iomem *ioaddr, int val);
|
2019-09-04 15:16:56 +02:00
|
|
|
/* Filtering */
|
|
|
|
int (*config_l3_filter)(struct mac_device_info *hw, u32 filter_no,
|
|
|
|
bool en, bool ipv6, bool sa, bool inv,
|
|
|
|
u32 match);
|
|
|
|
int (*config_l4_filter)(struct mac_device_info *hw, u32 filter_no,
|
|
|
|
bool en, bool udp, bool sa, bool inv,
|
|
|
|
u32 match);
|
2019-09-04 15:16:58 +02:00
|
|
|
void (*set_arp_offload)(struct mac_device_info *hw, bool en, u32 addr);
|
2023-12-01 03:22:03 +00:00
|
|
|
void (*fpe_configure)(void __iomem *ioaddr, struct stmmac_fpe_cfg *cfg,
|
|
|
|
u32 num_txq, u32 num_rxq,
|
2019-12-18 11:33:08 +01:00
|
|
|
bool enable);
|
2021-03-24 17:07:42 +08:00
|
|
|
void (*fpe_send_mpacket)(void __iomem *ioaddr,
|
2023-12-01 03:22:03 +00:00
|
|
|
struct stmmac_fpe_cfg *cfg,
|
2021-03-24 17:07:42 +08:00
|
|
|
enum stmmac_mpacket_type type);
|
|
|
|
int (*fpe_irq_status)(void __iomem *ioaddr, struct net_device *dev);
|
2018-04-16 16:08:14 +01:00
|
|
|
};
|
|
|
|
|
|
|
|
/* Wrappers dispatching into (__priv)->hw->mac. */
#define stmmac_core_init(__priv, __args...) \
	stmmac_do_void_callback(__priv, mac, core_init, __args)
#define stmmac_mac_update_caps(__priv) \
	stmmac_do_void_callback(__priv, mac, update_caps, __priv)
#define stmmac_mac_set(__priv, __args...) \
	stmmac_do_void_callback(__priv, mac, set_mac, __args)
#define stmmac_rx_ipc(__priv, __args...) \
	stmmac_do_callback(__priv, mac, rx_ipc, __args)
#define stmmac_rx_queue_enable(__priv, __args...) \
	stmmac_do_void_callback(__priv, mac, rx_queue_enable, __args)
#define stmmac_rx_queue_prio(__priv, __args...) \
	stmmac_do_void_callback(__priv, mac, rx_queue_prio, __args)
#define stmmac_tx_queue_prio(__priv, __args...) \
	stmmac_do_void_callback(__priv, mac, tx_queue_prio, __args)
#define stmmac_rx_queue_routing(__priv, __args...) \
	stmmac_do_void_callback(__priv, mac, rx_queue_routing, __args)
#define stmmac_prog_mtl_rx_algorithms(__priv, __args...) \
	stmmac_do_void_callback(__priv, mac, prog_mtl_rx_algorithms, __args)
#define stmmac_prog_mtl_tx_algorithms(__priv, __args...) \
	stmmac_do_void_callback(__priv, mac, prog_mtl_tx_algorithms, __args)
#define stmmac_set_mtl_tx_queue_weight(__priv, __args...) \
	stmmac_do_void_callback(__priv, mac, set_mtl_tx_queue_weight, __priv, __args)
#define stmmac_map_mtl_to_dma(__priv, __args...) \
	stmmac_do_void_callback(__priv, mac, map_mtl_to_dma, __args)
#define stmmac_config_cbs(__priv, __args...) \
	stmmac_do_void_callback(__priv, mac, config_cbs, __priv, __args)
#define stmmac_dump_mac_regs(__priv, __args...) \
	stmmac_do_void_callback(__priv, mac, dump_regs, __args)
#define stmmac_host_irq_status(__priv, __args...) \
	stmmac_do_callback(__priv, mac, host_irq_status, __args)
#define stmmac_host_mtl_irq_status(__priv, __args...) \
	stmmac_do_callback(__priv, mac, host_mtl_irq_status, __priv, __args)
|
2018-04-16 16:08:14 +01:00
|
|
|
#define stmmac_set_filter(__priv, __args...) \
|
|
|
|
stmmac_do_void_callback(__priv, mac, set_filter, __args)
|
|
|
|
#define stmmac_flow_ctrl(__priv, __args...) \
|
|
|
|
stmmac_do_void_callback(__priv, mac, flow_ctrl, __args)
|
|
|
|
#define stmmac_pmt(__priv, __args...) \
|
|
|
|
stmmac_do_void_callback(__priv, mac, pmt, __args)
|
|
|
|
#define stmmac_set_umac_addr(__priv, __args...) \
|
|
|
|
stmmac_do_void_callback(__priv, mac, set_umac_addr, __args)
|
|
|
|
#define stmmac_get_umac_addr(__priv, __args...) \
|
|
|
|
stmmac_do_void_callback(__priv, mac, get_umac_addr, __args)
|
|
|
|
#define stmmac_set_eee_mode(__priv, __args...) \
|
|
|
|
stmmac_do_void_callback(__priv, mac, set_eee_mode, __args)
|
|
|
|
#define stmmac_reset_eee_mode(__priv, __args...) \
|
|
|
|
stmmac_do_void_callback(__priv, mac, reset_eee_mode, __args)
|
2020-10-28 00:00:51 +08:00
|
|
|
#define stmmac_set_eee_lpi_timer(__priv, __args...) \
|
|
|
|
stmmac_do_void_callback(__priv, mac, set_eee_lpi_entry_timer, __args)
|
2018-04-16 16:08:14 +01:00
|
|
|
#define stmmac_set_eee_timer(__priv, __args...) \
|
|
|
|
stmmac_do_void_callback(__priv, mac, set_eee_timer, __args)
|
|
|
|
#define stmmac_set_eee_pls(__priv, __args...) \
|
|
|
|
stmmac_do_void_callback(__priv, mac, set_eee_pls, __args)
|
|
|
|
#define stmmac_mac_debug(__priv, __args...) \
|
2023-04-11 15:04:05 -05:00
|
|
|
stmmac_do_void_callback(__priv, mac, debug, __priv, __args)
|
2018-04-16 16:08:14 +01:00
|
|
|
#define stmmac_pcs_ctrl_ane(__priv, __args...) \
|
|
|
|
stmmac_do_void_callback(__priv, mac, pcs_ctrl_ane, __args)
|
|
|
|
#define stmmac_pcs_rane(__priv, __args...) \
|
2023-04-11 15:04:05 -05:00
|
|
|
stmmac_do_void_callback(__priv, mac, pcs_rane, __priv, __args)
|
2018-04-16 16:08:14 +01:00
|
|
|
#define stmmac_pcs_get_adv_lp(__priv, __args...) \
|
|
|
|
stmmac_do_void_callback(__priv, mac, pcs_get_adv_lp, __args)
|
|
|
|
#define stmmac_safety_feat_config(__priv, __args...) \
|
|
|
|
stmmac_do_callback(__priv, mac, safety_feat_config, __args)
|
|
|
|
#define stmmac_safety_feat_irq_status(__priv, __args...) \
|
|
|
|
stmmac_do_callback(__priv, mac, safety_feat_irq_status, __args)
|
|
|
|
#define stmmac_safety_feat_dump(__priv, __args...) \
|
|
|
|
stmmac_do_callback(__priv, mac, safety_feat_dump, __args)
|
2018-05-04 10:01:38 +01:00
|
|
|
#define stmmac_rxp_config(__priv, __args...) \
|
|
|
|
stmmac_do_callback(__priv, mac, rxp_config, __args)
|
2018-05-31 18:01:27 +01:00
|
|
|
#define stmmac_flex_pps_config(__priv, __args...) \
|
|
|
|
stmmac_do_callback(__priv, mac, flex_pps_config, __args)
|
2019-05-24 10:20:09 +02:00
|
|
|
#define stmmac_set_mac_loopback(__priv, __args...) \
|
|
|
|
stmmac_do_void_callback(__priv, mac, set_mac_loopback, __args)
|
2019-08-07 10:03:12 +02:00
|
|
|
#define stmmac_rss_configure(__priv, __args...) \
|
|
|
|
stmmac_do_callback(__priv, mac, rss_configure, __args)
|
2019-08-07 10:03:14 +02:00
|
|
|
#define stmmac_update_vlan_hash(__priv, __args...) \
|
|
|
|
stmmac_do_void_callback(__priv, mac, update_vlan_hash, __args)
|
2019-08-17 20:54:50 +02:00
|
|
|
#define stmmac_enable_vlan(__priv, __args...) \
|
|
|
|
stmmac_do_void_callback(__priv, mac, enable_vlan, __args)
|
2023-11-21 13:38:42 +08:00
|
|
|
#define stmmac_rx_hw_vlan(__priv, __args...) \
|
|
|
|
stmmac_do_void_callback(__priv, mac, rx_hw_vlan, __args)
|
|
|
|
#define stmmac_set_hw_vlan_mode(__priv, __args...) \
|
|
|
|
stmmac_do_void_callback(__priv, mac, set_hw_vlan_mode, __args)
|
2020-03-30 23:53:57 +08:00
|
|
|
#define stmmac_add_hw_vlan_rx_fltr(__priv, __args...) \
|
|
|
|
stmmac_do_callback(__priv, mac, add_hw_vlan_rx_fltr, __args)
|
|
|
|
#define stmmac_del_hw_vlan_rx_fltr(__priv, __args...) \
|
|
|
|
stmmac_do_callback(__priv, mac, del_hw_vlan_rx_fltr, __args)
|
|
|
|
#define stmmac_restore_hw_vlan_rx_fltr(__priv, __args...) \
|
|
|
|
stmmac_do_void_callback(__priv, mac, restore_hw_vlan_rx_fltr, __args)
|
2019-08-17 20:54:40 +02:00
|
|
|
#define stmmac_get_mac_tx_timestamp(__priv, __args...) \
|
|
|
|
stmmac_do_callback(__priv, mac, get_mac_tx_timestamp, __args)
|
2019-08-17 20:54:47 +02:00
|
|
|
#define stmmac_sarc_configure(__priv, __args...) \
|
|
|
|
stmmac_do_void_callback(__priv, mac, sarc_configure, __args)
|
2019-09-04 15:16:56 +02:00
|
|
|
#define stmmac_config_l3_filter(__priv, __args...) \
|
|
|
|
stmmac_do_callback(__priv, mac, config_l3_filter, __args)
|
|
|
|
#define stmmac_config_l4_filter(__priv, __args...) \
|
|
|
|
stmmac_do_callback(__priv, mac, config_l4_filter, __args)
|
2019-09-04 15:16:58 +02:00
|
|
|
#define stmmac_set_arp_offload(__priv, __args...) \
|
|
|
|
stmmac_do_void_callback(__priv, mac, set_arp_offload, __args)
|
2019-12-18 11:33:08 +01:00
|
|
|
#define stmmac_fpe_configure(__priv, __args...) \
|
|
|
|
stmmac_do_void_callback(__priv, mac, fpe_configure, __args)
|
2021-03-24 17:07:42 +08:00
|
|
|
#define stmmac_fpe_send_mpacket(__priv, __args...) \
|
|
|
|
stmmac_do_void_callback(__priv, mac, fpe_send_mpacket, __args)
|
|
|
|
#define stmmac_fpe_irq_status(__priv, __args...) \
|
|
|
|
stmmac_do_callback(__priv, mac, fpe_irq_status, __args)
|
2018-04-16 16:08:14 +01:00
|
|
|
|
2018-04-16 16:08:15 +01:00
|
|
|
/* PTP and HW Timer helpers */
struct stmmac_hwtimestamp {
	/* Write the raw timestamp control word to the hardware. */
	void (*config_hw_tstamping) (void __iomem *ioaddr, u32 data);
	/* Program the sub-second increment for the given PTP clock rate;
	 * the computed increment is returned through *ssinc.
	 * gmac4 selects the GMAC4+ register layout variant.
	 */
	void (*config_sub_second_increment)(void __iomem *ioaddr, u32 ptp_clock,
					   int gmac4, u32 *ssinc);
	/* Initialize the system time counter to sec.nsec. */
	int (*init_systime) (void __iomem *ioaddr, u32 sec, u32 nsec);
	/* Load a new frequency-adjustment addend value. */
	int (*config_addend) (void __iomem *ioaddr, u32 addend);
	/* Add or subtract (add_sub) sec.nsec from the system time. */
	int (*adjust_systime) (void __iomem *ioaddr, u32 sec, u32 nsec,
			       int add_sub, int gmac4);
	/* Read the current system time into *systime. */
	void (*get_systime) (void __iomem *ioaddr, u64 *systime);
	/* Read the latched PTP time into *ptp_time. */
	void (*get_ptptime)(void __iomem *ioaddr, u64 *ptp_time);
	/* Handle a timestamp-related interrupt. */
	void (*timestamp_interrupt)(struct stmmac_priv *priv);
	/* Compensate for hardware timestamping latency. */
	void (*hwtstamp_correct_latency)(struct stmmac_priv *priv);
};
|
|
|
|
|
|
|
|
/* Invocation helpers for the struct stmmac_hwtimestamp (ptp) callbacks;
 * each guards against a missing ops table/callback via
 * stmmac_do_void_callback()/stmmac_do_callback().
 */
#define stmmac_config_hw_tstamping(__priv, __args...) \
	stmmac_do_void_callback(__priv, ptp, config_hw_tstamping, __args)
#define stmmac_config_sub_second_increment(__priv, __args...) \
	stmmac_do_void_callback(__priv, ptp, config_sub_second_increment, __args)
#define stmmac_init_systime(__priv, __args...) \
	stmmac_do_callback(__priv, ptp, init_systime, __args)
#define stmmac_config_addend(__priv, __args...) \
	stmmac_do_callback(__priv, ptp, config_addend, __args)
#define stmmac_adjust_systime(__priv, __args...) \
	stmmac_do_callback(__priv, ptp, adjust_systime, __args)
#define stmmac_get_systime(__priv, __args...) \
	stmmac_do_void_callback(__priv, ptp, get_systime, __args)
#define stmmac_get_ptptime(__priv, __args...) \
	stmmac_do_void_callback(__priv, ptp, get_ptptime, __args)
#define stmmac_timestamp_interrupt(__priv, __args...) \
	stmmac_do_void_callback(__priv, ptp, timestamp_interrupt, __args)
#define stmmac_hwtstamp_correct_latency(__priv, __args...) \
	stmmac_do_void_callback(__priv, ptp, hwtstamp_correct_latency, __args)
2023-04-11 15:04:04 -05:00
|
|
|
/* Queue types are only referenced by pointer here; full definitions live
 * in the driver proper.
 */
struct stmmac_tx_queue;
struct stmmac_rx_queue;

/* Helpers to manage the descriptors for chain and ring modes */
struct stmmac_mode_ops {
	/* Set up the descriptor area at phy_addr for 'size' entries;
	 * extend_desc selects extended descriptors.
	 */
	void (*init) (void *des, dma_addr_t phy_addr, unsigned int size,
		      unsigned int extend_desc);
	/* Return non-zero if a frame of 'len' needs jumbo handling. */
	unsigned int (*is_jumbo_frm) (int len, int ehn_desc);
	/* Map a jumbo frame across descriptors of tx_q. */
	int (*jumbo_frm)(struct stmmac_tx_queue *tx_q, struct sk_buff *skb,
			 int csum);
	/* Pick the 16KiB buffer size appropriate for 'mtu'. */
	int (*set_16kib_bfsize)(int mtu);
	/* Initialize / refill / clean the descriptor's third buffer
	 * pointer (mode-specific usage).
	 */
	void (*init_desc3)(struct dma_desc *p);
	void (*refill_desc3)(struct stmmac_rx_queue *rx_q, struct dma_desc *p);
	void (*clean_desc3)(struct stmmac_tx_queue *tx_q, struct dma_desc *p);
};
|
|
|
|
|
|
|
|
/* Invocation helpers for the struct stmmac_mode_ops (mode) callbacks. */
#define stmmac_mode_init(__priv, __args...) \
	stmmac_do_void_callback(__priv, mode, init, __args)
#define stmmac_is_jumbo_frm(__priv, __args...) \
	stmmac_do_callback(__priv, mode, is_jumbo_frm, __args)
#define stmmac_jumbo_frm(__priv, __args...) \
	stmmac_do_callback(__priv, mode, jumbo_frm, __args)
#define stmmac_set_16kib_bfsize(__priv, __args...) \
	stmmac_do_callback(__priv, mode, set_16kib_bfsize, __args)
#define stmmac_init_desc3(__priv, __args...) \
	stmmac_do_void_callback(__priv, mode, init_desc3, __args)
#define stmmac_refill_desc3(__priv, __args...) \
	stmmac_do_void_callback(__priv, mode, refill_desc3, __args)
#define stmmac_clean_desc3(__priv, __args...) \
	stmmac_do_void_callback(__priv, mode, clean_desc3, __args)
2018-05-04 10:01:38 +01:00
|
|
|
/* Forward declarations of the TC offload request types; only pointers are
 * used below, the full definitions come from the networking core
 * (presumably <net/pkt_cls.h> — confirm against includes in the .c files).
 */
struct tc_cls_u32_offload;
struct tc_cbs_qopt_offload;
struct flow_cls_offload;
struct tc_taprio_qopt_offload;
struct tc_etf_qopt_offload;
struct tc_query_caps_base;
|
2018-05-04 10:01:38 +01:00
|
|
|
|
|
|
|
/* TC (traffic control) offload callbacks; each setup_* hook receives the
 * qdisc/classifier offload descriptor and returns 0 or a negative errno.
 */
struct stmmac_tc_ops {
	/* One-time TC offload initialization for this device. */
	int (*init)(struct stmmac_priv *priv);
	/* Offload a cls_u32 classifier. */
	int (*setup_cls_u32)(struct stmmac_priv *priv,
			     struct tc_cls_u32_offload *cls);
	/* Offload a CBS (Credit Based Shaper) qdisc. */
	int (*setup_cbs)(struct stmmac_priv *priv,
			 struct tc_cbs_qopt_offload *qopt);
	/* Offload a flower classifier. */
	int (*setup_cls)(struct stmmac_priv *priv,
			 struct flow_cls_offload *cls);
	/* Offload a taprio (802.1Qbv time-aware) schedule. */
	int (*setup_taprio)(struct stmmac_priv *priv,
			    struct tc_taprio_qopt_offload *qopt);
	/* Offload an ETF (Earliest TxTime First) qdisc. */
	int (*setup_etf)(struct stmmac_priv *priv,
			 struct tc_etf_qopt_offload *qopt);
	/* Report TC offload capabilities (TC_QUERY_CAPS). */
	int (*query_caps)(struct stmmac_priv *priv,
			  struct tc_query_caps_base *base);
};
|
|
|
|
|
|
|
|
/* Invocation helpers for the struct stmmac_tc_ops (tc) callbacks. */
#define stmmac_tc_init(__priv, __args...) \
	stmmac_do_callback(__priv, tc, init, __args)
#define stmmac_tc_setup_cls_u32(__priv, __args...) \
	stmmac_do_callback(__priv, tc, setup_cls_u32, __args)
#define stmmac_tc_setup_cbs(__priv, __args...) \
	stmmac_do_callback(__priv, tc, setup_cbs, __args)
#define stmmac_tc_setup_cls(__priv, __args...) \
	stmmac_do_callback(__priv, tc, setup_cls, __args)
#define stmmac_tc_setup_taprio(__priv, __args...) \
	stmmac_do_callback(__priv, tc, setup_taprio, __args)
#define stmmac_tc_setup_etf(__priv, __args...) \
	stmmac_do_callback(__priv, tc, setup_etf, __args)
#define stmmac_tc_query_caps(__priv, __args...) \
	stmmac_do_callback(__priv, tc, query_caps, __args)
|
2018-04-23 09:05:15 +01:00
|
|
|
|
2019-05-24 10:20:15 +02:00
|
|
|
/* Counter block is referenced by pointer only; defined elsewhere. */
struct stmmac_counters;

/* MMC (MAC Management Counters) hardware statistics callbacks. */
struct stmmac_mmc_ops {
	/* Program the MMC control register ('mode' is the raw control word). */
	void (*ctrl)(void __iomem *ioaddr, unsigned int mode);
	/* Mask all MMC interrupts. */
	void (*intr_all_mask)(void __iomem *ioaddr);
	/* Read every hardware counter into *mmc. */
	void (*read)(void __iomem *ioaddr, struct stmmac_counters *mmc);
};
|
|
|
|
|
|
|
|
/* Invocation helpers for the struct stmmac_mmc_ops (mmc) callbacks. */
#define stmmac_mmc_ctrl(__priv, __args...) \
	stmmac_do_void_callback(__priv, mmc, ctrl, __args)
#define stmmac_mmc_intr_all_mask(__priv, __args...) \
	stmmac_do_void_callback(__priv, mmc, intr_all_mask, __args)
#define stmmac_mmc_read(__priv, __args...) \
	stmmac_do_void_callback(__priv, mmc, read, __args)
|
|
|
|
|
2023-12-01 13:52:51 +08:00
|
|
|
/* EST (Enhancements for Scheduled Traffic, 802.1Qbv) callbacks. */
struct stmmac_est_ops {
	/* Apply the EST gate-control configuration; ptp_rate is the PTP
	 * clock rate used to program the schedule. Returns 0 or -errno.
	 */
	int (*configure)(struct stmmac_priv *priv, struct stmmac_est *cfg,
			 unsigned int ptp_rate);
	/* Service an EST interrupt, updating the extra stats in *x;
	 * txqcnt is the number of TX queues.
	 */
	void (*irq_status)(struct stmmac_priv *priv, struct net_device *dev,
			   struct stmmac_extra_stats *x, u32 txqcnt);
};
|
|
|
|
|
|
|
|
/* Invocation helpers for the struct stmmac_est_ops (est) callbacks. */
#define stmmac_est_configure(__priv, __args...) \
	stmmac_do_callback(__priv, est, configure, __args)
#define stmmac_est_irq_status(__priv, __args...) \
	stmmac_do_void_callback(__priv, est, irq_status, __args)
|
|
|
|
|
2018-05-18 14:56:04 +01:00
|
|
|
/* Per-core register block offsets from the base I/O address. */
struct stmmac_regs_off {
	u32 ptp_off;	/* PTP/timestamping register block */
	u32 mmc_off;	/* MMC counter register block */
	u32 est_off;	/* EST register block */
};
|
|
|
|
|
2018-04-23 09:05:15 +01:00
|
|
|
/* Per-core ops tables, defined in the respective dwmac*/dwxgmac* files
 * and selected at probe time by stmmac_hwif_init().
 */
extern const struct stmmac_ops dwmac100_ops;
extern const struct stmmac_dma_ops dwmac100_dma_ops;
extern const struct stmmac_ops dwmac1000_ops;
extern const struct stmmac_dma_ops dwmac1000_dma_ops;
extern const struct stmmac_ops dwmac4_ops;
extern const struct stmmac_dma_ops dwmac4_dma_ops;
extern const struct stmmac_ops dwmac410_ops;
extern const struct stmmac_dma_ops dwmac410_dma_ops;
extern const struct stmmac_ops dwmac510_ops;
extern const struct stmmac_tc_ops dwmac510_tc_ops;
extern const struct stmmac_ops dwxgmac210_ops;
extern const struct stmmac_ops dwxlgmac2_ops;
extern const struct stmmac_dma_ops dwxgmac210_dma_ops;
extern const struct stmmac_desc_ops dwxgmac210_desc_ops;
extern const struct stmmac_mmc_ops dwmac_mmc_ops;
extern const struct stmmac_mmc_ops dwxgmac_mmc_ops;
extern const struct stmmac_est_ops dwmac510_est_ops;
|
2018-04-23 09:05:15 +01:00
|
|
|
|
|
|
|
/* Offsets of the hardware version register, used to identify the core. */
#define GMAC_VERSION 0x00000020 /* GMAC CORE Version */
#define GMAC4_VERSION 0x00000110 /* GMAC4+ CORE Version */
|
|
|
|
|
2023-04-03 17:23:01 -05:00
|
|
|
/* Reset the DMA engine at ioaddr; returns 0 or a negative errno. */
int stmmac_reset(struct stmmac_priv *priv, void __iomem *ioaddr);
/* Detect the core and bind the matching ops tables into priv->hw. */
int stmmac_hwif_init(struct stmmac_priv *priv);
|
|
|
|
|
2018-04-16 16:08:12 +01:00
|
|
|
#endif /* __STMMAC_HWIF_H__ */
|