linux/drivers/net/ethernet/stmicro/stmmac/common.h

/* SPDX-License-Identifier: GPL-2.0-only */
/*******************************************************************************
STMMAC Common Header File
Copyright (C) 2007-2009 STMicroelectronics Ltd
Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
*******************************************************************************/
#ifndef __COMMON_H__
#define __COMMON_H__
#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/stmmac.h>
#include <linux/phy.h>
#include <linux/pcs/pcs-xpcs.h>
#include <linux/module.h>
#if IS_ENABLED(CONFIG_VLAN_8021Q)
#define STMMAC_VLAN_TAG_USED
#include <linux/if_vlan.h>
#endif
#include "descs.h"
#include "hwif.h"
#include "mmc.h"
/* Synopsys Core versions */
#define DWMAC_CORE_3_40 0x34
#define DWMAC_CORE_3_50 0x35
#define DWMAC_CORE_3_70 0x37
#define DWMAC_CORE_4_00 0x40
#define DWMAC_CORE_4_10 0x41
#define DWMAC_CORE_5_00 0x50
#define DWMAC_CORE_5_10 0x51
#define DWMAC_CORE_5_20 0x52
#define DWXGMAC_CORE_2_10 0x21
#define DWXGMAC_CORE_2_20 0x22
#define DWXLGMAC_CORE_2_00 0x20
/* Device ID */
#define DWXGMAC_ID 0x76
#define DWXLGMAC_ID 0x27
#define STMMAC_CHAN0 0 /* Always supported and default for all chips */
/* TX and RX descriptor ring lengths; these must be powers of two.
 * A TX ring shorter than 64 descriptors may cause "transmit queue timed out" errors.
 * An RX ring shorter than 64 descriptors may cause inconsistent Rx chain errors.
 */
#define DMA_MIN_TX_SIZE 64
#define DMA_MAX_TX_SIZE 1024
#define DMA_DEFAULT_TX_SIZE 512
#define DMA_MIN_RX_SIZE 64
#define DMA_MAX_RX_SIZE 1024
#define DMA_DEFAULT_RX_SIZE 512
#define STMMAC_GET_ENTRY(x, size) ((x + 1) & (size - 1))
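/* Illustrative only: because the ring sizes above are powers of two,
 * STMMAC_GET_ENTRY() wraps the ring index with a cheap mask instead of a
 * modulo, e.g. with size == DMA_DEFAULT_TX_SIZE (512):
 *
 *    entry = STMMAC_GET_ENTRY(510, 512);    // (510 + 1) & 511 -> 511
 *    entry = STMMAC_GET_ENTRY(511, 512);    // (511 + 1) & 511 -> 0 (wrap)
 */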
#undef FRAME_FILTER_DEBUG
/* #define FRAME_FILTER_DEBUG */
struct stmmac_q_tx_stats {
u64_stats_t tx_bytes;
u64_stats_t tx_set_ic_bit;
u64_stats_t tx_tso_frames;
u64_stats_t tx_tso_nfrags;
};
struct stmmac_napi_tx_stats {
u64_stats_t tx_packets;
u64_stats_t tx_pkt_n;
u64_stats_t poll;
u64_stats_t tx_clean;
u64_stats_t tx_set_ic_bit;
};
struct stmmac_txq_stats {
/* Updates protected by tx queue lock. */
struct u64_stats_sync q_syncp;
struct stmmac_q_tx_stats q;
/* Updates protected by NAPI poll logic. */
struct u64_stats_sync napi_syncp;
struct stmmac_napi_tx_stats napi;
} ____cacheline_aligned_in_smp;
struct stmmac_napi_rx_stats {
u64_stats_t rx_bytes;
u64_stats_t rx_packets;
u64_stats_t rx_pkt_n;
u64_stats_t poll;
};
struct stmmac_rxq_stats {
/* Updates protected by NAPI poll logic. */
struct u64_stats_sync napi_syncp;
struct stmmac_napi_rx_stats napi;
} ____cacheline_aligned_in_smp;
/* Per-CPU updates, serialised by the fact that IRQs do not nest on a CPU. */
struct stmmac_pcpu_stats {
struct u64_stats_sync syncp;
u64_stats_t rx_normal_irq_n[MTL_MAX_RX_QUEUES];
u64_stats_t tx_normal_irq_n[MTL_MAX_TX_QUEUES];
};
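/* Illustrative sketch, not part of the driver API: how the writer side of
 * the counters above is expected to be serialised per context. Names not
 * defined in this file (txq_stats, pcpu_stats, pkts, chan) are hypothetical.
 *
 *    // NAPI poll context: napi_syncp covers the napi group
 *    u64_stats_update_begin(&txq_stats->napi_syncp);
 *    u64_stats_inc(&txq_stats->napi.tx_packets);
 *    u64_stats_add(&txq_stats->napi.tx_pkt_n, pkts);
 *    u64_stats_update_end(&txq_stats->napi_syncp);
 *
 *    // hard-IRQ context: per-CPU counters, IRQs do not nest on a CPU
 *    struct stmmac_pcpu_stats *stats = this_cpu_ptr(pcpu_stats);
 *
 *    u64_stats_update_begin(&stats->syncp);
 *    u64_stats_inc(&stats->rx_normal_irq_n[chan]);
 *    u64_stats_update_end(&stats->syncp);
 */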
/* Extra statistics and debug information exposed by ethtool */
struct stmmac_extra_stats {
/* Transmit errors */
unsigned long tx_underflow ____cacheline_aligned;
unsigned long tx_carrier;
unsigned long tx_losscarrier;
unsigned long vlan_tag;
unsigned long tx_deferred;
unsigned long tx_vlan;
unsigned long tx_jabber;
unsigned long tx_frame_flushed;
unsigned long tx_payload_error;
unsigned long tx_ip_header_error;
unsigned long tx_collision;
/* Receive errors */
unsigned long rx_desc;
unsigned long sa_filter_fail;
unsigned long overflow_error;
unsigned long ipc_csum_error;
unsigned long rx_collision;
unsigned long rx_crc_errors;
unsigned long dribbling_bit;
unsigned long rx_length;
unsigned long rx_mii;
unsigned long rx_multicast;
unsigned long rx_gmac_overflow;
unsigned long rx_watchdog;
unsigned long da_rx_filter_fail;
unsigned long sa_rx_filter_fail;
unsigned long rx_missed_cntr;
unsigned long rx_overflow_cntr;
unsigned long rx_vlan;
unsigned long rx_split_hdr_pkt_n;
/* Tx/Rx IRQ error info */
unsigned long tx_undeflow_irq;
unsigned long tx_process_stopped_irq;
unsigned long tx_jabber_irq;
unsigned long rx_overflow_irq;
unsigned long rx_buf_unav_irq;
unsigned long rx_process_stopped_irq;
unsigned long rx_watchdog_irq;
unsigned long tx_early_irq;
unsigned long fatal_bus_error_irq;
/* Tx/Rx IRQ Events */
unsigned long rx_early_irq;
unsigned long threshold;
unsigned long irq_receive_pmt_irq_n;
/* MMC info */
unsigned long mmc_tx_irq_n;
unsigned long mmc_rx_irq_n;
unsigned long mmc_rx_csum_offload_irq_n;
/* EEE */
unsigned long irq_tx_path_in_lpi_mode_n;
unsigned long irq_tx_path_exit_lpi_mode_n;
unsigned long irq_rx_path_in_lpi_mode_n;
unsigned long irq_rx_path_exit_lpi_mode_n;
unsigned long phy_eee_wakeup_error_n;
/* Extended RDES status */
unsigned long ip_hdr_err;
unsigned long ip_payload_err;
unsigned long ip_csum_bypassed;
unsigned long ipv4_pkt_rcvd;
unsigned long ipv6_pkt_rcvd;
unsigned long no_ptp_rx_msg_type_ext;
unsigned long ptp_rx_msg_type_sync;
unsigned long ptp_rx_msg_type_follow_up;
unsigned long ptp_rx_msg_type_delay_req;
unsigned long ptp_rx_msg_type_delay_resp;
unsigned long ptp_rx_msg_type_pdelay_req;
unsigned long ptp_rx_msg_type_pdelay_resp;
unsigned long ptp_rx_msg_type_pdelay_follow_up;
unsigned long ptp_rx_msg_type_announce;
unsigned long ptp_rx_msg_type_management;
unsigned long ptp_rx_msg_pkt_reserved_type;
unsigned long ptp_frame_type;
unsigned long ptp_ver;
unsigned long timestamp_dropped;
unsigned long av_pkt_rcvd;
unsigned long av_tagged_pkt_rcvd;
unsigned long vlan_tag_priority_val;
unsigned long l3_filter_match;
unsigned long l4_filter_match;
unsigned long l3_l4_filter_no_match;
/* PCS */
unsigned long irq_pcs_ane_n;
unsigned long irq_pcs_link_n;
unsigned long irq_rgmii_n;
unsigned long pcs_link;
unsigned long pcs_duplex;
unsigned long pcs_speed;
/* debug register */
unsigned long mtl_tx_status_fifo_full;
unsigned long mtl_tx_fifo_not_empty;
unsigned long mmtl_fifo_ctrl;
unsigned long mtl_tx_fifo_read_ctrl_write;
unsigned long mtl_tx_fifo_read_ctrl_wait;
unsigned long mtl_tx_fifo_read_ctrl_read;
unsigned long mtl_tx_fifo_read_ctrl_idle;
unsigned long mac_tx_in_pause;
unsigned long mac_tx_frame_ctrl_xfer;
unsigned long mac_tx_frame_ctrl_idle;
unsigned long mac_tx_frame_ctrl_wait;
unsigned long mac_tx_frame_ctrl_pause;
unsigned long mac_gmii_tx_proto_engine;
unsigned long mtl_rx_fifo_fill_level_full;
unsigned long mtl_rx_fifo_fill_above_thresh;
unsigned long mtl_rx_fifo_fill_below_thresh;
unsigned long mtl_rx_fifo_fill_level_empty;
unsigned long mtl_rx_fifo_read_ctrl_flush;
unsigned long mtl_rx_fifo_read_ctrl_read_data;
unsigned long mtl_rx_fifo_read_ctrl_status;
unsigned long mtl_rx_fifo_read_ctrl_idle;
unsigned long mtl_rx_fifo_ctrl_active;
unsigned long mac_rx_frame_ctrl_fifo;
unsigned long mac_gmii_rx_proto_engine;
/* EST */
unsigned long mtl_est_cgce;
unsigned long mtl_est_hlbs;
unsigned long mtl_est_hlbf;
unsigned long mtl_est_btre;
unsigned long mtl_est_btrlm;
unsigned long max_sdu_txq_drop[MTL_MAX_TX_QUEUES];
unsigned long mtl_est_txq_hlbf[MTL_MAX_TX_QUEUES];
/* per queue statistics */
struct stmmac_txq_stats txq_stats[MTL_MAX_TX_QUEUES];
struct stmmac_rxq_stats rxq_stats[MTL_MAX_RX_QUEUES];
struct stmmac_pcpu_stats __percpu *pcpu_stats;
unsigned long rx_dropped;
unsigned long rx_errors;
unsigned long tx_dropped;
unsigned long tx_errors;
};
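/* Illustrative sketch, not part of the driver API: a reader takes a
 * consistent snapshot of one u64_stats_sync group with the usual
 * fetch/retry loop (variable names are hypothetical):
 *
 *    unsigned int start;
 *    u64 tx_packets;
 *
 *    do {
 *        start = u64_stats_fetch_begin(&txq_stats->napi_syncp);
 *        tx_packets = u64_stats_read(&txq_stats->napi.tx_packets);
 *    } while (u64_stats_fetch_retry(&txq_stats->napi_syncp, start));
 */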
/* Safety Feature statistics exposed by ethtool */
struct stmmac_safety_stats {
unsigned long mac_errors[32];
unsigned long mtl_errors[32];
unsigned long dma_errors[32];
unsigned long dma_dpp_errors[32];
};
/* Number of fields in Safety Stats */
#define STMMAC_SAFETY_FEAT_SIZE \
(sizeof(struct stmmac_safety_stats) / sizeof(unsigned long))
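/* Illustrative only: since every member of struct stmmac_safety_stats is an
 * unsigned long, STMMAC_SAFETY_FEAT_SIZE lets the structure be walked as a
 * flat array of counters (variable names below are hypothetical):
 *
 *    unsigned long *cnt = (unsigned long *)&sstats;
 *    unsigned long total = 0;
 *    int i;
 *
 *    for (i = 0; i < STMMAC_SAFETY_FEAT_SIZE; i++)
 *        total += cnt[i];
 */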
/* CSR Frequency Access Defines */
#define CSR_F_35M 35000000
#define CSR_F_60M 60000000
#define CSR_F_100M 100000000
#define CSR_F_150M 150000000
#define CSR_F_250M 250000000
#define CSR_F_300M 300000000
#define CSR_F_500M 500000000
#define CSR_F_800M 800000000
#define MAC_CSR_H_FRQ_MASK 0x20
#define HASH_TABLE_SIZE 64
#define PAUSE_TIME 0xffff
/* Flow Control defines */
#define FLOW_OFF 0
#define FLOW_RX 1
#define FLOW_TX 2
#define FLOW_AUTO (FLOW_TX | FLOW_RX)
/* PCS defines */
#define STMMAC_PCS_RGMII (1 << 0)
#define STMMAC_PCS_SGMII (1 << 1)
#define SF_DMA_MODE 1 /* DMA STORE-AND-FORWARD Operation Mode */
/* DMA HW feature register fields */
#define DMA_HW_FEAT_MIISEL 0x00000001 /* 10/100 Mbps Support */
#define DMA_HW_FEAT_GMIISEL 0x00000002 /* 1000 Mbps Support */
#define DMA_HW_FEAT_HDSEL 0x00000004 /* Half-Duplex Support */
#define DMA_HW_FEAT_EXTHASHEN 0x00000008 /* Expanded DA Hash Filter */
#define DMA_HW_FEAT_HASHSEL 0x00000010 /* HASH Filter */
#define DMA_HW_FEAT_ADDMAC 0x00000020 /* Multiple MAC Addr Reg */
#define DMA_HW_FEAT_PCSSEL 0x00000040 /* PCS registers */
#define DMA_HW_FEAT_L3L4FLTREN 0x00000080 /* Layer 3 & Layer 4 Feature */
#define DMA_HW_FEAT_SMASEL 0x00000100 /* SMA(MDIO) Interface */
#define DMA_HW_FEAT_RWKSEL 0x00000200 /* PMT Remote Wakeup */
#define DMA_HW_FEAT_MGKSEL 0x00000400 /* PMT Magic Packet */
#define DMA_HW_FEAT_MMCSEL 0x00000800 /* RMON Module */
#define DMA_HW_FEAT_TSVER1SEL 0x00001000 /* Only IEEE 1588-2002 */
#define DMA_HW_FEAT_TSVER2SEL 0x00002000 /* IEEE 1588-2008 PTPv2 */
#define DMA_HW_FEAT_EEESEL 0x00004000 /* Energy Efficient Ethernet */
#define DMA_HW_FEAT_AVSEL 0x00008000 /* AV Feature */
#define DMA_HW_FEAT_TXCOESEL 0x00010000 /* Checksum Offload in Tx */
#define DMA_HW_FEAT_RXTYP1COE 0x00020000 /* IP COE (Type 1) in Rx */
#define DMA_HW_FEAT_RXTYP2COE 0x00040000 /* IP COE (Type 2) in Rx */
#define DMA_HW_FEAT_RXFIFOSIZE 0x00080000 /* Rx FIFO > 2048 Bytes */
#define DMA_HW_FEAT_RXCHCNT 0x00300000 /* No. additional Rx Channels */
#define DMA_HW_FEAT_TXCHCNT 0x00c00000 /* No. additional Tx Channels */
#define DMA_HW_FEAT_ENHDESSEL 0x01000000 /* Alternate Descriptor */
/* Timestamping with Internal System Time */
#define DMA_HW_FEAT_INTTSEN 0x02000000
#define DMA_HW_FEAT_FLEXIPPSEN 0x04000000 /* Flexible PPS Output */
#define DMA_HW_FEAT_SAVLANINS 0x08000000 /* Source Addr or VLAN */
#define DMA_HW_FEAT_ACTPHYIF 0x70000000 /* Active/selected PHY iface */
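/* Illustrative sketch: how a DMA HW feature word can be decoded into
 * struct dma_features (defined below) using the masks above. The register
 * offset (DMA_HW_FEATURE) lives in the core-specific headers; dma_cap and
 * ioaddr are hypothetical here.
 *
 *    u32 hw_cap = readl(ioaddr + DMA_HW_FEATURE);
 *
 *    dma_cap->mbps_10_100 = (hw_cap & DMA_HW_FEAT_MIISEL);
 *    dma_cap->mbps_1000 = (hw_cap & DMA_HW_FEAT_GMIISEL) >> 1;
 *    dma_cap->half_duplex = (hw_cap & DMA_HW_FEAT_HDSEL) >> 2;
 *    dma_cap->number_rx_channel = ((hw_cap & DMA_HW_FEAT_RXCHCNT) >> 20) + 1;
 *    dma_cap->enh_desc = (hw_cap & DMA_HW_FEAT_ENHDESSEL) >> 24;
 */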
#define DEFAULT_DMA_PBL 8
/* MSI defines */
#define STMMAC_MSI_VEC_MAX 32
/* PCS status and mask defines */
#define PCS_ANE_IRQ BIT(2) /* PCS Auto-Negotiation */
#define PCS_LINK_IRQ BIT(1) /* PCS Link */
#define PCS_RGSMIIIS_IRQ BIT(0) /* RGMII or SMII Interrupt */
/* Max/Min RI Watchdog Timer count value */
#define MAX_DMA_RIWT 0xff
#define MIN_DMA_RIWT 0x10
#define DEF_DMA_RIWT 0xa0
/* Tx coalesce parameters */
#define STMMAC_COAL_TX_TIMER 5000
#define STMMAC_MAX_COAL_TX_TICK 100000
#define STMMAC_TX_MAX_FRAMES 256
#define STMMAC_TX_FRAMES 25
#define STMMAC_RX_FRAMES 0
/* Packet types */
enum packets_types {
PACKET_AVCPQ = 0x1, /* AV Untagged Control packets */
PACKET_PTPQ = 0x2, /* PTP Packets */
PACKET_DCBCPQ = 0x3, /* DCB Control Packets */
PACKET_UPQ = 0x4, /* Untagged Packets */
PACKET_MCBCQ = 0x5, /* Multicast & Broadcast Packets */
};
/* Rx IPC status */
enum rx_frame_status {
good_frame = 0x0,
discard_frame = 0x1,
csum_none = 0x2,
llc_snap = 0x4,
dma_own = 0x8,
rx_not_ls = 0x10,
};
/* Tx status */
enum tx_frame_status {
tx_done = 0x0,
tx_not_ls = 0x1,
tx_err = 0x2,
tx_dma_own = 0x4,
tx_err_bump_tc = 0x8,
};
enum dma_irq_status {
tx_hard_error = 0x1,
tx_hard_error_bump_tc = 0x2,
handle_rx = 0x4,
handle_tx = 0x8,
};
enum dma_irq_dir {
DMA_DIR_RX = 0x1,
DMA_DIR_TX = 0x2,
DMA_DIR_RXTX = 0x3,
};
enum request_irq_err {
REQ_IRQ_ERR_ALL,
REQ_IRQ_ERR_TX,
REQ_IRQ_ERR_RX,
REQ_IRQ_ERR_SFTY,
REQ_IRQ_ERR_SFTY_UE,
REQ_IRQ_ERR_SFTY_CE,
REQ_IRQ_ERR_LPI,
REQ_IRQ_ERR_WOL,
REQ_IRQ_ERR_MAC,
REQ_IRQ_ERR_NO,
};
/* EEE and LPI defines */
#define CORE_IRQ_TX_PATH_IN_LPI_MODE (1 << 0)
#define CORE_IRQ_TX_PATH_EXIT_LPI_MODE (1 << 1)
#define CORE_IRQ_RX_PATH_IN_LPI_MODE (1 << 2)
#define CORE_IRQ_RX_PATH_EXIT_LPI_MODE (1 << 3)
/* FPE defines */
#define FPE_EVENT_UNKNOWN 0
#define FPE_EVENT_TRSP BIT(0)
#define FPE_EVENT_TVER BIT(1)
#define FPE_EVENT_RRSP BIT(2)
#define FPE_EVENT_RVER BIT(3)
#define CORE_IRQ_MTL_RX_OVERFLOW BIT(8)
/* DMA HW capabilities */
struct dma_features {
unsigned int mbps_10_100;
unsigned int mbps_1000;
unsigned int half_duplex;
unsigned int hash_filter;
unsigned int multi_addr;
unsigned int pcs;
unsigned int sma_mdio;
unsigned int pmt_remote_wake_up;
unsigned int pmt_magic_frame;
unsigned int rmon;
/* IEEE 1588-2002 */
unsigned int time_stamp;
/* IEEE 1588-2008 */
unsigned int atime_stamp;
/* 802.3az - Energy-Efficient Ethernet (EEE) */
unsigned int eee;
unsigned int av;
unsigned int hash_tb_sz;
unsigned int tsoen;
/* TX and RX csum */
unsigned int tx_coe;
unsigned int rx_coe;
unsigned int rx_coe_type1;
unsigned int rx_coe_type2;
unsigned int rxfifo_over_2048;
/* TX and RX number of channels */
unsigned int number_rx_channel;
unsigned int number_tx_channel;
/* TX and RX number of queues */
unsigned int number_rx_queues;
unsigned int number_tx_queues;
/* PPS output */
unsigned int pps_out_num;
/* Number of Traffic Classes */
unsigned int numtc;
/* DCB Feature Enable */
unsigned int dcben;
/* IEEE 1588 High Word Register Enable */
unsigned int advthword;
/* PTP Offload Enable */
unsigned int ptoen;
/* One-Step Timestamping Enable */
unsigned int osten;
/* Priority-Based Flow Control Enable */
unsigned int pfcen;
/* Alternate (enhanced) DESC mode */
unsigned int enh_desc;
/* TX and RX FIFO sizes */
unsigned int tx_fifo_size;
unsigned int rx_fifo_size;
/* Automotive Safety Package */
unsigned int asp;
/* RX Parser */
unsigned int frpsel;
unsigned int frpbs;
unsigned int frpes;
unsigned int addr64;
unsigned int host_dma_width;
unsigned int rssen;
unsigned int vlhash;
unsigned int sphen;
unsigned int vlins;
unsigned int dvlan;
unsigned int l3l4fnum;
unsigned int arpoffsel;
/* One Step for PTP over UDP/IP Feature Enable */
unsigned int pou_ost_en;
/* Tx Timestamp FIFO Depth */
unsigned int ttsfd;
/* Queue/Channel-Based VLAN tag insertion on Tx */
unsigned int cbtisel;
/* Supported Parallel Instruction Processor Engines */
unsigned int frppipe_num;
/* Number of Extended VLAN Tag Filters */
unsigned int nrvf_num;
/* TSN Features */
unsigned int estwid;
unsigned int estdep;
unsigned int estsel;
unsigned int fpesel;
unsigned int tbssel;
/* Number of DMA channels enabled for TBS */
unsigned int tbs_ch_num;
/* Per-Stream Filtering Enable */
unsigned int sgfsel;
/* Numbers of Auxiliary Snapshot Inputs */
unsigned int aux_snapshot_n;
/* Timestamp System Time Source */
unsigned int tssrc;
/* Enhanced DMA Enable */
unsigned int edma;
/* Different Descriptor Cache Enable */
unsigned int ediffc;
/* VxLAN/NVGRE Enable */
unsigned int vxn;
/* Debug Memory Interface Enable */
unsigned int dbgmem;
/* Number of Policing Counters */
unsigned int pcsel;
};
/* RX buffer size must be a multiple of 4/8/16 bytes */
#define BUF_SIZE_16KiB 16368
#define BUF_SIZE_8KiB 8188
#define BUF_SIZE_4KiB 4096
#define BUF_SIZE_2KiB 2048
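/* Illustrative sketch (helper name is hypothetical): picking an Rx DMA
 * buffer bucket for a given MTU from the sizes above, rounding up to the
 * next bucket that fits the frame:
 *
 *    static int example_pick_bfsize(int mtu)
 *    {
 *        if (mtu >= BUF_SIZE_8KiB)
 *            return BUF_SIZE_16KiB;
 *        if (mtu >= BUF_SIZE_4KiB)
 *            return BUF_SIZE_8KiB;
 *        if (mtu >= BUF_SIZE_2KiB)
 *            return BUF_SIZE_4KiB;
 *        return BUF_SIZE_2KiB;
 *    }
 */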
/* Power Down and WOL */
#define PMT_NOT_SUPPORTED 0
#define PMT_SUPPORTED 1
/* Common MAC defines */
#define MAC_CTRL_REG 0x00000000 /* MAC Control */
#define MAC_ENABLE_TX 0x00000008 /* Transmitter Enable */
#define MAC_ENABLE_RX 0x00000004 /* Receiver Enable */
/* Default LPI timers */
#define STMMAC_DEFAULT_LIT_LS 0x3E8
#define STMMAC_DEFAULT_TWT_LS 0x1E
#define STMMAC_ET_MAX 0xFFFFF
/* Common LPI register bits */
#define LPI_CTRL_STATUS_LPITCSE BIT(21) /* LPI Tx Clock Stop Enable, gmac4, xgmac2 only */
#define LPI_CTRL_STATUS_LPIATE BIT(20) /* LPI Timer Enable, gmac4 only */
#define LPI_CTRL_STATUS_LPITXA BIT(19) /* Enable LPI TX Automate */
#define LPI_CTRL_STATUS_PLSEN BIT(18) /* Enable PHY Link Status */
#define LPI_CTRL_STATUS_PLS BIT(17) /* PHY Link Status */
#define LPI_CTRL_STATUS_LPIEN BIT(16) /* LPI Enable */
#define LPI_CTRL_STATUS_RLPIST BIT(9) /* Receive LPI state, gmac1000 only? */
#define LPI_CTRL_STATUS_TLPIST BIT(8) /* Transmit LPI state, gmac1000 only? */
#define LPI_CTRL_STATUS_RLPIEX BIT(3) /* Receive LPI Exit */
#define LPI_CTRL_STATUS_RLPIEN BIT(2) /* Receive LPI Entry */
#define LPI_CTRL_STATUS_TLPIEX BIT(1) /* Transmit LPI Exit */
#define LPI_CTRL_STATUS_TLPIEN BIT(0) /* Transmit LPI Entry */
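/* Illustrative sketch: entering software-triggered LPI by setting the
 * enable and automate bits in the LPI control/status register. The register
 * offset is core-specific and not defined in this file; ioaddr and
 * lpi_ctrl_status_off are hypothetical.
 *
 *    u32 val = readl(ioaddr + lpi_ctrl_status_off);
 *
 *    val |= LPI_CTRL_STATUS_LPIEN | LPI_CTRL_STATUS_LPITXA;
 *    writel(val, ioaddr + lpi_ctrl_status_off);
 */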
#define STMMAC_CHAIN_MODE 0x1
#define STMMAC_RING_MODE 0x2
#define JUMBO_LEN 9000
/* Receive Side Scaling */
#define STMMAC_RSS_HASH_KEY_SIZE 40
#define STMMAC_RSS_MAX_TABLE_SIZE 256
/* VLAN */
#define STMMAC_VLAN_NONE 0x0
#define STMMAC_VLAN_REMOVE 0x1
#define STMMAC_VLAN_INSERT 0x2
#define STMMAC_VLAN_REPLACE 0x3
struct mac_device_info;
struct mac_link {
u32 caps;
u32 speed_mask;
u32 speed10;
u32 speed100;
u32 speed1000;
u32 speed2500;
u32 duplex;
struct {
u32 speed2500;
u32 speed5000;
u32 speed10000;
} xgmii;
struct {
u32 speed25000;
u32 speed40000;
u32 speed50000;
u32 speed100000;
} xlgmii;
};
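/* Illustrative sketch: a core-specific setup() callback is expected to fill
 * in the link capabilities and the control-register speed/duplex bits it
 * supports. The MAC_* capability flags come from phylink; the speed/duplex
 * values below are hypothetical placeholders for core-specific register bits.
 *
 *    mac->link.caps = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
 *                     MAC_10 | MAC_100 | MAC_1000;
 *    mac->link.duplex = CORE_SPECIFIC_DUPLEX_BIT;
 *    mac->link.speed10 = CORE_SPECIFIC_PS_BIT;
 *    mac->link.speed100 = CORE_SPECIFIC_PS_BIT | CORE_SPECIFIC_FES_BIT;
 *    mac->link.speed1000 = 0;
 */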
struct mii_regs {
unsigned int addr; /* MII Address */
unsigned int data; /* MII Data */
unsigned int addr_shift; /* MII address shift */
unsigned int reg_shift; /* MII reg shift */
unsigned int addr_mask; /* MII address mask */
unsigned int reg_mask; /* MII reg mask */
unsigned int clk_csr_shift;
unsigned int clk_csr_mask;
};
struct mac_device_info {
const struct stmmac_ops *mac;
const struct stmmac_desc_ops *desc;
const struct stmmac_dma_ops *dma;
const struct stmmac_mode_ops *mode;
const struct stmmac_hwtimestamp *ptp;
const struct stmmac_tc_ops *tc;
const struct stmmac_mmc_ops *mmc;
const struct stmmac_est_ops *est;
const struct stmmac_vlan_ops *vlan;
struct dw_xpcs *xpcs;
struct phylink_pcs *phylink_pcs;
struct mii_regs mii; /* MII register Addresses */
struct mac_link link;
void __iomem *pcsr; /* pointer to device CSRs */
unsigned int multicast_filter_bins;
unsigned int unicast_filter_entries;
unsigned int mcast_bits_log2;
unsigned int rx_csum;
unsigned int pcs;
unsigned int pmt;
unsigned int ps;
unsigned int xlgmac;
unsigned int num_vlan;
u32 vlan_filter[32];
bool vlan_fail_q_en;
u8 vlan_fail_q;
bool hw_vlan_en;
};
struct stmmac_rx_routing {
u32 reg_mask;
u32 reg_shift;
};
int dwmac100_setup(struct stmmac_priv *priv);
int dwmac1000_setup(struct stmmac_priv *priv);
int dwmac4_setup(struct stmmac_priv *priv);
int dwxgmac2_setup(struct stmmac_priv *priv);
int dwxlgmac2_setup(struct stmmac_priv *priv);
void stmmac_set_mac_addr(void __iomem *ioaddr, const u8 addr[6],
unsigned int high, unsigned int low);
void stmmac_get_mac_addr(void __iomem *ioaddr, unsigned char *addr,
unsigned int high, unsigned int low);
void stmmac_set_mac(void __iomem *ioaddr, bool enable);
void stmmac_dwmac4_set_mac_addr(void __iomem *ioaddr, const u8 addr[6],
unsigned int high, unsigned int low);
void stmmac_dwmac4_get_mac_addr(void __iomem *ioaddr, unsigned char *addr,
unsigned int high, unsigned int low);
void stmmac_dwmac4_set_mac(void __iomem *ioaddr, bool enable);
void dwmac_dma_flush_tx_fifo(void __iomem *ioaddr);
#endif /* __COMMON_H__ */