linux/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c

// SPDX-License-Identifier: GPL-2.0-only
/*******************************************************************************
STMMAC Ethtool support
Copyright (C) 2007-2009 STMicroelectronics Ltd
Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
*******************************************************************************/
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/mii.h>
#include <linux/phylink.h>
#include <linux/net_tstamp.h>
#include "stmmac.h"
#include "stmmac_fpe.h"
#include "dwmac_dma.h"
#include "dwxgmac2.h"
#define REG_SPACE_SIZE 0x1060
#define GMAC4_REG_SPACE_SIZE 0x116C
#define MAC100_ETHTOOL_NAME "st_mac100"
#define GMAC_ETHTOOL_NAME "st_gmac"
#define XGMAC_ETHTOOL_NAME "st_xgmac"
/* Same as DMA_CHAN_BASE_ADDR defined in dwmac4_dma.h
 *
 * It is here because dwmac_dma.h and dwmac4_dma.h cannot be included at the
 * same time due to the conflicting macro names.
 */
#define GMAC4_DMA_CHAN_BASE_ADDR 0x00001100
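/* Word offset within the ethtool register dump where the DMA registers are
 * copied, see stmmac_ethtool_gregs().
 */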
#define ETHTOOL_DMA_OFFSET 55
struct stmmac_stats {
char stat_string[ETH_GSTRING_LEN] __nonstring;
int sizeof_stat;
int stat_offset;
};
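/* Describe one extra-stats member: its name string, its size (so the dump
 * code knows whether to read back a u32 or a u64) and its byte offset
 * within struct stmmac_priv.
 */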
#define STMMAC_STAT(m) \
{ #m, sizeof_field(struct stmmac_extra_stats, m), \
offsetof(struct stmmac_priv, xstats.m)}
static const struct stmmac_stats stmmac_gstrings_stats[] = {
/* Transmit errors */
STMMAC_STAT(tx_underflow),
STMMAC_STAT(tx_carrier),
STMMAC_STAT(tx_losscarrier),
STMMAC_STAT(vlan_tag),
STMMAC_STAT(tx_deferred),
STMMAC_STAT(tx_vlan),
STMMAC_STAT(tx_jabber),
STMMAC_STAT(tx_frame_flushed),
STMMAC_STAT(tx_payload_error),
STMMAC_STAT(tx_ip_header_error),
/* Receive errors */
STMMAC_STAT(rx_desc),
STMMAC_STAT(sa_filter_fail),
STMMAC_STAT(overflow_error),
STMMAC_STAT(ipc_csum_error),
STMMAC_STAT(rx_collision),
STMMAC_STAT(rx_crc_errors),
STMMAC_STAT(dribbling_bit),
STMMAC_STAT(rx_length),
STMMAC_STAT(rx_mii),
STMMAC_STAT(rx_multicast),
STMMAC_STAT(rx_gmac_overflow),
STMMAC_STAT(rx_watchdog),
STMMAC_STAT(da_rx_filter_fail),
STMMAC_STAT(sa_rx_filter_fail),
STMMAC_STAT(rx_missed_cntr),
STMMAC_STAT(rx_overflow_cntr),
STMMAC_STAT(rx_vlan),
STMMAC_STAT(rx_split_hdr_pkt_n),
/* Tx/Rx IRQ error info */
STMMAC_STAT(tx_undeflow_irq),
STMMAC_STAT(tx_process_stopped_irq),
STMMAC_STAT(tx_jabber_irq),
STMMAC_STAT(rx_overflow_irq),
STMMAC_STAT(rx_buf_unav_irq),
STMMAC_STAT(rx_process_stopped_irq),
STMMAC_STAT(rx_watchdog_irq),
STMMAC_STAT(tx_early_irq),
STMMAC_STAT(fatal_bus_error_irq),
/* Tx/Rx IRQ Events */
STMMAC_STAT(rx_early_irq),
STMMAC_STAT(threshold),
STMMAC_STAT(irq_receive_pmt_irq_n),
/* MMC info */
STMMAC_STAT(mmc_tx_irq_n),
STMMAC_STAT(mmc_rx_irq_n),
STMMAC_STAT(mmc_rx_csum_offload_irq_n),
/* EEE */
STMMAC_STAT(irq_tx_path_in_lpi_mode_n),
STMMAC_STAT(irq_tx_path_exit_lpi_mode_n),
STMMAC_STAT(irq_rx_path_in_lpi_mode_n),
STMMAC_STAT(irq_rx_path_exit_lpi_mode_n),
STMMAC_STAT(phy_eee_wakeup_error_n),
/* Extended RDES status */
STMMAC_STAT(ip_hdr_err),
STMMAC_STAT(ip_payload_err),
STMMAC_STAT(ip_csum_bypassed),
STMMAC_STAT(ipv4_pkt_rcvd),
STMMAC_STAT(ipv6_pkt_rcvd),
STMMAC_STAT(no_ptp_rx_msg_type_ext),
STMMAC_STAT(ptp_rx_msg_type_sync),
STMMAC_STAT(ptp_rx_msg_type_follow_up),
STMMAC_STAT(ptp_rx_msg_type_delay_req),
STMMAC_STAT(ptp_rx_msg_type_delay_resp),
STMMAC_STAT(ptp_rx_msg_type_pdelay_req),
STMMAC_STAT(ptp_rx_msg_type_pdelay_resp),
STMMAC_STAT(ptp_rx_msg_type_pdelay_follow_up),
STMMAC_STAT(ptp_rx_msg_type_announce),
STMMAC_STAT(ptp_rx_msg_type_management),
STMMAC_STAT(ptp_rx_msg_pkt_reserved_type),
STMMAC_STAT(ptp_frame_type),
STMMAC_STAT(ptp_ver),
STMMAC_STAT(timestamp_dropped),
STMMAC_STAT(av_pkt_rcvd),
STMMAC_STAT(av_tagged_pkt_rcvd),
STMMAC_STAT(vlan_tag_priority_val),
STMMAC_STAT(l3_filter_match),
STMMAC_STAT(l4_filter_match),
STMMAC_STAT(l3_l4_filter_no_match),
/* PCS */
STMMAC_STAT(irq_pcs_ane_n),
STMMAC_STAT(irq_pcs_link_n),
STMMAC_STAT(irq_rgmii_n),
/* DEBUG */
STMMAC_STAT(mtl_tx_status_fifo_full),
STMMAC_STAT(mtl_tx_fifo_not_empty),
STMMAC_STAT(mmtl_fifo_ctrl),
STMMAC_STAT(mtl_tx_fifo_read_ctrl_write),
STMMAC_STAT(mtl_tx_fifo_read_ctrl_wait),
STMMAC_STAT(mtl_tx_fifo_read_ctrl_read),
STMMAC_STAT(mtl_tx_fifo_read_ctrl_idle),
STMMAC_STAT(mac_tx_in_pause),
STMMAC_STAT(mac_tx_frame_ctrl_xfer),
STMMAC_STAT(mac_tx_frame_ctrl_idle),
STMMAC_STAT(mac_tx_frame_ctrl_wait),
STMMAC_STAT(mac_tx_frame_ctrl_pause),
STMMAC_STAT(mac_gmii_tx_proto_engine),
STMMAC_STAT(mtl_rx_fifo_fill_level_full),
STMMAC_STAT(mtl_rx_fifo_fill_above_thresh),
STMMAC_STAT(mtl_rx_fifo_fill_below_thresh),
STMMAC_STAT(mtl_rx_fifo_fill_level_empty),
STMMAC_STAT(mtl_rx_fifo_read_ctrl_flush),
STMMAC_STAT(mtl_rx_fifo_read_ctrl_read_data),
STMMAC_STAT(mtl_rx_fifo_read_ctrl_status),
STMMAC_STAT(mtl_rx_fifo_read_ctrl_idle),
STMMAC_STAT(mtl_rx_fifo_ctrl_active),
STMMAC_STAT(mac_rx_frame_ctrl_fifo),
STMMAC_STAT(mac_gmii_rx_proto_engine),
/* EST */
STMMAC_STAT(mtl_est_cgce),
STMMAC_STAT(mtl_est_hlbs),
STMMAC_STAT(mtl_est_hlbf),
STMMAC_STAT(mtl_est_btre),
STMMAC_STAT(mtl_est_btrlm),
};
#define STMMAC_STATS_LEN ARRAY_SIZE(stmmac_gstrings_stats)
/* Statistics collected per queue; these are summed over all TX or all RX
 * queues, or over both TX and RX queues (napi_poll, normal_irq_n).
 */
static const char stmmac_qstats_string[][ETH_GSTRING_LEN] = {
"rx_pkt_n",
"rx_normal_irq_n",
"tx_pkt_n",
"tx_normal_irq_n",
"tx_clean",
"tx_set_ic_bit",
"tx_tso_frames",
"tx_tso_nfrags",
"normal_irq_n",
"napi_poll",
};
#define STMMAC_QSTATS ARRAY_SIZE(stmmac_qstats_string)
/* HW MAC Management counters (if supported) */
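/* Like STMMAC_STAT(), but sized from struct stmmac_counters with the
 * offset resolving into priv->mmc.
 */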
#define STMMAC_MMC_STAT(m) \
{ #m, sizeof_field(struct stmmac_counters, m), \
offsetof(struct stmmac_priv, mmc.m)}
static const struct stmmac_stats stmmac_mmc[] = {
STMMAC_MMC_STAT(mmc_tx_octetcount_gb),
STMMAC_MMC_STAT(mmc_tx_framecount_gb),
STMMAC_MMC_STAT(mmc_tx_broadcastframe_g),
STMMAC_MMC_STAT(mmc_tx_multicastframe_g),
STMMAC_MMC_STAT(mmc_tx_64_octets_gb),
STMMAC_MMC_STAT(mmc_tx_65_to_127_octets_gb),
STMMAC_MMC_STAT(mmc_tx_128_to_255_octets_gb),
STMMAC_MMC_STAT(mmc_tx_256_to_511_octets_gb),
STMMAC_MMC_STAT(mmc_tx_512_to_1023_octets_gb),
STMMAC_MMC_STAT(mmc_tx_1024_to_max_octets_gb),
STMMAC_MMC_STAT(mmc_tx_unicast_gb),
STMMAC_MMC_STAT(mmc_tx_multicast_gb),
STMMAC_MMC_STAT(mmc_tx_broadcast_gb),
STMMAC_MMC_STAT(mmc_tx_underflow_error),
STMMAC_MMC_STAT(mmc_tx_singlecol_g),
STMMAC_MMC_STAT(mmc_tx_multicol_g),
STMMAC_MMC_STAT(mmc_tx_deferred),
STMMAC_MMC_STAT(mmc_tx_latecol),
STMMAC_MMC_STAT(mmc_tx_exesscol),
STMMAC_MMC_STAT(mmc_tx_carrier_error),
STMMAC_MMC_STAT(mmc_tx_octetcount_g),
STMMAC_MMC_STAT(mmc_tx_framecount_g),
STMMAC_MMC_STAT(mmc_tx_excessdef),
STMMAC_MMC_STAT(mmc_tx_pause_frame),
STMMAC_MMC_STAT(mmc_tx_vlan_frame_g),
STMMAC_MMC_STAT(mmc_tx_oversize_g),
STMMAC_MMC_STAT(mmc_tx_lpi_usec),
STMMAC_MMC_STAT(mmc_tx_lpi_tran),
STMMAC_MMC_STAT(mmc_rx_framecount_gb),
STMMAC_MMC_STAT(mmc_rx_octetcount_gb),
STMMAC_MMC_STAT(mmc_rx_octetcount_g),
STMMAC_MMC_STAT(mmc_rx_broadcastframe_g),
STMMAC_MMC_STAT(mmc_rx_multicastframe_g),
STMMAC_MMC_STAT(mmc_rx_crc_error),
STMMAC_MMC_STAT(mmc_rx_align_error),
STMMAC_MMC_STAT(mmc_rx_run_error),
STMMAC_MMC_STAT(mmc_rx_jabber_error),
STMMAC_MMC_STAT(mmc_rx_undersize_g),
STMMAC_MMC_STAT(mmc_rx_oversize_g),
STMMAC_MMC_STAT(mmc_rx_64_octets_gb),
STMMAC_MMC_STAT(mmc_rx_65_to_127_octets_gb),
STMMAC_MMC_STAT(mmc_rx_128_to_255_octets_gb),
STMMAC_MMC_STAT(mmc_rx_256_to_511_octets_gb),
STMMAC_MMC_STAT(mmc_rx_512_to_1023_octets_gb),
STMMAC_MMC_STAT(mmc_rx_1024_to_max_octets_gb),
STMMAC_MMC_STAT(mmc_rx_unicast_g),
STMMAC_MMC_STAT(mmc_rx_length_error),
STMMAC_MMC_STAT(mmc_rx_autofrangetype),
STMMAC_MMC_STAT(mmc_rx_pause_frames),
STMMAC_MMC_STAT(mmc_rx_fifo_overflow),
STMMAC_MMC_STAT(mmc_rx_vlan_frames_gb),
STMMAC_MMC_STAT(mmc_rx_watchdog_error),
STMMAC_MMC_STAT(mmc_rx_error),
STMMAC_MMC_STAT(mmc_rx_lpi_usec),
STMMAC_MMC_STAT(mmc_rx_lpi_tran),
STMMAC_MMC_STAT(mmc_rx_discard_frames_gb),
STMMAC_MMC_STAT(mmc_rx_discard_octets_gb),
STMMAC_MMC_STAT(mmc_rx_align_err_frames),
STMMAC_MMC_STAT(mmc_rx_ipv4_gd),
STMMAC_MMC_STAT(mmc_rx_ipv4_hderr),
STMMAC_MMC_STAT(mmc_rx_ipv4_nopay),
STMMAC_MMC_STAT(mmc_rx_ipv4_frag),
STMMAC_MMC_STAT(mmc_rx_ipv4_udsbl),
STMMAC_MMC_STAT(mmc_rx_ipv4_gd_octets),
STMMAC_MMC_STAT(mmc_rx_ipv4_hderr_octets),
STMMAC_MMC_STAT(mmc_rx_ipv4_nopay_octets),
STMMAC_MMC_STAT(mmc_rx_ipv4_frag_octets),
STMMAC_MMC_STAT(mmc_rx_ipv4_udsbl_octets),
STMMAC_MMC_STAT(mmc_rx_ipv6_gd_octets),
STMMAC_MMC_STAT(mmc_rx_ipv6_hderr_octets),
STMMAC_MMC_STAT(mmc_rx_ipv6_nopay_octets),
STMMAC_MMC_STAT(mmc_rx_ipv6_gd),
STMMAC_MMC_STAT(mmc_rx_ipv6_hderr),
STMMAC_MMC_STAT(mmc_rx_ipv6_nopay),
STMMAC_MMC_STAT(mmc_rx_udp_gd),
STMMAC_MMC_STAT(mmc_rx_udp_err),
STMMAC_MMC_STAT(mmc_rx_tcp_gd),
STMMAC_MMC_STAT(mmc_rx_tcp_err),
STMMAC_MMC_STAT(mmc_rx_icmp_gd),
STMMAC_MMC_STAT(mmc_rx_icmp_err),
STMMAC_MMC_STAT(mmc_rx_udp_gd_octets),
STMMAC_MMC_STAT(mmc_rx_udp_err_octets),
STMMAC_MMC_STAT(mmc_rx_tcp_gd_octets),
STMMAC_MMC_STAT(mmc_rx_tcp_err_octets),
STMMAC_MMC_STAT(mmc_rx_icmp_gd_octets),
STMMAC_MMC_STAT(mmc_rx_icmp_err_octets),
STMMAC_MMC_STAT(mmc_sgf_pass_fragment_cntr),
STMMAC_MMC_STAT(mmc_sgf_fail_fragment_cntr),
STMMAC_MMC_STAT(mmc_tx_fpe_fragment_cntr),
STMMAC_MMC_STAT(mmc_tx_hold_req_cntr),
STMMAC_MMC_STAT(mmc_tx_gate_overrun_cntr),
STMMAC_MMC_STAT(mmc_rx_packet_assembly_err_cntr),
STMMAC_MMC_STAT(mmc_rx_packet_smd_err_cntr),
STMMAC_MMC_STAT(mmc_rx_packet_assembly_ok_cntr),
STMMAC_MMC_STAT(mmc_rx_fpe_fragment_cntr),
};
#define STMMAC_MMC_STATS_LEN ARRAY_SIZE(stmmac_mmc)
static const char stmmac_qstats_tx_string[][ETH_GSTRING_LEN] = {
"tx_pkt_n",
"tx_irq_n",
#define STMMAC_TXQ_STATS ARRAY_SIZE(stmmac_qstats_tx_string)
};
static const char stmmac_qstats_rx_string[][ETH_GSTRING_LEN] = {
"rx_pkt_n",
"rx_irq_n",
#define STMMAC_RXQ_STATS ARRAY_SIZE(stmmac_qstats_rx_string)
};
static void stmmac_ethtool_getdrvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
struct stmmac_priv *priv = netdev_priv(dev);
if (priv->plat->has_gmac || priv->plat->has_gmac4)
strscpy(info->driver, GMAC_ETHTOOL_NAME, sizeof(info->driver));
else if (priv->plat->has_xgmac)
strscpy(info->driver, XGMAC_ETHTOOL_NAME, sizeof(info->driver));
else
strscpy(info->driver, MAC100_ETHTOOL_NAME,
sizeof(info->driver));
if (priv->plat->pdev) {
strscpy(info->bus_info, pci_name(priv->plat->pdev),
sizeof(info->bus_info));
}
}
static int stmmac_ethtool_get_link_ksettings(struct net_device *dev,
struct ethtool_link_ksettings *cmd)
{
struct stmmac_priv *priv = netdev_priv(dev);
if (!(priv->plat->flags & STMMAC_FLAG_HAS_INTEGRATED_PCS) &&
(priv->hw->pcs & STMMAC_PCS_RGMII ||
priv->hw->pcs & STMMAC_PCS_SGMII)) {
u32 supported, advertising, lp_advertising;
if (!priv->xstats.pcs_link) {
cmd->base.speed = SPEED_UNKNOWN;
cmd->base.duplex = DUPLEX_UNKNOWN;
return 0;
}
cmd->base.duplex = priv->xstats.pcs_duplex;
cmd->base.speed = priv->xstats.pcs_speed;
/* Encoding of PSE bits is defined in 802.3z, 37.2.1.4 */
ethtool_convert_link_mode_to_legacy_u32(
&supported, cmd->link_modes.supported);
ethtool_convert_link_mode_to_legacy_u32(
&advertising, cmd->link_modes.advertising);
ethtool_convert_link_mode_to_legacy_u32(
&lp_advertising, cmd->link_modes.lp_advertising);
/* Reg49[3] always set because ANE is always supported */
cmd->base.autoneg = ADVERTISED_Autoneg;
supported |= SUPPORTED_Autoneg;
advertising |= ADVERTISED_Autoneg;
lp_advertising |= ADVERTISED_Autoneg;
cmd->base.port = PORT_OTHER;
ethtool_convert_legacy_u32_to_link_mode(
cmd->link_modes.supported, supported);
ethtool_convert_legacy_u32_to_link_mode(
cmd->link_modes.advertising, advertising);
ethtool_convert_legacy_u32_to_link_mode(
cmd->link_modes.lp_advertising, lp_advertising);
return 0;
}
return phylink_ethtool_ksettings_get(priv->phylink, cmd);
}
static int
stmmac_ethtool_set_link_ksettings(struct net_device *dev,
const struct ethtool_link_ksettings *cmd)
{
struct stmmac_priv *priv = netdev_priv(dev);
if (!(priv->plat->flags & STMMAC_FLAG_HAS_INTEGRATED_PCS) &&
(priv->hw->pcs & STMMAC_PCS_RGMII ||
priv->hw->pcs & STMMAC_PCS_SGMII)) {
/* Only support ANE */
if (cmd->base.autoneg != AUTONEG_ENABLE)
return -EINVAL;
mutex_lock(&priv->lock);
stmmac_pcs_ctrl_ane(priv, 1, priv->hw->ps, 0);
mutex_unlock(&priv->lock);
return 0;
}
return phylink_ethtool_ksettings_set(priv->phylink, cmd);
}
static u32 stmmac_ethtool_getmsglevel(struct net_device *dev)
{
struct stmmac_priv *priv = netdev_priv(dev);
return priv->msg_enable;
}
static void stmmac_ethtool_setmsglevel(struct net_device *dev, u32 level)
{
struct stmmac_priv *priv = netdev_priv(dev);
priv->msg_enable = level;
}
static int stmmac_ethtool_get_regs_len(struct net_device *dev)
{
struct stmmac_priv *priv = netdev_priv(dev);
if (priv->plat->has_xgmac)
return XGMAC_REGSIZE * 4;
else if (priv->plat->has_gmac4)
return GMAC4_REG_SPACE_SIZE;
return REG_SPACE_SIZE;
}
static void stmmac_ethtool_gregs(struct net_device *dev,
struct ethtool_regs *regs, void *space)
{
struct stmmac_priv *priv = netdev_priv(dev);
u32 *reg_space = (u32 *) space;
stmmac_dump_mac_regs(priv, priv->hw, reg_space);
stmmac_dump_dma_regs(priv, priv->ioaddr, reg_space);
/* Copy DMA registers to where ethtool expects them */
if (priv->plat->has_gmac4) {
/* GMAC4 dumps its DMA registers at its DMA_CHAN_BASE_ADDR */
memcpy(&reg_space[ETHTOOL_DMA_OFFSET],
&reg_space[GMAC4_DMA_CHAN_BASE_ADDR / 4],
NUM_DWMAC4_DMA_REGS * 4);
} else if (!priv->plat->has_xgmac) {
memcpy(&reg_space[ETHTOOL_DMA_OFFSET],
&reg_space[DMA_BUS_MODE / 4],
NUM_DWMAC1000_DMA_REGS * 4);
}
}
static int stmmac_nway_reset(struct net_device *dev)
{
struct stmmac_priv *priv = netdev_priv(dev);
return phylink_ethtool_nway_reset(priv->phylink);
}
static void stmmac_get_ringparam(struct net_device *netdev,
struct ethtool_ringparam *ring,
struct kernel_ethtool_ringparam *kernel_ring,
struct netlink_ext_ack *extack)
{
struct stmmac_priv *priv = netdev_priv(netdev);
ring->rx_max_pending = DMA_MAX_RX_SIZE;
ring->tx_max_pending = DMA_MAX_TX_SIZE;
ring->rx_pending = priv->dma_conf.dma_rx_size;
ring->tx_pending = priv->dma_conf.dma_tx_size;
}
static int stmmac_set_ringparam(struct net_device *netdev,
struct ethtool_ringparam *ring,
struct kernel_ethtool_ringparam *kernel_ring,
struct netlink_ext_ack *extack)
{
if (ring->rx_mini_pending || ring->rx_jumbo_pending ||
ring->rx_pending < DMA_MIN_RX_SIZE ||
ring->rx_pending > DMA_MAX_RX_SIZE ||
!is_power_of_2(ring->rx_pending) ||
ring->tx_pending < DMA_MIN_TX_SIZE ||
ring->tx_pending > DMA_MAX_TX_SIZE ||
!is_power_of_2(ring->tx_pending))
return -EINVAL;
return stmmac_reinit_ringparam(netdev, ring->rx_pending,
ring->tx_pending);
}
static void
stmmac_get_pauseparam(struct net_device *netdev,
struct ethtool_pauseparam *pause)
{
struct stmmac_priv *priv = netdev_priv(netdev);
if (priv->hw->pcs) {
pause->autoneg = 1;
} else {
phylink_ethtool_get_pauseparam(priv->phylink, pause);
}
}
static int
stmmac_set_pauseparam(struct net_device *netdev,
struct ethtool_pauseparam *pause)
{
struct stmmac_priv *priv = netdev_priv(netdev);
if (priv->hw->pcs) {
pause->autoneg = 1;
return 0;
} else {
return phylink_ethtool_set_pauseparam(priv->phylink, pause);
}
}
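/* Sum the rx_normal_irq_n counter of queue 'q' over all possible CPUs.
 * The u64_stats retry loop makes the 64-bit read tear-free on 32-bit
 * platforms, where the per-cpu counters are seqcount-protected.
 */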
static u64 stmmac_get_rx_normal_irq_n(struct stmmac_priv *priv, int q)
{
u64 total;
int cpu;
total = 0;
for_each_possible_cpu(cpu) {
struct stmmac_pcpu_stats *pcpu;
unsigned int start;
u64 irq_n;
pcpu = per_cpu_ptr(priv->xstats.pcpu_stats, cpu);
do {
start = u64_stats_fetch_begin(&pcpu->syncp);
irq_n = u64_stats_read(&pcpu->rx_normal_irq_n[q]);
} while (u64_stats_fetch_retry(&pcpu->syncp, start));
total += irq_n;
}
return total;
}
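/* TX counterpart of stmmac_get_rx_normal_irq_n(). */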
static u64 stmmac_get_tx_normal_irq_n(struct stmmac_priv *priv, int q)
{
u64 total;
int cpu;
total = 0;
for_each_possible_cpu(cpu) {
struct stmmac_pcpu_stats *pcpu;
unsigned int start;
u64 irq_n;
pcpu = per_cpu_ptr(priv->xstats.pcpu_stats, cpu);
do {
start = u64_stats_fetch_begin(&pcpu->syncp);
irq_n = u64_stats_read(&pcpu->tx_normal_irq_n[q]);
} while (u64_stats_fetch_retry(&pcpu->syncp, start));
total += irq_n;
}
return total;
}
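/* Emit the per-queue counters in the same order as
 * stmmac_get_qstats_string(): (tx_pkt_n, tx_irq_n) for every TX queue,
 * then (rx_pkt_n, rx_irq_n) for every RX queue.
 */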
static void stmmac_get_per_qstats(struct stmmac_priv *priv, u64 *data)
{
u32 tx_cnt = priv->plat->tx_queues_to_use;
u32 rx_cnt = priv->plat->rx_queues_to_use;
unsigned int start;
int q;
for (q = 0; q < tx_cnt; q++) {
struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[q];
u64 pkt_n;
do {
start = u64_stats_fetch_begin(&txq_stats->napi_syncp);
pkt_n = u64_stats_read(&txq_stats->napi.tx_pkt_n);
} while (u64_stats_fetch_retry(&txq_stats->napi_syncp, start));
*data++ = pkt_n;
*data++ = stmmac_get_tx_normal_irq_n(priv, q);
}
for (q = 0; q < rx_cnt; q++) {
struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[q];
u64 pkt_n;
do {
start = u64_stats_fetch_begin(&rxq_stats->napi_syncp);
pkt_n = u64_stats_read(&rxq_stats->napi.rx_pkt_n);
} while (u64_stats_fetch_retry(&rxq_stats->napi_syncp, start));
*data++ = pkt_n;
*data++ = stmmac_get_rx_normal_irq_n(priv, q);
}
}
static void stmmac_get_ethtool_stats(struct net_device *dev,
struct ethtool_stats *dummy, u64 *data)
{
struct stmmac_priv *priv = netdev_priv(dev);
u32 rx_queues_count = priv->plat->rx_queues_to_use;
u32 tx_queues_count = priv->plat->tx_queues_to_use;
u64 napi_poll = 0, normal_irq_n = 0;
int i, j = 0, pos, ret;
unsigned long count;
unsigned int start;
if (priv->dma_cap.asp) {
for (i = 0; i < STMMAC_SAFETY_FEAT_SIZE; i++) {
if (!stmmac_safety_feat_dump(priv, &priv->sstats, i,
&count, NULL))
data[j++] = count;
}
}
/* Update the DMA HW counters for dwmac10/100 */
ret = stmmac_dma_diagnostic_fr(priv, &priv->xstats, priv->ioaddr);
if (ret) {
/* If supported, for new GMAC chips expose the MMC counters */
if (priv->dma_cap.rmon) {
stmmac_mmc_read(priv, priv->mmcaddr, &priv->mmc);
for (i = 0; i < STMMAC_MMC_STATS_LEN; i++) {
char *p;
p = (char *)priv + stmmac_mmc[i].stat_offset;
data[j++] = (stmmac_mmc[i].sizeof_stat ==
sizeof(u64)) ? (*(u64 *)p) :
(*(u32 *)p);
}
}
if (priv->dma_cap.eee) {
int val = phylink_get_eee_err(priv->phylink);
if (val)
priv->xstats.phy_eee_wakeup_error_n = val;
}
if (priv->synopsys_id >= DWMAC_CORE_3_50)
stmmac_mac_debug(priv, priv->ioaddr,
(void *)&priv->xstats,
rx_queues_count, tx_queues_count);
}
for (i = 0; i < STMMAC_STATS_LEN; i++) {
char *p = (char *)priv + stmmac_gstrings_stats[i].stat_offset;
data[j++] = (stmmac_gstrings_stats[i].sizeof_stat ==
sizeof(u64)) ? (*(u64 *)p) : (*(u32 *)p);
}
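/* Accumulate each queue's snapshot into the same summary slots: 'j' is
 * rewound to 'pos' on every iteration, so the '+=' below sums the
 * per-queue values across all queues.
 */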
pos = j;
for (i = 0; i < rx_queues_count; i++) {
struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[i];
struct stmmac_napi_rx_stats snapshot;
u64 n_irq;
j = pos;
do {
start = u64_stats_fetch_begin(&rxq_stats->napi_syncp);
snapshot = rxq_stats->napi;
} while (u64_stats_fetch_retry(&rxq_stats->napi_syncp, start));
data[j++] += u64_stats_read(&snapshot.rx_pkt_n);
n_irq = stmmac_get_rx_normal_irq_n(priv, i);
data[j++] += n_irq;
normal_irq_n += n_irq;
napi_poll += u64_stats_read(&snapshot.poll);
}
pos = j;
for (i = 0; i < tx_queues_count; i++) {
struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[i];
struct stmmac_napi_tx_stats napi_snapshot;
struct stmmac_q_tx_stats q_snapshot;
u64 n_irq;
j = pos;
do {
start = u64_stats_fetch_begin(&txq_stats->q_syncp);
q_snapshot = txq_stats->q;
} while (u64_stats_fetch_retry(&txq_stats->q_syncp, start));
do {
start = u64_stats_fetch_begin(&txq_stats->napi_syncp);
napi_snapshot = txq_stats->napi;
} while (u64_stats_fetch_retry(&txq_stats->napi_syncp, start));
data[j++] += u64_stats_read(&napi_snapshot.tx_pkt_n);
n_irq = stmmac_get_tx_normal_irq_n(priv, i);
data[j++] += n_irq;
normal_irq_n += n_irq;
data[j++] += u64_stats_read(&napi_snapshot.tx_clean);
data[j++] += u64_stats_read(&q_snapshot.tx_set_ic_bit) +
u64_stats_read(&napi_snapshot.tx_set_ic_bit);
data[j++] += u64_stats_read(&q_snapshot.tx_tso_frames);
data[j++] += u64_stats_read(&q_snapshot.tx_tso_nfrags);
napi_poll += u64_stats_read(&napi_snapshot.poll);
}
normal_irq_n += priv->xstats.rx_early_irq;
data[j++] = normal_irq_n;
data[j++] = napi_poll;
stmmac_get_per_qstats(priv, &data[j]);
}
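/* The count returned here must match both the number of strings emitted
 * by stmmac_get_strings() and the number of u64 values written by
 * stmmac_get_ethtool_stats().
 */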
static int stmmac_get_sset_count(struct net_device *netdev, int sset)
{
struct stmmac_priv *priv = netdev_priv(netdev);
u32 tx_cnt = priv->plat->tx_queues_to_use;
u32 rx_cnt = priv->plat->rx_queues_to_use;
int i, len, safety_len = 0;
switch (sset) {
case ETH_SS_STATS:
len = STMMAC_STATS_LEN + STMMAC_QSTATS +
STMMAC_TXQ_STATS * tx_cnt +
STMMAC_RXQ_STATS * rx_cnt;
if (priv->dma_cap.rmon)
len += STMMAC_MMC_STATS_LEN;
if (priv->dma_cap.asp) {
for (i = 0; i < STMMAC_SAFETY_FEAT_SIZE; i++) {
if (!stmmac_safety_feat_dump(priv,
&priv->sstats, i,
NULL, NULL))
safety_len++;
}
len += safety_len;
}
return len;
case ETH_SS_TEST:
return stmmac_selftest_get_count(priv);
default:
return -EOPNOTSUPP;
}
}
static void stmmac_get_qstats_string(struct stmmac_priv *priv, u8 *data)
{
u32 tx_cnt = priv->plat->tx_queues_to_use;
u32 rx_cnt = priv->plat->rx_queues_to_use;
int q, stat;
for (q = 0; q < tx_cnt; q++) {
for (stat = 0; stat < STMMAC_TXQ_STATS; stat++) {
snprintf(data, ETH_GSTRING_LEN, "q%d_%s", q,
stmmac_qstats_tx_string[stat]);
data += ETH_GSTRING_LEN;
}
}
for (q = 0; q < rx_cnt; q++) {
for (stat = 0; stat < STMMAC_RXQ_STATS; stat++) {
snprintf(data, ETH_GSTRING_LEN, "q%d_%s", q,
stmmac_qstats_rx_string[stat]);
data += ETH_GSTRING_LEN;
}
}
}
static void stmmac_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
int i;
u8 *p = data;
struct stmmac_priv *priv = netdev_priv(dev);
switch (stringset) {
case ETH_SS_STATS:
if (priv->dma_cap.asp) {
for (i = 0; i < STMMAC_SAFETY_FEAT_SIZE; i++) {
const char *desc;
if (!stmmac_safety_feat_dump(priv,
&priv->sstats, i,
NULL, &desc)) {
memcpy(p, desc, ETH_GSTRING_LEN);
p += ETH_GSTRING_LEN;
}
}
}
if (priv->dma_cap.rmon)
for (i = 0; i < STMMAC_MMC_STATS_LEN; i++) {
memcpy(p, stmmac_mmc[i].stat_string,
ETH_GSTRING_LEN);
p += ETH_GSTRING_LEN;
}
for (i = 0; i < STMMAC_STATS_LEN; i++) {
memcpy(p, stmmac_gstrings_stats[i].stat_string, ETH_GSTRING_LEN);
p += ETH_GSTRING_LEN;
}
for (i = 0; i < STMMAC_QSTATS; i++) {
memcpy(p, stmmac_qstats_string[i], ETH_GSTRING_LEN);
p += ETH_GSTRING_LEN;
}
stmmac_get_qstats_string(priv, p);
break;
case ETH_SS_TEST:
stmmac_selftest_get_strings(priv, p);
break;
default:
WARN_ON(1);
break;
}
}
/* Currently only WoL via magic packet and unicast frames is supported. */
static void stmmac_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
struct stmmac_priv *priv = netdev_priv(dev);
if (!priv->plat->pmt)
return phylink_ethtool_get_wol(priv->phylink, wol);
mutex_lock(&priv->lock);
if (device_can_wakeup(priv->device)) {
wol->supported = WAKE_MAGIC | WAKE_UCAST;
if (priv->hw_cap_support && !priv->dma_cap.pmt_magic_frame)
wol->supported &= ~WAKE_MAGIC;
wol->wolopts = priv->wolopts;
}
mutex_unlock(&priv->lock);
}
static int stmmac_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
struct stmmac_priv *priv = netdev_priv(dev);
u32 support = WAKE_MAGIC | WAKE_UCAST;
if (!device_can_wakeup(priv->device))
return -EOPNOTSUPP;
if (!priv->plat->pmt) {
int ret = phylink_ethtool_set_wol(priv->phylink, wol);
if (!ret)
device_set_wakeup_enable(priv->device, !!wol->wolopts);
return ret;
}
/* By default almost all GMAC devices support WoL via magic frame, but
 * it is disabled if the HW capability register shows no support for
 * pmt_magic_frame.
 */
if ((priv->hw_cap_support) && (!priv->dma_cap.pmt_magic_frame))
wol->wolopts &= ~WAKE_MAGIC;
if (wol->wolopts & ~support)
return -EINVAL;
if (wol->wolopts) {
pr_info("stmmac: wakeup enable\n");
device_set_wakeup_enable(priv->device, 1);
/* Avoid unbalanced enable_irq_wake calls */
if (priv->wol_irq_disabled)
enable_irq_wake(priv->wol_irq);
priv->wol_irq_disabled = false;
} else {
device_set_wakeup_enable(priv->device, 0);
/* Avoid unbalanced disable_irq_wake calls */
if (!priv->wol_irq_disabled)
disable_irq_wake(priv->wol_irq);
priv->wol_irq_disabled = true;
}
mutex_lock(&priv->lock);
priv->wolopts = wol->wolopts;
mutex_unlock(&priv->lock);
return 0;
}
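/* EEE is only advertised when the HW capability register reports support;
 * the actual EEE state is managed by phylink/the PHY.
 */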
static int stmmac_ethtool_op_get_eee(struct net_device *dev,
struct ethtool_keee *edata)
{
struct stmmac_priv *priv = netdev_priv(dev);
if (!priv->dma_cap.eee)
return -EOPNOTSUPP;
return phylink_ethtool_get_eee(priv->phylink, edata);
}
static int stmmac_ethtool_op_set_eee(struct net_device *dev,
struct ethtool_keee *edata)
{
struct stmmac_priv *priv = netdev_priv(dev);
if (!priv->dma_cap.eee)
return -EOPNOTSUPP;
return phylink_ethtool_set_eee(priv->phylink, edata);
}
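/* Convert microseconds to the RX interrupt watchdog timer (RIWT) value.
 * The RIWT counter ticks once every 256 cycles of the stmmac clock, so:
 *   riwt = usec * (clk_rate / 1000000) / 256
 * Returns 0 when no clock rate is available.
 */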
static u32 stmmac_usec2riwt(u32 usec, struct stmmac_priv *priv)
{
unsigned long clk = clk_get_rate(priv->plat->stmmac_clk);
if (!clk) {
clk = priv->plat->clk_ref_rate;
if (!clk)
return 0;
}
return (usec * (clk / 1000000)) / 256;
}
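/* Inverse of stmmac_usec2riwt(): convert a RIWT register value back to
 * microseconds.
 */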
static u32 stmmac_riwt2usec(u32 riwt, struct stmmac_priv *priv)
{
unsigned long clk = clk_get_rate(priv->plat->stmmac_clk);
if (!clk) {
clk = priv->plat->clk_ref_rate;
if (!clk)
return 0;
}
return (riwt * 256) / (clk / 1000000);
}
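/* Common helper for the global and per-queue GET handlers: a negative
 * @queue reports queue 0, which doubles as the device-wide view.
 */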
static int __stmmac_get_coalesce(struct net_device *dev,
struct ethtool_coalesce *ec,
int queue)
{
struct stmmac_priv *priv = netdev_priv(dev);
u32 max_cnt;
u32 rx_cnt;
u32 tx_cnt;
rx_cnt = priv->plat->rx_queues_to_use;
tx_cnt = priv->plat->tx_queues_to_use;
max_cnt = max(rx_cnt, tx_cnt);
if (queue < 0)
queue = 0;
else if (queue >= max_cnt)
return -EINVAL;
if (queue < tx_cnt) {
ec->tx_coalesce_usecs = priv->tx_coal_timer[queue];
ec->tx_max_coalesced_frames = priv->tx_coal_frames[queue];
} else {
ec->tx_coalesce_usecs = 0;
ec->tx_max_coalesced_frames = 0;
}
if (priv->use_riwt && queue < rx_cnt) {
ec->rx_max_coalesced_frames = priv->rx_coal_frames[queue];
ec->rx_coalesce_usecs = stmmac_riwt2usec(priv->rx_riwt[queue],
priv);
} else {
ec->rx_max_coalesced_frames = 0;
ec->rx_coalesce_usecs = 0;
}
return 0;
}
static int stmmac_get_coalesce(struct net_device *dev,
struct ethtool_coalesce *ec,
struct kernel_ethtool_coalesce *kernel_coal,
struct netlink_ext_ack *extack)
{
return __stmmac_get_coalesce(dev, ec, -1);
}
static int stmmac_get_per_queue_coalesce(struct net_device *dev, u32 queue,
struct ethtool_coalesce *ec)
{
return __stmmac_get_coalesce(dev, ec, queue);
}
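/* Common helper for the global and per-queue SET handlers: a negative
 * @queue applies the settings to every RX/TX queue.
 */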
static int __stmmac_set_coalesce(struct net_device *dev,
struct ethtool_coalesce *ec,
int queue)
{
struct stmmac_priv *priv = netdev_priv(dev);
bool all_queues = false;
unsigned int rx_riwt;
u32 max_cnt;
u32 rx_cnt;
u32 tx_cnt;
rx_cnt = priv->plat->rx_queues_to_use;
tx_cnt = priv->plat->tx_queues_to_use;
max_cnt = max(rx_cnt, tx_cnt);
if (queue < 0)
all_queues = true;
else if (queue >= max_cnt)
return -EINVAL;
if (priv->use_riwt) {
rx_riwt = stmmac_usec2riwt(ec->rx_coalesce_usecs, priv);
if ((rx_riwt > MAX_DMA_RIWT) || (rx_riwt < MIN_DMA_RIWT))
return -EINVAL;
if (all_queues) {
int i;
for (i = 0; i < rx_cnt; i++) {
priv->rx_riwt[i] = rx_riwt;
stmmac_rx_watchdog(priv, priv->ioaddr,
rx_riwt, i);
priv->rx_coal_frames[i] =
ec->rx_max_coalesced_frames;
}
} else if (queue < rx_cnt) {
priv->rx_riwt[queue] = rx_riwt;
stmmac_rx_watchdog(priv, priv->ioaddr,
rx_riwt, queue);
priv->rx_coal_frames[queue] =
ec->rx_max_coalesced_frames;
}
}
if ((ec->tx_coalesce_usecs == 0) &&
(ec->tx_max_coalesced_frames == 0))
return -EINVAL;
if ((ec->tx_coalesce_usecs > STMMAC_MAX_COAL_TX_TICK) ||
(ec->tx_max_coalesced_frames > STMMAC_TX_MAX_FRAMES))
return -EINVAL;
if (all_queues) {
int i;
for (i = 0; i < tx_cnt; i++) {
priv->tx_coal_frames[i] =
ec->tx_max_coalesced_frames;
priv->tx_coal_timer[i] =
ec->tx_coalesce_usecs;
}
} else if (queue < tx_cnt) {
priv->tx_coal_frames[queue] =
ec->tx_max_coalesced_frames;
priv->tx_coal_timer[queue] =
ec->tx_coalesce_usecs;
}
return 0;
}
static int stmmac_set_coalesce(struct net_device *dev,
struct ethtool_coalesce *ec,
struct kernel_ethtool_coalesce *kernel_coal,
struct netlink_ext_ack *extack)
{
return __stmmac_set_coalesce(dev, ec, -1);
}
static int stmmac_set_per_queue_coalesce(struct net_device *dev, u32 queue,
struct ethtool_coalesce *ec)
{
return __stmmac_set_coalesce(dev, ec, queue);
}
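/* Only ETHTOOL_GRXRINGS is supported: report how many RX queues are in use. */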
static int stmmac_get_rxnfc(struct net_device *dev,
struct ethtool_rxnfc *rxnfc, u32 *rule_locs)
{
struct stmmac_priv *priv = netdev_priv(dev);
switch (rxnfc->cmd) {
case ETHTOOL_GRXRINGS:
rxnfc->data = priv->plat->rx_queues_to_use;
break;
default:
return -EOPNOTSUPP;
}
return 0;
}
static u32 stmmac_get_rxfh_key_size(struct net_device *dev)
{
struct stmmac_priv *priv = netdev_priv(dev);
return sizeof(priv->rss.key);
}
static u32 stmmac_get_rxfh_indir_size(struct net_device *dev)
{
struct stmmac_priv *priv = netdev_priv(dev);
return ARRAY_SIZE(priv->rss.table);
}
static int stmmac_get_rxfh(struct net_device *dev,
struct ethtool_rxfh_param *rxfh)
{
struct stmmac_priv *priv = netdev_priv(dev);
int i;
if (rxfh->indir) {
for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
rxfh->indir[i] = priv->rss.table[i];
}
if (rxfh->key)
memcpy(rxfh->key, priv->rss.key, sizeof(priv->rss.key));
rxfh->hfunc = ETH_RSS_HASH_TOP;
return 0;
}
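/* Update the RSS indirection table and/or hash key; only the Toeplitz
 * hash function (ETH_RSS_HASH_TOP) is supported, and the new settings
 * are programmed straight into the MAC.
 */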
static int stmmac_set_rxfh(struct net_device *dev,
struct ethtool_rxfh_param *rxfh,
struct netlink_ext_ack *extack)
{
struct stmmac_priv *priv = netdev_priv(dev);
int i;
if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
rxfh->hfunc != ETH_RSS_HASH_TOP)
return -EOPNOTSUPP;
if (rxfh->indir) {
for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
priv->rss.table[i] = rxfh->indir[i];
}
if (rxfh->key)
memcpy(priv->rss.key, rxfh->key, sizeof(priv->rss.key));
return stmmac_rss_configure(priv, priv->hw, &priv->rss,
priv->plat->rx_queues_to_use);
}
static void stmmac_get_channels(struct net_device *dev,
struct ethtool_channels *chan)
{
struct stmmac_priv *priv = netdev_priv(dev);
chan->rx_count = priv->plat->rx_queues_to_use;
chan->tx_count = priv->plat->tx_queues_to_use;
chan->max_rx = priv->dma_cap.number_rx_queues;
chan->max_tx = priv->dma_cap.number_tx_queues;
}
static int stmmac_set_channels(struct net_device *dev,
struct ethtool_channels *chan)
{
struct stmmac_priv *priv = netdev_priv(dev);
if (chan->rx_count > priv->dma_cap.number_rx_queues ||
chan->tx_count > priv->dma_cap.number_tx_queues ||
!chan->rx_count || !chan->tx_count)
return -EINVAL;
return stmmac_reinit_queues(dev, chan->rx_count, chan->tx_count);
}
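/* Report hardware timestamping capabilities when the core supports
 * (auxiliary) timestamping; otherwise fall back to the software-only
 * defaults from ethtool_op_get_ts_info().
 */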
static int stmmac_get_ts_info(struct net_device *dev,
struct kernel_ethtool_ts_info *info)
{
struct stmmac_priv *priv = netdev_priv(dev);
	if (priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp) {
info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
SOF_TIMESTAMPING_TX_HARDWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
SOF_TIMESTAMPING_RAW_HARDWARE;
if (priv->ptp_clock)
info->phc_index = ptp_clock_index(priv->ptp_clock);
else
info->phc_index = 0;
info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
info->rx_filters = ((1 << HWTSTAMP_FILTER_NONE) |
(1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
(1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
(1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
(1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
(1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
(1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) |
(1 << HWTSTAMP_FILTER_PTP_V2_EVENT) |
(1 << HWTSTAMP_FILTER_PTP_V2_SYNC) |
(1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ) |
(1 << HWTSTAMP_FILTER_ALL));
return 0;
} else
return ethtool_op_get_ts_info(dev, info);
}
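/* IEEE 802.3 MAC Merge (frame preemption) state, only available when
 * the hardware supports FPE.
 */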
static int stmmac_get_mm(struct net_device *ndev,
struct ethtool_mm_state *state)
{
struct stmmac_priv *priv = netdev_priv(ndev);
u32 frag_size;
if (!stmmac_fpe_supported(priv))
return -EOPNOTSUPP;
state->rx_min_frag_size = ETH_ZLEN;
frag_size = stmmac_fpe_get_add_frag_size(priv);
state->tx_min_frag_size = ethtool_mm_frag_size_add_to_min(frag_size);
ethtool_mmsv_get_mm(&priv->fpe_cfg.mmsv, state);
return 0;
}
static int stmmac_set_mm(struct net_device *ndev, struct ethtool_mm_cfg *cfg,
struct netlink_ext_ack *extack)
{
struct stmmac_priv *priv = netdev_priv(ndev);
u32 frag_size;
int err;
err = ethtool_mm_frag_size_min_to_add(cfg->tx_min_frag_size,
&frag_size, extack);
if (err)
return err;
stmmac_fpe_set_add_frag_size(priv, frag_size);
ethtool_mmsv_set_mm(&priv->fpe_cfg.mmsv, cfg);
return 0;
}
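/* Fill the standard MAC Merge counters from the MMC hardware counters;
 * these are only maintained when the RMON/MMC block is present.
 */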
static void stmmac_get_mm_stats(struct net_device *ndev,
struct ethtool_mm_stats *s)
{
struct stmmac_priv *priv = netdev_priv(ndev);
struct stmmac_counters *mmc = &priv->mmc;
if (!priv->dma_cap.rmon)
return;
stmmac_mmc_read(priv, priv->mmcaddr, mmc);
s->MACMergeFrameAssErrorCount = mmc->mmc_rx_packet_assembly_err_cntr;
s->MACMergeFrameAssOkCount = mmc->mmc_rx_packet_assembly_ok_cntr;
s->MACMergeFrameSmdErrorCount = mmc->mmc_rx_packet_smd_err_cntr;
s->MACMergeFragCountRx = mmc->mmc_rx_fpe_fragment_cntr;
s->MACMergeFragCountTx = mmc->mmc_tx_fpe_fragment_cntr;
s->MACMergeHoldCount = mmc->mmc_tx_hold_req_cntr;
}
static const struct ethtool_ops stmmac_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_MAX_FRAMES,
.get_drvinfo = stmmac_ethtool_getdrvinfo,
.get_msglevel = stmmac_ethtool_getmsglevel,
.set_msglevel = stmmac_ethtool_setmsglevel,
.get_regs = stmmac_ethtool_gregs,
.get_regs_len = stmmac_ethtool_get_regs_len,
.get_link = ethtool_op_get_link,
.nway_reset = stmmac_nway_reset,
.get_ringparam = stmmac_get_ringparam,
.set_ringparam = stmmac_set_ringparam,
.get_pauseparam = stmmac_get_pauseparam,
.set_pauseparam = stmmac_set_pauseparam,
.self_test = stmmac_selftest_run,
.get_ethtool_stats = stmmac_get_ethtool_stats,
.get_strings = stmmac_get_strings,
.get_wol = stmmac_get_wol,
.set_wol = stmmac_set_wol,
.get_eee = stmmac_ethtool_op_get_eee,
.set_eee = stmmac_ethtool_op_set_eee,
.get_sset_count = stmmac_get_sset_count,
.get_rxnfc = stmmac_get_rxnfc,
.get_rxfh_key_size = stmmac_get_rxfh_key_size,
.get_rxfh_indir_size = stmmac_get_rxfh_indir_size,
.get_rxfh = stmmac_get_rxfh,
.set_rxfh = stmmac_set_rxfh,
.get_ts_info = stmmac_get_ts_info,
.get_coalesce = stmmac_get_coalesce,
.set_coalesce = stmmac_set_coalesce,
.get_per_queue_coalesce = stmmac_get_per_queue_coalesce,
.set_per_queue_coalesce = stmmac_set_per_queue_coalesce,
.get_channels = stmmac_get_channels,
.set_channels = stmmac_set_channels,
.get_link_ksettings = stmmac_ethtool_get_link_ksettings,
.set_link_ksettings = stmmac_ethtool_set_link_ksettings,
.get_mm = stmmac_get_mm,
.set_mm = stmmac_set_mm,
.get_mm_stats = stmmac_get_mm_stats,
};
void stmmac_set_ethtool_ops(struct net_device *netdev)
{
netdev->ethtool_ops = &stmmac_ethtool_ops;
}