// SPDX-License-Identifier: GPL-2.0-only
/*******************************************************************************
  Copyright (C) 2007-2009 STMicroelectronics Ltd

  Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
*******************************************************************************/

#include <linux/io.h>
#include <linux/iopoll.h>
#include "common.h"
#include "dwmac_dma.h"
#include "stmmac.h"

#define GMAC_HI_REG_AE	0x80000000

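/**
 * dwmac_dma_reset - software-reset the DMA engine
 * @ioaddr: base address of the DMA register block
 *
 * Sets the SFT_RESET bit in the bus mode register and polls until the
 * hardware clears it again. Returns 0 on success or -ETIMEDOUT if the
 * bit is still set after 200 ms.
 */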
int dwmac_dma_reset(void __iomem *ioaddr)
{
	u32 value = readl(ioaddr + DMA_BUS_MODE);

	/* DMA SW reset */
	value |= DMA_BUS_MODE_SFT_RESET;
	writel(value, ioaddr + DMA_BUS_MODE);

	return readl_poll_timeout(ioaddr + DMA_BUS_MODE, value,
				  !(value & DMA_BUS_MODE_SFT_RESET),
				  10000, 200000);
}

/* CSR1 enables the transmit DMA to check for new descriptor */
void dwmac_enable_dma_transmission(void __iomem *ioaddr)
{
	writel(1, ioaddr + DMA_XMT_POLL_DEMAND);
}

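/* Enable the default RX and/or TX interrupt sources in the DMA interrupt
 * enable register. This legacy DMA variant has a single, shared interrupt
 * enable register, so @chan is not used here.
 */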
void dwmac_enable_dma_irq(struct stmmac_priv *priv, void __iomem *ioaddr,
			  u32 chan, bool rx, bool tx)
{
	u32 value = readl(ioaddr + DMA_INTR_ENA);

	if (rx)
		value |= DMA_INTR_DEFAULT_RX;
	if (tx)
		value |= DMA_INTR_DEFAULT_TX;

	writel(value, ioaddr + DMA_INTR_ENA);
}

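/* Counterpart of dwmac_enable_dma_irq(): mask the selected RX/TX interrupt
 * sources again; the read-modify-write leaves the other enable bits intact.
 */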
void dwmac_disable_dma_irq(struct stmmac_priv *priv, void __iomem *ioaddr,
			   u32 chan, bool rx, bool tx)
{
	u32 value = readl(ioaddr + DMA_INTR_ENA);

	if (rx)
		value &= ~DMA_INTR_DEFAULT_RX;
	if (tx)
		value &= ~DMA_INTR_DEFAULT_TX;

	writel(value, ioaddr + DMA_INTR_ENA);
}

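/* The four helpers below toggle the Start Transmission (ST) and Start
 * Receive (SR) bits of the DMA control register to run or halt the
 * corresponding DMA state machine.
 */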
void dwmac_dma_start_tx(struct stmmac_priv *priv, void __iomem *ioaddr,
			u32 chan)
{
	u32 value = readl(ioaddr + DMA_CONTROL);
	value |= DMA_CONTROL_ST;
	writel(value, ioaddr + DMA_CONTROL);
}

void dwmac_dma_stop_tx(struct stmmac_priv *priv, void __iomem *ioaddr, u32 chan)
{
	u32 value = readl(ioaddr + DMA_CONTROL);
	value &= ~DMA_CONTROL_ST;
	writel(value, ioaddr + DMA_CONTROL);
}

void dwmac_dma_start_rx(struct stmmac_priv *priv, void __iomem *ioaddr,
			u32 chan)
{
	u32 value = readl(ioaddr + DMA_CONTROL);
	value |= DMA_CONTROL_SR;
	writel(value, ioaddr + DMA_CONTROL);
}

void dwmac_dma_stop_rx(struct stmmac_priv *priv, void __iomem *ioaddr, u32 chan)
{
	u32 value = readl(ioaddr + DMA_CONTROL);
	value &= ~DMA_CONTROL_SR;
	writel(value, ioaddr + DMA_CONTROL);
}

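/* Debug-only decoders for the TX/RX process state fields of the DMA status
 * register; compiled in only when DWMAC_DMA_DEBUG is defined.
 */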
#ifdef DWMAC_DMA_DEBUG
static void show_tx_process_state(unsigned int status)
{
	unsigned int state;

	state = (status & DMA_STATUS_TS_MASK) >> DMA_STATUS_TS_SHIFT;

	switch (state) {
	case 0:
		pr_debug("- TX (Stopped): Reset or Stop command\n");
		break;
	case 1:
		pr_debug("- TX (Running): Fetching the Tx desc\n");
		break;
	case 2:
		pr_debug("- TX (Running): Waiting for end of tx\n");
		break;
	case 3:
		pr_debug("- TX (Running): Reading the data and queuing the data into the Tx buf\n");
		break;
	case 6:
		pr_debug("- TX (Suspended): Tx Buff Underflow or an unavailable Transmit descriptor\n");
		break;
	case 7:
		pr_debug("- TX (Running): Closing Tx descriptor\n");
		break;
	default:
		break;
	}
}

static void show_rx_process_state(unsigned int status)
{
	unsigned int state;

	state = (status & DMA_STATUS_RS_MASK) >> DMA_STATUS_RS_SHIFT;

	switch (state) {
	case 0:
		pr_debug("- RX (Stopped): Reset or Stop command\n");
		break;
	case 1:
		pr_debug("- RX (Running): Fetching the Rx desc\n");
		break;
	case 2:
		pr_debug("- RX (Running): Checking for end of pkt\n");
		break;
	case 3:
		pr_debug("- RX (Running): Waiting for Rx pkt\n");
		break;
	case 4:
		pr_debug("- RX (Suspended): Unavailable Rx buf\n");
		break;
	case 5:
		pr_debug("- RX (Running): Closing Rx descriptor\n");
		break;
	case 6:
		pr_debug("- RX (Running): Flushing the current frame from the Rx buf\n");
		break;
	case 7:
		pr_debug("- RX (Running): Queuing the Rx frame from the Rx buf into memory\n");
		break;
	default:
		break;
	}
}
#endif

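/**
 * dwmac_dma_interrupt - handle and clear the DMA interrupt status
 * @priv: driver private data
 * @ioaddr: base address of the DMA register block
 * @x: extra statistics, updated on abnormal events
 * @chan: DMA channel the status is accounted to
 * @dir: DMA_DIR_RX or DMA_DIR_TX to filter one direction, anything else
 *	 handles both
 *
 * Normal RX/TX interrupts are counted in per-cpu, per-queue 64-bit
 * counters guarded by u64_stats_sync, so readers on 32-bit platforms see
 * consistent values. Returns a mask of handle_rx/handle_tx and/or one of
 * the tx_hard_error* codes for the caller to act on.
 */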
int dwmac_dma_interrupt(struct stmmac_priv *priv, void __iomem *ioaddr,
			struct stmmac_extra_stats *x, u32 chan, u32 dir)
{
	struct stmmac_pcpu_stats *stats = this_cpu_ptr(priv->xstats.pcpu_stats);
	int ret = 0;
	/* read the status register (CSR5) */
	u32 intr_status = readl(ioaddr + DMA_STATUS);

#ifdef DWMAC_DMA_DEBUG
	/* Enable it to monitor DMA rx/tx status in case of critical problems */
	pr_debug("%s: [CSR5: 0x%08x]\n", __func__, intr_status);
	show_tx_process_state(intr_status);
	show_rx_process_state(intr_status);
#endif

	if (dir == DMA_DIR_RX)
		intr_status &= DMA_STATUS_MSK_RX;
	else if (dir == DMA_DIR_TX)
		intr_status &= DMA_STATUS_MSK_TX;

	/* ABNORMAL interrupts */
	if (unlikely(intr_status & DMA_STATUS_AIS)) {
		if (unlikely(intr_status & DMA_STATUS_UNF)) {
			ret = tx_hard_error_bump_tc;
			x->tx_undeflow_irq++;
		}
		if (unlikely(intr_status & DMA_STATUS_TJT))
			x->tx_jabber_irq++;

		if (unlikely(intr_status & DMA_STATUS_OVF))
			x->rx_overflow_irq++;

		if (unlikely(intr_status & DMA_STATUS_RU))
			x->rx_buf_unav_irq++;
		if (unlikely(intr_status & DMA_STATUS_RPS))
			x->rx_process_stopped_irq++;
		if (unlikely(intr_status & DMA_STATUS_RWT))
			x->rx_watchdog_irq++;
		if (unlikely(intr_status & DMA_STATUS_ETI))
			x->tx_early_irq++;
		if (unlikely(intr_status & DMA_STATUS_TPS)) {
			x->tx_process_stopped_irq++;
			ret = tx_hard_error;
		}
		if (unlikely(intr_status & DMA_STATUS_FBI)) {
			x->fatal_bus_error_irq++;
			ret = tx_hard_error;
		}
	}
	/* TX/RX NORMAL interrupts */
	if (likely(intr_status & DMA_STATUS_NIS)) {
		if (likely(intr_status & DMA_STATUS_RI)) {
			u32 value = readl(ioaddr + DMA_INTR_ENA);
			/* to schedule NAPI on real RIE event. */
			if (likely(value & DMA_INTR_ENA_RIE)) {
				u64_stats_update_begin(&stats->syncp);
				u64_stats_inc(&stats->rx_normal_irq_n[chan]);
				u64_stats_update_end(&stats->syncp);
				ret |= handle_rx;
			}
		}
		if (likely(intr_status & DMA_STATUS_TI)) {
			u64_stats_update_begin(&stats->syncp);
			u64_stats_inc(&stats->tx_normal_irq_n[chan]);
			u64_stats_update_end(&stats->syncp);
			ret |= handle_tx;
		}
		if (unlikely(intr_status & DMA_STATUS_ERI))
			x->rx_early_irq++;
	}
	/* Optional hardware blocks, interrupts should be disabled */
	if (unlikely(intr_status &
		     (DMA_STATUS_GPI | DMA_STATUS_GMI | DMA_STATUS_GLI)))
		pr_warn("%s: unexpected status %08x\n", __func__, intr_status);

	/* Clear the interrupt by writing a logic 1 to the CSR5[15-0] */
	writel((intr_status & 0x1ffff), ioaddr + DMA_STATUS);

	return ret;
}

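/* Set the Flush Transmit FIFO (FTF) bit and busy-wait until the hardware
 * clears it again, i.e. until the TX FIFO has been emptied.
 */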
void dwmac_dma_flush_tx_fifo(void __iomem *ioaddr)
{
	u32 csr6 = readl(ioaddr + DMA_CONTROL);
	writel((csr6 | DMA_CONTROL_FTF), ioaddr + DMA_CONTROL);

	do {} while ((readl(ioaddr + DMA_CONTROL) & DMA_CONTROL_FTF));
}

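/* Write a 6-byte MAC address into a high/low register pair: bytes 4-5 go
 * into the high register (together with the Address Enable bit), bytes
 * 0-3 into the low register.
 */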
void stmmac_set_mac_addr(void __iomem *ioaddr, const u8 addr[6],
			 unsigned int high, unsigned int low)
{
	unsigned long data;

	data = (addr[5] << 8) | addr[4];
	/* For MAC Addr registers we have to set the Address Enable (AE)
	 * bit that has no effect on the High Reg 0 where the bit 31 (MO)
	 * is RO.
	 */
	writel(data | GMAC_HI_REG_AE, ioaddr + high);
	data = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];
	writel(data, ioaddr + low);
}
EXPORT_SYMBOL_GPL(stmmac_set_mac_addr);

/* Enable/disable the MAC RX/TX paths */
void stmmac_set_mac(void __iomem *ioaddr, bool enable)
{
	u32 old_val, value;

	old_val = readl(ioaddr + MAC_CTRL_REG);
	value = old_val;

	if (enable)
		value |= MAC_ENABLE_RX | MAC_ENABLE_TX;
	else
		value &= ~(MAC_ENABLE_TX | MAC_ENABLE_RX);

	if (value != old_val)
		writel(value, ioaddr + MAC_CTRL_REG);
}

void stmmac_get_mac_addr(void __iomem *ioaddr, unsigned char *addr,
			 unsigned int high, unsigned int low)
{
	unsigned int hi_addr, lo_addr;

	/* Read the MAC address from the hardware */
	hi_addr = readl(ioaddr + high);
	lo_addr = readl(ioaddr + low);

	/* Extract the MAC address from the high and low words */
	addr[0] = lo_addr & 0xff;
	addr[1] = (lo_addr >> 8) & 0xff;
	addr[2] = (lo_addr >> 16) & 0xff;
	addr[3] = (lo_addr >> 24) & 0xff;
	addr[4] = hi_addr & 0xff;
	addr[5] = (hi_addr >> 8) & 0xff;
}
EXPORT_SYMBOL_GPL(stmmac_get_mac_addr);