linux/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c

// SPDX-License-Identifier: GPL-2.0-only
/*
* This is the driver for the GMAC on-chip Ethernet controller for ST SoCs.
* DWC Ether MAC version 4.xx has been used for developing this code.
*
* This contains the functions to handle the dma.
*
* Copyright (C) 2015 STMicroelectronics Ltd
*
* Author: Alexandre Torgue <alexandre.torgue@st.com>
*/
#include <linux/io.h>
#include "dwmac4.h"
#include "dwmac4_dma.h"
#include "stmmac.h"
static void dwmac4_dma_axi(void __iomem *ioaddr, struct stmmac_axi *axi)
{
u32 value = readl(ioaddr + DMA_SYS_BUS_MODE);
int i;
pr_info("dwmac4: Master AXI performs %s burst length\n",
(value & DMA_SYS_BUS_FB) ? "fixed" : "any");
if (axi->axi_lpi_en)
value |= DMA_AXI_EN_LPI;
if (axi->axi_xit_frm)
value |= DMA_AXI_LPI_XIT_FRM;
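/* Cap and program the maximum number of outstanding AXI write/read
 * requests (WR_OSR_LMT/RD_OSR_LMT); values are clamped to DMA_AXI_OSR_MAX.
 */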
value &= ~DMA_AXI_WR_OSR_LMT;
value |= (axi->axi_wr_osr_lmt & DMA_AXI_OSR_MAX) <<
DMA_AXI_WR_OSR_LMT_SHIFT;
value &= ~DMA_AXI_RD_OSR_LMT;
value |= (axi->axi_rd_osr_lmt & DMA_AXI_OSR_MAX) <<
DMA_AXI_RD_OSR_LMT_SHIFT;
/* Depending on the UNDEF bit the Master AXI will perform any burst
* length according to the BLEN programmed (by default all BLEN are
* set).
*/
for (i = 0; i < AXI_BLEN; i++) {
switch (axi->axi_blen[i]) {
case 256:
value |= DMA_AXI_BLEN256;
break;
case 128:
value |= DMA_AXI_BLEN128;
break;
case 64:
value |= DMA_AXI_BLEN64;
break;
case 32:
value |= DMA_AXI_BLEN32;
break;
case 16:
value |= DMA_AXI_BLEN16;
break;
case 8:
value |= DMA_AXI_BLEN8;
break;
case 4:
value |= DMA_AXI_BLEN4;
break;
}
}
writel(value, ioaddr + DMA_SYS_BUS_MODE);
}
static void dwmac4_dma_init_rx_chan(struct stmmac_priv *priv,
void __iomem *ioaddr,
struct stmmac_dma_cfg *dma_cfg,
dma_addr_t dma_rx_phy, u32 chan)
{
const struct dwmac4_addrs *dwmac4_addrs = priv->plat->dwmac4_addrs;
u32 value;
u32 rxpbl = dma_cfg->rxpbl ?: dma_cfg->pbl;
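/* A direction-specific PBL, when provided, overrides the common one
 * (the GNU "?:" picks dma_cfg->pbl when rxpbl is zero).
 */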
value = readl(ioaddr + DMA_CHAN_RX_CONTROL(dwmac4_addrs, chan));
value = value | (rxpbl << DMA_BUS_MODE_RPBL_SHIFT);
writel(value, ioaddr + DMA_CHAN_RX_CONTROL(dwmac4_addrs, chan));
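/* With Enhanced Address Mode (EAME) the descriptor ring may live above
 * 4 GiB, so program the upper 32 bits of the base address as well.
 */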
if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT) && likely(dma_cfg->eame))
writel(upper_32_bits(dma_rx_phy),
ioaddr + DMA_CHAN_RX_BASE_ADDR_HI(dwmac4_addrs, chan));
writel(lower_32_bits(dma_rx_phy),
ioaddr + DMA_CHAN_RX_BASE_ADDR(dwmac4_addrs, chan));
}
static void dwmac4_dma_init_tx_chan(struct stmmac_priv *priv,
void __iomem *ioaddr,
struct stmmac_dma_cfg *dma_cfg,
dma_addr_t dma_tx_phy, u32 chan)
{
const struct dwmac4_addrs *dwmac4_addrs = priv->plat->dwmac4_addrs;
u32 value;
u32 txpbl = dma_cfg->txpbl ?: dma_cfg->pbl;
value = readl(ioaddr + DMA_CHAN_TX_CONTROL(dwmac4_addrs, chan));
value = value | (txpbl << DMA_BUS_MODE_PBL_SHIFT);
/* Enable OSP (Operate on Second Packet) so the DMA can fetch the next
 * descriptor while the current frame is still being transmitted.
 */
value |= DMA_CONTROL_OSP;
writel(value, ioaddr + DMA_CHAN_TX_CONTROL(dwmac4_addrs, chan));
if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT) && likely(dma_cfg->eame))
writel(upper_32_bits(dma_tx_phy),
ioaddr + DMA_CHAN_TX_BASE_ADDR_HI(dwmac4_addrs, chan));
writel(lower_32_bits(dma_tx_phy),
ioaddr + DMA_CHAN_TX_BASE_ADDR(dwmac4_addrs, chan));
}
static void dwmac4_dma_init_channel(struct stmmac_priv *priv,
void __iomem *ioaddr,
struct stmmac_dma_cfg *dma_cfg, u32 chan)
{
const struct dwmac4_addrs *dwmac4_addrs = priv->plat->dwmac4_addrs;
u32 value;
/* common channel control register config */
value = readl(ioaddr + DMA_CHAN_CONTROL(dwmac4_addrs, chan));
if (dma_cfg->pblx8)
value = value | DMA_BUS_MODE_PBL;
writel(value, ioaddr + DMA_CHAN_CONTROL(dwmac4_addrs, chan));
/* Mask interrupts by writing to CSR7 */
writel(DMA_CHAN_INTR_DEFAULT_MASK,
ioaddr + DMA_CHAN_INTR_ENA(dwmac4_addrs, chan));
}
static void dwmac410_dma_init_channel(struct stmmac_priv *priv,
void __iomem *ioaddr,
struct stmmac_dma_cfg *dma_cfg, u32 chan)
{
const struct dwmac4_addrs *dwmac4_addrs = priv->plat->dwmac4_addrs;
u32 value;
/* common channel control register config */
value = readl(ioaddr + DMA_CHAN_CONTROL(dwmac4_addrs, chan));
if (dma_cfg->pblx8)
value = value | DMA_BUS_MODE_PBL;
writel(value, ioaddr + DMA_CHAN_CONTROL(dwmac4_addrs, chan));
/* Mask interrupts by writing to CSR7 */
writel(DMA_CHAN_INTR_DEFAULT_MASK_4_10,
ioaddr + DMA_CHAN_INTR_ENA(dwmac4_addrs, chan));
}
static void dwmac4_dma_init(void __iomem *ioaddr,
struct stmmac_dma_cfg *dma_cfg)
{
u32 value = readl(ioaddr + DMA_SYS_BUS_MODE);
/* Set the Fixed burst mode */
if (dma_cfg->fixed_burst)
value |= DMA_SYS_BUS_FB;
/* Mixed Burst has no effect when fb is set */
if (dma_cfg->mixed_burst)
value |= DMA_SYS_BUS_MB;
if (dma_cfg->aal)
value |= DMA_SYS_BUS_AAL;
if (dma_cfg->eame)
value |= DMA_SYS_BUS_EAME;
writel(value, ioaddr + DMA_SYS_BUS_MODE);
value = readl(ioaddr + DMA_BUS_MODE);
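/* With multiple MSI vectors, INTM mode 1 signals each channel's TX/RX
 * events on its own interrupt line.
 */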
if (dma_cfg->multi_msi_en) {
value &= ~DMA_BUS_MODE_INTM_MASK;
value |= (DMA_BUS_MODE_INTM_MODE1 << DMA_BUS_MODE_INTM_SHIFT);
}
if (dma_cfg->dche)
value |= DMA_BUS_MODE_DCHE;
writel(value, ioaddr + DMA_BUS_MODE);
}
static void _dwmac4_dump_dma_regs(struct stmmac_priv *priv,
void __iomem *ioaddr, u32 channel,
u32 *reg_space)
{
const struct dwmac4_addrs *dwmac4_addrs = priv->plat->dwmac4_addrs;
const struct dwmac4_addrs *default_addrs = NULL;
/* Purposely save the registers in the "normal" layout, regardless of
* platform modifications, to keep reg_space size constant
*/
reg_space[DMA_CHAN_CONTROL(default_addrs, channel) / 4] =
readl(ioaddr + DMA_CHAN_CONTROL(dwmac4_addrs, channel));
reg_space[DMA_CHAN_TX_CONTROL(default_addrs, channel) / 4] =
readl(ioaddr + DMA_CHAN_TX_CONTROL(dwmac4_addrs, channel));
reg_space[DMA_CHAN_RX_CONTROL(default_addrs, channel) / 4] =
readl(ioaddr + DMA_CHAN_RX_CONTROL(dwmac4_addrs, channel));
reg_space[DMA_CHAN_TX_BASE_ADDR_HI(default_addrs, channel) / 4] =
readl(ioaddr + DMA_CHAN_TX_BASE_ADDR_HI(dwmac4_addrs, channel));
reg_space[DMA_CHAN_TX_BASE_ADDR(default_addrs, channel) / 4] =
readl(ioaddr + DMA_CHAN_TX_BASE_ADDR(dwmac4_addrs, channel));
reg_space[DMA_CHAN_RX_BASE_ADDR_HI(default_addrs, channel) / 4] =
readl(ioaddr + DMA_CHAN_RX_BASE_ADDR_HI(dwmac4_addrs, channel));
reg_space[DMA_CHAN_RX_BASE_ADDR(default_addrs, channel) / 4] =
readl(ioaddr + DMA_CHAN_RX_BASE_ADDR(dwmac4_addrs, channel));
reg_space[DMA_CHAN_TX_END_ADDR(default_addrs, channel) / 4] =
readl(ioaddr + DMA_CHAN_TX_END_ADDR(dwmac4_addrs, channel));
reg_space[DMA_CHAN_RX_END_ADDR(default_addrs, channel) / 4] =
readl(ioaddr + DMA_CHAN_RX_END_ADDR(dwmac4_addrs, channel));
reg_space[DMA_CHAN_TX_RING_LEN(default_addrs, channel) / 4] =
readl(ioaddr + DMA_CHAN_TX_RING_LEN(dwmac4_addrs, channel));
reg_space[DMA_CHAN_RX_RING_LEN(default_addrs, channel) / 4] =
readl(ioaddr + DMA_CHAN_RX_RING_LEN(dwmac4_addrs, channel));
reg_space[DMA_CHAN_INTR_ENA(default_addrs, channel) / 4] =
readl(ioaddr + DMA_CHAN_INTR_ENA(dwmac4_addrs, channel));
reg_space[DMA_CHAN_RX_WATCHDOG(default_addrs, channel) / 4] =
readl(ioaddr + DMA_CHAN_RX_WATCHDOG(dwmac4_addrs, channel));
reg_space[DMA_CHAN_SLOT_CTRL_STATUS(default_addrs, channel) / 4] =
readl(ioaddr + DMA_CHAN_SLOT_CTRL_STATUS(dwmac4_addrs, channel));
reg_space[DMA_CHAN_CUR_TX_DESC(default_addrs, channel) / 4] =
readl(ioaddr + DMA_CHAN_CUR_TX_DESC(dwmac4_addrs, channel));
reg_space[DMA_CHAN_CUR_RX_DESC(default_addrs, channel) / 4] =
readl(ioaddr + DMA_CHAN_CUR_RX_DESC(dwmac4_addrs, channel));
reg_space[DMA_CHAN_CUR_TX_BUF_ADDR_HI(default_addrs, channel) / 4] =
readl(ioaddr + DMA_CHAN_CUR_TX_BUF_ADDR_HI(dwmac4_addrs, channel));
reg_space[DMA_CHAN_CUR_TX_BUF_ADDR(default_addrs, channel) / 4] =
readl(ioaddr + DMA_CHAN_CUR_TX_BUF_ADDR(dwmac4_addrs, channel));
reg_space[DMA_CHAN_CUR_RX_BUF_ADDR_HI(default_addrs, channel) / 4] =
readl(ioaddr + DMA_CHAN_CUR_RX_BUF_ADDR_HI(dwmac4_addrs, channel));
reg_space[DMA_CHAN_CUR_RX_BUF_ADDR(default_addrs, channel) / 4] =
readl(ioaddr + DMA_CHAN_CUR_RX_BUF_ADDR(dwmac4_addrs, channel));
reg_space[DMA_CHAN_STATUS(default_addrs, channel) / 4] =
readl(ioaddr + DMA_CHAN_STATUS(dwmac4_addrs, channel));
}
static void dwmac4_dump_dma_regs(struct stmmac_priv *priv, void __iomem *ioaddr,
u32 *reg_space)
{
int i;
for (i = 0; i < DMA_CHANNEL_NB_MAX; i++)
_dwmac4_dump_dma_regs(priv, ioaddr, i, reg_space);
}
static void dwmac4_rx_watchdog(struct stmmac_priv *priv, void __iomem *ioaddr,
u32 riwt, u32 queue)
{
const struct dwmac4_addrs *dwmac4_addrs = priv->plat->dwmac4_addrs;
writel(riwt, ioaddr + DMA_CHAN_RX_WATCHDOG(dwmac4_addrs, queue));
}
static void dwmac4_dma_rx_chan_op_mode(struct stmmac_priv *priv,
void __iomem *ioaddr, int mode,
u32 channel, int fifosz, u8 qmode)
{
const struct dwmac4_addrs *dwmac4_addrs = priv->plat->dwmac4_addrs;
unsigned int rqs = fifosz / 256 - 1;
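/* RQS encodes the RX queue size in 256-byte blocks minus one, e.g. a
 * 4096-byte FIFO yields rqs = 4096 / 256 - 1 = 15.
 */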
u32 mtl_rx_op;
mtl_rx_op = readl(ioaddr + MTL_CHAN_RX_OP_MODE(dwmac4_addrs, channel));
if (mode == SF_DMA_MODE) {
pr_debug("GMAC: enable RX store and forward mode\n");
mtl_rx_op |= MTL_OP_MODE_RSF;
} else {
pr_debug("GMAC: disable RX SF mode (threshold %d)\n", mode);
mtl_rx_op &= ~MTL_OP_MODE_RSF;
mtl_rx_op &= ~MTL_OP_MODE_RTC_MASK;
if (mode <= 32)
mtl_rx_op |= MTL_OP_MODE_RTC_32;
else if (mode <= 64)
mtl_rx_op |= MTL_OP_MODE_RTC_64;
else if (mode <= 96)
mtl_rx_op |= MTL_OP_MODE_RTC_96;
else
mtl_rx_op |= MTL_OP_MODE_RTC_128;
}
mtl_rx_op &= ~MTL_OP_MODE_RQS_MASK;
mtl_rx_op |= rqs << MTL_OP_MODE_RQS_SHIFT;
/* Enable flow control only if each channel gets 4 KiB or more FIFO and
* only if channel is not an AVB channel.
*/
if ((fifosz >= 4096) && (qmode != MTL_QUEUE_AVB)) {
unsigned int rfd, rfa;
mtl_rx_op |= MTL_OP_MODE_EHFC;
/* Set Threshold for Activating Flow Control to min 2 frames,
* i.e. 1500 * 2 = 3000 bytes.
*
* Set Threshold for Deactivating Flow Control to min 1 frame,
* i.e. 1500 bytes.
*/
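/* RFD/RFA appear to be encoded in half-KiB steps below the FIFO-full
 * mark (level = Full - (1 + n / 2) KiB), which gives the values below.
 */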
switch (fifosz) {
case 4096:
/* A 4 KiB FIFO is too small to satisfy the formula above, so
 * overflow may still occur despite flow control.
 */
rfd = 0x03; /* Full-2.5K */
rfa = 0x01; /* Full-1.5K */
break;
default:
rfd = 0x07; /* Full-4.5K */
rfa = 0x04; /* Full-3K */
break;
}
mtl_rx_op &= ~MTL_OP_MODE_RFD_MASK;
mtl_rx_op |= rfd << MTL_OP_MODE_RFD_SHIFT;
mtl_rx_op &= ~MTL_OP_MODE_RFA_MASK;
mtl_rx_op |= rfa << MTL_OP_MODE_RFA_SHIFT;
}
writel(mtl_rx_op, ioaddr + MTL_CHAN_RX_OP_MODE(dwmac4_addrs, channel));
}
static void dwmac4_dma_tx_chan_op_mode(struct stmmac_priv *priv,
void __iomem *ioaddr, int mode,
u32 channel, int fifosz, u8 qmode)
{
const struct dwmac4_addrs *dwmac4_addrs = priv->plat->dwmac4_addrs;
u32 mtl_tx_op = readl(ioaddr + MTL_CHAN_TX_OP_MODE(dwmac4_addrs,
channel));
unsigned int tqs = fifosz / 256 - 1;
if (mode == SF_DMA_MODE) {
pr_debug("GMAC: enable TX store and forward mode\n");
/* Transmit COE type 2 cannot be done in cut-through mode. */
mtl_tx_op |= MTL_OP_MODE_TSF;
} else {
pr_debug("GMAC: disabling TX SF (threshold %d)\n", mode);
mtl_tx_op &= ~MTL_OP_MODE_TSF;
mtl_tx_op &= ~MTL_OP_MODE_TTC_MASK;
/* Set the transmit threshold */
if (mode <= 32)
mtl_tx_op |= MTL_OP_MODE_TTC_32;
else if (mode <= 64)
mtl_tx_op |= MTL_OP_MODE_TTC_64;
else if (mode <= 96)
mtl_tx_op |= MTL_OP_MODE_TTC_96;
else if (mode <= 128)
mtl_tx_op |= MTL_OP_MODE_TTC_128;
else if (mode <= 192)
mtl_tx_op |= MTL_OP_MODE_TTC_192;
else if (mode <= 256)
mtl_tx_op |= MTL_OP_MODE_TTC_256;
else if (mode <= 384)
mtl_tx_op |= MTL_OP_MODE_TTC_384;
else
mtl_tx_op |= MTL_OP_MODE_TTC_512;
}
/* For an IP with DWC_EQOS_NUM_TXQ == 1, the fields TXQEN and TQS are RO
* with reset values: TXQEN on, TQS == DWC_EQOS_TXFIFO_SIZE.
* For an IP with DWC_EQOS_NUM_TXQ > 1, the fields TXQEN and TQS are R/W
* with reset values: TXQEN off, TQS 256 bytes.
*
* TXQEN must be written for multi-channel operation and TQS must
* reflect the available fifo size per queue (total fifo size / number
* of enabled queues).
*/
mtl_tx_op &= ~MTL_OP_MODE_TXQEN_MASK;
if (qmode != MTL_QUEUE_AVB)
mtl_tx_op |= MTL_OP_MODE_TXQEN;
else
mtl_tx_op |= MTL_OP_MODE_TXQEN_AV;
mtl_tx_op &= ~MTL_OP_MODE_TQS_MASK;
mtl_tx_op |= tqs << MTL_OP_MODE_TQS_SHIFT;
writel(mtl_tx_op, ioaddr + MTL_CHAN_TX_OP_MODE(dwmac4_addrs, channel));
}
static int dwmac4_get_hw_feature(void __iomem *ioaddr,
struct dma_features *dma_cap)
{
u32 hw_cap = readl(ioaddr + GMAC_HW_FEATURE0);
/* MAC HW feature0 */
dma_cap->mbps_10_100 = (hw_cap & GMAC_HW_FEAT_MIISEL);
dma_cap->mbps_1000 = (hw_cap & GMAC_HW_FEAT_GMIISEL) >> 1;
dma_cap->half_duplex = (hw_cap & GMAC_HW_FEAT_HDSEL) >> 2;
dma_cap->vlhash = (hw_cap & GMAC_HW_FEAT_VLHASH) >> 4;
dma_cap->multi_addr = (hw_cap & GMAC_HW_FEAT_ADDMAC) >> 18;
dma_cap->pcs = (hw_cap & GMAC_HW_FEAT_PCSSEL) >> 3;
dma_cap->sma_mdio = (hw_cap & GMAC_HW_FEAT_SMASEL) >> 5;
dma_cap->pmt_remote_wake_up = (hw_cap & GMAC_HW_FEAT_RWKSEL) >> 6;
dma_cap->pmt_magic_frame = (hw_cap & GMAC_HW_FEAT_MGKSEL) >> 7;
/* MMC */
dma_cap->rmon = (hw_cap & GMAC_HW_FEAT_MMCSEL) >> 8;
/* IEEE 1588-2008 */
dma_cap->atime_stamp = (hw_cap & GMAC_HW_FEAT_TSSEL) >> 12;
/* 802.3az - Energy-Efficient Ethernet (EEE) */
dma_cap->eee = (hw_cap & GMAC_HW_FEAT_EEESEL) >> 13;
/* TX and RX csum */
dma_cap->tx_coe = (hw_cap & GMAC_HW_FEAT_TXCOSEL) >> 14;
dma_cap->rx_coe = (hw_cap & GMAC_HW_FEAT_RXCOESEL) >> 16;
dma_cap->vlins = (hw_cap & GMAC_HW_FEAT_SAVLANINS) >> 27;
dma_cap->arpoffsel = (hw_cap & GMAC_HW_FEAT_ARPOFFSEL) >> 9;
/* MAC HW feature1 */
hw_cap = readl(ioaddr + GMAC_HW_FEATURE1);
dma_cap->l3l4fnum = (hw_cap & GMAC_HW_FEAT_L3L4FNUM) >> 27;
dma_cap->hash_tb_sz = (hw_cap & GMAC_HW_HASH_TB_SZ) >> 24;
dma_cap->av = (hw_cap & GMAC_HW_FEAT_AVSEL) >> 20;
dma_cap->tsoen = (hw_cap & GMAC_HW_TSOEN) >> 18;
dma_cap->sphen = (hw_cap & GMAC_HW_FEAT_SPHEN) >> 17;
dma_cap->addr64 = (hw_cap & GMAC_HW_ADDR64) >> 14;
switch (dma_cap->addr64) {
case 0:
dma_cap->addr64 = 32;
break;
case 1:
dma_cap->addr64 = 40;
break;
case 2:
dma_cap->addr64 = 48;
break;
default:
dma_cap->addr64 = 32;
break;
}
/* RX and TX FIFO sizes are encoded as log2(n / 128). Undo that by
* shifting and store the sizes in bytes.
*/
dma_cap->tx_fifo_size = 128 << ((hw_cap & GMAC_HW_TXFIFOSIZE) >> 6);
dma_cap->rx_fifo_size = 128 << ((hw_cap & GMAC_HW_RXFIFOSIZE) >> 0);
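/* e.g. an encoded value of 7 decodes to 128 << 7 = 16384 bytes (16 KiB) */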
/* MAC HW feature2 */
hw_cap = readl(ioaddr + GMAC_HW_FEATURE2);
/* TX and RX number of channels */
dma_cap->number_rx_channel =
((hw_cap & GMAC_HW_FEAT_RXCHCNT) >> 12) + 1;
dma_cap->number_tx_channel =
((hw_cap & GMAC_HW_FEAT_TXCHCNT) >> 18) + 1;
/* TX and RX number of queues */
dma_cap->number_rx_queues =
((hw_cap & GMAC_HW_FEAT_RXQCNT) >> 0) + 1;
dma_cap->number_tx_queues =
((hw_cap & GMAC_HW_FEAT_TXQCNT) >> 6) + 1;
/* PPS output */
dma_cap->pps_out_num = (hw_cap & GMAC_HW_FEAT_PPSOUTNUM) >> 24;
/* IEEE 1588-2002 */
dma_cap->time_stamp = 0;
/* Number of Auxiliary Snapshot Inputs */
dma_cap->aux_snapshot_n = (hw_cap & GMAC_HW_FEAT_AUXSNAPNUM) >> 28;
/* MAC HW feature3 */
hw_cap = readl(ioaddr + GMAC_HW_FEATURE3);
/* 5.10 Features */
dma_cap->asp = (hw_cap & GMAC_HW_FEAT_ASP) >> 28;
dma_cap->tbssel = (hw_cap & GMAC_HW_FEAT_TBSSEL) >> 27;
dma_cap->fpesel = (hw_cap & GMAC_HW_FEAT_FPESEL) >> 26;
dma_cap->estwid = (hw_cap & GMAC_HW_FEAT_ESTWID) >> 20;
dma_cap->estdep = (hw_cap & GMAC_HW_FEAT_ESTDEP) >> 17;
dma_cap->estsel = (hw_cap & GMAC_HW_FEAT_ESTSEL) >> 16;
dma_cap->frpes = (hw_cap & GMAC_HW_FEAT_FRPES) >> 13;
dma_cap->frpbs = (hw_cap & GMAC_HW_FEAT_FRPBS) >> 11;
dma_cap->frpsel = (hw_cap & GMAC_HW_FEAT_FRPSEL) >> 10;
dma_cap->dvlan = (hw_cap & GMAC_HW_FEAT_DVLAN) >> 5;
return 0;
}
/* Enable/disable the TSO engine for a given DMA channel */
static void dwmac4_enable_tso(struct stmmac_priv *priv, void __iomem *ioaddr,
bool en, u32 chan)
{
const struct dwmac4_addrs *dwmac4_addrs = priv->plat->dwmac4_addrs;
u32 value;
if (en) {
/* enable TSO */
value = readl(ioaddr + DMA_CHAN_TX_CONTROL(dwmac4_addrs, chan));
writel(value | DMA_CONTROL_TSE,
ioaddr + DMA_CHAN_TX_CONTROL(dwmac4_addrs, chan));
} else {
/* disable TSO */
value = readl(ioaddr + DMA_CHAN_TX_CONTROL(dwmac4_addrs, chan));
writel(value & ~DMA_CONTROL_TSE,
ioaddr + DMA_CHAN_TX_CONTROL(dwmac4_addrs, chan));
}
}
static void dwmac4_qmode(struct stmmac_priv *priv, void __iomem *ioaddr,
u32 channel, u8 qmode)
{
const struct dwmac4_addrs *dwmac4_addrs = priv->plat->dwmac4_addrs;
u32 mtl_tx_op = readl(ioaddr + MTL_CHAN_TX_OP_MODE(dwmac4_addrs,
channel));
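/* AVB queues must be enabled with the TXQEN_AV encoding; all other
 * queues are enabled as generic TX queues.
 */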
mtl_tx_op &= ~MTL_OP_MODE_TXQEN_MASK;
if (qmode != MTL_QUEUE_AVB)
mtl_tx_op |= MTL_OP_MODE_TXQEN;
else
mtl_tx_op |= MTL_OP_MODE_TXQEN_AV;
writel(mtl_tx_op, ioaddr + MTL_CHAN_TX_OP_MODE(dwmac4_addrs, channel));
}
static void dwmac4_set_bfsize(struct stmmac_priv *priv, void __iomem *ioaddr,
int bfsize, u32 chan)
{
const struct dwmac4_addrs *dwmac4_addrs = priv->plat->dwmac4_addrs;
u32 value = readl(ioaddr + DMA_CHAN_RX_CONTROL(dwmac4_addrs, chan));
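/* RBSZ sets the receive buffer size, in bytes, used by every RX
 * descriptor on this channel.
 */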
value &= ~DMA_RBSZ_MASK;
value |= (bfsize << DMA_RBSZ_SHIFT) & DMA_RBSZ_MASK;
writel(value, ioaddr + DMA_CHAN_RX_CONTROL(dwmac4_addrs, chan));
}
static void dwmac4_enable_sph(struct stmmac_priv *priv, void __iomem *ioaddr,
bool en, u32 chan)
{
const struct dwmac4_addrs *dwmac4_addrs = priv->plat->dwmac4_addrs;
u32 value = readl(ioaddr + GMAC_EXT_CONFIG);
value &= ~GMAC_CONFIG_HDSMS;
value |= GMAC_CONFIG_HDSMS_256; /* Segment max 256 bytes */
writel(value, ioaddr + GMAC_EXT_CONFIG);
/* Configure header splitting to occur at the end of the Layer 2 header
 * (SPLM = L2OFST) instead of at Layer 3, so both VLAN-tagged and
 * untagged frames split right before the IP payload; an L3 split
 * corrupts the payload of tagged packets larger than the segment size.
 * SAVE_EN applies the same split to non-IP AV (IEEE 1722) traffic.
 */
value = readl(ioaddr + GMAC_EXT_CFG1);
value |= GMAC_CONFIG1_SPLM(1); /* Split mode set to L2OFST */
value |= GMAC_CONFIG1_SAVE_EN; /* Enable Split AV mode */
writel(value, ioaddr + GMAC_EXT_CFG1);
value = readl(ioaddr + DMA_CHAN_CONTROL(dwmac4_addrs, chan));
if (en)
value |= DMA_CONTROL_SPH;
else
value &= ~DMA_CONTROL_SPH;
writel(value, ioaddr + DMA_CHAN_CONTROL(dwmac4_addrs, chan));
}
static int dwmac4_enable_tbs(struct stmmac_priv *priv, void __iomem *ioaddr,
bool en, u32 chan)
{
const struct dwmac4_addrs *dwmac4_addrs = priv->plat->dwmac4_addrs;
u32 value = readl(ioaddr + DMA_CHAN_TX_CONTROL(dwmac4_addrs, chan));
if (en)
value |= DMA_CONTROL_EDSE;
else
value &= ~DMA_CONTROL_EDSE;
writel(value, ioaddr + DMA_CHAN_TX_CONTROL(dwmac4_addrs, chan));
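/* Read EDSE back: on cores synthesized without TBS support the bit
 * stays clear, so fail with -EIO rather than silently dropping the
 * feature.
 */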
value = readl(ioaddr + DMA_CHAN_TX_CONTROL(dwmac4_addrs,
chan)) & DMA_CONTROL_EDSE;
if (en && !value)
return -EIO;
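/* Program the default Fetch Time Offset (FTOS) used for launch-time
 * (TBS) transmission.
 */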
writel(DMA_TBS_DEF_FTOS, ioaddr + DMA_TBS_CTRL);
return 0;
}
const struct stmmac_dma_ops dwmac4_dma_ops = {
.reset = dwmac4_dma_reset,
.init = dwmac4_dma_init,
.init_chan = dwmac4_dma_init_channel,
.init_rx_chan = dwmac4_dma_init_rx_chan,
.init_tx_chan = dwmac4_dma_init_tx_chan,
.axi = dwmac4_dma_axi,
.dump_regs = dwmac4_dump_dma_regs,
.dma_rx_mode = dwmac4_dma_rx_chan_op_mode,
.dma_tx_mode = dwmac4_dma_tx_chan_op_mode,
.enable_dma_irq = dwmac4_enable_dma_irq,
.disable_dma_irq = dwmac4_disable_dma_irq,
.start_tx = dwmac4_dma_start_tx,
.stop_tx = dwmac4_dma_stop_tx,
.start_rx = dwmac4_dma_start_rx,
.stop_rx = dwmac4_dma_stop_rx,
.dma_interrupt = dwmac4_dma_interrupt,
.get_hw_feature = dwmac4_get_hw_feature,
.rx_watchdog = dwmac4_rx_watchdog,
.set_rx_ring_len = dwmac4_set_rx_ring_len,
.set_tx_ring_len = dwmac4_set_tx_ring_len,
.set_rx_tail_ptr = dwmac4_set_rx_tail_ptr,
.set_tx_tail_ptr = dwmac4_set_tx_tail_ptr,
.enable_tso = dwmac4_enable_tso,
.qmode = dwmac4_qmode,
.set_bfsize = dwmac4_set_bfsize,
.enable_sph = dwmac4_enable_sph,
};
const struct stmmac_dma_ops dwmac410_dma_ops = {
.reset = dwmac4_dma_reset,
.init = dwmac4_dma_init,
.init_chan = dwmac410_dma_init_channel,
.init_rx_chan = dwmac4_dma_init_rx_chan,
.init_tx_chan = dwmac4_dma_init_tx_chan,
.axi = dwmac4_dma_axi,
.dump_regs = dwmac4_dump_dma_regs,
.dma_rx_mode = dwmac4_dma_rx_chan_op_mode,
.dma_tx_mode = dwmac4_dma_tx_chan_op_mode,
.enable_dma_irq = dwmac410_enable_dma_irq,
.disable_dma_irq = dwmac4_disable_dma_irq,
.start_tx = dwmac4_dma_start_tx,
.stop_tx = dwmac4_dma_stop_tx,
.start_rx = dwmac4_dma_start_rx,
.stop_rx = dwmac4_dma_stop_rx,
.dma_interrupt = dwmac4_dma_interrupt,
.get_hw_feature = dwmac4_get_hw_feature,
.rx_watchdog = dwmac4_rx_watchdog,
.set_rx_ring_len = dwmac4_set_rx_ring_len,
.set_tx_ring_len = dwmac4_set_tx_ring_len,
.set_rx_tail_ptr = dwmac4_set_rx_tail_ptr,
.set_tx_tail_ptr = dwmac4_set_tx_tail_ptr,
.enable_tso = dwmac4_enable_tso,
.qmode = dwmac4_qmode,
.set_bfsize = dwmac4_set_bfsize,
.enable_sph = dwmac4_enable_sph,
.enable_tbs = dwmac4_enable_tbs,
};