2019-06-01 10:08:37 +02:00
|
|
|
// SPDX-License-Identifier: GPL-2.0-only
|
2016-04-01 11:37:28 +02:00
|
|
|
/*
|
|
|
|
* This contains the functions to handle the descriptors for DesignWare databook
|
|
|
|
* 4.xx.
|
|
|
|
*
|
|
|
|
* Copyright (C) 2015 STMicroelectronics Ltd
|
|
|
|
*
|
|
|
|
* Author: Alexandre Torgue <alexandre.torgue@st.com>
|
|
|
|
*/
|
|
|
|
|
|
|
|
#include <linux/stmmac.h>
|
|
|
|
#include "common.h"
|
2020-01-13 17:24:12 +01:00
|
|
|
#include "dwmac4.h"
|
2016-04-01 11:37:28 +02:00
|
|
|
#include "dwmac4_descs.h"
|
|
|
|
|
net: stmmac: use per-queue 64 bit statistics where necessary
Currently, there are two major issues with stmmac driver statistics
First of all, statistics in stmmac_extra_stats, stmmac_rxq_stats
and stmmac_txq_stats are 32 bit variables on 32 bit platforms. This
can cause some stats to overflow after several minutes of
high traffic, for example rx_pkt_n, tx_pkt_n and so on.
Secondly, if HW supports multiqueues, there are frequent cacheline
ping pongs on some driver statistic vars, for example, normal_irq_n,
tx_pkt_n and so on. What's more, frequent cacheline ping pongs on
normal_irq_n happens in ISR, this makes the situation worse.
To improve the driver, we convert those statistics to 64 bit, implement
ndo_get_stats64 and update .get_ethtool_stats implementation
accordingly. We also use per-queue statistics where necessary to remove
the cacheline ping pongs as much as possible to make multiqueue
operations faster. Those statistics which are not possible to overflow
and not frequently updated are kept as is.
Signed-off-by: Jisheng Zhang <jszhang@kernel.org>
Link: https://lore.kernel.org/r/20230717160630.1892-3-jszhang@kernel.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
2023-07-18 00:06:30 +08:00
|
|
|
/* Decode the write-back TX status from TDES3 and update the extra stats.
 * Returns tx_dma_own while the descriptor still belongs to the DMA,
 * tx_not_ls for non-last segments, tx_done on success, and tx_err
 * (possibly OR-ed with tx_err_bump_tc on underflow) on error.
 * @ioaddr is part of the stmmac_desc_ops signature; unused here.
 */
static int dwmac4_wrback_get_tx_status(struct stmmac_extra_stats *x,
				       struct dma_desc *p,
				       void __iomem *ioaddr)
{
	unsigned int tdes3;
	int ret = tx_done;

	tdes3 = le32_to_cpu(p->des3);

	/* Get tx owner first */
	if (unlikely(tdes3 & TDES3_OWN))
		return tx_dma_own;

	/* Verify tx error by looking at the last segment. */
	if (likely(!(tdes3 & TDES3_LAST_DESCRIPTOR)))
		return tx_not_ls;

	if (unlikely(tdes3 & TDES3_ERROR_SUMMARY)) {
		ret = tx_err;

		/* Account each individual error condition reported in TDES3 */
		if (unlikely(tdes3 & TDES3_JABBER_TIMEOUT))
			x->tx_jabber++;
		if (unlikely(tdes3 & TDES3_PACKET_FLUSHED))
			x->tx_frame_flushed++;
		if (unlikely(tdes3 & TDES3_LOSS_CARRIER)) {
			x->tx_losscarrier++;
		}
		if (unlikely(tdes3 & TDES3_NO_CARRIER)) {
			x->tx_carrier++;
		}
		if (unlikely((tdes3 & TDES3_LATE_COLLISION) ||
			     (tdes3 & TDES3_EXCESSIVE_COLLISION)))
			/* Hardware reports the collision count in TDES3 */
			x->tx_collision +=
				(tdes3 & TDES3_COLLISION_COUNT_MASK)
				>> TDES3_COLLISION_COUNT_SHIFT;

		if (unlikely(tdes3 & TDES3_EXCESSIVE_DEFERRAL))
			x->tx_deferred++;

		if (unlikely(tdes3 & TDES3_UNDERFLOW_ERROR)) {
			x->tx_underflow++;
			/* Ask the caller to bump the TX FIFO threshold */
			ret |= tx_err_bump_tc;
		}

		if (unlikely(tdes3 & TDES3_IP_HDR_ERROR))
			x->tx_ip_header_error++;

		if (unlikely(tdes3 & TDES3_PAYLOAD_ERROR))
			x->tx_payload_error++;
	}

	/* Deferral can be flagged even without the error summary bit */
	if (unlikely(tdes3 & TDES3_DEFERRED))
		x->tx_deferred++;

	return ret;
}
|
|
|
|
|
net: stmmac: use per-queue 64 bit statistics where necessary
Currently, there are two major issues with stmmac driver statistics
First of all, statistics in stmmac_extra_stats, stmmac_rxq_stats
and stmmac_txq_stats are 32 bit variables on 32 bit platforms. This
can cause some stats to overflow after several minutes of
high traffic, for example rx_pkt_n, tx_pkt_n and so on.
Secondly, if HW supports multiqueues, there are frequent cacheline
ping pongs on some driver statistic vars, for example, normal_irq_n,
tx_pkt_n and so on. What's more, frequent cacheline ping pongs on
normal_irq_n happens in ISR, this makes the situation worse.
To improve the driver, we convert those statistics to 64 bit, implement
ndo_get_stats64 and update .get_ethtool_stats implementation
accordingly. We also use per-queue statistics where necessary to remove
the cacheline ping pongs as much as possible to make multiqueue
operations faster. Those statistics which are not possible to overflow
and not frequently updated are kept as is.
Signed-off-by: Jisheng Zhang <jszhang@kernel.org>
Link: https://lore.kernel.org/r/20230717160630.1892-3-jszhang@kernel.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
2023-07-18 00:06:30 +08:00
|
|
|
/* Decode the write-back RX status words (RDES1/2/3) and update stats.
 * Returns dma_own while the DMA owns the descriptor, discard_frame for
 * context descriptors or errored/filtered frames, rx_not_ls for
 * non-last segments, and good_frame otherwise.
 */
static int dwmac4_wrback_get_rx_status(struct stmmac_extra_stats *x,
				       struct dma_desc *p)
{
	unsigned int rdes1 = le32_to_cpu(p->des1);
	unsigned int rdes2 = le32_to_cpu(p->des2);
	unsigned int rdes3 = le32_to_cpu(p->des3);
	int message_type;
	int ret = good_frame;

	if (unlikely(rdes3 & RDES3_OWN))
		return dma_own;

	/* Context descriptors carry no frame data */
	if (unlikely(rdes3 & RDES3_CONTEXT_DESCRIPTOR))
		return discard_frame;
	if (likely(!(rdes3 & RDES3_LAST_DESCRIPTOR)))
		return rx_not_ls;

	if (unlikely(rdes3 & RDES3_ERROR_SUMMARY)) {
		/* Classify the error and drop the frame */
		if (unlikely(rdes3 & RDES3_GIANT_PACKET))
			x->rx_length++;
		if (unlikely(rdes3 & RDES3_OVERFLOW_ERROR))
			x->rx_gmac_overflow++;

		if (unlikely(rdes3 & RDES3_RECEIVE_WATCHDOG))
			x->rx_watchdog++;

		if (unlikely(rdes3 & RDES3_RECEIVE_ERROR))
			x->rx_mii++;

		if (unlikely(rdes3 & RDES3_CRC_ERROR))
			x->rx_crc_errors++;

		if (unlikely(rdes3 & RDES3_DRIBBLE_ERROR))
			x->dribbling_bit++;

		ret = discard_frame;
	}

	/* PTP message type field from RDES1 */
	message_type = (rdes1 & ERDES4_MSG_TYPE_MASK) >> 8;

	if (rdes1 & RDES1_IP_HDR_ERROR)
		x->ip_hdr_err++;
	if (rdes1 & RDES1_IP_CSUM_BYPASSED)
		x->ip_csum_bypassed++;
	if (rdes1 & RDES1_IPV4_HEADER)
		x->ipv4_pkt_rcvd++;
	if (rdes1 & RDES1_IPV6_HEADER)
		x->ipv6_pkt_rcvd++;
	if (rdes1 & RDES1_IP_PAYLOAD_ERROR)
		x->ip_payload_err++;

	/* Per-PTP-message-type receive counters */
	if (message_type == RDES_EXT_NO_PTP)
		x->no_ptp_rx_msg_type_ext++;
	else if (message_type == RDES_EXT_SYNC)
		x->ptp_rx_msg_type_sync++;
	else if (message_type == RDES_EXT_FOLLOW_UP)
		x->ptp_rx_msg_type_follow_up++;
	else if (message_type == RDES_EXT_DELAY_REQ)
		x->ptp_rx_msg_type_delay_req++;
	else if (message_type == RDES_EXT_DELAY_RESP)
		x->ptp_rx_msg_type_delay_resp++;
	else if (message_type == RDES_EXT_PDELAY_REQ)
		x->ptp_rx_msg_type_pdelay_req++;
	else if (message_type == RDES_EXT_PDELAY_RESP)
		x->ptp_rx_msg_type_pdelay_resp++;
	else if (message_type == RDES_EXT_PDELAY_FOLLOW_UP)
		x->ptp_rx_msg_type_pdelay_follow_up++;
	else if (message_type == RDES_PTP_ANNOUNCE)
		x->ptp_rx_msg_type_announce++;
	else if (message_type == RDES_PTP_MANAGEMENT)
		x->ptp_rx_msg_type_management++;
	else if (message_type == RDES_PTP_PKT_RESERVED_TYPE)
		x->ptp_rx_msg_pkt_reserved_type++;

	if (rdes1 & RDES1_PTP_PACKET_TYPE)
		x->ptp_frame_type++;
	if (rdes1 & RDES1_PTP_VER)
		x->ptp_ver++;
	if (rdes1 & RDES1_TIMESTAMP_DROPPED)
		x->timestamp_dropped++;

	/* MAC address filter failures also discard the frame */
	if (unlikely(rdes2 & RDES2_SA_FILTER_FAIL)) {
		x->sa_rx_filter_fail++;
		ret = discard_frame;
	}
	if (unlikely(rdes2 & RDES2_DA_FILTER_FAIL)) {
		x->da_rx_filter_fail++;
		ret = discard_frame;
	}

	if (rdes2 & RDES2_L3_FILTER_MATCH)
		x->l3_filter_match++;
	if (rdes2 & RDES2_L4_FILTER_MATCH)
		x->l4_filter_match++;
	if ((rdes2 & RDES2_L3_L4_FILT_NB_MATCH_MASK)
	    >> RDES2_L3_L4_FILT_NB_MATCH_SHIFT)
		x->l3_l4_filter_no_match++;

	return ret;
}
|
|
|
|
|
|
|
|
static int dwmac4_rd_get_tx_len(struct dma_desc *p)
|
|
|
|
{
|
2016-11-14 18:58:05 +01:00
|
|
|
return (le32_to_cpu(p->des2) & TDES2_BUFFER1_SIZE_MASK);
|
2016-04-01 11:37:28 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
static int dwmac4_get_tx_owner(struct dma_desc *p)
|
|
|
|
{
|
2016-11-14 18:58:05 +01:00
|
|
|
return (le32_to_cpu(p->des3) & TDES3_OWN) >> TDES3_OWN_SHIFT;
|
2016-04-01 11:37:28 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Hand the TX descriptor over to the DMA by setting the OWN bit. */
static void dwmac4_set_tx_owner(struct dma_desc *p)
{
	p->des3 |= cpu_to_le32(TDES3_OWN);
}
|
|
|
|
|
2018-05-18 14:56:07 +01:00
|
|
|
/* Hand the RX descriptor to the DMA: set OWN and mark buffer 1 address
 * valid; enable the completion interrupt unless @disable_rx_ic asks to
 * coalesce it.
 */
static void dwmac4_set_rx_owner(struct dma_desc *p, int disable_rx_ic)
{
	u32 flags = (RDES3_OWN | RDES3_BUFFER1_VALID_ADDR);

	if (!disable_rx_ic)
		flags |= RDES3_INT_ON_COMPLETION_EN;

	p->des3 |= cpu_to_le32(flags);
}
|
|
|
|
|
|
|
|
static int dwmac4_get_tx_ls(struct dma_desc *p)
|
|
|
|
{
|
2016-11-14 18:58:05 +01:00
|
|
|
return (le32_to_cpu(p->des3) & TDES3_LAST_DESCRIPTOR)
|
|
|
|
>> TDES3_LAST_DESCRIPTOR_SHIFT;
|
2016-04-01 11:37:28 +02:00
|
|
|
}
|
|
|
|
|
2023-11-21 13:38:42 +08:00
|
|
|
static u16 dwmac4_wrback_get_rx_vlan_tci(struct dma_desc *p)
|
|
|
|
{
|
|
|
|
return (le32_to_cpu(p->des0) & RDES0_VLAN_TAG_MASK);
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool dwmac4_wrback_get_rx_vlan_valid(struct dma_desc *p)
|
|
|
|
{
|
|
|
|
return ((le32_to_cpu(p->des3) & RDES3_LAST_DESCRIPTOR) &&
|
|
|
|
(le32_to_cpu(p->des3) & RDES3_RDES0_VALID));
|
|
|
|
}
|
|
|
|
|
2016-04-01 11:37:28 +02:00
|
|
|
static int dwmac4_wrback_get_rx_frame_len(struct dma_desc *p, int rx_coe)
|
|
|
|
{
|
2016-11-14 18:58:05 +01:00
|
|
|
return (le32_to_cpu(p->des3) & RDES3_PACKET_SIZE_MASK);
|
2016-04-01 11:37:28 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Request a TX hardware timestamp for this descriptor (TDES2 TTSE bit). */
static void dwmac4_rd_enable_tx_timestamp(struct dma_desc *p)
{
	p->des2 |= cpu_to_le32(TDES2_TIMESTAMP_ENABLE);
}
|
|
|
|
|
|
|
|
/* Return 1 if the write-back TX descriptor carries a valid timestamp
 * in des0/des1, 0 otherwise.
 */
static int dwmac4_wrback_get_tx_timestamp_status(struct dma_desc *p)
{
	/* Context type from W/B descriptor must be zero */
	if (le32_to_cpu(p->des3) & TDES3_CONTEXT_TYPE)
		return 0;

	/* Tx Timestamp Status is 1 so des0 and des1'll have valid values */
	if (le32_to_cpu(p->des3) & TDES3_TIMESTAMP_STATUS)
		return 1;

	return 0;
}
|
|
|
|
|
2018-04-16 16:08:12 +01:00
|
|
|
/* Combine the two 32-bit timestamp words (des0 = nanoseconds,
 * des1 = seconds) into a single nanosecond value in *ts.
 * @ats is part of the ops signature; unused by this variant.
 */
static inline void dwmac4_get_timestamp(void *desc, u32 ats, u64 *ts)
{
	struct dma_desc *p = (struct dma_desc *)desc;
	u64 ns;

	ns = le32_to_cpu(p->des0);
	/* convert high/sec time stamp value to nanosecond */
	ns += le32_to_cpu(p->des1) * 1000000000ULL;

	*ts = ns;
}
|
|
|
|
|
2016-11-14 09:27:29 +01:00
|
|
|
/* Check whether the RX context descriptor holds a readable timestamp.
 * Returns 0 when a valid timestamp is ready, 1 when it is not ready yet
 * (still owned by DMA or not a context descriptor), and -EINVAL when
 * the timestamp words read back as all-ones (corrupted).
 */
static int dwmac4_rx_check_timestamp(void *desc)
{
	struct dma_desc *p = (struct dma_desc *)desc;
	unsigned int rdes0 = le32_to_cpu(p->des0);
	unsigned int rdes1 = le32_to_cpu(p->des1);
	unsigned int rdes3 = le32_to_cpu(p->des3);
	u32 own, ctxt;
	int ret = 1;

	own = rdes3 & RDES3_OWN;
	ctxt = ((rdes3 & RDES3_CONTEXT_DESCRIPTOR)
		>> RDES3_CONTEXT_DESCRIPTOR_SHIFT);

	/* Only a CPU-owned context descriptor can carry the timestamp */
	if (likely(!own && ctxt)) {
		if ((rdes0 == 0xffffffff) && (rdes1 == 0xffffffff))
			/* Corrupted value */
			ret = -EINVAL;
		else
			/* A valid Timestamp is ready to be read */
			ret = 0;
	}

	/* Timestamp not ready */
	return ret;
}
|
|
|
|
|
2017-12-18 23:34:00 +01:00
|
|
|
/* Return 1 when the RX timestamp for this frame is available in the
 * following context descriptor (@next_desc), 0 otherwise. Polls the
 * context descriptor up to 10 times while it is still owned by the DMA.
 * @ats is part of the ops signature; unused by this variant.
 */
static int dwmac4_wrback_get_rx_timestamp_status(void *desc, void *next_desc,
						 u32 ats)
{
	struct dma_desc *p = (struct dma_desc *)desc;
	int ret = -EINVAL;

	/* Get the status from normal w/b descriptor */
	if (likely(le32_to_cpu(p->des3) & RDES3_RDES1_VALID)) {
		if (likely(le32_to_cpu(p->des1) & RDES1_TIMESTAMP_AVAILABLE)) {
			int i = 0;

			/* Check if timestamp is OK from context descriptor */
			do {
				ret = dwmac4_rx_check_timestamp(next_desc);
				if (ret < 0)
					goto exit;
				i++;

			} while ((ret == 1) && (i < 10));

			/* Bounded poll expired: timestamp never became ready */
			if (i == 10)
				ret = -EBUSY;
		}
	}
exit:
	if (likely(ret == 0))
		return 1;

	return 0;
}
|
|
|
|
|
|
|
|
/* Initialize an RX descriptor for DMA use. @mode, @end and @bfsize are
 * part of the ops signature; GMAC4 only needs to set ownership here.
 */
static void dwmac4_rd_init_rx_desc(struct dma_desc *p, int disable_rx_ic,
				   int mode, int end, int bfsize)
{
	dwmac4_set_rx_owner(p, disable_rx_ic);
}
|
|
|
|
|
|
|
|
/* Initialize a TX descriptor by zeroing all four words. @mode and @end
 * are part of the ops signature; unused by this variant.
 */
static void dwmac4_rd_init_tx_desc(struct dma_desc *p, int mode, int end)
{
	p->des0 = 0;
	p->des1 = 0;
	p->des2 = 0;
	p->des3 = 0;
}
|
|
|
|
|
|
|
|
/* Fill a TX descriptor for transmission: buffer length, total packet
 * size, first/last segment flags, checksum insertion, and finally the
 * OWN bit (with a DMA write barrier on the first segment so the rest of
 * the frame's descriptors are visible before the DMA starts).
 */
static void dwmac4_rd_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
				      bool csum_flag, int mode, bool tx_own,
				      bool ls, unsigned int tot_pkt_len)
{
	unsigned int tdes3 = le32_to_cpu(p->des3);

	p->des2 |= cpu_to_le32(len & TDES2_BUFFER1_SIZE_MASK);

	tdes3 |= tot_pkt_len & TDES3_PACKET_SIZE_MASK;
	if (is_fs)
		tdes3 |= TDES3_FIRST_DESCRIPTOR;
	else
		tdes3 &= ~TDES3_FIRST_DESCRIPTOR;

	if (likely(csum_flag))
		tdes3 |= (TX_CIC_FULL << TDES3_CHECKSUM_INSERTION_SHIFT);
	else
		tdes3 &= ~(TX_CIC_FULL << TDES3_CHECKSUM_INSERTION_SHIFT);

	if (ls)
		tdes3 |= TDES3_LAST_DESCRIPTOR;
	else
		tdes3 &= ~TDES3_LAST_DESCRIPTOR;

	/* Finally set the OWN bit. Later the DMA will start! */
	if (tx_own)
		tdes3 |= TDES3_OWN;

	if (is_fs && tx_own)
		/* When the own bit, for the first frame, has to be set, all
		 * descriptors for the same frame has to be set before, to
		 * avoid race condition.
		 */
		dma_wmb();

	p->des3 = cpu_to_le32(tdes3);
}
|
|
|
|
|
|
|
|
/* Fill a TX descriptor for TSO: both buffer lengths, and on the first
 * segment the TCP header length and payload length needed by the
 * segmentation engine. OWN is set last, behind a DMA write barrier on
 * the first segment.
 */
static void dwmac4_rd_prepare_tso_tx_desc(struct dma_desc *p, int is_fs,
					  int len1, int len2, bool tx_own,
					  bool ls, unsigned int tcphdrlen,
					  unsigned int tcppayloadlen)
{
	unsigned int tdes3 = le32_to_cpu(p->des3);

	if (len1)
		p->des2 |= cpu_to_le32((len1 & TDES2_BUFFER1_SIZE_MASK));

	if (len2)
		p->des2 |= cpu_to_le32((len2 << TDES2_BUFFER2_SIZE_MASK_SHIFT)
			    & TDES2_BUFFER2_SIZE_MASK);

	if (is_fs) {
		/* First TSO segment carries header/payload lengths for the
		 * segmentation engine.
		 */
		tdes3 |= TDES3_FIRST_DESCRIPTOR |
			 TDES3_TCP_SEGMENTATION_ENABLE |
			 ((tcphdrlen << TDES3_HDR_LEN_SHIFT) &
			  TDES3_SLOT_NUMBER_MASK) |
			 ((tcppayloadlen & TDES3_TCP_PKT_PAYLOAD_MASK));
	} else {
		tdes3 &= ~TDES3_FIRST_DESCRIPTOR;
	}

	if (ls)
		tdes3 |= TDES3_LAST_DESCRIPTOR;
	else
		tdes3 &= ~TDES3_LAST_DESCRIPTOR;

	/* Finally set the OWN bit. Later the DMA will start! */
	if (tx_own)
		tdes3 |= TDES3_OWN;

	if (is_fs && tx_own)
		/* When the own bit, for the first frame, has to be set, all
		 * descriptors for the same frame has to be set before, to
		 * avoid race condition.
		 */
		dma_wmb();

	p->des3 = cpu_to_le32(tdes3);
}
|
|
|
|
|
|
|
|
/* Reset a completed TX descriptor so it can be reused. All four words
 * are cleared to avoid reusing stale TDES0/TDES1 data.
 * @mode is part of the ops signature; unused by this variant.
 */
static void dwmac4_release_tx_desc(struct dma_desc *p, int mode)
{
	p->des0 = 0;
	p->des1 = 0;
	p->des2 = 0;
	p->des3 = 0;
}
|
|
|
|
|
|
|
|
/* Request an interrupt on completion of this TX descriptor (TDES2 IOC). */
static void dwmac4_rd_set_tx_ic(struct dma_desc *p)
{
	p->des2 |= cpu_to_le32(TDES2_INTERRUPT_ON_COMPLETION);
}
|
|
|
|
|
2021-02-25 17:01:12 +08:00
|
|
|
/* Dump a descriptor ring to the kernel log for debugging. The layout is
 * selected by @desc_size: basic (4 words), extended (8 words, basic
 * first) or enhanced/TBS dma_edesc (8 words, des4-7 first).
 */
static void dwmac4_display_ring(void *head, unsigned int size, bool rx,
				dma_addr_t dma_rx_phy, unsigned int desc_size)
{
	dma_addr_t dma_addr;
	int i;

	pr_info("%s descriptor ring:\n", rx ? "RX" : "TX");

	if (desc_size == sizeof(struct dma_desc)) {
		struct dma_desc *p = (struct dma_desc *)head;

		for (i = 0; i < size; i++) {
			dma_addr = dma_rx_phy + i * sizeof(*p);
			pr_info("%03d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
				i, &dma_addr,
				le32_to_cpu(p->des0), le32_to_cpu(p->des1),
				le32_to_cpu(p->des2), le32_to_cpu(p->des3));
			p++;
		}
	} else if (desc_size == sizeof(struct dma_extended_desc)) {
		struct dma_extended_desc *extp = (struct dma_extended_desc *)head;

		for (i = 0; i < size; i++) {
			dma_addr = dma_rx_phy + i * sizeof(*extp);
			pr_info("%03d [%pad]: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
				i, &dma_addr,
				le32_to_cpu(extp->basic.des0), le32_to_cpu(extp->basic.des1),
				le32_to_cpu(extp->basic.des2), le32_to_cpu(extp->basic.des3),
				le32_to_cpu(extp->des4), le32_to_cpu(extp->des5),
				le32_to_cpu(extp->des6), le32_to_cpu(extp->des7));
			extp++;
		}
	} else if (desc_size == sizeof(struct dma_edesc)) {
		struct dma_edesc *ep = (struct dma_edesc *)head;

		for (i = 0; i < size; i++) {
			dma_addr = dma_rx_phy + i * sizeof(*ep);
			pr_info("%03d [%pad]: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
				i, &dma_addr,
				le32_to_cpu(ep->des4), le32_to_cpu(ep->des5),
				le32_to_cpu(ep->des6), le32_to_cpu(ep->des7),
				le32_to_cpu(ep->basic.des0), le32_to_cpu(ep->basic.des1),
				le32_to_cpu(ep->basic.des2), le32_to_cpu(ep->basic.des3));
			ep++;
		}
	} else {
		pr_err("unsupported descriptor!");
	}
}
|
|
|
|
|
|
|
|
/* Build a TX context descriptor carrying the TSO Maximum Segment Size
 * (TDES2) with the context-type and MSS-valid bits set in TDES3.
 */
static void dwmac4_set_mss_ctxt(struct dma_desc *p, unsigned int mss)
{
	p->des0 = 0;
	p->des1 = 0;
	p->des2 = cpu_to_le32(mss);
	p->des3 = cpu_to_le32(TDES3_CONTEXT_TYPE | TDES3_CTXT_TCMSSV);
}
|
|
|
|
|
2018-05-18 14:56:00 +01:00
|
|
|
/* Program the (up to 64-bit) buffer DMA address into des0/des1. */
static void dwmac4_set_addr(struct dma_desc *p, dma_addr_t addr)
{
	p->des0 = cpu_to_le32(lower_32_bits(addr));
	p->des1 = cpu_to_le32(upper_32_bits(addr));
}
|
|
|
|
|
2018-05-18 14:56:01 +01:00
|
|
|
/* Zero all four words of a descriptor. */
static void dwmac4_clear(struct dma_desc *p)
{
	p->des0 = 0;
	p->des1 = 0;
	p->des2 = 0;
	p->des3 = 0;
}
|
|
|
|
|
2019-09-10 16:41:25 +02:00
|
|
|
/* Program the Source Address Insertion/Replacement Control field of
 * TDES3 from @sarc_type.
 */
static void dwmac4_set_sarc(struct dma_desc *p, u32 sarc_type)
{
	sarc_type <<= TDES3_SA_INSERT_CTRL_SHIFT;

	p->des3 |= cpu_to_le32(sarc_type & TDES3_SA_INSERT_CTRL_MASK);
}
|
|
|
|
|
2019-07-04 00:59:10 +08:00
|
|
|
static int set_16kib_bfsize(int mtu)
|
|
|
|
{
|
|
|
|
int ret = 0;
|
|
|
|
|
|
|
|
if (unlikely(mtu >= BUF_SIZE_8KiB))
|
|
|
|
ret = BUF_SIZE_16KiB;
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2019-09-10 16:41:26 +02:00
|
|
|
/* Build a TX context descriptor carrying the outer VLAN tag (and the
 * inner tag/insertion type when @inner_type is non-zero).
 */
static void dwmac4_set_vlan_tag(struct dma_desc *p, u16 tag, u16 inner_tag,
				u32 inner_type)
{
	p->des0 = 0;
	p->des1 = 0;
	p->des2 = 0;
	p->des3 = 0;

	/* Inner VLAN */
	if (inner_type) {
		u32 des = inner_tag << TDES2_IVT_SHIFT;

		des &= TDES2_IVT_MASK;
		p->des2 = cpu_to_le32(des);

		des = inner_type << TDES3_IVTIR_SHIFT;
		des &= TDES3_IVTIR_MASK;
		p->des3 = cpu_to_le32(des | TDES3_IVLTV);
	}

	/* Outer VLAN */
	p->des3 |= cpu_to_le32(tag & TDES3_VLAN_TAG);
	p->des3 |= cpu_to_le32(TDES3_VLTV);

	p->des3 |= cpu_to_le32(TDES3_CONTEXT_TYPE);
}
|
|
|
|
|
|
|
|
/* Program the VLAN tag insertion/replacement control bits in TDES2. */
static void dwmac4_set_vlan(struct dma_desc *p, u32 type)
{
	type <<= TDES2_VLAN_TAG_SHIFT;
	p->des2 |= cpu_to_le32(type & TDES2_VLAN_TAG_MASK);
}
|
|
|
|
|
2020-09-11 11:55:58 +08:00
|
|
|
/* Read the L3/L4 header length reported in RDES2 (used for split
 * header support) into *len.
 */
static void dwmac4_get_rx_header_len(struct dma_desc *p, unsigned int *len)
{
	*len = le32_to_cpu(p->des2) & RDES2_HL;
}
|
|
|
|
|
2021-02-25 17:01:13 +08:00
|
|
|
/* Program the secondary (buffer 2) DMA address into des2/des3 and mark
 * it valid or invalid per @buf2_valid. The upper address bits share
 * des3 with the valid flag, hence the masked clear in the else branch.
 */
static void dwmac4_set_sec_addr(struct dma_desc *p, dma_addr_t addr, bool buf2_valid)
{
	p->des2 = cpu_to_le32(lower_32_bits(addr));
	p->des3 = cpu_to_le32(upper_32_bits(addr));

	if (buf2_valid)
		p->des3 |= cpu_to_le32(RDES3_BUFFER2_VALID_ADDR);
	else
		p->des3 &= cpu_to_le32(~RDES3_BUFFER2_VALID_ADDR);
}
|
|
|
|
|
2020-01-13 17:24:12 +01:00
|
|
|
/* Program the Time Based Scheduling launch time (seconds in des4 with
 * the launch-time-valid bit, nanoseconds in des5) into an enhanced
 * descriptor; des6/des7 are cleared.
 */
static void dwmac4_set_tbs(struct dma_edesc *p, u32 sec, u32 nsec)
{
	p->des4 = cpu_to_le32((sec & TDES4_LT) | TDES4_LTV);
	p->des5 = cpu_to_le32(nsec & TDES5_LT);
	p->des6 = 0;
	p->des7 = 0;
}
|
|
|
|
|
2016-04-01 11:37:28 +02:00
|
|
|
/* Descriptor operations for DesignWare GMAC4 (databook 4.xx). */
const struct stmmac_desc_ops dwmac4_desc_ops = {
	.tx_status = dwmac4_wrback_get_tx_status,
	.rx_status = dwmac4_wrback_get_rx_status,
	.get_tx_len = dwmac4_rd_get_tx_len,
	.get_tx_owner = dwmac4_get_tx_owner,
	.set_tx_owner = dwmac4_set_tx_owner,
	.set_rx_owner = dwmac4_set_rx_owner,
	.get_tx_ls = dwmac4_get_tx_ls,
	.get_rx_vlan_tci = dwmac4_wrback_get_rx_vlan_tci,
	.get_rx_vlan_valid = dwmac4_wrback_get_rx_vlan_valid,
	.get_rx_frame_len = dwmac4_wrback_get_rx_frame_len,
	.enable_tx_timestamp = dwmac4_rd_enable_tx_timestamp,
	.get_tx_timestamp_status = dwmac4_wrback_get_tx_timestamp_status,
	.get_rx_timestamp_status = dwmac4_wrback_get_rx_timestamp_status,
	.get_timestamp = dwmac4_get_timestamp,
	.set_tx_ic = dwmac4_rd_set_tx_ic,
	.prepare_tx_desc = dwmac4_rd_prepare_tx_desc,
	.prepare_tso_tx_desc = dwmac4_rd_prepare_tso_tx_desc,
	.release_tx_desc = dwmac4_release_tx_desc,
	.init_rx_desc = dwmac4_rd_init_rx_desc,
	.init_tx_desc = dwmac4_rd_init_tx_desc,
	.display_ring = dwmac4_display_ring,
	.set_mss = dwmac4_set_mss_ctxt,
	.set_addr = dwmac4_set_addr,
	.clear = dwmac4_clear,
	.set_sarc = dwmac4_set_sarc,
	.set_vlan_tag = dwmac4_set_vlan_tag,
	.set_vlan = dwmac4_set_vlan,
	.get_rx_header_len = dwmac4_get_rx_header_len,
	.set_sec_addr = dwmac4_set_sec_addr,
	.set_tbs = dwmac4_set_tbs,
};
|
|
|
|
|
2019-07-04 00:59:10 +08:00
|
|
|
/* Ring mode operations: only the 16 KiB buffer-size hook is needed. */
const struct stmmac_mode_ops dwmac4_ring_mode_ops = {
	.set_16kib_bfsize = set_16kib_bfsize,
};
|