// SPDX-License-Identifier: GPL-2.0
/* Renesas Ethernet Switch device driver
 *
 * Copyright (C) 2022 Renesas Electronics Corporation
 */

#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/etherdevice.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/net_tstamp.h>
#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/sys_soc.h>

#include "rswitch.h"

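/* Busy-wait until (register value & mask) == expected, polling every 1 us
 * for at most RSWITCH_TIMEOUT_US; returns 0 on success or -ETIMEDOUT.
 */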
static int rswitch_reg_wait(void __iomem *addr, u32 offs, u32 mask, u32 expected)
{
	u32 val;

	return readl_poll_timeout_atomic(addr + offs, val, (val & mask) == expected,
					 1, RSWITCH_TIMEOUT_US);
}

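/* Read-modify-write helper: clear the @clear bits of @reg, then set @set */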
static void rswitch_modify(void __iomem *addr, enum rswitch_reg reg, u32 clear, u32 set)
{
	iowrite32((ioread32(addr + reg) & ~clear) | set, addr + reg);
}

/* Common Agent block (COMA) */
static void rswitch_reset(struct rswitch_private *priv)
{
	iowrite32(RRC_RR, priv->addr + RRC);
	iowrite32(RRC_RR_CLR, priv->addr + RRC);
}

static void rswitch_clock_enable(struct rswitch_private *priv)
{
	iowrite32(RCEC_ACE_DEFAULT | RCEC_RCE, priv->addr + RCEC);
}

static void rswitch_clock_disable(struct rswitch_private *priv)
{
	iowrite32(RCDC_RCD, priv->addr + RCDC);
}

static bool rswitch_agent_clock_is_enabled(void __iomem *coma_addr,
					   unsigned int port)
{
	u32 val = ioread32(coma_addr + RCEC);

	if (val & RCEC_RCE)
		return (val & BIT(port)) ? true : false;
	else
		return false;
}

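/* Gate one agent's clock: enable through RCEC (which also sets RCEC_RCE),
 * disable through a write to RCDC.
 */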
static void rswitch_agent_clock_ctrl(void __iomem *coma_addr, unsigned int port,
				     int enable)
{
	u32 val;

	if (enable) {
		val = ioread32(coma_addr + RCEC);
		iowrite32(val | RCEC_RCE | BIT(port), coma_addr + RCEC);
	} else {
		val = ioread32(coma_addr + RCDC);
		iowrite32(val | BIT(port), coma_addr + RCDC);
	}
}

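/* Initialize the buffer pool; a no-op if the pool is already ready
 * (CABPIRM_BPR set), otherwise kick the init and wait for completion.
 */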
static int rswitch_bpool_config(struct rswitch_private *priv)
{
	u32 val;

	val = ioread32(priv->addr + CABPIRM);
	if (val & CABPIRM_BPR)
		return 0;

	iowrite32(CABPIRM_BPIOG, priv->addr + CABPIRM);

	return rswitch_reg_wait(priv->addr, CABPIRM, CABPIRM_BPR, CABPIRM_BPR);
}

static void rswitch_coma_init(struct rswitch_private *priv)
{
	iowrite32(CABPPFLC_INIT_VALUE, priv->addr + CABPPFLC0);
}

/* R-Switch-2 block (TOP) */
static void rswitch_top_init(struct rswitch_private *priv)
{
	unsigned int i;

	for (i = 0; i < RSWITCH_MAX_NUM_QUEUES; i++)
		iowrite32((i / 16) << (GWCA_INDEX * 8), priv->addr + TPEMIMC7(i));
}

/* Forwarding engine block (MFWD) */
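/* Reset every agent to an empty forwarding configuration, then allow only
 * port-based forwarding from each enabled ETHA port to the GWCA port (and,
 * within it, to that port's RX queue), plus direct descriptor forwarding
 * on the GWCA itself.
 */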
static void rswitch_fwd_init(struct rswitch_private *priv)
{
	u32 all_ports_mask = GENMASK(RSWITCH_NUM_AGENTS - 1, 0);
	unsigned int i;

	/* Start with empty configuration */
	for (i = 0; i < RSWITCH_NUM_AGENTS; i++) {
		/* Disable all port features */
		iowrite32(0, priv->addr + FWPC0(i));
		/* Disallow L3 forwarding and direct descriptor forwarding */
		iowrite32(FIELD_PREP(FWCP1_LTHFW, all_ports_mask),
			  priv->addr + FWPC1(i));
		/* Disallow L2 forwarding */
		iowrite32(FIELD_PREP(FWCP2_LTWFW, all_ports_mask),
			  priv->addr + FWPC2(i));
		/* Disallow port based forwarding */
		iowrite32(0, priv->addr + FWPBFC(i));
	}

	/* For enabled ETHA ports, setup port based forwarding */
	rswitch_for_each_enabled_port(priv, i) {
		/* Port based forwarding from port i to GWCA port */
		rswitch_modify(priv->addr, FWPBFC(i), FWPBFC_PBDV,
			       FIELD_PREP(FWPBFC_PBDV, BIT(priv->gwca.index)));
		/* Within GWCA port, forward to Rx queue for port i */
		iowrite32(priv->rdev[i]->rx_queue->index,
			  priv->addr + FWPBFCSDC(GWCA_INDEX, i));
	}

	/* For GWCA port, allow direct descriptor forwarding */
	rswitch_modify(priv->addr, FWPC1(priv->gwca.index), FWPC1_DDE, FWPC1_DDE);
}

/* Gateway CPU agent block (GWCA) */
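/* Switch the GWCA to @mode via GWMC and wait until GWMS reports it; the
 * agent clock is enabled first if needed and dropped again on DISABLE.
 */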
static int rswitch_gwca_change_mode(struct rswitch_private *priv,
				    enum rswitch_gwca_mode mode)
{
	int ret;

	if (!rswitch_agent_clock_is_enabled(priv->addr, priv->gwca.index))
		rswitch_agent_clock_ctrl(priv->addr, priv->gwca.index, 1);

	iowrite32(mode, priv->addr + GWMC);

	ret = rswitch_reg_wait(priv->addr, GWMS, GWMS_OPS_MASK, mode);

	if (mode == GWMC_OPC_DISABLE)
		rswitch_agent_clock_ctrl(priv->addr, priv->gwca.index, 0);

	return ret;
}

static int rswitch_gwca_mcast_table_reset(struct rswitch_private *priv)
{
	iowrite32(GWMTIRM_MTIOG, priv->addr + GWMTIRM);

	return rswitch_reg_wait(priv->addr, GWMTIRM, GWMTIRM_MTR, GWMTIRM_MTR);
}

static int rswitch_gwca_axi_ram_reset(struct rswitch_private *priv)
{
	iowrite32(GWARIRM_ARIOG, priv->addr + GWARIRM);

	return rswitch_reg_wait(priv->addr, GWARIRM, GWARIRM_ARR, GWARIRM_ARR);
}

static bool rswitch_is_any_data_irq(struct rswitch_private *priv, u32 *dis, bool tx)
{
	u32 *mask = tx ? priv->gwca.tx_irq_bits : priv->gwca.rx_irq_bits;
	unsigned int i;

	for (i = 0; i < RSWITCH_NUM_IRQ_REGS; i++) {
		if (dis[i] & mask[i])
			return true;
	}

	return false;
}

static void rswitch_get_data_irq_status(struct rswitch_private *priv, u32 *dis)
{
	unsigned int i;

	for (i = 0; i < RSWITCH_NUM_IRQ_REGS; i++) {
		dis[i] = ioread32(priv->addr + GWDIS(i));
		dis[i] &= ioread32(priv->addr + GWDIE(i));
	}
}

static void rswitch_enadis_data_irq(struct rswitch_private *priv,
				    unsigned int index, bool enable)
{
	u32 offs = enable ? GWDIE(index / 32) : GWDID(index / 32);

	iowrite32(BIT(index % 32), priv->addr + offs);
}

static void rswitch_ack_data_irq(struct rswitch_private *priv,
				 unsigned int index)
{
	u32 offs = GWDIS(index / 32);

	iowrite32(BIT(index % 32), priv->addr + offs);
}

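/* Advance a ring index (@gq->cur or @gq->dirty) by @num entries, wrapping
 * around at the ring size.
 */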
static unsigned int rswitch_next_queue_index(struct rswitch_gwca_queue *gq,
					     bool cur, unsigned int num)
{
	unsigned int index = cur ? gq->cur : gq->dirty;

	if (index + num >= gq->ring_size)
		index = (index + num) % gq->ring_size;
	else
		index += num;

	return index;
}

static unsigned int rswitch_get_num_cur_queues(struct rswitch_gwca_queue *gq)
{
	if (gq->cur >= gq->dirty)
		return gq->cur - gq->dirty;
	else
		return gq->ring_size - gq->dirty + gq->cur;
}

static bool rswitch_is_queue_rxed(struct rswitch_gwca_queue *gq)
{
	struct rswitch_ext_ts_desc *desc = &gq->rx_ring[gq->dirty];

	if ((desc->desc.die_dt & DT_MASK) != DT_FEMPTY)
		return true;

	return false;
}

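/* Allocate page-fragment RX buffers for @num ring entries starting at
 * @start_index; on failure, undo this call's allocations and return -ENOMEM.
 */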
static int rswitch_gwca_queue_alloc_rx_buf(struct rswitch_gwca_queue *gq,
					   unsigned int start_index,
					   unsigned int num)
{
	unsigned int i, index;

	for (i = 0; i < num; i++) {
		index = (i + start_index) % gq->ring_size;
		if (gq->rx_bufs[index])
			continue;
		gq->rx_bufs[index] = netdev_alloc_frag(RSWITCH_BUF_SIZE);
		if (!gq->rx_bufs[index])
			goto err;
	}

	return 0;

err:
	for (; i-- > 0; ) {
		index = (i + start_index) % gq->ring_size;
		skb_free_frag(gq->rx_bufs[index]);
		gq->rx_bufs[index] = NULL;
	}

	return -ENOMEM;
}

static void rswitch_gwca_queue_free(struct net_device *ndev,
				    struct rswitch_gwca_queue *gq)
{
	unsigned int i;

	if (!gq->dir_tx) {
		dma_free_coherent(ndev->dev.parent,
				  sizeof(struct rswitch_ext_ts_desc) *
				  (gq->ring_size + 1), gq->rx_ring, gq->ring_dma);
		gq->rx_ring = NULL;

		for (i = 0; i < gq->ring_size; i++)
			skb_free_frag(gq->rx_bufs[i]);
		kfree(gq->rx_bufs);
		gq->rx_bufs = NULL;
	} else {
		dma_free_coherent(ndev->dev.parent,
				  sizeof(struct rswitch_ext_desc) *
				  (gq->ring_size + 1), gq->tx_ring, gq->ring_dma);
		gq->tx_ring = NULL;
		kfree(gq->skbs);
		gq->skbs = NULL;
		kfree(gq->unmap_addrs);
		gq->unmap_addrs = NULL;
	}
}

static void rswitch_gwca_ts_queue_free(struct rswitch_private *priv)
{
	struct rswitch_gwca_queue *gq = &priv->gwca.ts_queue;

	dma_free_coherent(&priv->pdev->dev,
			  sizeof(struct rswitch_ts_desc) * (gq->ring_size + 1),
			  gq->ts_ring, gq->ring_dma);
	gq->ts_ring = NULL;
}

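/* Allocate a queue's software state and descriptor ring: RX buffers and an
 * extended-timestamp ring for RX queues, or skb/unmap-address bookkeeping
 * and an extended ring for TX queues. Each ring carries one extra
 * descriptor used as the LINKFIX entry.
 */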
static int rswitch_gwca_queue_alloc(struct net_device *ndev,
				    struct rswitch_private *priv,
				    struct rswitch_gwca_queue *gq,
				    bool dir_tx, unsigned int ring_size)
{
	unsigned int i, bit;

	gq->dir_tx = dir_tx;
	gq->ring_size = ring_size;
	gq->ndev = ndev;

	if (!dir_tx) {
		gq->rx_bufs = kcalloc(gq->ring_size, sizeof(*gq->rx_bufs), GFP_KERNEL);
		if (!gq->rx_bufs)
			return -ENOMEM;
		if (rswitch_gwca_queue_alloc_rx_buf(gq, 0, gq->ring_size) < 0)
			goto out;

		gq->rx_ring = dma_alloc_coherent(ndev->dev.parent,
						 sizeof(struct rswitch_ext_ts_desc) *
						 (gq->ring_size + 1), &gq->ring_dma, GFP_KERNEL);
	} else {
		gq->skbs = kcalloc(gq->ring_size, sizeof(*gq->skbs), GFP_KERNEL);
		if (!gq->skbs)
			return -ENOMEM;
		gq->unmap_addrs = kcalloc(gq->ring_size, sizeof(*gq->unmap_addrs), GFP_KERNEL);
		if (!gq->unmap_addrs)
			goto out;
		gq->tx_ring = dma_alloc_coherent(ndev->dev.parent,
						 sizeof(struct rswitch_ext_desc) *
						 (gq->ring_size + 1), &gq->ring_dma, GFP_KERNEL);
	}

	if (!gq->rx_ring && !gq->tx_ring)
		goto out;

	i = gq->index / 32;
	bit = BIT(gq->index % 32);
	if (dir_tx)
		priv->gwca.tx_irq_bits[i] |= bit;
	else
		priv->gwca.rx_irq_bits[i] |= bit;

	return 0;

out:
	rswitch_gwca_queue_free(ndev, gq);

	return -ENOMEM;
}

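/* Descriptor pointers are 40-bit DMA addresses, split across the 32-bit
 * dptrl field and the low byte of dptrh.
 */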
static void rswitch_desc_set_dptr(struct rswitch_desc *desc, dma_addr_t addr)
{
	desc->dptrl = cpu_to_le32(lower_32_bits(addr));
	desc->dptrh = upper_32_bits(addr) & 0xff;
}

static dma_addr_t rswitch_desc_get_dptr(const struct rswitch_desc *desc)
{
	return __le32_to_cpu(desc->dptrl) | (u64)(desc->dptrh) << 32;
}

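/* Format a descriptor ring: pre-map RX buffers (or mark TX entries empty),
 * terminate the ring with a LINKFIX descriptor pointing back to its base,
 * hook it into the GWCA linkfix table, and program GWDCC for the queue.
 */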
static int rswitch_gwca_queue_format(struct net_device *ndev,
				     struct rswitch_private *priv,
				     struct rswitch_gwca_queue *gq)
{
	unsigned int ring_size = sizeof(struct rswitch_ext_desc) * gq->ring_size;
	struct rswitch_ext_desc *desc;
	struct rswitch_desc *linkfix;
	dma_addr_t dma_addr;
	unsigned int i;

	memset(gq->tx_ring, 0, ring_size);
	for (i = 0, desc = gq->tx_ring; i < gq->ring_size; i++, desc++) {
		if (!gq->dir_tx) {
			dma_addr = dma_map_single(ndev->dev.parent,
						  gq->rx_bufs[i] + RSWITCH_HEADROOM,
						  RSWITCH_MAP_BUF_SIZE,
						  DMA_FROM_DEVICE);
			if (dma_mapping_error(ndev->dev.parent, dma_addr))
				goto err;

			desc->desc.info_ds = cpu_to_le16(RSWITCH_DESC_BUF_SIZE);
			rswitch_desc_set_dptr(&desc->desc, dma_addr);
			desc->desc.die_dt = DT_FEMPTY | DIE;
		} else {
			desc->desc.die_dt = DT_EEMPTY | DIE;
		}
	}
	rswitch_desc_set_dptr(&desc->desc, gq->ring_dma);
	desc->desc.die_dt = DT_LINKFIX;

	linkfix = &priv->gwca.linkfix_table[gq->index];
	linkfix->die_dt = DT_LINKFIX;
	rswitch_desc_set_dptr(linkfix, gq->ring_dma);

	iowrite32(GWDCC_BALR | (gq->dir_tx ? GWDCC_DCP(GWCA_IPV_NUM) | GWDCC_DQT : 0) | GWDCC_EDE,
		  priv->addr + GWDCC_OFFS(gq->index));

	return 0;

err:
	if (!gq->dir_tx) {
		for (desc = gq->tx_ring; i-- > 0; desc++) {
			dma_addr = rswitch_desc_get_dptr(&desc->desc);
			dma_unmap_single(ndev->dev.parent, dma_addr,
					 RSWITCH_MAP_BUF_SIZE, DMA_FROM_DEVICE);
		}
	}

	return -ENOMEM;
}

static void rswitch_gwca_ts_queue_fill(struct rswitch_private *priv,
				       unsigned int start_index,
				       unsigned int num)
{
	struct rswitch_gwca_queue *gq = &priv->gwca.ts_queue;
	struct rswitch_ts_desc *desc;
	unsigned int i, index;

	for (i = 0; i < num; i++) {
		index = (i + start_index) % gq->ring_size;
		desc = &gq->ts_ring[index];
		desc->desc.die_dt = DT_FEMPTY_ND | DIE;
	}
}

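/* Refill @num RX ring entries from @start_index: map each buffer, publish
 * its DMA address, then mark the descriptor frame-empty, with a dma_wmb()
 * in between so the hardware never sees a half-initialized entry.
 */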
static int rswitch_gwca_queue_ext_ts_fill(struct net_device *ndev,
					  struct rswitch_gwca_queue *gq,
					  unsigned int start_index,
					  unsigned int num)
{
	struct rswitch_device *rdev = netdev_priv(ndev);
	struct rswitch_ext_ts_desc *desc;
	unsigned int i, index;
	dma_addr_t dma_addr;

	for (i = 0; i < num; i++) {
		index = (i + start_index) % gq->ring_size;
		desc = &gq->rx_ring[index];
		if (!gq->dir_tx) {
			dma_addr = dma_map_single(ndev->dev.parent,
						  gq->rx_bufs[index] + RSWITCH_HEADROOM,
						  RSWITCH_MAP_BUF_SIZE,
						  DMA_FROM_DEVICE);
			if (dma_mapping_error(ndev->dev.parent, dma_addr))
				goto err;

			desc->desc.info_ds = cpu_to_le16(RSWITCH_DESC_BUF_SIZE);
			rswitch_desc_set_dptr(&desc->desc, dma_addr);
			dma_wmb();
			desc->desc.die_dt = DT_FEMPTY | DIE;
			desc->info1 = cpu_to_le64(INFO1_SPN(rdev->etha->index));
		} else {
			desc->desc.die_dt = DT_EEMPTY | DIE;
		}
	}

	return 0;

err:
	if (!gq->dir_tx) {
		for (; i-- > 0; ) {
			index = (i + start_index) % gq->ring_size;
			desc = &gq->rx_ring[index];
			dma_addr = rswitch_desc_get_dptr(&desc->desc);
			dma_unmap_single(ndev->dev.parent, dma_addr,
					 RSWITCH_MAP_BUF_SIZE, DMA_FROM_DEVICE);
		}
	}

	return -ENOMEM;
}

static int rswitch_gwca_queue_ext_ts_format(struct net_device *ndev,
					    struct rswitch_private *priv,
					    struct rswitch_gwca_queue *gq)
{
	unsigned int ring_size = sizeof(struct rswitch_ext_ts_desc) * gq->ring_size;
	struct rswitch_ext_ts_desc *desc;
	struct rswitch_desc *linkfix;
	int err;

	memset(gq->rx_ring, 0, ring_size);
	err = rswitch_gwca_queue_ext_ts_fill(ndev, gq, 0, gq->ring_size);
	if (err < 0)
		return err;

	desc = &gq->rx_ring[gq->ring_size]; /* Last */
	rswitch_desc_set_dptr(&desc->desc, gq->ring_dma);
	desc->desc.die_dt = DT_LINKFIX;

	linkfix = &priv->gwca.linkfix_table[gq->index];
	linkfix->die_dt = DT_LINKFIX;
	rswitch_desc_set_dptr(linkfix, gq->ring_dma);

	iowrite32(GWDCC_BALR | (gq->dir_tx ? GWDCC_DCP(GWCA_IPV_NUM) | GWDCC_DQT : 0) |
		  GWDCC_ETS | GWDCC_EDE,
		  priv->addr + GWDCC_OFFS(gq->index));

	return 0;
}

static int rswitch_gwca_linkfix_alloc(struct rswitch_private *priv)
{
	unsigned int i, num_queues = priv->gwca.num_queues;
	struct rswitch_gwca *gwca = &priv->gwca;
	struct device *dev = &priv->pdev->dev;

	gwca->linkfix_table_size = sizeof(struct rswitch_desc) * num_queues;
	gwca->linkfix_table = dma_alloc_coherent(dev, gwca->linkfix_table_size,
						 &gwca->linkfix_table_dma, GFP_KERNEL);
	if (!gwca->linkfix_table)
		return -ENOMEM;
	for (i = 0; i < num_queues; i++)
		gwca->linkfix_table[i].die_dt = DT_EOS;

	return 0;
}

static void rswitch_gwca_linkfix_free(struct rswitch_private *priv)
{
	struct rswitch_gwca *gwca = &priv->gwca;

	if (gwca->linkfix_table)
		dma_free_coherent(&priv->pdev->dev, gwca->linkfix_table_size,
				  gwca->linkfix_table, gwca->linkfix_table_dma);
	gwca->linkfix_table = NULL;
}

static int rswitch_gwca_ts_queue_alloc(struct rswitch_private *priv)
{
	struct rswitch_gwca_queue *gq = &priv->gwca.ts_queue;
	struct rswitch_ts_desc *desc;

	gq->ring_size = TS_RING_SIZE;
	gq->ts_ring = dma_alloc_coherent(&priv->pdev->dev,
					 sizeof(struct rswitch_ts_desc) *
					 (gq->ring_size + 1), &gq->ring_dma, GFP_KERNEL);

	if (!gq->ts_ring)
		return -ENOMEM;

	rswitch_gwca_ts_queue_fill(priv, 0, TS_RING_SIZE);
	desc = &gq->ts_ring[gq->ring_size];
	desc->desc.die_dt = DT_LINKFIX;
	rswitch_desc_set_dptr(&desc->desc, gq->ring_dma);

	return 0;
}

static struct rswitch_gwca_queue *rswitch_gwca_get(struct rswitch_private *priv)
{
	struct rswitch_gwca_queue *gq;
	unsigned int index;

	index = find_first_zero_bit(priv->gwca.used, priv->gwca.num_queues);
	if (index >= priv->gwca.num_queues)
		return NULL;
	set_bit(index, priv->gwca.used);
	gq = &priv->gwca.queues[index];
	memset(gq, 0, sizeof(*gq));
	gq->index = index;

	return gq;
}

static void rswitch_gwca_put(struct rswitch_private *priv,
			     struct rswitch_gwca_queue *gq)
{
	clear_bit(gq->index, priv->gwca.used);
}

static int rswitch_txdmac_alloc(struct net_device *ndev)
{
	struct rswitch_device *rdev = netdev_priv(ndev);
	struct rswitch_private *priv = rdev->priv;
	int err;

	rdev->tx_queue = rswitch_gwca_get(priv);
	if (!rdev->tx_queue)
		return -EBUSY;

	err = rswitch_gwca_queue_alloc(ndev, priv, rdev->tx_queue, true, TX_RING_SIZE);
	if (err < 0) {
		rswitch_gwca_put(priv, rdev->tx_queue);
		return err;
	}

	return 0;
}

static void rswitch_txdmac_free(struct net_device *ndev)
{
	struct rswitch_device *rdev = netdev_priv(ndev);

	rswitch_gwca_queue_free(ndev, rdev->tx_queue);
	rswitch_gwca_put(rdev->priv, rdev->tx_queue);
}

static int rswitch_txdmac_init(struct rswitch_private *priv, unsigned int index)
{
	struct rswitch_device *rdev = priv->rdev[index];

	return rswitch_gwca_queue_format(rdev->ndev, priv, rdev->tx_queue);
}

static int rswitch_rxdmac_alloc(struct net_device *ndev)
{
	struct rswitch_device *rdev = netdev_priv(ndev);
	struct rswitch_private *priv = rdev->priv;
	int err;

	rdev->rx_queue = rswitch_gwca_get(priv);
	if (!rdev->rx_queue)
		return -EBUSY;

	err = rswitch_gwca_queue_alloc(ndev, priv, rdev->rx_queue, false, RX_RING_SIZE);
	if (err < 0) {
		rswitch_gwca_put(priv, rdev->rx_queue);
		return err;
	}

	return 0;
}

static void rswitch_rxdmac_free(struct net_device *ndev)
{
	struct rswitch_device *rdev = netdev_priv(ndev);

	rswitch_gwca_queue_free(ndev, rdev->rx_queue);
	rswitch_gwca_put(rdev->priv, rdev->rx_queue);
}

static int rswitch_rxdmac_init(struct rswitch_private *priv, unsigned int index)
{
	struct rswitch_device *rdev = priv->rdev[index];
	struct net_device *ndev = rdev->ndev;

	return rswitch_gwca_queue_ext_ts_format(ndev, priv, rdev->rx_queue);
}

static int rswitch_gwca_hw_init(struct rswitch_private *priv)
{
	unsigned int i;
	int err;

	err = rswitch_gwca_change_mode(priv, GWMC_OPC_DISABLE);
	if (err < 0)
		return err;
	err = rswitch_gwca_change_mode(priv, GWMC_OPC_CONFIG);
	if (err < 0)
		return err;

	err = rswitch_gwca_mcast_table_reset(priv);
	if (err < 0)
		return err;
	err = rswitch_gwca_axi_ram_reset(priv);
	if (err < 0)
		return err;

	iowrite32(GWVCC_VEM_SC_TAG, priv->addr + GWVCC);
	iowrite32(0, priv->addr + GWTTFC);
	iowrite32(lower_32_bits(priv->gwca.linkfix_table_dma), priv->addr + GWDCBAC1);
	iowrite32(upper_32_bits(priv->gwca.linkfix_table_dma), priv->addr + GWDCBAC0);
	iowrite32(lower_32_bits(priv->gwca.ts_queue.ring_dma), priv->addr + GWTDCAC10);
	iowrite32(upper_32_bits(priv->gwca.ts_queue.ring_dma), priv->addr + GWTDCAC00);
	iowrite32(GWMDNC_TSDMN(1) | GWMDNC_TXDMN(0x1e) | GWMDNC_RXDMN(0x1f),
		  priv->addr + GWMDNC);
	iowrite32(GWCA_TS_IRQ_BIT, priv->addr + GWTSDCC0);

	iowrite32(GWTPC_PPPL(GWCA_IPV_NUM), priv->addr + GWTPC0);

	for (i = 0; i < RSWITCH_NUM_PORTS; i++) {
		err = rswitch_rxdmac_init(priv, i);
		if (err < 0)
			return err;
		err = rswitch_txdmac_init(priv, i);
		if (err < 0)
			return err;
	}

	err = rswitch_gwca_change_mode(priv, GWMC_OPC_DISABLE);
	if (err < 0)
		return err;
	return rswitch_gwca_change_mode(priv, GWMC_OPC_OPERATION);
}

static int rswitch_gwca_hw_deinit(struct rswitch_private *priv)
{
	int err;

	err = rswitch_gwca_change_mode(priv, GWMC_OPC_DISABLE);
	if (err < 0)
		return err;
	err = rswitch_gwca_change_mode(priv, GWMC_OPC_RESET);
	if (err < 0)
		return err;

	return rswitch_gwca_change_mode(priv, GWMC_OPC_DISABLE);
}

static int rswitch_gwca_halt(struct rswitch_private *priv)
{
	int err;

	priv->gwca_halt = true;
	err = rswitch_gwca_hw_deinit(priv);
	dev_err(&priv->pdev->dev, "halted (%d)\n", err);

	return err;
}

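/* Unmap one received descriptor and either build a complete skb (FSINGLE),
 * start or extend a multi-descriptor frame (FSTART/FMID), or finish one
 * (FEND). Returns a complete skb, or NULL while a frame is still being
 * assembled or the descriptor sequence was unexpected.
 */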
static struct sk_buff *rswitch_rx_handle_desc(struct net_device *ndev,
					      struct rswitch_gwca_queue *gq,
					      struct rswitch_ext_ts_desc *desc)
{
	dma_addr_t dma_addr = rswitch_desc_get_dptr(&desc->desc);
	u16 pkt_len = le16_to_cpu(desc->desc.info_ds) & RX_DS;
	u8 die_dt = desc->desc.die_dt & DT_MASK;
	struct sk_buff *skb = NULL;

	dma_unmap_single(ndev->dev.parent, dma_addr, RSWITCH_MAP_BUF_SIZE,
			 DMA_FROM_DEVICE);

	/* The RX descriptor order will be one of the following:
	 * - FSINGLE
	 * - FSTART -> FEND
	 * - FSTART -> FMID -> FEND
	 */

	/* Check whether the descriptor is unexpected order */
	switch (die_dt) {
	case DT_FSTART:
	case DT_FSINGLE:
		if (gq->skb_fstart) {
			dev_kfree_skb_any(gq->skb_fstart);
			gq->skb_fstart = NULL;
			ndev->stats.rx_dropped++;
		}
		break;
	case DT_FMID:
	case DT_FEND:
		if (!gq->skb_fstart) {
			ndev->stats.rx_dropped++;
			return NULL;
		}
		break;
	default:
		break;
	}

	/* Handle the descriptor */
	switch (die_dt) {
	case DT_FSTART:
	case DT_FSINGLE:
		skb = build_skb(gq->rx_bufs[gq->cur], RSWITCH_BUF_SIZE);
		if (skb) {
			skb_reserve(skb, RSWITCH_HEADROOM);
			skb_put(skb, pkt_len);
			gq->pkt_len = pkt_len;
			if (die_dt == DT_FSTART) {
				gq->skb_fstart = skb;
				skb = NULL;
			}
		}
		break;
	case DT_FMID:
	case DT_FEND:
		skb_add_rx_frag(gq->skb_fstart, skb_shinfo(gq->skb_fstart)->nr_frags,
				virt_to_page(gq->rx_bufs[gq->cur]),
				offset_in_page(gq->rx_bufs[gq->cur]) + RSWITCH_HEADROOM,
				pkt_len, RSWITCH_BUF_SIZE);
		if (die_dt == DT_FEND) {
			skb = gq->skb_fstart;
			gq->skb_fstart = NULL;
		}
		gq->pkt_len += pkt_len;
		break;
	default:
		netdev_err(ndev, "%s: unexpected value (%x)\n", __func__, die_dt);
		break;
	}

	return skb;
}

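/* NAPI receive: drain completed RX descriptors (bounded by the ring size
 * and the remaining *quota), attach hardware timestamps when enabled, then
 * reallocate buffers for and republish the consumed entries. Returns true
 * when the quota was exhausted.
 */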
static bool rswitch_rx(struct net_device *ndev, int *quota)
{
	struct rswitch_device *rdev = netdev_priv(ndev);
	struct rswitch_gwca_queue *gq = rdev->rx_queue;
	struct rswitch_ext_ts_desc *desc;
	int limit, boguscnt, ret;
	struct sk_buff *skb;
	unsigned int num;
	u32 get_ts;

	if (*quota <= 0)
		return true;

	boguscnt = min_t(int, gq->ring_size, *quota);
	limit = boguscnt;

	desc = &gq->rx_ring[gq->cur];
	while ((desc->desc.die_dt & DT_MASK) != DT_FEMPTY) {
		dma_rmb();
		skb = rswitch_rx_handle_desc(ndev, gq, desc);
		if (!skb)
			goto out;

		get_ts = rdev->priv->ptp_priv->tstamp_rx_ctrl & RCAR_GEN4_RXTSTAMP_TYPE_V2_L2_EVENT;
		if (get_ts) {
			struct skb_shared_hwtstamps *shhwtstamps;
			struct timespec64 ts;

			shhwtstamps = skb_hwtstamps(skb);
			memset(shhwtstamps, 0, sizeof(*shhwtstamps));
			ts.tv_sec = __le32_to_cpu(desc->ts_sec);
			ts.tv_nsec = __le32_to_cpu(desc->ts_nsec & cpu_to_le32(0x3fffffff));
			shhwtstamps->hwtstamp = timespec64_to_ktime(ts);
		}
		skb->protocol = eth_type_trans(skb, ndev);
		napi_gro_receive(&rdev->napi, skb);
		rdev->ndev->stats.rx_packets++;
		rdev->ndev->stats.rx_bytes += gq->pkt_len;

out:
		gq->rx_bufs[gq->cur] = NULL;
		gq->cur = rswitch_next_queue_index(gq, true, 1);
		desc = &gq->rx_ring[gq->cur];

		if (--boguscnt <= 0)
			break;
	}

	num = rswitch_get_num_cur_queues(gq);
	ret = rswitch_gwca_queue_alloc_rx_buf(gq, gq->dirty, num);
	if (ret < 0)
		goto err;
	ret = rswitch_gwca_queue_ext_ts_fill(ndev, gq, gq->dirty, num);
	if (ret < 0)
		goto err;
	gq->dirty = rswitch_next_queue_index(gq, false, num);

	*quota -= limit - boguscnt;

	return boguscnt <= 0;

err:
	rswitch_gwca_halt(rdev->priv);

	return 0;
}

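/* Reclaim completed TX descriptors: update stats, unmap the buffer, free
 * the skb and mark each descriptor empty again.
 */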
static void rswitch_tx_free(struct net_device *ndev)
{
	struct rswitch_device *rdev = netdev_priv(ndev);
	struct rswitch_gwca_queue *gq = rdev->tx_queue;
	struct rswitch_ext_desc *desc;
	struct sk_buff *skb;

	desc = &gq->tx_ring[gq->dirty];
	while ((desc->desc.die_dt & DT_MASK) == DT_FEMPTY) {
		dma_rmb();

		skb = gq->skbs[gq->dirty];
		if (skb) {
			rdev->ndev->stats.tx_packets++;
			rdev->ndev->stats.tx_bytes += skb->len;
			dma_unmap_single(ndev->dev.parent,
					 gq->unmap_addrs[gq->dirty],
					 skb->len, DMA_TO_DEVICE);
			dev_kfree_skb_any(gq->skbs[gq->dirty]);
			gq->skbs[gq->dirty] = NULL;
		}

		desc->desc.die_dt = DT_EEMPTY;
		gq->dirty = rswitch_next_queue_index(gq, false, 1);
		desc = &gq->tx_ring[gq->dirty];
	}
}

static int rswitch_poll(struct napi_struct *napi, int budget)
{
	struct net_device *ndev = napi->dev;
	struct rswitch_private *priv;
	struct rswitch_device *rdev;
	unsigned long flags;
	int quota = budget;

	rdev = netdev_priv(ndev);
	priv = rdev->priv;

retry:
	rswitch_tx_free(ndev);

	if (rswitch_rx(ndev, &quota))
		goto out;
	else if (rdev->priv->gwca_halt)
		goto err;
	else if (rswitch_is_queue_rxed(rdev->rx_queue))
		goto retry;

	netif_wake_subqueue(ndev, 0);

	if (napi_complete_done(napi, budget - quota)) {
		spin_lock_irqsave(&priv->lock, flags);
		if (test_bit(rdev->port, priv->opened_ports)) {
			rswitch_enadis_data_irq(priv, rdev->tx_queue->index, true);
			rswitch_enadis_data_irq(priv, rdev->rx_queue->index, true);
		}
		spin_unlock_irqrestore(&priv->lock, flags);
	}

out:
	return budget - quota;

err:
	napi_complete(napi);

	return 0;
}

static void rswitch_queue_interrupt(struct net_device *ndev)
{
	struct rswitch_device *rdev = netdev_priv(ndev);

	if (napi_schedule_prep(&rdev->napi)) {
		spin_lock(&rdev->priv->lock);
		rswitch_enadis_data_irq(rdev->priv, rdev->tx_queue->index, false);
		rswitch_enadis_data_irq(rdev->priv, rdev->rx_queue->index, false);
		spin_unlock(&rdev->priv->lock);
		__napi_schedule(&rdev->napi);
	}
}

static irqreturn_t rswitch_data_irq(struct rswitch_private *priv, u32 *dis)
{
	struct rswitch_gwca_queue *gq;
	unsigned int i, index, bit;

	for (i = 0; i < priv->gwca.num_queues; i++) {
		gq = &priv->gwca.queues[i];
		index = gq->index / 32;
		bit = BIT(gq->index % 32);
		if (!(dis[index] & bit))
			continue;

		rswitch_ack_data_irq(priv, gq->index);
		rswitch_queue_interrupt(gq->ndev);
	}

	return IRQ_HANDLED;
}

static irqreturn_t rswitch_gwca_irq(int irq, void *dev_id)
{
	struct rswitch_private *priv = dev_id;
	u32 dis[RSWITCH_NUM_IRQ_REGS];
	irqreturn_t ret = IRQ_NONE;

	rswitch_get_data_irq_status(priv, dis);

	if (rswitch_is_any_data_irq(priv, dis, true) ||
	    rswitch_is_any_data_irq(priv, dis, false))
		ret = rswitch_data_irq(priv, dis);

	return ret;
}

static int rswitch_gwca_request_irqs(struct rswitch_private *priv)
{
	char *resource_name, *irq_name;
	int i, ret, irq;

	for (i = 0; i < GWCA_NUM_IRQS; i++) {
		resource_name = kasprintf(GFP_KERNEL, GWCA_IRQ_RESOURCE_NAME, i);
		if (!resource_name)
			return -ENOMEM;

		irq = platform_get_irq_byname(priv->pdev, resource_name);
		kfree(resource_name);
		if (irq < 0)
			return irq;

		irq_name = devm_kasprintf(&priv->pdev->dev, GFP_KERNEL,
					  GWCA_IRQ_NAME, i);
		if (!irq_name)
			return -ENOMEM;

		ret = devm_request_irq(&priv->pdev->dev, irq, rswitch_gwca_irq,
				       0, irq_name, priv);
		if (ret < 0)
			return ret;
	}

	return 0;
}

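/* Consume the timestamp descriptor queue: look up the saved TX skb by port
 * and tag, deliver the hardware timestamp through skb_tstamp_tx(), and
 * refill the consumed entries.
 */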
static void rswitch_ts(struct rswitch_private *priv)
{
	struct rswitch_gwca_queue *gq = &priv->gwca.ts_queue;
	struct skb_shared_hwtstamps shhwtstamps;
	struct rswitch_ts_desc *desc;
	struct rswitch_device *rdev;
	struct sk_buff *ts_skb;
	struct timespec64 ts;
	unsigned int num;
	u32 tag, port;

	desc = &gq->ts_ring[gq->cur];
	while ((desc->desc.die_dt & DT_MASK) != DT_FEMPTY_ND) {
		dma_rmb();

		port = TS_DESC_DPN(__le32_to_cpu(desc->desc.dptrl));
		if (unlikely(port >= RSWITCH_NUM_PORTS))
			goto next;
		rdev = priv->rdev[port];

		tag = TS_DESC_TSUN(__le32_to_cpu(desc->desc.dptrl));
		if (unlikely(tag >= TS_TAGS_PER_PORT))
			goto next;
		ts_skb = xchg(&rdev->ts_skb[tag], NULL);
		smp_mb(); /* order rdev->ts_skb[] read before bitmap update */
		clear_bit(tag, rdev->ts_skb_used);

		if (unlikely(!ts_skb))
			goto next;

		memset(&shhwtstamps, 0, sizeof(shhwtstamps));
		ts.tv_sec = __le32_to_cpu(desc->ts_sec);
		ts.tv_nsec = __le32_to_cpu(desc->ts_nsec & cpu_to_le32(0x3fffffff));
		shhwtstamps.hwtstamp = timespec64_to_ktime(ts);
		skb_tstamp_tx(ts_skb, &shhwtstamps);
		dev_consume_skb_irq(ts_skb);

next:
		gq->cur = rswitch_next_queue_index(gq, true, 1);
		desc = &gq->ts_ring[gq->cur];
	}

	num = rswitch_get_num_cur_queues(gq);
	rswitch_gwca_ts_queue_fill(priv, gq->dirty, num);
	gq->dirty = rswitch_next_queue_index(gq, false, num);
}

static irqreturn_t rswitch_gwca_ts_irq(int irq, void *dev_id)
{
	struct rswitch_private *priv = dev_id;

	if (ioread32(priv->addr + GWTSDIS) & GWCA_TS_IRQ_BIT) {
		iowrite32(GWCA_TS_IRQ_BIT, priv->addr + GWTSDIS);
		rswitch_ts(priv);

		return IRQ_HANDLED;
	}

	return IRQ_NONE;
}

static int rswitch_gwca_ts_request_irqs(struct rswitch_private *priv)
{
	int irq;

	irq = platform_get_irq_byname(priv->pdev, GWCA_TS_IRQ_RESOURCE_NAME);
	if (irq < 0)
		return irq;

	return devm_request_irq(&priv->pdev->dev, irq, rswitch_gwca_ts_irq,
				0, GWCA_TS_IRQ_NAME, priv);
}

/* Ethernet TSN Agent block (ETHA) and Ethernet MAC IP block (RMAC) */
static int rswitch_etha_change_mode(struct rswitch_etha *etha,
				    enum rswitch_etha_mode mode)
{
	int ret;

	if (!rswitch_agent_clock_is_enabled(etha->coma_addr, etha->index))
		rswitch_agent_clock_ctrl(etha->coma_addr, etha->index, 1);

	iowrite32(mode, etha->addr + EAMC);

	ret = rswitch_reg_wait(etha->addr, EAMS, EAMS_OPS_MASK, mode);

	if (mode == EAMC_OPC_DISABLE)
		rswitch_agent_clock_ctrl(etha->coma_addr, etha->index, 0);

	return ret;
}

static void rswitch_etha_read_mac_address(struct rswitch_etha *etha)
{
	u32 mrmac0 = ioread32(etha->addr + MRMAC0);
	u32 mrmac1 = ioread32(etha->addr + MRMAC1);
	u8 *mac = &etha->mac_addr[0];

	mac[0] = (mrmac0 >> 8) & 0xFF;
	mac[1] = (mrmac0 >> 0) & 0xFF;
	mac[2] = (mrmac1 >> 24) & 0xFF;
	mac[3] = (mrmac1 >> 16) & 0xFF;
	mac[4] = (mrmac1 >> 8) & 0xFF;
	mac[5] = (mrmac1 >> 0) & 0xFF;
}

static void rswitch_etha_write_mac_address(struct rswitch_etha *etha, const u8 *mac)
{
	iowrite32((mac[0] << 8) | mac[1], etha->addr + MRMAC0);
	iowrite32((mac[2] << 24) | (mac[3] << 16) | (mac[4] << 8) | mac[5],
		  etha->addr + MRMAC1);
}

static int rswitch_etha_wait_link_verification(struct rswitch_etha *etha)
{
	iowrite32(MLVC_PLV, etha->addr + MLVC);

	return rswitch_reg_wait(etha->addr, MLVC, MLVC_PLV, 0);
}

static void rswitch_rmac_setting(struct rswitch_etha *etha, const u8 *mac)
{
	u32 pis, lsc;

	rswitch_etha_write_mac_address(etha, mac);

	switch (etha->phy_interface) {
	case PHY_INTERFACE_MODE_SGMII:
		pis = MPIC_PIS_GMII;
		break;
	case PHY_INTERFACE_MODE_USXGMII:
	case PHY_INTERFACE_MODE_5GBASER:
		pis = MPIC_PIS_XGMII;
		break;
	default:
		pis = FIELD_GET(MPIC_PIS, ioread32(etha->addr + MPIC));
		break;
	}

	switch (etha->speed) {
	case 100:
		lsc = MPIC_LSC_100M;
		break;
	case 1000:
		lsc = MPIC_LSC_1G;
		break;
	case 2500:
		lsc = MPIC_LSC_2_5G;
		break;
	default:
		lsc = FIELD_GET(MPIC_LSC, ioread32(etha->addr + MPIC));
		break;
	}

	rswitch_modify(etha->addr, MPIC, MPIC_PIS | MPIC_LSC,
		       FIELD_PREP(MPIC_PIS, pis) | FIELD_PREP(MPIC_LSC, lsc));
}

static void rswitch_etha_enable_mii(struct rswitch_etha *etha)
{
	rswitch_modify(etha->addr, MPIC, MPIC_PSMCS | MPIC_PSMHT,
		       FIELD_PREP(MPIC_PSMCS, etha->psmcs) |
		       FIELD_PREP(MPIC_PSMHT, 0x06));
}

static int rswitch_etha_hw_init(struct rswitch_etha *etha, const u8 *mac)
{
	int err;

	err = rswitch_etha_change_mode(etha, EAMC_OPC_DISABLE);
	if (err < 0)
		return err;
	err = rswitch_etha_change_mode(etha, EAMC_OPC_CONFIG);
	if (err < 0)
		return err;

	iowrite32(EAVCC_VEM_SC_TAG, etha->addr + EAVCC);
	rswitch_rmac_setting(etha, mac);
	rswitch_etha_enable_mii(etha);

	err = rswitch_etha_wait_link_verification(etha);
	if (err < 0)
		return err;

	err = rswitch_etha_change_mode(etha, EAMC_OPC_DISABLE);
	if (err < 0)
		return err;

	return rswitch_etha_change_mode(etha, EAMC_OPC_OPERATION);
}

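/* Run one MDIO management frame through the MPSM register: program the
 * frame fields, wait for MPSM_PSME to clear, and, for reads, return the
 * data from the PRD field.
 */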
static int rswitch_etha_mpsm_op(struct rswitch_etha *etha, bool read,
				unsigned int mmf, unsigned int pda,
				unsigned int pra, unsigned int pop,
				unsigned int prd)
{
	u32 val;
	int ret;

	val = MPSM_PSME |
	      FIELD_PREP(MPSM_MFF, mmf) |
	      FIELD_PREP(MPSM_PDA, pda) |
	      FIELD_PREP(MPSM_PRA, pra) |
	      FIELD_PREP(MPSM_POP, pop) |
	      FIELD_PREP(MPSM_PRD, prd);
	iowrite32(val, etha->addr + MPSM);

	ret = rswitch_reg_wait(etha->addr, MPSM, MPSM_PSME, 0);
	if (ret)
		return ret;

	if (read) {
		val = ioread32(etha->addr + MPSM);
		ret = FIELD_GET(MPSM_PRD, val);
	}

	return ret;
}

static int rswitch_etha_mii_read_c45(struct mii_bus *bus, int addr, int devad,
				     int regad)
{
	struct rswitch_etha *etha = bus->priv;
	int ret;

	ret = rswitch_etha_mpsm_op(etha, false, MPSM_MMF_C45, addr, devad,
				   MPSM_POP_ADDRESS, regad);
	if (ret)
		return ret;

	return rswitch_etha_mpsm_op(etha, true, MPSM_MMF_C45, addr, devad,
				    MPSM_POP_READ_C45, 0);
}

static int rswitch_etha_mii_write_c45(struct mii_bus *bus, int addr, int devad,
				      int regad, u16 val)
{
	struct rswitch_etha *etha = bus->priv;
	int ret;

	ret = rswitch_etha_mpsm_op(etha, false, MPSM_MMF_C45, addr, devad,
				   MPSM_POP_ADDRESS, regad);
	if (ret)
		return ret;

	return rswitch_etha_mpsm_op(etha, false, MPSM_MMF_C45, addr, devad,
				    MPSM_POP_WRITE, val);
}

static int rswitch_etha_mii_read_c22(struct mii_bus *bus, int phyad, int regad)
{
	struct rswitch_etha *etha = bus->priv;

	return rswitch_etha_mpsm_op(etha, true, MPSM_MMF_C22, phyad, regad,
				    MPSM_POP_READ_C22, 0);
}

static int rswitch_etha_mii_write_c22(struct mii_bus *bus, int phyad,
				      int regad, u16 val)
{
	struct rswitch_etha *etha = bus->priv;

	return rswitch_etha_mpsm_op(etha, false, MPSM_MMF_C22, phyad, regad,
				    MPSM_POP_WRITE, val);
}

/* Call of_node_put(port) after done */
|
|
|
|
static struct device_node *rswitch_get_port_node(struct rswitch_device *rdev)
|
|
|
|
{
|
|
|
|
struct device_node *ports, *port;
|
|
|
|
int err = 0;
|
|
|
|
u32 index;
|
|
|
|
|
|
|
|
ports = of_get_child_by_name(rdev->ndev->dev.parent->of_node,
|
|
|
|
"ethernet-ports");
|
|
|
|
if (!ports)
|
|
|
|
return NULL;
|
|
|
|
|
2025-02-05 17:12:09 +01:00
|
|
|
for_each_available_child_of_node(ports, port) {
|
2022-10-31 21:32:41 +09:00
|
|
|
err = of_property_read_u32(port, "reg", &index);
|
|
|
|
if (err < 0) {
|
|
|
|
port = NULL;
|
|
|
|
goto out;
|
|
|
|
}
|
2025-02-05 17:12:09 +01:00
|
|
|
if (index == rdev->etha->index)
|
2022-10-31 21:32:41 +09:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
out:
|
|
|
|
of_node_put(ports);
|
|
|
|
|
|
|
|
return port;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int rswitch_etha_get_params(struct rswitch_device *rdev)
|
|
|
|
{
|
2023-02-01 22:14:54 +09:00
|
|
|
u32 max_speed;
|
2022-10-31 21:32:41 +09:00
|
|
|
int err;
|
|
|
|
|
2023-02-01 22:14:50 +09:00
|
|
|
if (!rdev->np_port)
|
2023-01-20 09:19:59 +09:00
|
|
|
return 0; /* ignored */
|
2022-10-31 21:32:41 +09:00
|
|
|
|
2023-02-01 22:14:50 +09:00
|
|
|
err = of_get_phy_mode(rdev->np_port, &rdev->etha->phy_interface);
|
2023-02-01 22:14:54 +09:00
|
|
|
if (err)
|
|
|
|
return err;
|
|
|
|
|
|
|
|
err = of_property_read_u32(rdev->np_port, "max-speed", &max_speed);
|
|
|
|
if (!err) {
|
|
|
|
rdev->etha->speed = max_speed;
|
|
|
|
return 0;
|
|
|
|
}
|
2022-10-31 21:32:41 +09:00
|
|
|
|
2023-02-01 22:14:54 +09:00
|
|
|
/* if no "max-speed" property, let's use default speed */
|
2022-10-31 21:32:41 +09:00
|
|
|
switch (rdev->etha->phy_interface) {
|
|
|
|
case PHY_INTERFACE_MODE_MII:
|
|
|
|
rdev->etha->speed = SPEED_100;
|
|
|
|
break;
|
|
|
|
case PHY_INTERFACE_MODE_SGMII:
|
|
|
|
rdev->etha->speed = SPEED_1000;
|
|
|
|
break;
|
|
|
|
case PHY_INTERFACE_MODE_USXGMII:
|
|
|
|
rdev->etha->speed = SPEED_2500;
|
|
|
|
break;
|
|
|
|
default:
|
2023-02-01 22:14:54 +09:00
|
|
|
return -EINVAL;
|
2022-10-31 21:32:41 +09:00
|
|
|
}
|
|
|
|
|
2023-02-01 22:14:54 +09:00
|
|
|
return 0;
|
2022-10-31 21:32:41 +09:00
|
|
|
}

static int rswitch_mii_register(struct rswitch_device *rdev)
{
	struct device_node *mdio_np;
	struct mii_bus *mii_bus;
	int err;

	mii_bus = mdiobus_alloc();
	if (!mii_bus)
		return -ENOMEM;

	mii_bus->name = "rswitch_mii";
	sprintf(mii_bus->id, "etha%d", rdev->etha->index);
	mii_bus->priv = rdev->etha;
	mii_bus->read_c45 = rswitch_etha_mii_read_c45;
	mii_bus->write_c45 = rswitch_etha_mii_write_c45;
	mii_bus->read = rswitch_etha_mii_read_c22;
	mii_bus->write = rswitch_etha_mii_write_c22;
	mii_bus->parent = &rdev->priv->pdev->dev;

	mdio_np = of_get_child_by_name(rdev->np_port, "mdio");
	err = of_mdiobus_register(mii_bus, mdio_np);
	if (err < 0) {
		mdiobus_free(mii_bus);
		goto out;
	}

	rdev->etha->mii = mii_bus;

out:
	of_node_put(mdio_np);

	return err;
}

static void rswitch_mii_unregister(struct rswitch_device *rdev)
{
	if (rdev->etha->mii) {
		mdiobus_unregister(rdev->etha->mii);
		mdiobus_free(rdev->etha->mii);
		rdev->etha->mii = NULL;
	}
}
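
/* Link-change callback invoked by the phylib state machine: power the SerDes
 * up or down to follow the PHY link state, and reprogram the ETHA block when
 * the negotiated speed changes (unless runtime speed changes are disabled for
 * this SoC revision via etha_no_runtime_change).
 */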

static void rswitch_adjust_link(struct net_device *ndev)
{
	struct rswitch_device *rdev = netdev_priv(ndev);
	struct phy_device *phydev = ndev->phydev;

	if (phydev->link != rdev->etha->link) {
		phy_print_status(phydev);
		if (phydev->link)
			phy_power_on(rdev->serdes);
		else if (rdev->serdes->power_count)
			phy_power_off(rdev->serdes);

		rdev->etha->link = phydev->link;

		if (!rdev->priv->etha_no_runtime_change &&
		    phydev->speed != rdev->etha->speed) {
			rdev->etha->speed = phydev->speed;

			rswitch_etha_hw_init(rdev->etha, rdev->ndev->dev_addr);
			phy_set_speed(rdev->serdes, rdev->etha->speed);
		}
	}
}

static void rswitch_phy_remove_link_mode(struct rswitch_device *rdev,
					 struct phy_device *phydev)
{
	if (!rdev->priv->etha_no_runtime_change)
		return;

	switch (rdev->etha->speed) {
	case SPEED_2500:
		phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Full_BIT);
		phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Full_BIT);
		break;
	case SPEED_1000:
		phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_2500baseX_Full_BIT);
		phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Full_BIT);
		break;
	case SPEED_100:
		phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_2500baseX_Full_BIT);
		phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Full_BIT);
		break;
	default:
		break;
	}

	phy_set_max_speed(phydev, rdev->etha->speed);
}

static int rswitch_phy_device_init(struct rswitch_device *rdev)
{
	struct phy_device *phydev;
	struct device_node *phy;
	int err = -ENOENT;

	if (!rdev->np_port)
		return -ENODEV;

	phy = of_parse_phandle(rdev->np_port, "phy-handle", 0);
	if (!phy)
		return -ENODEV;

	/* Set phydev->host_interfaces before calling of_phy_connect() to
	 * configure the PHY with the information of host_interfaces.
	 */
	phydev = of_phy_find_device(phy);
	if (!phydev)
		goto out;
	__set_bit(rdev->etha->phy_interface, phydev->host_interfaces);
	phydev->mac_managed_pm = true;

	phydev = of_phy_connect(rdev->ndev, phy, rswitch_adjust_link, 0,
				rdev->etha->phy_interface);
	if (!phydev)
		goto out;

	phy_set_max_speed(phydev, SPEED_2500);
	phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Half_BIT);
	phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Full_BIT);
	phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT);
	phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
	rswitch_phy_remove_link_mode(rdev, phydev);

	phy_attached_info(phydev);

	err = 0;
out:
	of_node_put(phy);

	return err;
}

static void rswitch_phy_device_deinit(struct rswitch_device *rdev)
{
	if (rdev->ndev->phydev)
		phy_disconnect(rdev->ndev->phydev);
}

static int rswitch_serdes_set_params(struct rswitch_device *rdev)
{
	int err;

	err = phy_set_mode_ext(rdev->serdes, PHY_MODE_ETHERNET,
			       rdev->etha->phy_interface);
	if (err < 0)
		return err;

	return phy_set_speed(rdev->serdes, rdev->etha->speed);
}
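
/* Per-port bring-up order: program the ETHA block, register the MDIO bus,
 * connect the PHY, then fetch and configure the SerDes instance. On error
 * the labels at the bottom unwind in the reverse order.
 */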

static int rswitch_ether_port_init_one(struct rswitch_device *rdev)
{
	int err;

	if (!rdev->etha->operated) {
		err = rswitch_etha_hw_init(rdev->etha, rdev->ndev->dev_addr);
		if (err < 0)
			return err;
		if (rdev->priv->etha_no_runtime_change)
			rdev->etha->operated = true;
	}

	err = rswitch_mii_register(rdev);
	if (err < 0)
		return err;

	err = rswitch_phy_device_init(rdev);
	if (err < 0)
		goto err_phy_device_init;

	rdev->serdes = devm_of_phy_get(&rdev->priv->pdev->dev, rdev->np_port, NULL);
	if (IS_ERR(rdev->serdes)) {
		err = PTR_ERR(rdev->serdes);
		goto err_serdes_phy_get;
	}

	err = rswitch_serdes_set_params(rdev);
	if (err < 0)
		goto err_serdes_set_params;

	return 0;

err_serdes_set_params:
err_serdes_phy_get:
	rswitch_phy_device_deinit(rdev);

err_phy_device_init:
	rswitch_mii_unregister(rdev);

	return err;
}

static void rswitch_ether_port_deinit_one(struct rswitch_device *rdev)
{
	rswitch_phy_device_deinit(rdev);
	rswitch_mii_unregister(rdev);
}

static int rswitch_ether_port_init_all(struct rswitch_private *priv)
{
	unsigned int i;
	int err;

	rswitch_for_each_enabled_port(priv, i) {
		err = rswitch_ether_port_init_one(priv->rdev[i]);
		if (err)
			goto err_init_one;
	}

	rswitch_for_each_enabled_port(priv, i) {
		err = phy_init(priv->rdev[i]->serdes);
		if (err)
			goto err_serdes;
	}

	return 0;

err_serdes:
	rswitch_for_each_enabled_port_continue_reverse(priv, i)
		phy_exit(priv->rdev[i]->serdes);
	i = RSWITCH_NUM_PORTS;

err_init_one:
	rswitch_for_each_enabled_port_continue_reverse(priv, i)
		rswitch_ether_port_deinit_one(priv->rdev[i]);

	return err;
}

static void rswitch_ether_port_deinit_all(struct rswitch_private *priv)
{
	unsigned int i;

	rswitch_for_each_enabled_port(priv, i) {
		phy_exit(priv->rdev[i]->serdes);
		rswitch_ether_port_deinit_one(priv->rdev[i]);
	}
}
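
/* The gPTP timestamp interrupt (GWTSDIE/GWTSDID) is shared by all ports, so
 * it is enabled only when the first port opens and disabled again once the
 * last port closes; the opened_ports bitmap tracks which ports are up.
 */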

static int rswitch_open(struct net_device *ndev)
{
	struct rswitch_device *rdev = netdev_priv(ndev);
	unsigned long flags;

	if (bitmap_empty(rdev->priv->opened_ports, RSWITCH_NUM_PORTS))
		iowrite32(GWCA_TS_IRQ_BIT, rdev->priv->addr + GWTSDIE);

	napi_enable(&rdev->napi);

	spin_lock_irqsave(&rdev->priv->lock, flags);
	bitmap_set(rdev->priv->opened_ports, rdev->port, 1);
	rswitch_enadis_data_irq(rdev->priv, rdev->tx_queue->index, true);
	rswitch_enadis_data_irq(rdev->priv, rdev->rx_queue->index, true);
	spin_unlock_irqrestore(&rdev->priv->lock, flags);

	phy_start(ndev->phydev);

	netif_start_queue(ndev);

	return 0;
}

static int rswitch_stop(struct net_device *ndev)
{
	struct rswitch_device *rdev = netdev_priv(ndev);
	struct sk_buff *ts_skb;
	unsigned long flags;
	unsigned int tag;

	netif_tx_stop_all_queues(ndev);

	phy_stop(ndev->phydev);

	spin_lock_irqsave(&rdev->priv->lock, flags);
	rswitch_enadis_data_irq(rdev->priv, rdev->tx_queue->index, false);
	rswitch_enadis_data_irq(rdev->priv, rdev->rx_queue->index, false);
	bitmap_clear(rdev->priv->opened_ports, rdev->port, 1);
	spin_unlock_irqrestore(&rdev->priv->lock, flags);

	napi_disable(&rdev->napi);

	if (bitmap_empty(rdev->priv->opened_ports, RSWITCH_NUM_PORTS))
		iowrite32(GWCA_TS_IRQ_BIT, rdev->priv->addr + GWTSDID);

	/* Free any skbs still waiting for a TX timestamp completion. */
	for (tag = find_first_bit(rdev->ts_skb_used, TS_TAGS_PER_PORT);
	     tag < TS_TAGS_PER_PORT;
	     tag = find_next_bit(rdev->ts_skb_used, TS_TAGS_PER_PORT, tag + 1)) {
		ts_skb = xchg(&rdev->ts_skb[tag], NULL);
		clear_bit(tag, rdev->ts_skb_used);
		if (ts_skb)
			dev_kfree_skb(ts_skb);
	}

	return 0;
}

static bool rswitch_ext_desc_set_info1(struct rswitch_device *rdev,
				       struct sk_buff *skb,
				       struct rswitch_ext_desc *desc)
{
	desc->info1 = cpu_to_le64(INFO1_DV(BIT(rdev->etha->index)) |
				  INFO1_IPV(GWCA_IPV_NUM) | INFO1_FMT);
	if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
		unsigned int tag;

		tag = find_first_zero_bit(rdev->ts_skb_used, TS_TAGS_PER_PORT);
		if (tag == TS_TAGS_PER_PORT)
			return false;
		smp_mb(); /* order bitmap read before rdev->ts_skb[] write */
		rdev->ts_skb[tag] = skb_get(skb);
		set_bit(tag, rdev->ts_skb_used);

		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		desc->info1 |= cpu_to_le64(INFO1_TSUN(tag) | INFO1_TXC);

		skb_tx_timestamp(skb);
	}

	return true;
}

static bool rswitch_ext_desc_set(struct rswitch_device *rdev,
				 struct sk_buff *skb,
				 struct rswitch_ext_desc *desc,
				 dma_addr_t dma_addr, u16 len, u8 die_dt)
{
	rswitch_desc_set_dptr(&desc->desc, dma_addr);
	desc->desc.info_ds = cpu_to_le16(len);
	if (!rswitch_ext_desc_set_info1(rdev, skb, desc))
		return false;

	dma_wmb();

	desc->desc.die_dt = die_dt;

	return true;
}

static u8 rswitch_ext_desc_get_die_dt(unsigned int nr_desc, unsigned int index)
{
	if (nr_desc == 1)
		return DT_FSINGLE | DIE;
	if (index == 0)
		return DT_FSTART;
	if (nr_desc - 1 == index)
		return DT_FEND | DIE;
	return DT_FMID;
}
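
/* For example, a frame that needs three descriptors is emitted as DT_FSTART,
 * DT_FMID, DT_FEND | DIE: only single and final descriptors raise the
 * descriptor interrupt (DIE). Assuming RSWITCH_DESC_BUF_SIZE is 2048 (see
 * rswitch.h), a 5000-byte skb maps to exactly that chain: two full 2048-byte
 * buffers plus a 904-byte tail in the DT_FEND descriptor.
 */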

static u16 rswitch_ext_desc_get_len(u8 die_dt, unsigned int orig_len)
{
	switch (die_dt & DT_MASK) {
	case DT_FSINGLE:
	case DT_FEND:
		return (orig_len % RSWITCH_DESC_BUF_SIZE) ?: RSWITCH_DESC_BUF_SIZE;
	case DT_FSTART:
	case DT_FMID:
		return RSWITCH_DESC_BUF_SIZE;
	default:
		return 0;
	}
}

/* Note: ->ndo_start_xmit() must return netdev_tx_t, not int; a mismatched
 * return type is rejected by clang's kernel CFI (kCFI) indirect-call checks.
 */
static netdev_tx_t rswitch_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct rswitch_device *rdev = netdev_priv(ndev);
	struct rswitch_gwca_queue *gq = rdev->tx_queue;
	dma_addr_t dma_addr, dma_addr_orig;
	netdev_tx_t ret = NETDEV_TX_OK;
	struct rswitch_ext_desc *desc;
	unsigned int i, nr_desc;
	u8 die_dt;
	u16 len;

	nr_desc = (skb->len - 1) / RSWITCH_DESC_BUF_SIZE + 1;
	if (rswitch_get_num_cur_queues(gq) >= gq->ring_size - nr_desc) {
		netif_stop_subqueue(ndev, 0);
		return NETDEV_TX_BUSY;
	}

	if (skb_put_padto(skb, ETH_ZLEN))
		return ret;

	dma_addr_orig = dma_map_single(ndev->dev.parent, skb->data, skb->len, DMA_TO_DEVICE);
	if (dma_mapping_error(ndev->dev.parent, dma_addr_orig))
		goto err_kfree;

	/* Store the skb at the last descriptor so it cannot be freed before
	 * the hardware has finished sending the whole chain.
	 */
	gq->skbs[(gq->cur + nr_desc - 1) % gq->ring_size] = skb;
	gq->unmap_addrs[(gq->cur + nr_desc - 1) % gq->ring_size] = dma_addr_orig;

	dma_wmb();

	/* DT_FSTART must be written last so the hardware never sees an
	 * incomplete chain; hence the descriptors are filled in reverse order.
	 */
	for (i = nr_desc; i-- > 0; ) {
		desc = &gq->tx_ring[rswitch_next_queue_index(gq, true, i)];
		die_dt = rswitch_ext_desc_get_die_dt(nr_desc, i);
		dma_addr = dma_addr_orig + i * RSWITCH_DESC_BUF_SIZE;
		len = rswitch_ext_desc_get_len(die_dt, skb->len);
		if (!rswitch_ext_desc_set(rdev, skb, desc, dma_addr, len, die_dt))
			goto err_unmap;
	}

	gq->cur = rswitch_next_queue_index(gq, true, nr_desc);
	rswitch_modify(rdev->addr, GWTRC(gq->index), 0, BIT(gq->index % 32));

	return ret;

err_unmap:
	gq->skbs[(gq->cur + nr_desc - 1) % gq->ring_size] = NULL;
	dma_unmap_single(ndev->dev.parent, dma_addr_orig, skb->len, DMA_TO_DEVICE);

err_kfree:
	dev_kfree_skb_any(skb);

	return ret;
}

static struct net_device_stats *rswitch_get_stats(struct net_device *ndev)
{
	return &ndev->stats;
}

static int rswitch_hwstamp_get(struct net_device *ndev, struct ifreq *req)
{
	struct rswitch_device *rdev = netdev_priv(ndev);
	struct rcar_gen4_ptp_private *ptp_priv;
	struct hwtstamp_config config;

	ptp_priv = rdev->priv->ptp_priv;

	config.flags = 0;
	config.tx_type = ptp_priv->tstamp_tx_ctrl ? HWTSTAMP_TX_ON :
						    HWTSTAMP_TX_OFF;
	switch (ptp_priv->tstamp_rx_ctrl & RCAR_GEN4_RXTSTAMP_TYPE) {
	case RCAR_GEN4_RXTSTAMP_TYPE_V2_L2_EVENT:
		config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
		break;
	case RCAR_GEN4_RXTSTAMP_TYPE_ALL:
		config.rx_filter = HWTSTAMP_FILTER_ALL;
		break;
	default:
		config.rx_filter = HWTSTAMP_FILTER_NONE;
		break;
	}

	return copy_to_user(req->ifr_data, &config, sizeof(config)) ? -EFAULT : 0;
}

static int rswitch_hwstamp_set(struct net_device *ndev, struct ifreq *req)
{
	struct rswitch_device *rdev = netdev_priv(ndev);
	u32 tstamp_rx_ctrl = RCAR_GEN4_RXTSTAMP_ENABLED;
	struct hwtstamp_config config;
	u32 tstamp_tx_ctrl;

	if (copy_from_user(&config, req->ifr_data, sizeof(config)))
		return -EFAULT;

	if (config.flags)
		return -EINVAL;

	switch (config.tx_type) {
	case HWTSTAMP_TX_OFF:
		tstamp_tx_ctrl = 0;
		break;
	case HWTSTAMP_TX_ON:
		tstamp_tx_ctrl = RCAR_GEN4_TXTSTAMP_ENABLED;
		break;
	default:
		return -ERANGE;
	}

	switch (config.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		tstamp_rx_ctrl = 0;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
		tstamp_rx_ctrl |= RCAR_GEN4_RXTSTAMP_TYPE_V2_L2_EVENT;
		break;
	default:
		config.rx_filter = HWTSTAMP_FILTER_ALL;
		tstamp_rx_ctrl |= RCAR_GEN4_RXTSTAMP_TYPE_ALL;
		break;
	}

	rdev->priv->ptp_priv->tstamp_tx_ctrl = tstamp_tx_ctrl;
	rdev->priv->ptp_priv->tstamp_rx_ctrl = tstamp_rx_ctrl;

	return copy_to_user(req->ifr_data, &config, sizeof(config)) ? -EFAULT : 0;
}
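
/* From userspace, hardware timestamping is requested with the standard
 * SIOCSHWTSTAMP ioctl. A minimal sketch, assuming an interface named "tsn0"
 * and omitting error handling:
 *
 *	struct hwtstamp_config cfg = {
 *		.tx_type = HWTSTAMP_TX_ON,
 *		.rx_filter = HWTSTAMP_FILTER_ALL,
 *	};
 *	struct ifreq ifr = { .ifr_data = (void *)&cfg };
 *
 *	strncpy(ifr.ifr_name, "tsn0", IFNAMSIZ - 1);
 *	ioctl(sockfd, SIOCSHWTSTAMP, &ifr);
 */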

static int rswitch_eth_ioctl(struct net_device *ndev, struct ifreq *req, int cmd)
{
	if (!netif_running(ndev))
		return -EINVAL;

	switch (cmd) {
	case SIOCGHWTSTAMP:
		return rswitch_hwstamp_get(ndev, req);
	case SIOCSHWTSTAMP:
		return rswitch_hwstamp_set(ndev, req);
	default:
		return phy_mii_ioctl(ndev->phydev, req, cmd);
	}
}

static const struct net_device_ops rswitch_netdev_ops = {
	.ndo_open = rswitch_open,
	.ndo_stop = rswitch_stop,
	.ndo_start_xmit = rswitch_start_xmit,
	.ndo_get_stats = rswitch_get_stats,
	.ndo_eth_ioctl = rswitch_eth_ioctl,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_set_mac_address = eth_mac_addr,
};

static int rswitch_get_ts_info(struct net_device *ndev, struct kernel_ethtool_ts_info *info)
{
	struct rswitch_device *rdev = netdev_priv(ndev);

	info->phc_index = ptp_clock_index(rdev->priv->ptp_priv->clock);
	info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
				SOF_TIMESTAMPING_TX_HARDWARE |
				SOF_TIMESTAMPING_RX_HARDWARE |
				SOF_TIMESTAMPING_RAW_HARDWARE;
	info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON);
	info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) | BIT(HWTSTAMP_FILTER_ALL);

	return 0;
}

static const struct ethtool_ops rswitch_ethtool_ops = {
	.get_ts_info = rswitch_get_ts_info,
	.get_link_ksettings = phy_ethtool_get_link_ksettings,
	.set_link_ksettings = phy_ethtool_set_link_ksettings,
};

static const struct of_device_id renesas_eth_sw_of_table[] = {
	{ .compatible = "renesas,r8a779f0-ether-switch", },
	{ }
};
MODULE_DEVICE_TABLE(of, renesas_eth_sw_of_table);

static void rswitch_etha_init(struct rswitch_private *priv, unsigned int index)
{
	struct rswitch_etha *etha = &priv->etha[index];

	memset(etha, 0, sizeof(*etha));
	etha->index = index;
	etha->addr = priv->addr + RSWITCH_ETHA_OFFSET + index * RSWITCH_ETHA_SIZE;
	etha->coma_addr = priv->addr;

	/* MPIC.PSMCS = (clk [MHz] / (MDC frequency [MHz] * 2)) - 1.
	 * The PSMCS value is calculated for an MDC frequency of 2.5 MHz, so
	 * multiply both the numerator and the denominator by 10.
	 */
	etha->psmcs = clk_get_rate(priv->clk) / 100000 / (25 * 2) - 1;
}
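
/* Worked example with a hypothetical 320 MHz peripheral clock:
 * 320000000 / 100000 = 3200, 3200 / 50 = 64, so PSMCS = 63, which gives the
 * target 2.5 MHz MDC rate: 320 MHz / (2 * (63 + 1)) = 2.5 MHz.
 */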

static int rswitch_device_alloc(struct rswitch_private *priv, unsigned int index)
{
	struct platform_device *pdev = priv->pdev;
	struct rswitch_device *rdev;
	struct net_device *ndev;
	int err;

	if (index >= RSWITCH_NUM_PORTS)
		return -EINVAL;

	ndev = alloc_etherdev_mqs(sizeof(struct rswitch_device), 1, 1);
	if (!ndev)
		return -ENOMEM;

	SET_NETDEV_DEV(ndev, &pdev->dev);
	ether_setup(ndev);

	rdev = netdev_priv(ndev);
	rdev->ndev = ndev;
	rdev->priv = priv;
	priv->rdev[index] = rdev;
	rdev->port = index;
	rdev->etha = &priv->etha[index];
	rdev->addr = priv->addr;

	ndev->base_addr = (unsigned long)rdev->addr;
	snprintf(ndev->name, IFNAMSIZ, "tsn%d", index);
	ndev->netdev_ops = &rswitch_netdev_ops;
	ndev->ethtool_ops = &rswitch_ethtool_ops;
	ndev->max_mtu = RSWITCH_MAX_MTU;
	ndev->min_mtu = ETH_MIN_MTU;

	netif_napi_add(ndev, &rdev->napi, rswitch_poll);

	rdev->np_port = rswitch_get_port_node(rdev);
	rdev->disabled = !rdev->np_port;
	err = of_get_ethdev_address(rdev->np_port, ndev);
	if (err) {
		if (is_valid_ether_addr(rdev->etha->mac_addr))
			eth_hw_addr_set(ndev, rdev->etha->mac_addr);
		else
			eth_hw_addr_random(ndev);
	}

	err = rswitch_etha_get_params(rdev);
	if (err < 0)
		goto out_get_params;

	err = rswitch_rxdmac_alloc(ndev);
	if (err < 0)
		goto out_rxdmac;

	err = rswitch_txdmac_alloc(ndev);
	if (err < 0)
		goto out_txdmac;

	return 0;

out_txdmac:
	rswitch_rxdmac_free(ndev);

out_rxdmac:
out_get_params:
	of_node_put(rdev->np_port);
	netif_napi_del(&rdev->napi);
	free_netdev(ndev);

	return err;
}

static void rswitch_device_free(struct rswitch_private *priv, unsigned int index)
{
	struct rswitch_device *rdev = priv->rdev[index];
	struct net_device *ndev = rdev->ndev;

	rswitch_txdmac_free(ndev);
	rswitch_rxdmac_free(ndev);
	of_node_put(rdev->np_port);
	netif_napi_del(&rdev->napi);
	free_netdev(ndev);
}

static int rswitch_init(struct rswitch_private *priv)
{
	unsigned int i;
	int err;

	for (i = 0; i < RSWITCH_NUM_PORTS; i++)
		rswitch_etha_init(priv, i);

	rswitch_clock_enable(priv);
	for (i = 0; i < RSWITCH_NUM_PORTS; i++)
		rswitch_etha_read_mac_address(&priv->etha[i]);

	rswitch_reset(priv);

	rswitch_clock_enable(priv);
	rswitch_top_init(priv);
	err = rswitch_bpool_config(priv);
	if (err < 0)
		return err;

	rswitch_coma_init(priv);

	err = rswitch_gwca_linkfix_alloc(priv);
	if (err < 0)
		return -ENOMEM;

	err = rswitch_gwca_ts_queue_alloc(priv);
	if (err < 0)
		goto err_ts_queue_alloc;

	for (i = 0; i < RSWITCH_NUM_PORTS; i++) {
		err = rswitch_device_alloc(priv, i);
		if (err < 0) {
			for (; i-- > 0; )
				rswitch_device_free(priv, i);
			goto err_device_alloc;
		}
	}

	rswitch_fwd_init(priv);

	err = rcar_gen4_ptp_register(priv->ptp_priv, RCAR_GEN4_PTP_REG_LAYOUT,
				     clk_get_rate(priv->clk));
	if (err < 0)
		goto err_ptp_register;

	err = rswitch_gwca_request_irqs(priv);
	if (err < 0)
		goto err_gwca_request_irq;

	err = rswitch_gwca_ts_request_irqs(priv);
	if (err < 0)
		goto err_gwca_ts_request_irq;

	err = rswitch_gwca_hw_init(priv);
	if (err < 0)
		goto err_gwca_hw_init;

	err = rswitch_ether_port_init_all(priv);
	if (err)
		goto err_ether_port_init_all;

	rswitch_for_each_enabled_port(priv, i) {
		err = register_netdev(priv->rdev[i]->ndev);
		if (err) {
			rswitch_for_each_enabled_port_continue_reverse(priv, i)
				unregister_netdev(priv->rdev[i]->ndev);
			goto err_register_netdev;
		}
	}

	rswitch_for_each_enabled_port(priv, i)
		netdev_info(priv->rdev[i]->ndev, "MAC address %pM\n",
			    priv->rdev[i]->ndev->dev_addr);

	return 0;

err_register_netdev:
	rswitch_ether_port_deinit_all(priv);

err_ether_port_init_all:
	rswitch_gwca_hw_deinit(priv);

err_gwca_hw_init:
err_gwca_ts_request_irq:
err_gwca_request_irq:
	rcar_gen4_ptp_unregister(priv->ptp_priv);

err_ptp_register:
	for (i = 0; i < RSWITCH_NUM_PORTS; i++)
		rswitch_device_free(priv, i);

err_device_alloc:
	rswitch_gwca_ts_queue_free(priv);

err_ts_queue_alloc:
	rswitch_gwca_linkfix_free(priv);

	return err;
}

static const struct soc_device_attribute rswitch_soc_no_speed_change[] = {
	{ .soc_id = "r8a779f0", .revision = "ES1.0" },
	{ /* Sentinel */ }
};
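
/* All switch blocks (COMA, ETHA, GWCA, gPTP) live in the single "secure_base"
 * register window mapped below; their per-block base addresses are computed
 * as fixed offsets into it.
 */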

static int renesas_eth_sw_probe(struct platform_device *pdev)
{
	const struct soc_device_attribute *attr;
	struct rswitch_private *priv;
	struct resource *res;
	int ret;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "secure_base");
	if (!res) {
		dev_err(&pdev->dev, "invalid resource\n");
		return -EINVAL;
	}

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;
	spin_lock_init(&priv->lock);

	priv->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(priv->clk))
		return PTR_ERR(priv->clk);

	attr = soc_device_match(rswitch_soc_no_speed_change);
	if (attr)
		priv->etha_no_runtime_change = true;

	priv->ptp_priv = rcar_gen4_ptp_alloc(pdev);
	if (!priv->ptp_priv)
		return -ENOMEM;

	platform_set_drvdata(pdev, priv);
	priv->pdev = pdev;
	priv->addr = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(priv->addr))
		return PTR_ERR(priv->addr);

	priv->ptp_priv->addr = priv->addr + RCAR_GEN4_GPTP_OFFSET_S4;

	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40));
	if (ret < 0) {
		ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (ret < 0)
			return ret;
	}

	priv->gwca.index = AGENT_INDEX_GWCA;
	priv->gwca.num_queues = min(RSWITCH_NUM_PORTS * NUM_QUEUES_PER_NDEV,
				    RSWITCH_MAX_NUM_QUEUES);
	priv->gwca.queues = devm_kcalloc(&pdev->dev, priv->gwca.num_queues,
					 sizeof(*priv->gwca.queues), GFP_KERNEL);
	if (!priv->gwca.queues)
		return -ENOMEM;

	pm_runtime_enable(&pdev->dev);
	pm_runtime_get_sync(&pdev->dev);

	ret = rswitch_init(priv);
	if (ret < 0) {
		pm_runtime_put(&pdev->dev);
		pm_runtime_disable(&pdev->dev);
		return ret;
	}

	device_set_wakeup_capable(&pdev->dev, 1);

	return ret;
}

static void rswitch_deinit(struct rswitch_private *priv)
{
	unsigned int i;

	rswitch_gwca_hw_deinit(priv);
	rcar_gen4_ptp_unregister(priv->ptp_priv);

	rswitch_for_each_enabled_port(priv, i) {
		struct rswitch_device *rdev = priv->rdev[i];

		unregister_netdev(rdev->ndev);
		rswitch_ether_port_deinit_one(rdev);
		phy_exit(priv->rdev[i]->serdes);
	}

	for (i = 0; i < RSWITCH_NUM_PORTS; i++)
		rswitch_device_free(priv, i);

	rswitch_gwca_ts_queue_free(priv);
	rswitch_gwca_linkfix_free(priv);

	rswitch_clock_disable(priv);
}

static void renesas_eth_sw_remove(struct platform_device *pdev)
{
	struct rswitch_private *priv = platform_get_drvdata(pdev);

	rswitch_deinit(priv);

	pm_runtime_put(&pdev->dev);
	pm_runtime_disable(&pdev->dev);

	platform_set_drvdata(pdev, NULL);
}
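
/* System sleep: on suspend, detach and stop any running port and power down
 * its SerDes; on resume, re-initialize the SerDes and reopen the port before
 * reattaching it.
 */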
static int renesas_eth_sw_suspend(struct device *dev)
{
	struct rswitch_private *priv = dev_get_drvdata(dev);
	struct net_device *ndev;
	unsigned int i;

	rswitch_for_each_enabled_port(priv, i) {
		ndev = priv->rdev[i]->ndev;
		if (netif_running(ndev)) {
			netif_device_detach(ndev);
			rswitch_stop(ndev);
		}
		if (priv->rdev[i]->serdes->init_count)
			phy_exit(priv->rdev[i]->serdes);
	}

	return 0;
}

static int renesas_eth_sw_resume(struct device *dev)
{
	struct rswitch_private *priv = dev_get_drvdata(dev);
	struct net_device *ndev;
	unsigned int i;

	rswitch_for_each_enabled_port(priv, i) {
		phy_init(priv->rdev[i]->serdes);
		ndev = priv->rdev[i]->ndev;
		if (netif_running(ndev)) {
			rswitch_open(ndev);
			netif_device_attach(ndev);
		}
	}

	return 0;
}

static DEFINE_SIMPLE_DEV_PM_OPS(renesas_eth_sw_pm_ops, renesas_eth_sw_suspend,
				renesas_eth_sw_resume);

static struct platform_driver renesas_eth_sw_driver_platform = {
	.probe = renesas_eth_sw_probe,
	.remove = renesas_eth_sw_remove,
	.driver = {
		.name = "renesas_eth_sw",
		.pm = pm_sleep_ptr(&renesas_eth_sw_pm_ops),
		.of_match_table = renesas_eth_sw_of_table,
	}
};
module_platform_driver(renesas_eth_sw_driver_platform);
MODULE_AUTHOR("Yoshihiro Shimoda");
MODULE_DESCRIPTION("Renesas Ethernet Switch device driver");
MODULE_LICENSE("GPL");