// SPDX-License-Identifier: GPL-2.0
/*
 * Texas Instruments Ethernet Switch Driver
 *
 * Copyright (C) 2012 Texas Instruments
 *
 */

#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/timer.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/irqreturn.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/net_tstamp.h>
#include <linux/phy.h>
#include <linux/phy/phy.h>
#include <linux/workqueue.h>
#include <linux/delay.h>
#include <linux/pm_runtime.h>
#include <linux/gpio/consumer.h>
#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_platform.h>
#include <linux/if_vlan.h>
#include <linux/kmemleak.h>
#include <linux/sys_soc.h>
#include <net/page_pool/helpers.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>

#include <linux/pinctrl/consumer.h>
#include <net/pkt_cls.h>

#include "cpsw.h"
#include "cpsw_ale.h"
#include "cpsw_priv.h"
#include "cpsw_sl.h"
#include "cpts.h"
#include "davinci_cpdma.h"

#include <net/pkt_sched.h>

static int debug_level;
module_param(debug_level, int, 0);
MODULE_PARM_DESC(debug_level, "cpsw debug level (NETIF_MSG bits)");

static int ale_ageout = 10;
module_param(ale_ageout, int, 0);
MODULE_PARM_DESC(ale_ageout, "cpsw ale ageout interval (seconds)");

static int rx_packet_max = CPSW_MAX_PACKET_SIZE;
module_param(rx_packet_max, int, 0);
MODULE_PARM_DESC(rx_packet_max, "maximum receive packet size (bytes)");

static int descs_pool_size = CPSW_CPDMA_DESCS_POOL_SIZE_DEFAULT;
module_param(descs_pool_size, int, 0444);
MODULE_PARM_DESC(descs_pool_size, "Number of CPDMA CPPI descriptors in pool");

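/*
 * Iterate over the slave ports relevant for @priv: in dual_emac mode only the
 * slave bound to this netdev (priv->emac_port) is visited, otherwise every
 * slave in cpsw->data.slaves is visited. Illustrative use elsewhere in this
 * driver: for_each_slave(priv, cpsw_slave_stop, cpsw).
 */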
#define for_each_slave(priv, func, arg...) \
	do {								\
		struct cpsw_slave *slave;				\
		struct cpsw_common *cpsw = (priv)->cpsw;		\
		int n;							\
		if (cpsw->data.dual_emac)				\
			(func)((cpsw)->slaves + priv->emac_port, ##arg);\
		else							\
			for (n = cpsw->data.slaves,			\
			     slave = cpsw->slaves;			\
			     n; n--)					\
				(func)(slave++, ##arg);			\
	} while (0)

static int cpsw_slave_index_priv(struct cpsw_common *cpsw,
				 struct cpsw_priv *priv)
{
	return cpsw->data.dual_emac ? priv->emac_port : cpsw->data.active_slave;
}

static int cpsw_get_slave_port(u32 slave_num)
{
	return slave_num + 1;
}

static int cpsw_ndo_vlan_rx_add_vid(struct net_device *ndev,
				    __be16 proto, u16 vid);

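/*
 * Toggle promiscuous handling in the ALE. In dual_emac mode the ALE is put
 * into (or taken out of) bypass mode, which is necessarily common to both
 * interfaces since they share the switch hardware. In switch mode, address
 * learning is disabled, the table is aged out and unknown-unicast flooding to
 * the host port is enabled instead (and the reverse when disabling).
 */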
static void cpsw_set_promiscious(struct net_device *ndev, bool enable)
{
	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
	struct cpsw_ale *ale = cpsw->ale;
	int i;

	if (cpsw->data.dual_emac) {
		bool flag = false;

		/* Enabling promiscuous mode for one interface is
		 * common to both interfaces, as they share the same
		 * hardware resource.
		 */
		for (i = 0; i < cpsw->data.slaves; i++)
			if (cpsw->slaves[i].ndev->flags & IFF_PROMISC)
				flag = true;

		if (!enable && flag) {
			enable = true;
			dev_err(&ndev->dev, "promiscuity not disabled as the other interface is still in promiscuity mode\n");
		}

		if (enable) {
			/* Enable Bypass */
			cpsw_ale_control_set(ale, 0, ALE_BYPASS, 1);

			dev_dbg(&ndev->dev, "promiscuity enabled\n");
		} else {
			/* Disable Bypass */
			cpsw_ale_control_set(ale, 0, ALE_BYPASS, 0);
			dev_dbg(&ndev->dev, "promiscuity disabled\n");
		}
	} else {
		if (enable) {
			unsigned long timeout = jiffies + HZ;

			/* Disable Learn for all ports (host is port 0 and
			 * slaves are port 1 and up)
			 */
			for (i = 0; i <= cpsw->data.slaves; i++) {
				cpsw_ale_control_set(ale, i,
						     ALE_PORT_NOLEARN, 1);
				cpsw_ale_control_set(ale, i,
						     ALE_PORT_NO_SA_UPDATE, 1);
			}

			/* Clear All Untouched entries */
			cpsw_ale_control_set(ale, 0, ALE_AGEOUT, 1);
			do {
				cpu_relax();
				if (cpsw_ale_control_get(ale, 0, ALE_AGEOUT))
					break;
			} while (time_after(timeout, jiffies));
			cpsw_ale_control_set(ale, 0, ALE_AGEOUT, 1);

			/* Clear all mcast from ALE */
			cpsw_ale_flush_multicast(ale, ALE_ALL_PORTS, -1);
			__hw_addr_ref_unsync_dev(&ndev->mc, ndev, NULL);

			/* Flood All Unicast Packets to Host port */
			cpsw_ale_control_set(ale, 0, ALE_P0_UNI_FLOOD, 1);
			dev_dbg(&ndev->dev, "promiscuity enabled\n");
		} else {
			/* Don't Flood All Unicast Packets to Host port */
			cpsw_ale_control_set(ale, 0, ALE_P0_UNI_FLOOD, 0);

			/* Enable Learn for all ports (host is port 0 and
			 * slaves are port 1 and up)
			 */
			for (i = 0; i <= cpsw->data.slaves; i++) {
				cpsw_ale_control_set(ale, i,
						     ALE_PORT_NOLEARN, 0);
				cpsw_ale_control_set(ale, i,
						     ALE_PORT_NO_SA_UPDATE, 0);
			}
			dev_dbg(&ndev->dev, "promiscuity disabled\n");
		}
	}
}

/**
 * cpsw_set_mc - add a multicast address to the ALE table, or delete it
 * @ndev: device to sync
 * @addr: address to be added or deleted
 * @vid: vlan id, if vid < 0 set/unset address for real device
 * @add: add address if the flag is set or remove otherwise
 */
static int cpsw_set_mc(struct net_device *ndev, const u8 *addr,
		       int vid, int add)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	int mask, flags, ret;

	if (vid < 0) {
		if (cpsw->data.dual_emac)
			vid = cpsw->slaves[priv->emac_port].port_vlan;
		else
			vid = 0;
	}

	mask = cpsw->data.dual_emac ? ALE_PORT_HOST : ALE_ALL_PORTS;
	flags = vid ? ALE_VLAN : 0;

	if (add)
		ret = cpsw_ale_add_mcast(cpsw->ale, addr, mask, flags, vid, 0);
	else
		ret = cpsw_ale_del_mcast(cpsw->ale, addr, 0, flags, vid);

	return ret;
}

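/*
 * vlan_for_each() callback: check whether sync_ctx->addr is still referenced
 * on the vlan device @vdev and add (or, when flushing, remove) the matching
 * ALE entry for that vid via cpsw_set_mc().
 */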
static int cpsw_update_vlan_mc(struct net_device *vdev, int vid, void *ctx)
{
	struct addr_sync_ctx *sync_ctx = ctx;
	struct netdev_hw_addr *ha;
	int found = 0, ret = 0;

	if (!vdev || !(vdev->flags & IFF_UP))
		return 0;

	/* vlan address is relevant if its sync_cnt != 0 */
	netdev_for_each_mc_addr(ha, vdev) {
		if (ether_addr_equal(ha->addr, sync_ctx->addr)) {
			found = ha->sync_cnt;
			break;
		}
	}

	if (found)
		sync_ctx->consumed++;

	if (sync_ctx->flush) {
		if (!found)
			cpsw_set_mc(sync_ctx->ndev, sync_ctx->addr, vid, 0);
		return 0;
	}

	if (found)
		ret = cpsw_set_mc(sync_ctx->ndev, sync_ctx->addr, vid, 1);

	return ret;
}

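/*
 * "sync" callback for __hw_addr_ref_sync_dev(): program @addr on each vlan
 * that references it; if not all @num references were consumed by vlans, also
 * program it for the real device (vid == -1).
 */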
static int cpsw_add_mc_addr(struct net_device *ndev, const u8 *addr, int num)
{
	struct addr_sync_ctx sync_ctx;
	int ret;

	sync_ctx.consumed = 0;
	sync_ctx.addr = addr;
	sync_ctx.ndev = ndev;
	sync_ctx.flush = 0;

	ret = vlan_for_each(ndev, cpsw_update_vlan_mc, &sync_ctx);
	if (sync_ctx.consumed < num && !ret)
		ret = cpsw_set_mc(ndev, addr, -1, 1);

	return ret;
}

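/*
 * "unsync" callback for __hw_addr_ref_sync_dev(): drop @addr from vlans that
 * no longer reference it and, once every reference is gone, from the real
 * device as well.
 */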
static int cpsw_del_mc_addr(struct net_device *ndev, const u8 *addr, int num)
{
	struct addr_sync_ctx sync_ctx;

	sync_ctx.consumed = 0;
	sync_ctx.addr = addr;
	sync_ctx.ndev = ndev;
	sync_ctx.flush = 1;

	vlan_for_each(ndev, cpsw_update_vlan_mc, &sync_ctx);
	if (sync_ctx.consumed == num)
		cpsw_set_mc(ndev, addr, -1, 0);

	return 0;
}

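/*
 * vlan_for_each() callback used by cpsw_purge_all_mc(): unconditionally
 * remove sync_ctx->addr from the ALE for any vlan that still has it synced.
 */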
static int cpsw_purge_vlan_mc(struct net_device *vdev, int vid, void *ctx)
{
	struct addr_sync_ctx *sync_ctx = ctx;
	struct netdev_hw_addr *ha;
	int found = 0;

	if (!vdev || !(vdev->flags & IFF_UP))
		return 0;

	/* vlan address is relevant if its sync_cnt != 0 */
	netdev_for_each_mc_addr(ha, vdev) {
		if (ether_addr_equal(ha->addr, sync_ctx->addr)) {
			found = ha->sync_cnt;
			break;
		}
	}

	if (!found)
		return 0;

	sync_ctx->consumed++;
	cpsw_set_mc(sync_ctx->ndev, sync_ctx->addr, vid, 0);
	return 0;
}

static int cpsw_purge_all_mc(struct net_device *ndev, const u8 *addr, int num)
{
	struct addr_sync_ctx sync_ctx;

	sync_ctx.addr = addr;
	sync_ctx.ndev = ndev;
	sync_ctx.consumed = 0;

	vlan_for_each(ndev, cpsw_purge_vlan_mc, &sync_ctx);
	if (sync_ctx.consumed < num)
		cpsw_set_mc(ndev, addr, -1, 0);

	return 0;
}

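/*
 * .ndo_set_rx_mode handler: propagate IFF_PROMISC/IFF_ALLMULTI to the ALE and
 * resync the multicast address list for the real device and its vlans.
 */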
static void cpsw_ndo_set_rx_mode(struct net_device *ndev)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	int slave_port = -1;

	if (cpsw->data.dual_emac)
		slave_port = priv->emac_port + 1;

	if (ndev->flags & IFF_PROMISC) {
		/* Enable promiscuous mode */
		cpsw_set_promiscious(ndev, true);
		cpsw_ale_set_allmulti(cpsw->ale, IFF_ALLMULTI, slave_port);
		return;
	} else {
		/* Disable promiscuous mode */
		cpsw_set_promiscious(ndev, false);
	}

	/* Restore allmulti on vlans if necessary */
	cpsw_ale_set_allmulti(cpsw->ale,
			      ndev->flags & IFF_ALLMULTI, slave_port);

	/* add/remove mcast address either for real netdev or for vlan */
	__hw_addr_ref_sync_dev(&ndev->mc, ndev, cpsw_add_mc_addr,
			       cpsw_del_mc_addr);
}

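/*
 * Size of the rx buffer built from a page_pool page: payload length plus the
 * reserved headroom plus the skb_shared_info tailroom, aligned as build_skb()
 * expects.
 */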
static unsigned int cpsw_rxbuf_total_len(unsigned int len)
{
	len += CPSW_HEADROOM_NA;
	len += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	return SKB_DATA_ALIGN(len);
}

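/*
 * CPDMA rx completion handler, one frame per page_pool page. The frame is run
 * through the XDP program if one is attached, otherwise (or on XDP_PASS) it
 * is wrapped with build_skb() and passed to the stack; a replacement page is
 * then queued back to the rx channel.
 */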
static void cpsw_rx_handler(void *token, int len, int status)
|
2012-03-18 20:17:54 +00:00
|
|
|
{
|
net: ethernet: ti: cpsw: add XDP support
Add XDP support based on rx page_pool allocator, one frame per page.
Page pool allocator is used with assumption that only one rx_handler
is running simultaneously. DMA map/unmap is reused from page pool
despite there is no need to map whole page.
Due to specific of cpsw, the same TX/RX handler can be used by 2
network devices, so special fields in buffer are added to identify
an interface the frame is destined to. Thus XDP works for both
interfaces, that allows to test xdp redirect between two interfaces
easily. Also, each rx queue have own page pools, but common for both
netdevs.
XDP prog is common for all channels till appropriate changes are added
in XDP infrastructure. Also, once page_pool recycling becomes part of
skb netstack some simplifications can be added, like removing
page_pool_release_page() before skb receive.
In order to keep rx_dev while redirect, that can be somehow used in
future, do flush in rx_handler, that allows to keep rx dev the same
while redirect. It allows to conform with tracing rx_dev pointed
by Jesper.
Also, there is probability, that XDP generic code can be extended to
support multi ndev drivers like this one, using same rx queue for
several ndevs, based on switchdev for instance or else. In this case,
driver can be modified like exposed here:
https://lkml.org/lkml/2019/7/3/243
Acked-by: Jesper Dangaard Brouer <brouer@redhat.com>
Signed-off-by: Ivan Khoronzhuk <ivan.khoronzhuk@linaro.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
2019-07-09 00:34:32 +03:00
|
|
|
struct page *new_page, *page = token;
|
|
|
|
void *pa = page_address(page);
|
|
|
|
struct cpsw_meta_xdp *xmeta = pa + CPSW_XMETA_OFFSET;
|
|
|
|
struct cpsw_common *cpsw = ndev_to_cpsw(xmeta->ndev);
|
|
|
|
int pkt_size = cpsw->rx_packet_max;
|
|
|
|
int ret = 0, port, ch = xmeta->ch;
|
2022-01-18 11:22:04 +01:00
|
|
|
int headroom = CPSW_HEADROOM_NA;
|
net: ethernet: ti: cpsw: add XDP support
Add XDP support based on rx page_pool allocator, one frame per page.
Page pool allocator is used with assumption that only one rx_handler
is running simultaneously. DMA map/unmap is reused from page pool
despite there is no need to map whole page.
Due to specific of cpsw, the same TX/RX handler can be used by 2
network devices, so special fields in buffer are added to identify
an interface the frame is destined to. Thus XDP works for both
interfaces, that allows to test xdp redirect between two interfaces
easily. Also, each rx queue have own page pools, but common for both
netdevs.
XDP prog is common for all channels till appropriate changes are added
in XDP infrastructure. Also, once page_pool recycling becomes part of
skb netstack some simplifications can be added, like removing
page_pool_release_page() before skb receive.
In order to keep rx_dev while redirect, that can be somehow used in
future, do flush in rx_handler, that allows to keep rx dev the same
while redirect. It allows to conform with tracing rx_dev pointed
by Jesper.
Also, there is probability, that XDP generic code can be extended to
support multi ndev drivers like this one, using same rx queue for
several ndevs, based on switchdev for instance or else. In this case,
driver can be modified like exposed here:
https://lkml.org/lkml/2019/7/3/243
Acked-by: Jesper Dangaard Brouer <brouer@redhat.com>
Signed-off-by: Ivan Khoronzhuk <ivan.khoronzhuk@linaro.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
2019-07-09 00:34:32 +03:00
|
|
|
struct net_device *ndev = xmeta->ndev;
|
2025-03-18 12:46:11 +01:00
|
|
|
u32 metasize = 0;
|
2018-11-12 16:00:22 +02:00
|
|
|
struct cpsw_priv *priv;
|
net: ethernet: ti: cpsw: add XDP support
Add XDP support based on rx page_pool allocator, one frame per page.
Page pool allocator is used with assumption that only one rx_handler
is running simultaneously. DMA map/unmap is reused from page pool
despite there is no need to map whole page.
Due to specific of cpsw, the same TX/RX handler can be used by 2
network devices, so special fields in buffer are added to identify
an interface the frame is destined to. Thus XDP works for both
interfaces, that allows to test xdp redirect between two interfaces
easily. Also, each rx queue have own page pools, but common for both
netdevs.
XDP prog is common for all channels till appropriate changes are added
in XDP infrastructure. Also, once page_pool recycling becomes part of
skb netstack some simplifications can be added, like removing
page_pool_release_page() before skb receive.
In order to keep rx_dev while redirect, that can be somehow used in
future, do flush in rx_handler, that allows to keep rx dev the same
while redirect. It allows to conform with tracing rx_dev pointed
by Jesper.
Also, there is probability, that XDP generic code can be extended to
support multi ndev drivers like this one, using same rx queue for
several ndevs, based on switchdev for instance or else. In this case,
driver can be modified like exposed here:
https://lkml.org/lkml/2019/7/3/243
Acked-by: Jesper Dangaard Brouer <brouer@redhat.com>
Signed-off-by: Ivan Khoronzhuk <ivan.khoronzhuk@linaro.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
2019-07-09 00:34:32 +03:00
|
|
|
struct page_pool *pool;
|
|
|
|
struct sk_buff *skb;
|
|
|
|
struct xdp_buff xdp;
|
|
|
|
dma_addr_t dma;
|
2012-03-18 20:17:54 +00:00
|
|
|
|
net: ethernet: ti: cpsw: add XDP support
Add XDP support based on rx page_pool allocator, one frame per page.
Page pool allocator is used with assumption that only one rx_handler
is running simultaneously. DMA map/unmap is reused from page pool
despite there is no need to map whole page.
Due to specific of cpsw, the same TX/RX handler can be used by 2
network devices, so special fields in buffer are added to identify
an interface the frame is destined to. Thus XDP works for both
interfaces, that allows to test xdp redirect between two interfaces
easily. Also, each rx queue have own page pools, but common for both
netdevs.
XDP prog is common for all channels till appropriate changes are added
in XDP infrastructure. Also, once page_pool recycling becomes part of
skb netstack some simplifications can be added, like removing
page_pool_release_page() before skb receive.
In order to keep rx_dev while redirect, that can be somehow used in
future, do flush in rx_handler, that allows to keep rx dev the same
while redirect. It allows to conform with tracing rx_dev pointed
by Jesper.
Also, there is probability, that XDP generic code can be extended to
support multi ndev drivers like this one, using same rx queue for
several ndevs, based on switchdev for instance or else. In this case,
driver can be modified like exposed here:
https://lkml.org/lkml/2019/7/3/243
Acked-by: Jesper Dangaard Brouer <brouer@redhat.com>
Signed-off-by: Ivan Khoronzhuk <ivan.khoronzhuk@linaro.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
2019-07-09 00:34:32 +03:00
|
|
|
if (cpsw->data.dual_emac && status >= 0) {
|
2018-07-31 01:05:39 +03:00
|
|
|
port = CPDMA_RX_SOURCE_PORT(status);
|
net: ethernet: ti: cpsw: add XDP support
Add XDP support based on rx page_pool allocator, one frame per page.
Page pool allocator is used with assumption that only one rx_handler
is running simultaneously. DMA map/unmap is reused from page pool
despite there is no need to map whole page.
Due to specific of cpsw, the same TX/RX handler can be used by 2
network devices, so special fields in buffer are added to identify
an interface the frame is destined to. Thus XDP works for both
interfaces, that allows to test xdp redirect between two interfaces
easily. Also, each rx queue have own page pools, but common for both
netdevs.
XDP prog is common for all channels till appropriate changes are added
in XDP infrastructure. Also, once page_pool recycling becomes part of
skb netstack some simplifications can be added, like removing
page_pool_release_page() before skb receive.
In order to keep rx_dev while redirect, that can be somehow used in
future, do flush in rx_handler, that allows to keep rx dev the same
while redirect. It allows to conform with tracing rx_dev pointed
by Jesper.
Also, there is probability, that XDP generic code can be extended to
support multi ndev drivers like this one, using same rx queue for
several ndevs, based on switchdev for instance or else. In this case,
driver can be modified like exposed here:
https://lkml.org/lkml/2019/7/3/243
Acked-by: Jesper Dangaard Brouer <brouer@redhat.com>
Signed-off-by: Ivan Khoronzhuk <ivan.khoronzhuk@linaro.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
2019-07-09 00:34:32 +03:00
|
|
|
if (port)
|
2018-07-31 01:05:39 +03:00
|
|
|
ndev = cpsw->slaves[--port].ndev;
|
|
|
|
}
|
2013-02-11 09:52:20 +00:00
|
|
|
|
net: ethernet: ti: cpsw: add XDP support
Add XDP support based on rx page_pool allocator, one frame per page.
Page pool allocator is used with assumption that only one rx_handler
is running simultaneously. DMA map/unmap is reused from page pool
despite there is no need to map whole page.
Due to specific of cpsw, the same TX/RX handler can be used by 2
network devices, so special fields in buffer are added to identify
an interface the frame is destined to. Thus XDP works for both
interfaces, that allows to test xdp redirect between two interfaces
easily. Also, each rx queue have own page pools, but common for both
netdevs.
XDP prog is common for all channels till appropriate changes are added
in XDP infrastructure. Also, once page_pool recycling becomes part of
skb netstack some simplifications can be added, like removing
page_pool_release_page() before skb receive.
In order to keep rx_dev while redirect, that can be somehow used in
future, do flush in rx_handler, that allows to keep rx dev the same
while redirect. It allows to conform with tracing rx_dev pointed
by Jesper.
Also, there is probability, that XDP generic code can be extended to
support multi ndev drivers like this one, using same rx queue for
several ndevs, based on switchdev for instance or else. In this case,
driver can be modified like exposed here:
https://lkml.org/lkml/2019/7/3/243
Acked-by: Jesper Dangaard Brouer <brouer@redhat.com>
Signed-off-by: Ivan Khoronzhuk <ivan.khoronzhuk@linaro.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
2019-07-09 00:34:32 +03:00
|
|
|
priv = netdev_priv(ndev);
|
|
|
|
pool = cpsw->page_pool[ch];
|
2014-04-10 14:23:23 +05:30
|
|
|
if (unlikely(status < 0) || unlikely(!netif_running(ndev))) {
|
2017-01-19 18:58:26 +02:00
|
|
|
/* In dual emac mode check for all interfaces */
|
2017-02-14 16:02:36 +02:00
|
|
|
if (cpsw->data.dual_emac && cpsw->usage_count &&
|
2017-01-19 18:58:26 +02:00
|
|
|
(status >= 0)) {
|
2014-09-10 16:38:09 +05:30
|
|
|
/* The packet received is for the interface which
|
|
|
|
* is already down and the other interface is up
|
2015-03-06 20:49:12 -08:00
|
|
|
* and running, instead of freeing which results
|
2014-09-10 16:38:09 +05:30
|
|
|
* in reducing of the number of rx descriptor in
|
net: ethernet: ti: cpsw: add XDP support
Add XDP support based on rx page_pool allocator, one frame per page.
Page pool allocator is used with assumption that only one rx_handler
is running simultaneously. DMA map/unmap is reused from page pool
despite there is no need to map whole page.
Due to specific of cpsw, the same TX/RX handler can be used by 2
network devices, so special fields in buffer are added to identify
an interface the frame is destined to. Thus XDP works for both
interfaces, that allows to test xdp redirect between two interfaces
easily. Also, each rx queue have own page pools, but common for both
netdevs.
XDP prog is common for all channels till appropriate changes are added
in XDP infrastructure. Also, once page_pool recycling becomes part of
skb netstack some simplifications can be added, like removing
page_pool_release_page() before skb receive.
In order to keep rx_dev while redirect, that can be somehow used in
future, do flush in rx_handler, that allows to keep rx dev the same
while redirect. It allows to conform with tracing rx_dev pointed
by Jesper.
Also, there is probability, that XDP generic code can be extended to
support multi ndev drivers like this one, using same rx queue for
several ndevs, based on switchdev for instance or else. In this case,
driver can be modified like exposed here:
https://lkml.org/lkml/2019/7/3/243
Acked-by: Jesper Dangaard Brouer <brouer@redhat.com>
Signed-off-by: Ivan Khoronzhuk <ivan.khoronzhuk@linaro.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
2019-07-09 00:34:32 +03:00
|
|
|
* DMA engine, requeue page back to cpdma.
|
2014-09-10 16:38:09 +05:30
|
|
|
*/
|
net: ethernet: ti: cpsw: add XDP support
Add XDP support based on rx page_pool allocator, one frame per page.
Page pool allocator is used with assumption that only one rx_handler
is running simultaneously. DMA map/unmap is reused from page pool
despite there is no need to map whole page.
Due to specific of cpsw, the same TX/RX handler can be used by 2
network devices, so special fields in buffer are added to identify
an interface the frame is destined to. Thus XDP works for both
interfaces, that allows to test xdp redirect between two interfaces
easily. Also, each rx queue have own page pools, but common for both
netdevs.
XDP prog is common for all channels till appropriate changes are added
in XDP infrastructure. Also, once page_pool recycling becomes part of
skb netstack some simplifications can be added, like removing
page_pool_release_page() before skb receive.
In order to keep rx_dev while redirect, that can be somehow used in
future, do flush in rx_handler, that allows to keep rx dev the same
while redirect. It allows to conform with tracing rx_dev pointed
by Jesper.
Also, there is probability, that XDP generic code can be extended to
support multi ndev drivers like this one, using same rx queue for
several ndevs, based on switchdev for instance or else. In this case,
driver can be modified like exposed here:
https://lkml.org/lkml/2019/7/3/243
Acked-by: Jesper Dangaard Brouer <brouer@redhat.com>
Signed-off-by: Ivan Khoronzhuk <ivan.khoronzhuk@linaro.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
2019-07-09 00:34:32 +03:00
|
|
|
new_page = page;
|
2014-09-10 16:38:09 +05:30
|
|
|
goto requeue;
|
|
|
|
}
|
|
|
|
|
net: ethernet: ti: cpsw: add XDP support
Add XDP support based on rx page_pool allocator, one frame per page.
Page pool allocator is used with assumption that only one rx_handler
is running simultaneously. DMA map/unmap is reused from page pool
despite there is no need to map whole page.
Due to specific of cpsw, the same TX/RX handler can be used by 2
network devices, so special fields in buffer are added to identify
an interface the frame is destined to. Thus XDP works for both
interfaces, that allows to test xdp redirect between two interfaces
easily. Also, each rx queue have own page pools, but common for both
netdevs.
XDP prog is common for all channels till appropriate changes are added
in XDP infrastructure. Also, once page_pool recycling becomes part of
skb netstack some simplifications can be added, like removing
page_pool_release_page() before skb receive.
In order to keep rx_dev while redirect, that can be somehow used in
future, do flush in rx_handler, that allows to keep rx dev the same
while redirect. It allows to conform with tracing rx_dev pointed
by Jesper.
Also, there is probability, that XDP generic code can be extended to
support multi ndev drivers like this one, using same rx queue for
several ndevs, based on switchdev for instance or else. In this case,
driver can be modified like exposed here:
https://lkml.org/lkml/2019/7/3/243
Acked-by: Jesper Dangaard Brouer <brouer@redhat.com>
Signed-off-by: Ivan Khoronzhuk <ivan.khoronzhuk@linaro.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
2019-07-09 00:34:32 +03:00
|
|
|
/* the interface is going down, pages are purged */
|
|
|
|
page_pool_recycle_direct(pool, page);
|
2012-03-18 20:17:54 +00:00
|
|
|
return;
|
|
|
|
}
|
2013-04-23 07:31:39 +00:00
|
|
|
|
net: ethernet: ti: cpsw: add XDP support
Add XDP support based on rx page_pool allocator, one frame per page.
Page pool allocator is used with assumption that only one rx_handler
is running simultaneously. DMA map/unmap is reused from page pool
despite there is no need to map whole page.
Due to specific of cpsw, the same TX/RX handler can be used by 2
network devices, so special fields in buffer are added to identify
an interface the frame is destined to. Thus XDP works for both
interfaces, that allows to test xdp redirect between two interfaces
easily. Also, each rx queue have own page pools, but common for both
netdevs.
XDP prog is common for all channels till appropriate changes are added
in XDP infrastructure. Also, once page_pool recycling becomes part of
skb netstack some simplifications can be added, like removing
page_pool_release_page() before skb receive.
In order to keep rx_dev while redirect, that can be somehow used in
future, do flush in rx_handler, that allows to keep rx dev the same
while redirect. It allows to conform with tracing rx_dev pointed
by Jesper.
Also, there is probability, that XDP generic code can be extended to
support multi ndev drivers like this one, using same rx queue for
several ndevs, based on switchdev for instance or else. In this case,
driver can be modified like exposed here:
https://lkml.org/lkml/2019/7/3/243
Acked-by: Jesper Dangaard Brouer <brouer@redhat.com>
Signed-off-by: Ivan Khoronzhuk <ivan.khoronzhuk@linaro.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
2019-07-09 00:34:32 +03:00
|
|
|
new_page = page_pool_dev_alloc_pages(pool);
|
|
|
|
if (unlikely(!new_page)) {
|
|
|
|
new_page = page;
|
|
|
|
ndev->stats.rx_dropped++;
|
|
|
|
goto requeue;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (priv->xdp_prog) {
|
2022-01-18 11:22:04 +01:00
|
|
|
int size = len;
|
2020-12-22 22:09:28 +01:00
|
|
|
|
2020-12-22 22:09:29 +01:00
|
|
|
xdp_init_buff(&xdp, PAGE_SIZE, &priv->xdp_rxq[ch]);
|
net: ethernet: ti: cpsw: add XDP support
Add XDP support based on rx page_pool allocator, one frame per page.
Page pool allocator is used with assumption that only one rx_handler
is running simultaneously. DMA map/unmap is reused from page pool
despite there is no need to map whole page.
Due to specific of cpsw, the same TX/RX handler can be used by 2
network devices, so special fields in buffer are added to identify
an interface the frame is destined to. Thus XDP works for both
interfaces, that allows to test xdp redirect between two interfaces
easily. Also, each rx queue have own page pools, but common for both
netdevs.
XDP prog is common for all channels till appropriate changes are added
in XDP infrastructure. Also, once page_pool recycling becomes part of
skb netstack some simplifications can be added, like removing
page_pool_release_page() before skb receive.
In order to keep rx_dev while redirect, that can be somehow used in
future, do flush in rx_handler, that allows to keep rx dev the same
while redirect. It allows to conform with tracing rx_dev pointed
by Jesper.
Also, there is probability, that XDP generic code can be extended to
support multi ndev drivers like this one, using same rx queue for
several ndevs, based on switchdev for instance or else. In this case,
driver can be modified like exposed here:
https://lkml.org/lkml/2019/7/3/243
Acked-by: Jesper Dangaard Brouer <brouer@redhat.com>
Signed-off-by: Ivan Khoronzhuk <ivan.khoronzhuk@linaro.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
2019-07-09 00:34:32 +03:00
|
|
|
if (status & CPDMA_RX_VLAN_ENCAP) {
|
2020-12-22 22:09:29 +01:00
|
|
|
headroom += CPSW_RX_VLAN_ENCAP_HDR_SIZE;
|
|
|
|
size -= CPSW_RX_VLAN_ENCAP_HDR_SIZE;
|
net: ethernet: ti: cpsw: add XDP support
Add XDP support based on rx page_pool allocator, one frame per page.
Page pool allocator is used with assumption that only one rx_handler
is running simultaneously. DMA map/unmap is reused from page pool
despite there is no need to map whole page.
Due to specific of cpsw, the same TX/RX handler can be used by 2
network devices, so special fields in buffer are added to identify
an interface the frame is destined to. Thus XDP works for both
interfaces, that allows to test xdp redirect between two interfaces
easily. Also, each rx queue have own page pools, but common for both
netdevs.
XDP prog is common for all channels till appropriate changes are added
in XDP infrastructure. Also, once page_pool recycling becomes part of
skb netstack some simplifications can be added, like removing
page_pool_release_page() before skb receive.
In order to keep rx_dev while redirect, that can be somehow used in
future, do flush in rx_handler, that allows to keep rx dev the same
while redirect. It allows to conform with tracing rx_dev pointed
by Jesper.
Also, there is probability, that XDP generic code can be extended to
support multi ndev drivers like this one, using same rx queue for
several ndevs, based on switchdev for instance or else. In this case,
driver can be modified like exposed here:
https://lkml.org/lkml/2019/7/3/243
Acked-by: Jesper Dangaard Brouer <brouer@redhat.com>
Signed-off-by: Ivan Khoronzhuk <ivan.khoronzhuk@linaro.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
2019-07-09 00:34:32 +03:00
|
|
|
}
|
|
|
|
|
2025-03-18 12:46:11 +01:00
|
|
|
xdp_prepare_buff(&xdp, pa, headroom, size, true);
|
net: ethernet: ti: cpsw: add XDP support
Add XDP support based on rx page_pool allocator, one frame per page.
Page pool allocator is used with assumption that only one rx_handler
is running simultaneously. DMA map/unmap is reused from page pool
despite there is no need to map whole page.
Due to specific of cpsw, the same TX/RX handler can be used by 2
network devices, so special fields in buffer are added to identify
an interface the frame is destined to. Thus XDP works for both
interfaces, that allows to test xdp redirect between two interfaces
easily. Also, each rx queue have own page pools, but common for both
netdevs.
XDP prog is common for all channels till appropriate changes are added
in XDP infrastructure. Also, once page_pool recycling becomes part of
skb netstack some simplifications can be added, like removing
page_pool_release_page() before skb receive.
In order to keep rx_dev while redirect, that can be somehow used in
future, do flush in rx_handler, that allows to keep rx dev the same
while redirect. It allows to conform with tracing rx_dev pointed
by Jesper.
Also, there is probability, that XDP generic code can be extended to
support multi ndev drivers like this one, using same rx queue for
several ndevs, based on switchdev for instance or else. In this case,
driver can be modified like exposed here:
https://lkml.org/lkml/2019/7/3/243
Acked-by: Jesper Dangaard Brouer <brouer@redhat.com>
Signed-off-by: Ivan Khoronzhuk <ivan.khoronzhuk@linaro.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
2019-07-09 00:34:32 +03:00
|
|
|
|
2019-11-20 00:19:17 +02:00
|
|
|
port = priv->emac_port + cpsw->data.dual_emac;
|
2021-02-03 19:06:17 +01:00
|
|
|
ret = cpsw_run_xdp(priv, ch, &xdp, page, port, &len);
|
net: ethernet: ti: cpsw: add XDP support
Add XDP support based on rx page_pool allocator, one frame per page.
Page pool allocator is used with assumption that only one rx_handler
is running simultaneously. DMA map/unmap is reused from page pool
despite there is no need to map whole page.
Due to specific of cpsw, the same TX/RX handler can be used by 2
network devices, so special fields in buffer are added to identify
an interface the frame is destined to. Thus XDP works for both
interfaces, that allows to test xdp redirect between two interfaces
easily. Also, each rx queue have own page pools, but common for both
netdevs.
XDP prog is common for all channels till appropriate changes are added
in XDP infrastructure. Also, once page_pool recycling becomes part of
skb netstack some simplifications can be added, like removing
page_pool_release_page() before skb receive.
In order to keep rx_dev while redirect, that can be somehow used in
future, do flush in rx_handler, that allows to keep rx dev the same
while redirect. It allows to conform with tracing rx_dev pointed
by Jesper.
Also, there is probability, that XDP generic code can be extended to
support multi ndev drivers like this one, using same rx queue for
several ndevs, based on switchdev for instance or else. In this case,
driver can be modified like exposed here:
https://lkml.org/lkml/2019/7/3/243
Acked-by: Jesper Dangaard Brouer <brouer@redhat.com>
Signed-off-by: Ivan Khoronzhuk <ivan.khoronzhuk@linaro.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
2019-07-09 00:34:32 +03:00
|
|
|
if (ret != CPSW_XDP_PASS)
|
|
|
|
goto requeue;
|
|
|
|
|
|
|
|
headroom = xdp.data - xdp.data_hard_start;
|
2025-03-18 12:46:11 +01:00
|
|
|
metasize = xdp.data - xdp.data_meta;
|
net: ethernet: ti: cpsw: add XDP support
Add XDP support based on rx page_pool allocator, one frame per page.
Page pool allocator is used with assumption that only one rx_handler
is running simultaneously. DMA map/unmap is reused from page pool
despite there is no need to map whole page.
Due to specific of cpsw, the same TX/RX handler can be used by 2
network devices, so special fields in buffer are added to identify
an interface the frame is destined to. Thus XDP works for both
interfaces, that allows to test xdp redirect between two interfaces
easily. Also, each rx queue have own page pools, but common for both
netdevs.
XDP prog is common for all channels till appropriate changes are added
in XDP infrastructure. Also, once page_pool recycling becomes part of
skb netstack some simplifications can be added, like removing
page_pool_release_page() before skb receive.
In order to keep rx_dev while redirect, that can be somehow used in
future, do flush in rx_handler, that allows to keep rx dev the same
while redirect. It allows to conform with tracing rx_dev pointed
by Jesper.
Also, there is probability, that XDP generic code can be extended to
support multi ndev drivers like this one, using same rx queue for
several ndevs, based on switchdev for instance or else. In this case,
driver can be modified like exposed here:
https://lkml.org/lkml/2019/7/3/243
Acked-by: Jesper Dangaard Brouer <brouer@redhat.com>
Signed-off-by: Ivan Khoronzhuk <ivan.khoronzhuk@linaro.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
2019-07-09 00:34:32 +03:00
|
|
|
|
|
|
|
/* XDP prog can modify vlan tag, so can't use encap header */
|
|
|
|
status &= ~CPDMA_RX_VLAN_ENCAP;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* pass skb to netstack if no XDP prog or returned XDP_PASS */
|
|
|
|
skb = build_skb(pa, cpsw_rxbuf_total_len(pkt_size));
|
|
|
|
if (!skb) {
|
2014-03-10 13:12:23 +01:00
|
|
|
ndev->stats.rx_dropped++;
|
net: ethernet: ti: cpsw: add XDP support
Add XDP support based on rx page_pool allocator, one frame per page.
Page pool allocator is used with assumption that only one rx_handler
is running simultaneously. DMA map/unmap is reused from page pool
despite there is no need to map whole page.
Due to specific of cpsw, the same TX/RX handler can be used by 2
network devices, so special fields in buffer are added to identify
an interface the frame is destined to. Thus XDP works for both
interfaces, that allows to test xdp redirect between two interfaces
easily. Also, each rx queue have own page pools, but common for both
netdevs.
XDP prog is common for all channels till appropriate changes are added
in XDP infrastructure. Also, once page_pool recycling becomes part of
skb netstack some simplifications can be added, like removing
page_pool_release_page() before skb receive.
In order to keep rx_dev while redirect, that can be somehow used in
future, do flush in rx_handler, that allows to keep rx dev the same
while redirect. It allows to conform with tracing rx_dev pointed
by Jesper.
Also, there is probability, that XDP generic code can be extended to
support multi ndev drivers like this one, using same rx queue for
several ndevs, based on switchdev for instance or else. In this case,
driver can be modified like exposed here:
https://lkml.org/lkml/2019/7/3/243
Acked-by: Jesper Dangaard Brouer <brouer@redhat.com>
Signed-off-by: Ivan Khoronzhuk <ivan.khoronzhuk@linaro.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
2019-07-09 00:34:32 +03:00
|
|
|
page_pool_recycle_direct(pool, page);
|
|
|
|
goto requeue;
|
2012-03-18 20:17:54 +00:00
|
|
|
}
|
|
|
|
|
net: ethernet: ti: cpsw: add XDP support
Add XDP support based on rx page_pool allocator, one frame per page.
Page pool allocator is used with assumption that only one rx_handler
is running simultaneously. DMA map/unmap is reused from page pool
despite there is no need to map whole page.
Due to specific of cpsw, the same TX/RX handler can be used by 2
network devices, so special fields in buffer are added to identify
an interface the frame is destined to. Thus XDP works for both
interfaces, that allows to test xdp redirect between two interfaces
easily. Also, each rx queue have own page pools, but common for both
netdevs.
XDP prog is common for all channels till appropriate changes are added
in XDP infrastructure. Also, once page_pool recycling becomes part of
skb netstack some simplifications can be added, like removing
page_pool_release_page() before skb receive.
In order to keep rx_dev while redirect, that can be somehow used in
future, do flush in rx_handler, that allows to keep rx dev the same
while redirect. It allows to conform with tracing rx_dev pointed
by Jesper.
Also, there is probability, that XDP generic code can be extended to
support multi ndev drivers like this one, using same rx queue for
several ndevs, based on switchdev for instance or else. In this case,
driver can be modified like exposed here:
https://lkml.org/lkml/2019/7/3/243
Acked-by: Jesper Dangaard Brouer <brouer@redhat.com>
Signed-off-by: Ivan Khoronzhuk <ivan.khoronzhuk@linaro.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
2019-07-09 00:34:32 +03:00
|
|
|
skb_reserve(skb, headroom);
|
|
|
|
skb_put(skb, len);
|
2025-03-18 12:46:11 +01:00
|
|
|
if (metasize)
|
|
|
|
skb_metadata_set(skb, metasize);
|
net: ethernet: ti: cpsw: add XDP support
Add XDP support based on rx page_pool allocator, one frame per page.
Page pool allocator is used with assumption that only one rx_handler
is running simultaneously. DMA map/unmap is reused from page pool
despite there is no need to map whole page.
Due to specific of cpsw, the same TX/RX handler can be used by 2
network devices, so special fields in buffer are added to identify
an interface the frame is destined to. Thus XDP works for both
interfaces, that allows to test xdp redirect between two interfaces
easily. Also, each rx queue have own page pools, but common for both
netdevs.
XDP prog is common for all channels till appropriate changes are added
in XDP infrastructure. Also, once page_pool recycling becomes part of
skb netstack some simplifications can be added, like removing
page_pool_release_page() before skb receive.
In order to keep rx_dev while redirect, that can be somehow used in
future, do flush in rx_handler, that allows to keep rx dev the same
while redirect. It allows to conform with tracing rx_dev pointed
by Jesper.
Also, there is probability, that XDP generic code can be extended to
support multi ndev drivers like this one, using same rx queue for
several ndevs, based on switchdev for instance or else. In this case,
driver can be modified like exposed here:
https://lkml.org/lkml/2019/7/3/243
Acked-by: Jesper Dangaard Brouer <brouer@redhat.com>
Signed-off-by: Ivan Khoronzhuk <ivan.khoronzhuk@linaro.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
2019-07-09 00:34:32 +03:00
|
|
|
	skb->dev = ndev;
	if (status & CPDMA_RX_VLAN_ENCAP)
		cpsw_rx_vlan_encap(skb);
	if (priv->rx_ts_enabled)
		cpts_rx_timestamp(cpsw->cpts, skb);
	skb->protocol = eth_type_trans(skb, ndev);

	/* mark skb for recycling */
	skb_mark_for_recycle(skb);
	netif_receive_skb(skb);

	ndev->stats.rx_bytes += len;
	ndev->stats.rx_packets++;

requeue:
	xmeta = page_address(new_page) + CPSW_XMETA_OFFSET;
	xmeta->ndev = ndev;
	xmeta->ch = ch;

	dma = page_pool_get_dma_addr(new_page) + CPSW_HEADROOM_NA;
	ret = cpdma_chan_submit_mapped(cpsw->rxv[ch].ch, new_page, dma,
				       pkt_size, 0);
	if (ret < 0) {
		WARN_ON(ret == -ENOMEM);
		page_pool_recycle_direct(pool, new_page);
	}
}
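
/*
 * Note on the requeue path above: the replacement page is tagged with the
 * receiving net_device and channel in its xmeta area.  Both dual-EMAC
 * interfaces share the same RX handler and per-channel page pools, so this
 * per-page metadata is what lets the handler hand each completed buffer
 * back to the right netdev.  If the descriptor cannot be resubmitted, the
 * page is returned straight to its page_pool instead of being leaked.
 */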

static void _cpsw_adjust_link(struct cpsw_slave *slave,
			      struct cpsw_priv *priv, bool *link)
{
	struct phy_device *phy = slave->phy;
	u32 mac_control = 0;
	u32 slave_port;
	struct cpsw_common *cpsw = priv->cpsw;

	if (!phy)
		return;

	slave_port = cpsw_get_slave_port(slave->slave_num);

	if (phy->link) {
		mac_control = CPSW_SL_CTL_GMII_EN;

		if (phy->speed == 1000)
			mac_control |= CPSW_SL_CTL_GIG;
		if (phy->duplex)
			mac_control |= CPSW_SL_CTL_FULLDUPLEX;

		/* set speed_in input in case RMII mode is used in 100Mbps */
		if (phy->speed == 100)
			mac_control |= CPSW_SL_CTL_IFCTL_A;
		/* in band mode only works in 10Mbps RGMII mode */
		else if ((phy->speed == 10) && phy_interface_is_rgmii(phy))
			mac_control |= CPSW_SL_CTL_EXT_EN; /* In Band mode */

		if (priv->rx_pause)
			mac_control |= CPSW_SL_CTL_RX_FLOW_EN;

		if (priv->tx_pause)
			mac_control |= CPSW_SL_CTL_TX_FLOW_EN;

		if (mac_control != slave->mac_control)
			cpsw_sl_ctl_set(slave->mac_sl, mac_control);

		/* enable forwarding */
		cpsw_ale_control_set(cpsw->ale, slave_port,
				     ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);

		*link = true;

		if (priv->shp_cfg_speed &&
		    priv->shp_cfg_speed != slave->phy->speed &&
		    !cpsw_shp_is_off(priv))
			dev_warn(priv->dev,
				 "Speed was changed, CBS shaper speeds are changed!");
	} else {
		mac_control = 0;
		/* disable forwarding */
		cpsw_ale_control_set(cpsw->ale, slave_port,
				     ALE_PORT_STATE, ALE_PORT_STATE_DISABLE);

		cpsw_sl_wait_for_idle(slave->mac_sl, 100);

		cpsw_sl_ctl_reset(slave->mac_sl);
	}

	if (mac_control != slave->mac_control)
		phy_print_status(phy);

	slave->mac_control = mac_control;
}
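
/*
 * The "CBS shaper speeds" warning above exists because the FIFO shaper
 * parameters are programmed for the link speed that was current when the
 * CBS qdisc was offloaded; if the PHY later renegotiates a different
 * speed, the configured bandwidths no longer match and the user has to
 * reconfigure them.  A typical setup pairs an MQPRIO root with per-queue
 * CBS offload, for example:
 *
 *   tc qdisc replace dev eth0 handle 100: parent root mqprio num_tc 3 \
 *      map 2 2 1 0 2 2 2 2 2 2 2 2 2 2 2 2 queues 1@0 1@1 2@2 hw 1
 *   tc qdisc add dev eth0 parent 100:1 cbs locredit -1440 \
 *      hicredit 60 sendslope -960000 idleslope 40000 offload 1
 *   tc qdisc add dev eth0 parent 100:2 cbs locredit -1470 \
 *      hicredit 62 sendslope -980000 idleslope 20000 offload 1
 */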

static void cpsw_adjust_link(struct net_device *ndev)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	bool link = false;

	for_each_slave(priv, _cpsw_adjust_link, priv, &link);

	if (link) {
		if (cpsw_need_resplit(cpsw))
			cpsw_split_res(cpsw);

		netif_carrier_on(ndev);
		if (netif_running(ndev))
			netif_tx_wake_all_queues(ndev);
	} else {
		netif_carrier_off(ndev);
		netif_tx_stop_all_queues(ndev);
	}
}

static inline void cpsw_add_dual_emac_def_ale_entries(
		struct cpsw_priv *priv, struct cpsw_slave *slave,
		u32 slave_port)
{
	struct cpsw_common *cpsw = priv->cpsw;
	u32 port_mask = 1 << slave_port | ALE_PORT_HOST;

	if (cpsw->version == CPSW_VERSION_1)
		slave_write(slave, slave->port_vlan, CPSW1_PORT_VLAN);
	else
		slave_write(slave, slave->port_vlan, CPSW2_PORT_VLAN);
	cpsw_ale_add_vlan(cpsw->ale, slave->port_vlan, port_mask,
			  port_mask, port_mask, 0);
	cpsw_ale_add_mcast(cpsw->ale, priv->ndev->broadcast,
			   ALE_PORT_HOST, ALE_VLAN, slave->port_vlan, 0);
	cpsw_ale_add_ucast(cpsw->ale, priv->mac_addr,
			   HOST_PORT_NUM, ALE_VLAN |
			   ALE_SECURE, slave->port_vlan);
	cpsw_ale_control_set(cpsw->ale, slave_port,
			     ALE_PORT_DROP_UNKNOWN_VLAN, 1);
}
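
/*
 * In dual-EMAC mode the driver transmits directed packets, so the ALE
 * lookup on the TX side only decides untagged egress.  Multicast entries
 * therefore need only the host port in their port mask (the RX side),
 * which is why the broadcast entry above is added with ALE_PORT_HOST
 * rather than with the slave port included.  The unicast entry is
 * installed as ALE_SECURE on the port VLAN, which helps keep the two
 * EMAC ports separated.
 */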

static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv)
{
	u32 slave_port;
	struct phy_device *phy;
	struct cpsw_common *cpsw = priv->cpsw;

	cpsw_sl_reset(slave->mac_sl, 100);
	cpsw_sl_ctl_reset(slave->mac_sl);

	/* setup priority mapping */
	cpsw_sl_reg_write(slave->mac_sl, CPSW_SL_RX_PRI_MAP,
			  RX_PRIORITY_MAPPING);

	switch (cpsw->version) {
	case CPSW_VERSION_1:
		slave_write(slave, TX_PRIORITY_MAPPING, CPSW1_TX_PRI_MAP);
		/* Increase RX FIFO size to 5 for supporting fullduplex
		 * flow control mode
		 */
		slave_write(slave,
			    (CPSW_MAX_BLKS_TX << CPSW_MAX_BLKS_TX_SHIFT) |
			    CPSW_MAX_BLKS_RX, CPSW1_MAX_BLKS);
		break;
	case CPSW_VERSION_2:
	case CPSW_VERSION_3:
	case CPSW_VERSION_4:
		slave_write(slave, TX_PRIORITY_MAPPING, CPSW2_TX_PRI_MAP);
		/* Increase RX FIFO size to 5 for supporting fullduplex
		 * flow control mode
		 */
		slave_write(slave,
			    (CPSW_MAX_BLKS_TX << CPSW_MAX_BLKS_TX_SHIFT) |
			    CPSW_MAX_BLKS_RX, CPSW2_MAX_BLKS);
		break;
	}

	/* setup max packet size, and mac address */
	cpsw_sl_reg_write(slave->mac_sl, CPSW_SL_RX_MAXLEN,
			  cpsw->rx_packet_max);
	cpsw_set_slave_mac(slave, priv);

	slave->mac_control = 0;	/* no link yet */

	slave_port = cpsw_get_slave_port(slave->slave_num);

	if (cpsw->data.dual_emac)
		cpsw_add_dual_emac_def_ale_entries(priv, slave, slave_port);
	else
		cpsw_ale_add_mcast(cpsw->ale, priv->ndev->broadcast,
				   1 << slave_port, 0, 0, ALE_MCAST_FWD_2);

	if (slave->data->phy_node) {
		phy = of_phy_connect(priv->ndev, slave->data->phy_node,
				     &cpsw_adjust_link, 0, slave->data->phy_if);
		if (!phy) {
			dev_err(priv->dev, "phy \"%pOF\" not found on slave %d\n",
				slave->data->phy_node,
				slave->slave_num);
			return;
		}
	} else {
		phy = phy_connect(priv->ndev, slave->data->phy_id,
				  &cpsw_adjust_link, slave->data->phy_if);
		if (IS_ERR(phy)) {
			dev_err(priv->dev,
				"phy \"%s\" not found on slave %d, err %ld\n",
				slave->data->phy_id, slave->slave_num,
				PTR_ERR(phy));
			return;
		}
	}

	phy->mac_managed_pm = true;

	slave->phy = phy;

	phy_disable_eee(slave->phy);

	phy_attached_info(slave->phy);

	phy_start(slave->phy);

	/* Configure GMII_SEL register */
	if (!IS_ERR(slave->data->ifphy))
		phy_set_mode_ext(slave->data->ifphy, PHY_MODE_ETHERNET,
				 slave->data->phy_if);
	else
		cpsw_phy_sel(cpsw->dev, slave->phy->interface,
			     slave->slave_num);
}

static inline void cpsw_add_default_vlan(struct cpsw_priv *priv)
{
	struct cpsw_common *cpsw = priv->cpsw;
	const int vlan = cpsw->data.default_vlan;
	u32 reg;
	int i;
	int unreg_mcast_mask;

	reg = (cpsw->version == CPSW_VERSION_1) ? CPSW1_PORT_VLAN :
	       CPSW2_PORT_VLAN;

	writel(vlan, &cpsw->host_port_regs->port_vlan);

	for (i = 0; i < cpsw->data.slaves; i++)
		slave_write(cpsw->slaves + i, vlan, reg);

	if (priv->ndev->flags & IFF_ALLMULTI)
		unreg_mcast_mask = ALE_ALL_PORTS;
	else
		unreg_mcast_mask = ALE_PORT_1 | ALE_PORT_2;

	cpsw_ale_add_vlan(cpsw->ale, vlan, ALE_ALL_PORTS,
			  ALE_ALL_PORTS, ALE_ALL_PORTS,
			  unreg_mcast_mask);
}
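
/*
 * In switch mode every slave port and the host port become members of the
 * default VLAN; unregistered multicast is flooded to the slave ports only,
 * unless IFF_ALLMULTI is set, in which case it also reaches the host port.
 */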

static void cpsw_init_host_port(struct cpsw_priv *priv)
{
	u32 fifo_mode;
	u32 control_reg;
	struct cpsw_common *cpsw = priv->cpsw;

	/* soft reset the controller and initialize ale */
	soft_reset("cpsw", &cpsw->regs->soft_reset);
	cpsw_ale_start(cpsw->ale);

	/* switch to vlan aware mode */
	cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM, ALE_VLAN_AWARE,
			     CPSW_ALE_VLAN_AWARE);
	control_reg = readl(&cpsw->regs->control);
	control_reg |= CPSW_VLAN_AWARE | CPSW_RX_VLAN_ENCAP;
	writel(control_reg, &cpsw->regs->control);
	fifo_mode = (cpsw->data.dual_emac) ? CPSW_FIFO_DUAL_MAC_MODE :
		     CPSW_FIFO_NORMAL_MODE;
	writel(fifo_mode, &cpsw->host_port_regs->tx_in_ctl);

	/* setup host port priority mapping */
	writel_relaxed(CPDMA_TX_PRIORITY_MAP,
		       &cpsw->host_port_regs->cpdma_tx_pri_map);
	writel_relaxed(0, &cpsw->host_port_regs->cpdma_rx_chan_map);

	cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM,
			     ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);

	if (!cpsw->data.dual_emac) {
		cpsw_ale_add_ucast(cpsw->ale, priv->mac_addr, HOST_PORT_NUM,
				   0, 0);
		cpsw_ale_add_mcast(cpsw->ale, priv->ndev->broadcast,
				   ALE_PORT_HOST, 0, 0, ALE_MCAST_FWD_2);
	}
}
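
/*
 * CPSW_RX_VLAN_ENCAP asks the hardware to place VLAN encapsulation info
 * ahead of the received packet data; cpsw_rx_vlan_encap() in the RX
 * handler parses and strips it when CPDMA_RX_VLAN_ENCAP is set in the
 * descriptor status.  The switch itself runs VLAN aware, with the host
 * port left in the forwarding state.
 */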

static void cpsw_slave_stop(struct cpsw_slave *slave, struct cpsw_common *cpsw)
{
	u32 slave_port;

	slave_port = cpsw_get_slave_port(slave->slave_num);

	if (!slave->phy)
		return;
	phy_stop(slave->phy);
	phy_disconnect(slave->phy);
	slave->phy = NULL;
	cpsw_ale_control_set(cpsw->ale, slave_port,
			     ALE_PORT_STATE, ALE_PORT_STATE_DISABLE);
	cpsw_sl_reset(slave->mac_sl, 100);
	cpsw_sl_ctl_reset(slave->mac_sl);
}

static int cpsw_restore_vlans(struct net_device *vdev, int vid, void *arg)
{
	struct cpsw_priv *priv = arg;

	if (!vdev)
		return 0;

	cpsw_ndo_vlan_rx_add_vid(priv->ndev, 0, vid);
	return 0;
}

/* restore resources after port reset */
static void cpsw_restore(struct cpsw_priv *priv)
{
	/* restore vlan configurations */
	vlan_for_each(priv->ndev, cpsw_restore_vlans, priv);

	/* restore MQPRIO offload */
	for_each_slave(priv, cpsw_mqprio_resume, priv);

	/* restore CBS offload */
	for_each_slave(priv, cpsw_cbs_resume, priv);
}
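
/*
 * Opening a port resets the slave MACs, so anything that was offloaded to
 * the hardware before the reset (VLAN filter entries, the MQPRIO queue
 * mapping and the CBS shapers) has to be written back.  This is invoked
 * from cpsw_ndo_open() after the slave ports have been reinitialized.
 */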

static int cpsw_ndo_open(struct net_device *ndev)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	int ret;
	u32 reg;

	ret = pm_runtime_resume_and_get(cpsw->dev);
	if (ret < 0)
		return ret;

	netif_carrier_off(ndev);

	/* Notify the stack of the actual queue counts. */
	ret = netif_set_real_num_tx_queues(ndev, cpsw->tx_ch_num);
	if (ret) {
		dev_err(priv->dev, "cannot set real number of tx queues\n");
		goto err_cleanup;
	}

	ret = netif_set_real_num_rx_queues(ndev, cpsw->rx_ch_num);
	if (ret) {
		dev_err(priv->dev, "cannot set real number of rx queues\n");
		goto err_cleanup;
	}

	reg = cpsw->version;

	dev_info(priv->dev, "initializing cpsw version %d.%d (%d)\n",
		 CPSW_MAJOR_VERSION(reg), CPSW_MINOR_VERSION(reg),
		 CPSW_RTL_VERSION(reg));

	/* Initialize host and slave ports */
	if (!cpsw->usage_count)
		cpsw_init_host_port(priv);
	for_each_slave(priv, cpsw_slave_open, priv);

	/* Add default VLAN */
	if (!cpsw->data.dual_emac)
		cpsw_add_default_vlan(priv);
	else
		cpsw_ale_add_vlan(cpsw->ale, cpsw->data.default_vlan,
				  ALE_ALL_PORTS, ALE_ALL_PORTS, 0, 0);

	/* initialize shared resources for every ndev */
	if (!cpsw->usage_count) {
		/* disable priority elevation */
		writel_relaxed(0, &cpsw->regs->ptype);

		/* enable statistics collection only on all ports */
		writel_relaxed(0x7, &cpsw->regs->stat_port_en);

		/* Enable internal fifo flow control */
		writel(0x7, &cpsw->regs->flow_control);

		napi_enable(&cpsw->napi_rx);
		napi_enable(&cpsw->napi_tx);

		if (cpsw->tx_irq_disabled) {
			cpsw->tx_irq_disabled = false;
			enable_irq(cpsw->irqs_table[1]);
		}

		if (cpsw->rx_irq_disabled) {
			cpsw->rx_irq_disabled = false;
			enable_irq(cpsw->irqs_table[0]);
		}

		/* create rxqs for both infs in dual mac as they use same pool
		 * and must be destroyed together when no users.
		 */
		ret = cpsw_create_xdp_rxqs(cpsw);
		if (ret < 0)
			goto err_cleanup;

		ret = cpsw_fill_rx_channels(priv);
		if (ret < 0)
			goto err_cleanup;

		if (cpsw->cpts) {
			if (cpts_register(cpsw->cpts))
				dev_err(priv->dev, "error registering cpts device\n");
			else
				writel(0x10, &cpsw->wr_regs->misc_en);
		}
	}

	cpsw_restore(priv);

	/* Enable Interrupt pacing if configured */
	if (cpsw->coal_intvl != 0) {
		struct ethtool_coalesce coal;

		coal.rx_coalesce_usecs = cpsw->coal_intvl;
		cpsw_set_coalesce(ndev, &coal, NULL, NULL);
	}

	cpdma_ctlr_start(cpsw->dma);
	cpsw_intr_enable(cpsw);
	cpsw->usage_count++;

	return 0;

err_cleanup:
	if (!cpsw->usage_count) {
		napi_disable(&cpsw->napi_rx);
		napi_disable(&cpsw->napi_tx);
		cpdma_ctlr_stop(cpsw->dma);
		cpsw_destroy_xdp_rxqs(cpsw);
	}

	for_each_slave(priv, cpsw_slave_stop, cpsw);
	pm_runtime_put_sync(cpsw->dev);
	netif_carrier_off(priv->ndev);
	return ret;
}
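
/*
 * Both dual-EMAC netdevs sit on one cpsw_common instance, so shared
 * resources (host port setup, XDP rx queues, descriptor refill, CPTS)
 * are only initialized by the first ndo_open, tracked via usage_count.
 * Later opens only bring up their own slave port and restore per-port
 * offloads.
 */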

static int cpsw_ndo_stop(struct net_device *ndev)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;

	cpsw_info(priv, ifdown, "shutting down cpsw device\n");
	__hw_addr_ref_unsync_dev(&ndev->mc, ndev, cpsw_purge_all_mc);
	netif_tx_stop_all_queues(priv->ndev);
	netif_carrier_off(priv->ndev);

	if (cpsw->usage_count <= 1) {
		napi_disable(&cpsw->napi_rx);
		napi_disable(&cpsw->napi_tx);
		cpts_unregister(cpsw->cpts);
		cpsw_intr_disable(cpsw);
		cpdma_ctlr_stop(cpsw->dma);
		cpsw_ale_stop(cpsw->ale);
		cpsw_destroy_xdp_rxqs(cpsw);
	}
	for_each_slave(priv, cpsw_slave_stop, cpsw);

	if (cpsw_need_resplit(cpsw))
		cpsw_split_res(cpsw);

	cpsw->usage_count--;
	pm_runtime_put_sync(cpsw->dev);
	return 0;
}
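
/*
 * Teardown mirrors cpsw_ndo_open(): only the last user (usage_count <= 1)
 * stops NAPI, CPTS, the CPDMA controller and the ALE, and destroys the
 * shared XDP rx queues; earlier closers just stop their own slave port.
 */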

static netdev_tx_t cpsw_ndo_start_xmit(struct sk_buff *skb,
				       struct net_device *ndev)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	struct cpts *cpts = cpsw->cpts;
	struct netdev_queue *txq;
	struct cpdma_chan *txch;
	int ret, q_idx;

	if (skb_put_padto(skb, CPSW_MIN_PACKET_SIZE)) {
		cpsw_err(priv, tx_err, "packet pad failed\n");
		ndev->stats.tx_dropped++;
		return NET_XMIT_DROP;
	}

	if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
	    priv->tx_ts_enabled && cpts_can_timestamp(cpts, skb))
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;

	q_idx = skb_get_queue_mapping(skb);
	if (q_idx >= cpsw->tx_ch_num)
		q_idx = q_idx % cpsw->tx_ch_num;

	txch = cpsw->txv[q_idx].ch;
	txq = netdev_get_tx_queue(ndev, q_idx);
	skb_tx_timestamp(skb);
	ret = cpdma_chan_submit(txch, skb, skb->data, skb->len,
				priv->emac_port + cpsw->data.dual_emac);
	if (unlikely(ret != 0)) {
		cpsw_err(priv, tx_err, "desc submit failed\n");
		goto fail;
	}

	/* If there is no more tx desc left free then we need to
	 * tell the kernel to stop sending us tx frames.
	 */
	if (unlikely(!cpdma_check_free_tx_desc(txch))) {
		netif_tx_stop_queue(txq);

		/* Barrier, so that stop_queue visible to other cpus */
		smp_mb__after_atomic();

		if (cpdma_check_free_tx_desc(txch))
			netif_tx_wake_queue(txq);
	}

	return NETDEV_TX_OK;
fail:
	ndev->stats.tx_dropped++;
	netif_tx_stop_queue(txq);

	/* Barrier, so that stop_queue visible to other cpus */
	smp_mb__after_atomic();

	if (cpdma_check_free_tx_desc(txch))
		netif_tx_wake_queue(txq);

	return NETDEV_TX_BUSY;
}
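
/*
 * The stop/wake dance after cpdma_chan_submit() closes a race with the TX
 * completion handler: the queue is stopped when the channel runs out of
 * descriptors, the barrier makes the stopped state visible to the
 * completion path, and the recheck wakes the queue again if completions
 * already freed descriptors in the meantime.
 */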

static int cpsw_ndo_set_mac_address(struct net_device *ndev, void *p)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct sockaddr *addr = (struct sockaddr *)p;
	struct cpsw_common *cpsw = priv->cpsw;
	int flags = 0;
	u16 vid = 0;
	int ret;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	ret = pm_runtime_resume_and_get(cpsw->dev);
	if (ret < 0)
		return ret;

	if (cpsw->data.dual_emac) {
		vid = cpsw->slaves[priv->emac_port].port_vlan;
		flags = ALE_VLAN;
	}

	cpsw_ale_del_ucast(cpsw->ale, priv->mac_addr, HOST_PORT_NUM,
			   flags, vid);
	cpsw_ale_add_ucast(cpsw->ale, addr->sa_data, HOST_PORT_NUM,
			   flags, vid);

	memcpy(priv->mac_addr, addr->sa_data, ETH_ALEN);
	eth_hw_addr_set(ndev, priv->mac_addr);
	for_each_slave(priv, cpsw_set_slave_mac, priv);

	pm_runtime_put(cpsw->dev);

	return 0;
}

static inline int cpsw_add_vlan_ale_entry(struct cpsw_priv *priv,
					  unsigned short vid)
{
	int ret;
	int unreg_mcast_mask = 0;
	int mcast_mask;
	u32 port_mask;
	struct cpsw_common *cpsw = priv->cpsw;

	if (cpsw->data.dual_emac) {
		port_mask = (1 << (priv->emac_port + 1)) | ALE_PORT_HOST;

		mcast_mask = ALE_PORT_HOST;
		if (priv->ndev->flags & IFF_ALLMULTI)
			unreg_mcast_mask = mcast_mask;
	} else {
		port_mask = ALE_ALL_PORTS;
		mcast_mask = port_mask;

		if (priv->ndev->flags & IFF_ALLMULTI)
			unreg_mcast_mask = ALE_ALL_PORTS;
		else
			unreg_mcast_mask = ALE_PORT_1 | ALE_PORT_2;
	}

	ret = cpsw_ale_add_vlan(cpsw->ale, vid, port_mask, 0, port_mask,
				unreg_mcast_mask);
	if (ret != 0)
		return ret;

	ret = cpsw_ale_add_ucast(cpsw->ale, priv->mac_addr,
				 HOST_PORT_NUM, ALE_VLAN, vid);
	if (ret != 0)
		goto clean_vid;

	ret = cpsw_ale_add_mcast(cpsw->ale, priv->ndev->broadcast,
				 mcast_mask, ALE_VLAN, vid, 0);
	if (ret != 0)
		goto clean_vlan_ucast;
	return 0;

clean_vlan_ucast:
	cpsw_ale_del_ucast(cpsw->ale, priv->mac_addr,
			   HOST_PORT_NUM, ALE_VLAN, vid);
clean_vid:
	cpsw_ale_del_vlan(cpsw->ale, vid, 0);
	return ret;
}
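
/*
 * For dual-EMAC the per-VID multicast mask again contains only the host
 * port, so adding e.g. 33:33:00:00:00:01 ends up as
 *
 *   vid = 1, addr = 33:33:00:00:00:01, port_mask = 0x1
 *   vid = 2, addr = 33:33:00:00:00:01, port_mask = 0x1
 *
 * rather than masks that include the slave ports, which avoids leaking
 * packets between ports that happen to be members of the same VID.
 */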

static int cpsw_ndo_vlan_rx_add_vid(struct net_device *ndev,
				    __be16 proto, u16 vid)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	int ret;

	if (vid == cpsw->data.default_vlan)
		return 0;

	ret = pm_runtime_resume_and_get(cpsw->dev);
	if (ret < 0)
		return ret;

	if (cpsw->data.dual_emac) {
		/* In dual EMAC, reserved VLAN id should not be used for
		 * creating VLAN interfaces as this can break the dual
		 * EMAC port separation
		 */
		int i;

		for (i = 0; i < cpsw->data.slaves; i++) {
			if (vid == cpsw->slaves[i].port_vlan) {
				ret = -EINVAL;
				goto err;
			}
		}
	}

	dev_info(priv->dev, "Adding vlanid %d to vlan filter\n", vid);
	ret = cpsw_add_vlan_ale_entry(priv, vid);
err:
	pm_runtime_put(cpsw->dev);
	return ret;
}

static int cpsw_ndo_vlan_rx_kill_vid(struct net_device *ndev,
				     __be16 proto, u16 vid)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	int ret;

	if (vid == cpsw->data.default_vlan)
		return 0;

	ret = pm_runtime_resume_and_get(cpsw->dev);
	if (ret < 0)
		return ret;

	if (cpsw->data.dual_emac) {
		int i;

		for (i = 0; i < cpsw->data.slaves; i++) {
			if (vid == cpsw->slaves[i].port_vlan)
				goto err;
		}
	}

	dev_info(priv->dev, "removing vlanid %d from vlan filter\n", vid);
	ret = cpsw_ale_del_vlan(cpsw->ale, vid, 0);
	ret |= cpsw_ale_del_ucast(cpsw->ale, priv->mac_addr,
				  HOST_PORT_NUM, ALE_VLAN, vid);
	ret |= cpsw_ale_del_mcast(cpsw->ale, priv->ndev->broadcast,
				  0, ALE_VLAN, vid);
	ret |= cpsw_ale_flush_multicast(cpsw->ale, ALE_PORT_HOST, vid);
err:
	pm_runtime_put(cpsw->dev);
	return ret;
}
|
|
|
|
|
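
/* ndo_xdp_xmit handler: queue up to @n XDP frames on the TX channels of
 * this port. Returns the number of frames actually submitted; frames that
 * were not consumed remain owned by the caller.
 */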
static int cpsw_ndo_xdp_xmit(struct net_device *ndev, int n,
			     struct xdp_frame **frames, u32 flags)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	struct xdp_frame *xdpf;
	int i, nxmit = 0, port;

	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
		return -EINVAL;

	for (i = 0; i < n; i++) {
		xdpf = frames[i];
		if (xdpf->len < CPSW_MIN_PACKET_SIZE)
			break;

		port = priv->emac_port + cpsw->data.dual_emac;
		if (cpsw_xdp_tx_frame(priv, xdpf, NULL, port))
			break;
		nxmit++;
	}

	return nxmit;
}
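
/* Netpoll hook: with the cpsw interrupts masked, run the RX and TX
 * completion handlers directly so netconsole and similar users can make
 * progress without interrupt delivery.
 */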
#ifdef CONFIG_NET_POLL_CONTROLLER
static void cpsw_ndo_poll_controller(struct net_device *ndev)
{
	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);

	cpsw_intr_disable(cpsw);
	cpsw_rx_interrupt(cpsw->irqs_table[0], cpsw);
	cpsw_tx_interrupt(cpsw->irqs_table[1], cpsw);
	cpsw_intr_enable(cpsw);
}
#endif

/* We need a custom implementation of phy_do_ioctl_running() because in switch
 * mode, dev->phydev may be different than the phy of the active_slave. We need
 * to operate on the locally saved phy instead.
 */
static int cpsw_ndo_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
{
	struct cpsw_priv *priv = netdev_priv(dev);
	struct cpsw_common *cpsw = priv->cpsw;
	int slave_no = cpsw_slave_index(cpsw, priv);
	struct phy_device *phy;

	if (!netif_running(dev))
		return -EINVAL;

	phy = cpsw->slaves[slave_no].phy;
	if (phy)
		return phy_mii_ioctl(phy, req, cmd);

	return -EOPNOTSUPP;
}
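
/* net_device_ops shared by both cpsw ports in dual EMAC mode (and by the
 * single net_device in switch mode).
 */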
static const struct net_device_ops cpsw_netdev_ops = {
	.ndo_open		= cpsw_ndo_open,
	.ndo_stop		= cpsw_ndo_stop,
	.ndo_start_xmit		= cpsw_ndo_start_xmit,
	.ndo_set_mac_address	= cpsw_ndo_set_mac_address,
	.ndo_eth_ioctl		= cpsw_ndo_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_tx_timeout		= cpsw_ndo_tx_timeout,
	.ndo_set_rx_mode	= cpsw_ndo_set_rx_mode,
	.ndo_set_tx_maxrate	= cpsw_ndo_set_tx_maxrate,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= cpsw_ndo_poll_controller,
#endif
	.ndo_vlan_rx_add_vid	= cpsw_ndo_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= cpsw_ndo_vlan_rx_kill_vid,
	.ndo_setup_tc		= cpsw_ndo_setup_tc,
	.ndo_bpf		= cpsw_ndo_bpf,
	.ndo_xdp_xmit		= cpsw_ndo_xdp_xmit,
	.ndo_hwtstamp_get	= cpsw_hwtstamp_get,
	.ndo_hwtstamp_set	= cpsw_hwtstamp_set,
};
static void cpsw_get_drvinfo(struct net_device *ndev,
			     struct ethtool_drvinfo *info)
{
	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
	struct platform_device *pdev = to_platform_device(cpsw->dev);

	strscpy(info->driver, "cpsw", sizeof(info->driver));
	strscpy(info->version, "1.0", sizeof(info->version));
	strscpy(info->bus_info, pdev->name, sizeof(info->bus_info));
}
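
/* ethtool -A handler: record the requested pause settings, then re-run the
 * link adjustment on every slave so the MACs pick up the new flow-control
 * configuration.
 */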
static int cpsw_set_pauseparam(struct net_device *ndev,
			       struct ethtool_pauseparam *pause)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	bool link;

	priv->rx_pause = pause->rx_pause ? true : false;
	priv->tx_pause = pause->tx_pause ? true : false;

	for_each_slave(priv, _cpsw_adjust_link, priv, &link);
	return 0;
}
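
/* ethtool -L handler: thin wrapper that reuses the common channel setup
 * code with this driver's RX handler.
 */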
static int cpsw_set_channels(struct net_device *ndev,
			     struct ethtool_channels *chs)
{
	return cpsw_set_channels_common(ndev, chs, cpsw_rx_handler);
}

static const struct ethtool_ops cpsw_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS,
	.get_drvinfo		= cpsw_get_drvinfo,
	.get_msglevel		= cpsw_get_msglevel,
	.set_msglevel		= cpsw_set_msglevel,
	.get_link		= ethtool_op_get_link,
	.get_ts_info		= cpsw_get_ts_info,
	.get_coalesce		= cpsw_get_coalesce,
	.set_coalesce		= cpsw_set_coalesce,
	.get_sset_count		= cpsw_get_sset_count,
	.get_strings		= cpsw_get_strings,
	.get_ethtool_stats	= cpsw_get_ethtool_stats,
	.get_pauseparam		= cpsw_get_pauseparam,
	.set_pauseparam		= cpsw_set_pauseparam,
	.get_wol		= cpsw_get_wol,
	.set_wol		= cpsw_set_wol,
	.get_regs_len		= cpsw_get_regs_len,
	.get_regs		= cpsw_get_regs,
	.begin			= cpsw_ethtool_op_begin,
	.complete		= cpsw_ethtool_op_complete,
	.get_channels		= cpsw_get_channels,
	.set_channels		= cpsw_set_channels,
	.get_link_ksettings	= cpsw_get_link_ksettings,
	.set_link_ksettings	= cpsw_set_link_ksettings,
	.get_eee		= cpsw_get_eee,
	.nway_reset		= cpsw_nway_reset,
	.get_ringparam		= cpsw_get_ringparam,
	.set_ringparam		= cpsw_set_ringparam,
};
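
/* Parse the cpsw device tree node: top-level properties (slaves,
 * active_slave, cpdma_channels, bd_ram_size, mac_control, dual_emac)
 * plus the per-slave PHY and MAC address information.
 */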
static int cpsw_probe_dt(struct cpsw_platform_data *data,
			 struct platform_device *pdev)
{
	struct device_node *node = pdev->dev.of_node;
	struct device_node *slave_node;
	int i = 0, ret;
	u32 prop;

	if (!node)
		return -EINVAL;

	if (of_property_read_u32(node, "slaves", &prop)) {
		dev_err(&pdev->dev, "Missing slaves property in the DT.\n");
		return -EINVAL;
	}
	data->slaves = prop;

	if (of_property_read_u32(node, "active_slave", &prop)) {
		dev_err(&pdev->dev, "Missing active_slave property in the DT.\n");
		return -EINVAL;
	}
	data->active_slave = prop;

	data->slave_data = devm_kcalloc(&pdev->dev,
					data->slaves,
					sizeof(struct cpsw_slave_data),
					GFP_KERNEL);
	if (!data->slave_data)
		return -ENOMEM;

	if (of_property_read_u32(node, "cpdma_channels", &prop)) {
		dev_err(&pdev->dev, "Missing cpdma_channels property in the DT.\n");
		return -EINVAL;
	}
	data->channels = prop;

	if (of_property_read_u32(node, "bd_ram_size", &prop)) {
		dev_err(&pdev->dev, "Missing bd_ram_size property in the DT.\n");
		return -EINVAL;
	}
	data->bd_ram_size = prop;

	if (of_property_read_u32(node, "mac_control", &prop)) {
		dev_err(&pdev->dev, "Missing mac_control property in the DT.\n");
		return -EINVAL;
	}
	data->mac_control = prop;

	if (of_property_read_bool(node, "dual_emac"))
		data->dual_emac = true;

	/*
	 * Populate all the child nodes here...
	 */
	ret = of_platform_populate(node, NULL, NULL, &pdev->dev);
	/* We do not want to force this, as in some cases may not have child */
	if (ret)
		dev_warn(&pdev->dev, "Doesn't have any child node\n");

	for_each_available_child_of_node(node, slave_node) {
		struct cpsw_slave_data *slave_data = data->slave_data + i;
		int lenp;
		const __be32 *parp;

		/* This is no slave child node, continue */
		if (!of_node_name_eq(slave_node, "slave"))
			continue;

		slave_data->ifphy = devm_of_phy_get(&pdev->dev, slave_node,
						    NULL);
		if (!IS_ENABLED(CONFIG_TI_CPSW_PHY_SEL) &&
		    IS_ERR(slave_data->ifphy)) {
			ret = PTR_ERR(slave_data->ifphy);
			dev_err(&pdev->dev,
				"%d: Error retrieving port phy: %d\n", i, ret);
			goto err_node_put;
		}

		slave_data->slave_node = slave_node;
		slave_data->phy_node = of_parse_phandle(slave_node,
							"phy-handle", 0);
		parp = of_get_property(slave_node, "phy_id", &lenp);
		if (slave_data->phy_node) {
			dev_dbg(&pdev->dev,
				"slave[%d] using phy-handle=\"%pOF\"\n",
				i, slave_data->phy_node);
		} else if (of_phy_is_fixed_link(slave_node)) {
			/* In the case of a fixed PHY, the DT node associated
			 * to the PHY is the Ethernet MAC DT node.
			 */
			ret = of_phy_register_fixed_link(slave_node);
			if (ret) {
				dev_err_probe(&pdev->dev, ret, "failed to register fixed-link phy\n");
				goto err_node_put;
			}
			slave_data->phy_node = of_node_get(slave_node);
		} else if (parp) {
			u32 phyid;
			struct device_node *mdio_node;
			struct platform_device *mdio;

			if (lenp != (sizeof(__be32) * 2)) {
				dev_err(&pdev->dev, "Invalid slave[%d] phy_id property\n", i);
				goto no_phy_slave;
			}
			mdio_node = of_find_node_by_phandle(be32_to_cpup(parp));
			phyid = be32_to_cpup(parp+1);
			mdio = of_find_device_by_node(mdio_node);
			of_node_put(mdio_node);
			if (!mdio) {
				dev_err(&pdev->dev, "Missing mdio platform device\n");
				ret = -EINVAL;
				goto err_node_put;
			}
			snprintf(slave_data->phy_id, sizeof(slave_data->phy_id),
				 PHY_ID_FMT, mdio->name, phyid);
			put_device(&mdio->dev);
		} else {
			dev_err(&pdev->dev,
				"No slave[%d] phy_id, phy-handle, or fixed-link property\n",
				i);
			goto no_phy_slave;
		}
		ret = of_get_phy_mode(slave_node, &slave_data->phy_if);
		if (ret) {
			dev_err(&pdev->dev, "Missing or malformed slave[%d] phy-mode property\n",
				i);
			goto err_node_put;
		}

no_phy_slave:
		ret = of_get_mac_address(slave_node, slave_data->mac_addr);
		if (ret) {
			ret = ti_cm_get_macid(&pdev->dev, i,
					      slave_data->mac_addr);
			if (ret)
				goto err_node_put;
		}
		if (data->dual_emac) {
			if (of_property_read_u32(slave_node, "dual_emac_res_vlan",
						 &prop)) {
				dev_err(&pdev->dev, "Missing dual_emac_res_vlan in DT.\n");
				slave_data->dual_emac_res_vlan = i+1;
				dev_err(&pdev->dev, "Using %d as Reserved VLAN for %d slave\n",
					slave_data->dual_emac_res_vlan, i);
			} else {
				slave_data->dual_emac_res_vlan = prop;
			}
		}

		i++;
		if (i == data->slaves) {
			ret = 0;
			goto err_node_put;
		}
	}

	return 0;

err_node_put:
	of_node_put(slave_node);
	return ret;
}
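
/* Undo cpsw_probe_dt(): deregister any fixed-link PHYs, drop the per-slave
 * phy_node references and depopulate the child platform devices.
 */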
static void cpsw_remove_dt(struct platform_device *pdev)
{
	struct cpsw_common *cpsw = platform_get_drvdata(pdev);
	struct cpsw_platform_data *data = &cpsw->data;
	struct device_node *node = pdev->dev.of_node;
	struct device_node *slave_node;
	int i = 0;

	for_each_available_child_of_node(node, slave_node) {
		struct cpsw_slave_data *slave_data = &data->slave_data[i];

		if (!of_node_name_eq(slave_node, "slave"))
			continue;

		if (of_phy_is_fixed_link(slave_node))
			of_phy_deregister_fixed_link(slave_node);

		of_node_put(slave_data->phy_node);

		i++;
		if (i == data->slaves) {
			of_node_put(slave_node);
			break;
		}
	}

	of_platform_depopulate(&pdev->dev);
}
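
/* In dual EMAC mode, allocate and register the second net_device (port 2)
 * that shares the common cpsw state with the first one.
 */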
static int cpsw_probe_dual_emac(struct cpsw_priv *priv)
{
	struct cpsw_common		*cpsw = priv->cpsw;
	struct cpsw_platform_data	*data = &cpsw->data;
	struct net_device		*ndev;
	struct cpsw_priv		*priv_sl2;
	int ret = 0;

	ndev = devm_alloc_etherdev_mqs(cpsw->dev, sizeof(struct cpsw_priv),
				       CPSW_MAX_QUEUES, CPSW_MAX_QUEUES);
	if (!ndev) {
		dev_err(cpsw->dev, "cpsw: error allocating net_device\n");
		return -ENOMEM;
	}

	priv_sl2 = netdev_priv(ndev);
	priv_sl2->cpsw = cpsw;
	priv_sl2->ndev = ndev;
	priv_sl2->dev  = &ndev->dev;
	priv_sl2->msg_enable = netif_msg_init(debug_level, CPSW_DEBUG);

	if (is_valid_ether_addr(data->slave_data[1].mac_addr)) {
		memcpy(priv_sl2->mac_addr, data->slave_data[1].mac_addr,
		       ETH_ALEN);
		dev_info(cpsw->dev, "cpsw: Detected MACID = %pM\n",
			 priv_sl2->mac_addr);
	} else {
		eth_random_addr(priv_sl2->mac_addr);
		dev_info(cpsw->dev, "cpsw: Random MACID = %pM\n",
			 priv_sl2->mac_addr);
	}
	eth_hw_addr_set(ndev, priv_sl2->mac_addr);

	priv_sl2->emac_port = 1;
	cpsw->slaves[1].ndev = ndev;
	ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_CTAG_RX;
	ndev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
			     NETDEV_XDP_ACT_NDO_XMIT;

	ndev->netdev_ops = &cpsw_netdev_ops;
	ndev->ethtool_ops = &cpsw_ethtool_ops;

	/* register the network device */
	SET_NETDEV_DEV(ndev, cpsw->dev);
	ndev->dev.of_node = cpsw->slaves[1].data->slave_node;
	ret = register_netdev(ndev);
	if (ret)
		dev_err(cpsw->dev, "cpsw: error registering net device\n");

	return ret;
}

static const struct of_device_id cpsw_of_mtable[] = {
	{ .compatible = "ti,cpsw"},
	{ .compatible = "ti,am335x-cpsw"},
	{ .compatible = "ti,am4372-cpsw"},
	{ .compatible = "ti,dra7-cpsw"},
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, cpsw_of_mtable);

static const struct soc_device_attribute cpsw_soc_devices[] = {
	{ .family = "AM33xx", .revision = "ES1.0"},
	{ /* sentinel */ }
};
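
/* Main platform probe: map the switch register spaces, grab the functional
 * clock and the RX/TX/misc interrupts, enable runtime PM and parse the
 * device tree before the rest of the hardware setup.
 */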
static int cpsw_probe(struct platform_device *pdev)
{
	struct device			*dev = &pdev->dev;
	struct clk			*clk;
	struct cpsw_platform_data	*data;
	struct net_device		*ndev;
	struct cpsw_priv		*priv;
	void __iomem			*ss_regs;
	struct resource			*ss_res;
	struct gpio_descs		*mode;
	const struct soc_device_attribute *soc;
	struct cpsw_common		*cpsw;
	int ret = 0, ch;
	int irq;

	cpsw = devm_kzalloc(dev, sizeof(struct cpsw_common), GFP_KERNEL);
	if (!cpsw)
		return -ENOMEM;

	platform_set_drvdata(pdev, cpsw);
	cpsw_slave_index = cpsw_slave_index_priv;

	cpsw->dev = dev;

	mode = devm_gpiod_get_array_optional(dev, "mode", GPIOD_OUT_LOW);
	if (IS_ERR(mode)) {
		ret = PTR_ERR(mode);
		dev_err(dev, "gpio request failed, ret %d\n", ret);
		return ret;
	}

	clk = devm_clk_get(dev, "fck");
	if (IS_ERR(clk)) {
		ret = PTR_ERR(clk);
		dev_err(dev, "fck is not found %d\n", ret);
		return ret;
	}
	cpsw->bus_freq_mhz = clk_get_rate(clk) / 1000000;

	ss_regs = devm_platform_get_and_ioremap_resource(pdev, 0, &ss_res);
	if (IS_ERR(ss_regs))
		return PTR_ERR(ss_regs);
	cpsw->regs = ss_regs;

	cpsw->wr_regs = devm_platform_ioremap_resource(pdev, 1);
	if (IS_ERR(cpsw->wr_regs))
		return PTR_ERR(cpsw->wr_regs);

	/* RX IRQ */
	irq = platform_get_irq(pdev, 1);
	if (irq < 0)
		return irq;
	cpsw->irqs_table[0] = irq;

	/* TX IRQ */
	irq = platform_get_irq(pdev, 2);
	if (irq < 0)
		return irq;
	cpsw->irqs_table[1] = irq;

	/* get misc irq */
	irq = platform_get_irq(pdev, 3);
	if (irq <= 0)
		return irq;
	cpsw->misc_irq = irq;

	/*
	 * This may be required here for child devices.
	 */
	pm_runtime_enable(dev);

	/* Need to enable clocks with runtime PM api to access module
	 * registers
	 */
	ret = pm_runtime_resume_and_get(dev);
	if (ret < 0)
		goto clean_runtime_disable_ret;

	ret = cpsw_probe_dt(&cpsw->data, pdev);
	if (ret)
		goto clean_dt_ret;

	soc = soc_device_match(cpsw_soc_devices);
	if (soc)
		cpsw->quirk_irq = true;

	data = &cpsw->data;
	cpsw->slaves = devm_kcalloc(dev,
treewide: devm_kzalloc() -> devm_kcalloc()
The devm_kzalloc() function has a 2-factor argument form, devm_kcalloc().
This patch replaces cases of:
devm_kzalloc(handle, a * b, gfp)
with:
devm_kcalloc(handle, a * b, gfp)
as well as handling cases of:
devm_kzalloc(handle, a * b * c, gfp)
with:
devm_kzalloc(handle, array3_size(a, b, c), gfp)
as it's slightly less ugly than:
devm_kcalloc(handle, array_size(a, b), c, gfp)
This does, however, attempt to ignore constant size factors like:
devm_kzalloc(handle, 4 * 1024, gfp)
though any constants defined via macros get caught up in the conversion.
Any factors with a sizeof() of "unsigned char", "char", and "u8" were
dropped, since they're redundant.
Some manual whitespace fixes were needed in this patch, as Coccinelle
really liked to write "=devm_kcalloc..." instead of "= devm_kcalloc...".
The Coccinelle script used for this was:
// Fix redundant parens around sizeof().
@@
expression HANDLE;
type TYPE;
expression THING, E;
@@
(
devm_kzalloc(HANDLE,
- (sizeof(TYPE)) * E
+ sizeof(TYPE) * E
, ...)
|
devm_kzalloc(HANDLE,
- (sizeof(THING)) * E
+ sizeof(THING) * E
, ...)
)
// Drop single-byte sizes and redundant parens.
@@
expression HANDLE;
expression COUNT;
typedef u8;
typedef __u8;
@@
(
devm_kzalloc(HANDLE,
- sizeof(u8) * (COUNT)
+ COUNT
, ...)
|
devm_kzalloc(HANDLE,
- sizeof(__u8) * (COUNT)
+ COUNT
, ...)
|
devm_kzalloc(HANDLE,
- sizeof(char) * (COUNT)
+ COUNT
, ...)
|
devm_kzalloc(HANDLE,
- sizeof(unsigned char) * (COUNT)
+ COUNT
, ...)
|
devm_kzalloc(HANDLE,
- sizeof(u8) * COUNT
+ COUNT
, ...)
|
devm_kzalloc(HANDLE,
- sizeof(__u8) * COUNT
+ COUNT
, ...)
|
devm_kzalloc(HANDLE,
- sizeof(char) * COUNT
+ COUNT
, ...)
|
devm_kzalloc(HANDLE,
- sizeof(unsigned char) * COUNT
+ COUNT
, ...)
)
// 2-factor product with sizeof(type/expression) and identifier or constant.
@@
expression HANDLE;
type TYPE;
expression THING;
identifier COUNT_ID;
constant COUNT_CONST;
@@
(
- devm_kzalloc
+ devm_kcalloc
(HANDLE,
- sizeof(TYPE) * (COUNT_ID)
+ COUNT_ID, sizeof(TYPE)
, ...)
|
- devm_kzalloc
+ devm_kcalloc
(HANDLE,
- sizeof(TYPE) * COUNT_ID
+ COUNT_ID, sizeof(TYPE)
, ...)
|
- devm_kzalloc
+ devm_kcalloc
(HANDLE,
- sizeof(TYPE) * (COUNT_CONST)
+ COUNT_CONST, sizeof(TYPE)
, ...)
|
- devm_kzalloc
+ devm_kcalloc
(HANDLE,
- sizeof(TYPE) * COUNT_CONST
+ COUNT_CONST, sizeof(TYPE)
, ...)
|
- devm_kzalloc
+ devm_kcalloc
(HANDLE,
- sizeof(THING) * (COUNT_ID)
+ COUNT_ID, sizeof(THING)
, ...)
|
- devm_kzalloc
+ devm_kcalloc
(HANDLE,
- sizeof(THING) * COUNT_ID
+ COUNT_ID, sizeof(THING)
, ...)
|
- devm_kzalloc
+ devm_kcalloc
(HANDLE,
- sizeof(THING) * (COUNT_CONST)
+ COUNT_CONST, sizeof(THING)
, ...)
|
- devm_kzalloc
+ devm_kcalloc
(HANDLE,
- sizeof(THING) * COUNT_CONST
+ COUNT_CONST, sizeof(THING)
, ...)
)
// 2-factor product, only identifiers.
@@
expression HANDLE;
identifier SIZE, COUNT;
@@
- devm_kzalloc
+ devm_kcalloc
(HANDLE,
- SIZE * COUNT
+ COUNT, SIZE
, ...)
// 3-factor product with 1 sizeof(type) or sizeof(expression), with
// redundant parens removed.
@@
expression HANDLE;
expression THING;
identifier STRIDE, COUNT;
type TYPE;
@@
(
devm_kzalloc(HANDLE,
- sizeof(TYPE) * (COUNT) * (STRIDE)
+ array3_size(COUNT, STRIDE, sizeof(TYPE))
, ...)
|
devm_kzalloc(HANDLE,
- sizeof(TYPE) * (COUNT) * STRIDE
+ array3_size(COUNT, STRIDE, sizeof(TYPE))
, ...)
|
devm_kzalloc(HANDLE,
- sizeof(TYPE) * COUNT * (STRIDE)
+ array3_size(COUNT, STRIDE, sizeof(TYPE))
, ...)
|
devm_kzalloc(HANDLE,
- sizeof(TYPE) * COUNT * STRIDE
+ array3_size(COUNT, STRIDE, sizeof(TYPE))
, ...)
|
devm_kzalloc(HANDLE,
- sizeof(THING) * (COUNT) * (STRIDE)
+ array3_size(COUNT, STRIDE, sizeof(THING))
, ...)
|
devm_kzalloc(HANDLE,
- sizeof(THING) * (COUNT) * STRIDE
+ array3_size(COUNT, STRIDE, sizeof(THING))
, ...)
|
devm_kzalloc(HANDLE,
- sizeof(THING) * COUNT * (STRIDE)
+ array3_size(COUNT, STRIDE, sizeof(THING))
, ...)
|
devm_kzalloc(HANDLE,
- sizeof(THING) * COUNT * STRIDE
+ array3_size(COUNT, STRIDE, sizeof(THING))
, ...)
)
// 3-factor product with 2 sizeof(variable), with redundant parens removed.
@@
expression HANDLE;
expression THING1, THING2;
identifier COUNT;
type TYPE1, TYPE2;
@@
(
devm_kzalloc(HANDLE,
- sizeof(TYPE1) * sizeof(TYPE2) * COUNT
+ array3_size(COUNT, sizeof(TYPE1), sizeof(TYPE2))
, ...)
|
devm_kzalloc(HANDLE,
- sizeof(TYPE1) * sizeof(TYPE2) * (COUNT)
+ array3_size(COUNT, sizeof(TYPE1), sizeof(TYPE2))
, ...)
|
devm_kzalloc(HANDLE,
- sizeof(THING1) * sizeof(THING2) * COUNT
+ array3_size(COUNT, sizeof(THING1), sizeof(THING2))
, ...)
|
devm_kzalloc(HANDLE,
- sizeof(THING1) * sizeof(THING2) * (COUNT)
+ array3_size(COUNT, sizeof(THING1), sizeof(THING2))
, ...)
|
devm_kzalloc(HANDLE,
- sizeof(TYPE1) * sizeof(THING2) * COUNT
+ array3_size(COUNT, sizeof(TYPE1), sizeof(THING2))
, ...)
|
devm_kzalloc(HANDLE,
- sizeof(TYPE1) * sizeof(THING2) * (COUNT)
+ array3_size(COUNT, sizeof(TYPE1), sizeof(THING2))
, ...)
)
// 3-factor product, only identifiers, with redundant parens removed.
@@
expression HANDLE;
identifier STRIDE, SIZE, COUNT;
@@
(
devm_kzalloc(HANDLE,
- (COUNT) * STRIDE * SIZE
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
|
devm_kzalloc(HANDLE,
- COUNT * (STRIDE) * SIZE
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
|
devm_kzalloc(HANDLE,
- COUNT * STRIDE * (SIZE)
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
|
devm_kzalloc(HANDLE,
- (COUNT) * (STRIDE) * SIZE
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
|
devm_kzalloc(HANDLE,
- COUNT * (STRIDE) * (SIZE)
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
|
devm_kzalloc(HANDLE,
- (COUNT) * STRIDE * (SIZE)
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
|
devm_kzalloc(HANDLE,
- (COUNT) * (STRIDE) * (SIZE)
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
|
devm_kzalloc(HANDLE,
- COUNT * STRIDE * SIZE
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
)
// Any remaining multi-factor products, first at least 3-factor products,
// when they're not all constants...
@@
expression HANDLE;
expression E1, E2, E3;
constant C1, C2, C3;
@@
(
devm_kzalloc(HANDLE, C1 * C2 * C3, ...)
|
devm_kzalloc(HANDLE,
- (E1) * E2 * E3
+ array3_size(E1, E2, E3)
, ...)
|
devm_kzalloc(HANDLE,
- (E1) * (E2) * E3
+ array3_size(E1, E2, E3)
, ...)
|
devm_kzalloc(HANDLE,
- (E1) * (E2) * (E3)
+ array3_size(E1, E2, E3)
, ...)
|
devm_kzalloc(HANDLE,
- E1 * E2 * E3
+ array3_size(E1, E2, E3)
, ...)
)
// And then all remaining 2 factors products when they're not all constants,
// keeping sizeof() as the second factor argument.
@@
expression HANDLE;
expression THING, E1, E2;
type TYPE;
constant C1, C2, C3;
@@
(
devm_kzalloc(HANDLE, sizeof(THING) * C2, ...)
|
devm_kzalloc(HANDLE, sizeof(TYPE) * C2, ...)
|
devm_kzalloc(HANDLE, C1 * C2 * C3, ...)
|
devm_kzalloc(HANDLE, C1 * C2, ...)
|
- devm_kzalloc
+ devm_kcalloc
(HANDLE,
- sizeof(TYPE) * (E2)
+ E2, sizeof(TYPE)
, ...)
|
- devm_kzalloc
+ devm_kcalloc
(HANDLE,
- sizeof(TYPE) * E2
+ E2, sizeof(TYPE)
, ...)
|
- devm_kzalloc
+ devm_kcalloc
(HANDLE,
- sizeof(THING) * (E2)
+ E2, sizeof(THING)
, ...)
|
- devm_kzalloc
+ devm_kcalloc
(HANDLE,
- sizeof(THING) * E2
+ E2, sizeof(THING)
, ...)
|
- devm_kzalloc
+ devm_kcalloc
(HANDLE,
- (E1) * E2
+ E1, E2
, ...)
|
- devm_kzalloc
+ devm_kcalloc
(HANDLE,
- (E1) * (E2)
+ E1, E2
, ...)
|
- devm_kzalloc
+ devm_kcalloc
(HANDLE,
- E1 * E2
+ E1, E2
, ...)
)
Signed-off-by: Kees Cook <keescook@chromium.org>
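A minimal before/after sketch of what this conversion does in a driver probe path (the "ports"/"nports" names and the three-factor example are hypothetical, not taken from this file):

	/* Before: the multiplication can silently overflow. */
	ports = devm_kzalloc(&pdev->dev, sizeof(*ports) * nports, GFP_KERNEL);

	/* After: devm_kcalloc() checks nports * sizeof(*ports) for overflow
	 * and returns NULL rather than allocating a short buffer.
	 */
	ports = devm_kcalloc(&pdev->dev, nports, sizeof(*ports), GFP_KERNEL);

	/* Three-factor products become array3_size(), which saturates to
	 * SIZE_MAX on overflow so the allocation fails cleanly.
	 */
	buf = devm_kzalloc(&pdev->dev, array3_size(rows, cols, sizeof(u32)),
			   GFP_KERNEL);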
2018-06-12 14:07:58 -07:00
|
|
|
data->slaves, sizeof(struct cpsw_slave),
|
2013-09-21 00:50:38 +05:30
|
|
|
GFP_KERNEL);
|
2016-08-10 02:22:42 +03:00
|
|
|
if (!cpsw->slaves) {
|
2013-09-21 00:50:38 +05:30
|
|
|
ret = -ENOMEM;
|
2016-11-17 17:40:00 +01:00
|
|
|
goto clean_dt_ret;
|
2012-03-18 20:17:54 +00:00
|
|
|
}
|
|
|
|
|
2019-04-26 20:12:36 +03:00
|
|
|
cpsw->rx_packet_max = max(rx_packet_max, CPSW_MAX_PACKET_SIZE);
|
2019-04-26 20:12:42 +03:00
|
|
|
cpsw->descs_pool_size = descs_pool_size;
|
2012-03-18 20:17:54 +00:00
|
|
|
|
2019-04-26 20:12:39 +03:00
|
|
|
ret = cpsw_init_common(cpsw, ss_regs, ale_ageout,
|
|
|
|
ss_res->start + CPSW2_BD_OFFSET,
|
|
|
|
descs_pool_size);
|
|
|
|
if (ret)
|
2016-11-17 17:40:00 +01:00
|
|
|
goto clean_dt_ret;
|
2018-05-17 01:21:45 +03:00
|
|
|
|
2018-07-24 00:26:29 +03:00
|
|
|
ch = cpsw->quirk_irq ? 0 : 7;
|
|
|
|
cpsw->txv[0].ch = cpdma_chan_create(cpsw->dma, ch, cpsw_tx_handler, 0);
|
2017-12-12 23:06:35 +02:00
|
|
|
if (IS_ERR(cpsw->txv[0].ch)) {
|
2019-04-26 20:12:27 +03:00
|
|
|
dev_err(dev, "error initializing tx dma channel\n");
|
2017-12-12 23:06:35 +02:00
|
|
|
ret = PTR_ERR(cpsw->txv[0].ch);
|
2019-04-26 20:12:36 +03:00
|
|
|
goto clean_cpts;
|
2017-12-12 23:06:35 +02:00
|
|
|
}
|
|
|
|
|
2016-11-29 17:00:51 +02:00
|
|
|
cpsw->rxv[0].ch = cpdma_chan_create(cpsw->dma, 0, cpsw_rx_handler, 1);
|
2017-12-12 23:06:35 +02:00
|
|
|
if (IS_ERR(cpsw->rxv[0].ch)) {
|
2019-04-26 20:12:27 +03:00
|
|
|
dev_err(dev, "error initializing rx dma channel\n");
|
2017-12-12 23:06:35 +02:00
|
|
|
ret = PTR_ERR(cpsw->rxv[0].ch);
|
2019-04-26 20:12:36 +03:00
|
|
|
goto clean_cpts;
|
2012-03-18 20:17:54 +00:00
|
|
|
}
|
2019-04-26 20:12:36 +03:00
|
|
|
cpsw_split_res(cpsw);
|
2012-03-18 20:17:54 +00:00
|
|
|
|
2019-04-26 20:12:36 +03:00
|
|
|
/* setup netdev */
|
|
|
|
ndev = devm_alloc_etherdev_mqs(dev, sizeof(struct cpsw_priv),
|
|
|
|
CPSW_MAX_QUEUES, CPSW_MAX_QUEUES);
|
|
|
|
if (!ndev) {
|
|
|
|
dev_err(dev, "error allocating net_device\n");
|
2020-11-13 14:49:33 +08:00
|
|
|
ret = -ENOMEM;
|
2019-04-26 20:12:36 +03:00
|
|
|
goto clean_cpts;
|
2012-03-18 20:17:54 +00:00
|
|
|
}
|
|
|
|
|
2019-04-26 20:12:36 +03:00
|
|
|
priv = netdev_priv(ndev);
|
|
|
|
priv->cpsw = cpsw;
|
|
|
|
priv->ndev = ndev;
|
|
|
|
priv->dev = dev;
|
|
|
|
priv->msg_enable = netif_msg_init(debug_level, CPSW_DEBUG);
|
|
|
|
priv->emac_port = 0;
|
2016-12-06 18:00:41 -06:00
|
|
|
|
2019-04-26 20:12:36 +03:00
|
|
|
if (is_valid_ether_addr(data->slave_data[0].mac_addr)) {
|
|
|
|
memcpy(priv->mac_addr, data->slave_data[0].mac_addr, ETH_ALEN);
|
|
|
|
dev_info(dev, "Detected MACID = %pM\n", priv->mac_addr);
|
|
|
|
} else {
|
|
|
|
eth_random_addr(priv->mac_addr);
|
|
|
|
dev_info(dev, "Random MACID = %pM\n", priv->mac_addr);
|
2012-03-18 20:17:54 +00:00
|
|
|
}
|
|
|
|
|
2021-10-01 14:32:20 -07:00
|
|
|
eth_hw_addr_set(ndev, priv->mac_addr);
|
2019-04-26 20:12:36 +03:00
|
|
|
|
|
|
|
cpsw->slaves[0].ndev = ndev;
|
|
|
|
|
2018-03-15 15:15:50 -05:00
|
|
|
ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_CTAG_RX;
|
drivers: net: turn on XDP features
A summary of the flags being set for various drivers is given below.
Note that XDP_F_REDIRECT_TARGET and XDP_F_FRAG_TARGET are features
that can be turned off and on at runtime. This means that these flags
may be set and unset under RTNL lock protection by the driver. Hence,
READ_ONCE must be used by code loading the flag value.
Also, these flags are not used for synchronization against the availability
of XDP resources on a device. It is merely a hint, and hence the read
may race with the actual teardown of XDP resources on the device. This
may change in the future, e.g. operations taking a reference on the XDP
resources of the driver, and in turn inhibiting turning off this flag.
However, for now, it can only be used as a hint to check whether device
supports becoming a redirection target.
Turn 'hw-offload' feature flag on for:
- netronome (nfp)
- netdevsim.
Turn 'native' and 'zerocopy' features flags on for:
- intel (i40e, ice, ixgbe, igc)
- mellanox (mlx5).
- stmmac
- netronome (nfp)
Turn 'native' features flags on for:
- amazon (ena)
- broadcom (bnxt)
- freescale (dpaa, dpaa2, enetc)
- funeth
- intel (igb)
- marvell (mvneta, mvpp2, octeontx2)
- mellanox (mlx4)
- mtk_eth_soc
- qlogic (qede)
- sfc
- socionext (netsec)
- ti (cpsw)
- tap
- tsnep
- veth
- xen
- virtio_net.
Turn 'basic' (tx, pass, aborted and drop) features flags on for:
- netronome (nfp)
- cavium (thunder)
- hyperv.
Turn 'redirect_target' feature flag on for:
- amazon (ena)
- broadcom (bnxt)
- freescale (dpaa, dpaa2)
- intel (i40e, ice, igb, ixgbe)
- ti (cpsw)
- marvell (mvneta, mvpp2)
- sfc
- socionext (netsec)
- qlogic (qede)
- mellanox (mlx5)
- tap
- veth
- virtio_net
- xen
Reviewed-by: Gerhard Engleder <gerhard@engleder-embedded.com>
Reviewed-by: Simon Horman <simon.horman@corigine.com>
Acked-by: Stanislav Fomichev <sdf@google.com>
Acked-by: Jakub Kicinski <kuba@kernel.org>
Co-developed-by: Kumar Kartikeya Dwivedi <memxor@gmail.com>
Signed-off-by: Kumar Kartikeya Dwivedi <memxor@gmail.com>
Co-developed-by: Lorenzo Bianconi <lorenzo@kernel.org>
Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
Signed-off-by: Marek Majtyka <alardam@gmail.com>
Link: https://lore.kernel.org/r/3eca9fafb308462f7edb1f58e451d59209aa07eb.1675245258.git.lorenzo@kernel.org
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
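As a hedged illustration of the READ_ONCE requirement described above (the helper name is hypothetical and not part of this driver; NETDEV_XDP_ACT_NDO_XMIT is the bit a redirect target advertises):

	/* Hypothetical reader-side helper: the xdp_features bits may be
	 * flipped at runtime under RTNL, so an unlocked reader loads them
	 * with READ_ONCE() and treats the result only as a hint.
	 */
	static bool foo_can_redirect_to(const struct net_device *dev)
	{
		return !!(READ_ONCE(dev->xdp_features) & NETDEV_XDP_ACT_NDO_XMIT);
	}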
2023-02-01 11:24:18 +01:00
|
|
|
ndev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
|
|
|
|
NETDEV_XDP_ACT_NDO_XMIT;
|
net: cpsw: convert to ndo_hwtstamp_get() and ndo_hwtstamp_set()
The new timestamping API was introduced in commit 66f7223039c0 ("net: add
NDOs for configuring hardware timestamping") from kernel v6.6. It is
time to convert the two cpsw drivers to the new API, so that the
ndo_eth_ioctl() path can be removed completely.
The cpsw_hwtstamp_get() and cpsw_hwtstamp_set() methods (and their shim
definitions, for the case where CONFIG_TI_CPTS is not enabled) must have
their prototypes adjusted.
These methods are used by two drivers (cpsw and cpsw_new), with vastly
different configurations:
- cpsw has two operating modes:
- "dual EMAC" - enabled through the "dual_emac" device tree property -
creates one net_device per EMAC / slave interface (but there is no
bridging offload)
- "switch mode" - default - there is a single net_device, with two
EMACs/slaves behind it (and switching between them happens
unbeknownst to the network stack).
- cpsw_new always registers one net_device for each EMAC which doesn't
have status = "disabled". In terms of switching, it has two modes:
- "dual EMAC": default, no switching between ports, no switchdev
offload.
- "switch mode": enabled through the "switch_mode" devlink parameter,
offloads the Linux bridge through switchdev
Essentially, in 3 out of 4 operating modes, there is a bijective
relation between the net_device and the slave. Timestamping can thus be
configured on individual slaves. But in the "switch mode" of the cpsw
driver, ndo_eth_ioctl() targets a single slave, designated using the
"active_slave" device tree property.
To deal with these different cases, the common portion of the drivers,
cpsw_priv.c, has the cpsw_slave_index() function pointer, set to
separate, identically named cpsw_slave_index_priv() by the 2 drivers.
This is all relevant because cpsw_ndo_ioctl() has the old-style
phy_has_hwtstamp() logic which lets the PHY handle the timestamping
ioctls. Normally, that logic should be obsoleted by the more complex
logic in the core, which permits dynamically selecting the timestamp
provider - see dev_set_hwtstamp_phylib().
But I have doubts as to how this works for the "switch mode" of the dual
EMAC driver, because the core logic only engages if the PHY is visible
through ndev->phydev (this is set by phy_attach_direct()).
In cpsw.c, we have:
cpsw_ndo_open()
-> for_each_slave(priv, cpsw_slave_open, priv); // continues on errors
-> of_phy_connect()
-> phy_connect_direct()
-> phy_attach_direct()
OR
-> phy_connect()
-> phy_connect_direct()
-> phy_attach_direct()
The problem for "switch mode" is that the behavior of phy_attach_direct()
called twice in a row for the same net_device (once for each slave) is
probably undefined.
For sure it will overwrite dev->phydev. I don't see any explicit error
checks for this case, and even if there were, the for_each_slave() call
makes them non-fatal to cpsw_ndo_open() anyway.
I have no idea what is the extent to which this provides a usable
result, but the point is: only the last attached PHY will be visible
in dev->phydev, and this may well be a different PHY than
cpsw->slaves[slave_no].phy for the "active_slave".
In dual EMAC mode, as well as in cpsw_new, this should not be a problem.
I don't know whether PHY timestamping is a use case for the cpsw "switch
mode" as well, and I hope that there isn't, because for the sake of
simplicity, I've decided to deliberately break that functionality, by
refusing all PHY timestamping. Keeping it would mean blocking the old
API from ever being removed. In the new dev_set_hwtstamp_phylib() API,
it is not possible to operate on a phylib PHY other than dev->phydev,
and I would very much prefer not adding that much complexity for bizarre
driver decisions.
Final point about the cpsw_hwtstamp_get() conversion: we don't need to
propagate the unnecessary "config.flags = 0;", because dev_get_hwtstamp()
provides a zero-initialized struct kernel_hwtstamp_config.
Signed-off-by: Vladimir Oltean <vladimir.oltean@nxp.com>
Link: https://patch.msgid.link/20250512114422.4176010-1-vladimir.oltean@nxp.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
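For context, a rough sketch of what the new-style handlers look like (a hypothetical "foo" driver with made-up private state, not the actual cpsw conversion):

	/* Uses <linux/net_tstamp.h> definitions. dev_get_hwtstamp() hands in a
	 * zero-initialized kernel_hwtstamp_config, so get only fills what the
	 * driver tracks.
	 */
	struct foo_priv {
		bool tx_ts_enabled;	/* hypothetical driver state */
		bool rx_ts_enabled;
	};

	static int foo_hwtstamp_get(struct net_device *ndev,
				    struct kernel_hwtstamp_config *cfg)
	{
		struct foo_priv *priv = netdev_priv(ndev);

		cfg->tx_type = priv->tx_ts_enabled ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
		cfg->rx_filter = priv->rx_ts_enabled ? HWTSTAMP_FILTER_PTP_V2_EVENT :
						       HWTSTAMP_FILTER_NONE;
		return 0;
	}

	static int foo_hwtstamp_set(struct net_device *ndev,
				    struct kernel_hwtstamp_config *cfg,
				    struct netlink_ext_ack *extack)
	{
		struct foo_priv *priv = netdev_priv(ndev);

		if (cfg->tx_type != HWTSTAMP_TX_OFF && cfg->tx_type != HWTSTAMP_TX_ON) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported tx_type");
			return -ERANGE;
		}
		priv->tx_ts_enabled = cfg->tx_type == HWTSTAMP_TX_ON;
		priv->rx_ts_enabled = cfg->rx_filter != HWTSTAMP_FILTER_NONE;
		return 0;
	}

Handlers of this shape are wired up through the .ndo_hwtstamp_get and .ndo_hwtstamp_set members of net_device_ops, which is what lets the SIOCGHWTSTAMP/SIOCSHWTSTAMP handling drop out of ndo_eth_ioctl().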
2025-05-12 14:44:21 +03:00
|
|
|
/* Hijack PHY timestamping requests in order to block them */
|
|
|
|
if (!cpsw->data.dual_emac)
|
|
|
|
ndev->see_all_hwtstamp_requests = true;
|
2017-07-20 16:59:52 +05:30
|
|
|
|
|
|
|
ndev->netdev_ops = &cpsw_netdev_ops;
|
|
|
|
ndev->ethtool_ops = &cpsw_ethtool_ops;
|
2018-05-17 01:21:45 +03:00
|
|
|
netif_napi_add(ndev, &cpsw->napi_rx,
|
2022-09-27 06:27:53 -07:00
|
|
|
cpsw->quirk_irq ? cpsw_rx_poll : cpsw_rx_mq_poll);
|
2022-05-04 09:37:24 -07:00
|
|
|
netif_napi_add_tx(ndev, &cpsw->napi_tx,
|
|
|
|
cpsw->quirk_irq ? cpsw_tx_poll : cpsw_tx_mq_poll);
|
2017-07-20 16:59:52 +05:30
|
|
|
|
|
|
|
/* register the network device */
|
2019-04-26 20:12:27 +03:00
|
|
|
SET_NETDEV_DEV(ndev, dev);
|
2019-06-23 14:11:43 +02:00
|
|
|
ndev->dev.of_node = cpsw->slaves[0].data->slave_node;
|
2017-07-20 16:59:52 +05:30
|
|
|
ret = register_netdev(ndev);
|
|
|
|
if (ret) {
|
2019-04-26 20:12:27 +03:00
|
|
|
dev_err(dev, "error registering net device\n");
|
2017-07-20 16:59:52 +05:30
|
|
|
ret = -ENODEV;
|
2019-04-26 20:12:36 +03:00
|
|
|
goto clean_cpts;
|
2017-07-20 16:59:52 +05:30
|
|
|
}
|
|
|
|
|
|
|
|
if (cpsw->data.dual_emac) {
|
|
|
|
ret = cpsw_probe_dual_emac(priv);
|
|
|
|
if (ret) {
|
|
|
|
cpsw_err(priv, probe, "error probe slave 2 emac interface\n");
|
|
|
|
goto clean_unregister_netdev_ret;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-01-16 10:11:12 -06:00
|
|
|
/* Grab RX and TX IRQs. Note that we also have RX_THRESHOLD and
|
|
|
|
* MISC IRQs which are always kept disabled with this driver so
|
|
|
|
* we will not request them.
|
|
|
|
*
|
|
|
|
* If anyone wants to implement support for those, make sure to
|
|
|
|
* first request and append them to irqs_table array.
|
|
|
|
*/
|
2019-04-26 20:12:36 +03:00
|
|
|
ret = devm_request_irq(dev, cpsw->irqs_table[0], cpsw_rx_interrupt,
|
2019-04-26 20:12:27 +03:00
|
|
|
0, dev_name(dev), cpsw);
|
2015-01-16 10:11:11 -06:00
|
|
|
if (ret < 0) {
|
2019-04-26 20:12:27 +03:00
|
|
|
dev_err(dev, "error attaching irq (%d)\n", ret);
|
2019-04-26 20:12:36 +03:00
|
|
|
goto clean_unregister_netdev_ret;
|
2015-01-16 10:11:11 -06:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2019-04-26 20:12:36 +03:00
|
|
|
ret = devm_request_irq(dev, cpsw->irqs_table[1], cpsw_tx_interrupt,
|
2016-08-10 02:22:43 +03:00
|
|
|
0, dev_name(&pdev->dev), cpsw);
|
2015-01-16 10:11:11 -06:00
|
|
|
if (ret < 0) {
|
2019-04-26 20:12:27 +03:00
|
|
|
dev_err(dev, "error attaching irq (%d)\n", ret);
|
2019-04-26 20:12:36 +03:00
|
|
|
goto clean_unregister_netdev_ret;
|
2012-03-18 20:17:54 +00:00
|
|
|
}
|
2014-09-04 09:00:23 +02:00
|
|
|
|
2020-04-23 17:20:22 +03:00
|
|
|
if (!cpsw->cpts)
|
|
|
|
goto skip_cpts;
|
|
|
|
|
|
|
|
ret = devm_request_irq(&pdev->dev, cpsw->misc_irq, cpsw_misc_interrupt,
|
|
|
|
0, dev_name(&pdev->dev), cpsw);
|
|
|
|
if (ret < 0) {
|
|
|
|
dev_err(dev, "error attaching misc irq (%d)\n", ret);
|
|
|
|
goto clean_unregister_netdev_ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Enable misc CPTS evnt_pend IRQ */
|
|
|
|
cpts_set_irqpoll(cpsw->cpts, false);
|
|
|
|
|
|
|
|
skip_cpts:
|
2017-01-06 14:07:33 -06:00
|
|
|
cpsw_notice(priv, probe,
|
|
|
|
"initialized device (regs %pa, irq %d, pool size %d)\n",
|
2019-04-26 20:12:36 +03:00
|
|
|
&ss_res->start, cpsw->irqs_table[0], descs_pool_size);
|
2013-02-11 09:52:20 +00:00
|
|
|
|
2016-11-17 17:39:58 +01:00
|
|
|
pm_runtime_put(&pdev->dev);
|
|
|
|
|
2012-03-18 20:17:54 +00:00
|
|
|
return 0;
|
|
|
|
|
2016-11-17 17:40:02 +01:00
|
|
|
clean_unregister_netdev_ret:
|
|
|
|
unregister_netdev(ndev);
|
2019-04-26 20:12:36 +03:00
|
|
|
clean_cpts:
|
|
|
|
cpts_release(cpsw->cpts);
|
2016-08-10 02:22:40 +03:00
|
|
|
cpdma_ctlr_destroy(cpsw->dma);
|
2016-11-17 17:40:00 +01:00
|
|
|
clean_dt_ret:
|
|
|
|
cpsw_remove_dt(pdev);
|
2016-11-17 17:39:58 +01:00
|
|
|
pm_runtime_put_sync(&pdev->dev);
|
2013-09-21 00:50:38 +05:30
|
|
|
clean_runtime_disable_ret:
|
2012-07-17 08:09:50 +00:00
|
|
|
pm_runtime_disable(&pdev->dev);
|
2012-03-18 20:17:54 +00:00
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2023-11-28 18:38:26 +01:00
|
|
|
static void cpsw_remove(struct platform_device *pdev)
|
2012-03-18 20:17:54 +00:00
|
|
|
{
|
2019-06-12 00:49:03 +03:00
|
|
|
struct cpsw_common *cpsw = platform_get_drvdata(pdev);
|
|
|
|
int i, ret;
|
2016-07-28 20:50:35 +03:00
|
|
|
|
2022-04-12 08:28:47 +00:00
|
|
|
ret = pm_runtime_resume_and_get(&pdev->dev);
|
2023-11-28 18:38:26 +01:00
|
|
|
if (ret < 0) {
|
|
|
|
/* Note, if this error path is taken, we're leaking some
|
|
|
|
* resources.
|
|
|
|
*/
|
|
|
|
dev_err(&pdev->dev, "Failed to resume device (%pe)\n",
|
|
|
|
ERR_PTR(ret));
|
|
|
|
return;
|
|
|
|
}
|
2012-03-18 20:17:54 +00:00
|
|
|
|
2019-06-12 00:49:03 +03:00
|
|
|
for (i = 0; i < cpsw->data.slaves; i++)
|
|
|
|
if (cpsw->slaves[i].ndev)
|
|
|
|
unregister_netdev(cpsw->slaves[i].ndev);
|
2012-03-18 20:17:54 +00:00
|
|
|
|
2016-12-06 18:00:41 -06:00
|
|
|
cpts_release(cpsw->cpts);
|
2016-08-10 02:22:40 +03:00
|
|
|
cpdma_ctlr_destroy(cpsw->dma);
|
2016-11-17 17:40:00 +01:00
|
|
|
cpsw_remove_dt(pdev);
|
2016-07-28 20:50:35 +03:00
|
|
|
pm_runtime_put_sync(&pdev->dev);
|
|
|
|
pm_runtime_disable(&pdev->dev);
|
2012-03-18 20:17:54 +00:00
|
|
|
}
|
|
|
|
|
2015-02-27 13:19:45 +02:00
|
|
|
#ifdef CONFIG_PM_SLEEP
|
2012-03-18 20:17:54 +00:00
|
|
|
static int cpsw_suspend(struct device *dev)
|
|
|
|
{
|
2019-06-24 10:46:19 +05:30
|
|
|
struct cpsw_common *cpsw = dev_get_drvdata(dev);
|
|
|
|
int i;
|
2013-11-15 08:29:16 +01:00
|
|
|
|
2020-05-22 20:09:28 +03:00
|
|
|
rtnl_lock();
|
|
|
|
|
2019-06-24 10:46:19 +05:30
|
|
|
for (i = 0; i < cpsw->data.slaves; i++)
|
|
|
|
if (cpsw->slaves[i].ndev)
|
2016-08-10 02:22:42 +03:00
|
|
|
if (netif_running(cpsw->slaves[i].ndev))
|
|
|
|
cpsw_ndo_stop(cpsw->slaves[i].ndev);
|
2013-11-15 08:29:16 +01:00
|
|
|
|
2020-05-22 20:09:28 +03:00
|
|
|
rtnl_unlock();
|
|
|
|
|
2013-06-06 23:45:14 +05:30
|
|
|
/* Select sleep pin state */
|
2016-08-10 02:22:38 +03:00
|
|
|
pinctrl_pm_select_sleep_state(dev);
|
2013-06-06 23:45:14 +05:30
|
|
|
|
2012-03-18 20:17:54 +00:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int cpsw_resume(struct device *dev)
|
|
|
|
{
|
2019-06-24 10:46:19 +05:30
|
|
|
struct cpsw_common *cpsw = dev_get_drvdata(dev);
|
|
|
|
int i;
|
2012-03-18 20:17:54 +00:00
|
|
|
|
2013-06-06 23:45:14 +05:30
|
|
|
/* Select default pin state */
|
2016-08-10 02:22:38 +03:00
|
|
|
pinctrl_pm_select_default_state(dev);
|
2013-06-06 23:45:14 +05:30
|
|
|
|
2016-11-29 16:27:03 -06:00
|
|
|
/* shut up ASSERT_RTNL() warning in netif_set_real_num_tx/rx_queues */
|
|
|
|
rtnl_lock();
|
2014-09-11 22:52:38 +05:30
|
|
|
|
2019-06-24 10:46:19 +05:30
|
|
|
for (i = 0; i < cpsw->data.slaves; i++)
|
|
|
|
if (cpsw->slaves[i].ndev)
|
2016-08-10 02:22:42 +03:00
|
|
|
if (netif_running(cpsw->slaves[i].ndev))
|
|
|
|
cpsw_ndo_open(cpsw->slaves[i].ndev);
|
2019-06-24 10:46:19 +05:30
|
|
|
|
2016-11-29 16:27:03 -06:00
|
|
|
rtnl_unlock();
|
|
|
|
|
2012-03-18 20:17:54 +00:00
|
|
|
return 0;
|
|
|
|
}
|
2015-02-27 13:19:45 +02:00
|
|
|
#endif
|
2012-03-18 20:17:54 +00:00
|
|
|
|
2015-02-27 13:19:45 +02:00
|
|
|
static SIMPLE_DEV_PM_OPS(cpsw_pm_ops, cpsw_suspend, cpsw_resume);
|
2012-03-18 20:17:54 +00:00
|
|
|
|
|
|
|
static struct platform_driver cpsw_driver = {
|
|
|
|
.driver = {
|
|
|
|
.name = "cpsw",
|
|
|
|
.pm = &cpsw_pm_ops,
|
2013-09-30 09:55:12 +05:30
|
|
|
.of_match_table = cpsw_of_mtable,
|
2012-03-18 20:17:54 +00:00
|
|
|
},
|
|
|
|
.probe = cpsw_probe,
|
2024-10-03 12:01:03 +02:00
|
|
|
.remove = cpsw_remove,
|
2012-03-18 20:17:54 +00:00
|
|
|
};
|
|
|
|
|
2015-10-23 14:41:12 +03:00
|
|
|
module_platform_driver(cpsw_driver);
|
2012-03-18 20:17:54 +00:00
|
|
|
|
|
|
|
MODULE_LICENSE("GPL");
|
|
|
|
MODULE_AUTHOR("Cyril Chemparathy <cyril@ti.com>");
|
|
|
|
MODULE_AUTHOR("Mugunthan V N <mugunthanvnm@ti.com>");
|
|
|
|
MODULE_DESCRIPTION("TI CPSW Ethernet driver");
|