// SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-3-Clause)
/*
 * Copyright (c) 2014-2025, Advanced Micro Devices, Inc.
 * Copyright (c) 2014, Synopsys, Inc.
 * All rights reserved
 */

#include <linux/module.h>
#include <linux/device.h>
#include <linux/spinlock.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/io.h>
#include <linux/notifier.h>

#include "xgbe.h"
#include "xgbe-common.h"

MODULE_AUTHOR("Tom Lendacky <thomas.lendacky@amd.com>");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION(XGBE_DRV_DESC);

static int debug = -1;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, " Network interface message level setting");

static const u32 default_msg_level = (NETIF_MSG_LINK | NETIF_MSG_IFDOWN |
				      NETIF_MSG_IFUP);

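/* Establish the driver's default DMA, MTL and flow control settings */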
static void xgbe_default_config(struct xgbe_prv_data *pdata)
{
	DBGPR("-->xgbe_default_config\n");

	pdata->blen = DMA_SBMR_BLEN_64;
	pdata->pbl = DMA_PBL_128;
	pdata->aal = 1;
	pdata->rd_osr_limit = 8;
	pdata->wr_osr_limit = 8;
	pdata->tx_sf_mode = MTL_TSF_ENABLE;
	pdata->tx_threshold = MTL_TX_THRESHOLD_64;
	pdata->tx_osp_mode = DMA_OSP_ENABLE;
	pdata->rx_sf_mode = MTL_RSF_DISABLE;
	pdata->rx_threshold = MTL_RX_THRESHOLD_64;
	pdata->pause_autoneg = 1;
	pdata->tx_pause = 1;
	pdata->rx_pause = 1;
	pdata->phy_speed = SPEED_UNKNOWN;
	pdata->power_down = 0;

	DBGPR("<--xgbe_default_config\n");
}

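/* Wire up the hardware, PHY, I2C and descriptor service interfaces,
 * then let the version-specific data install its PHY implementation.
 */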
static void xgbe_init_all_fptrs(struct xgbe_prv_data *pdata)
{
	xgbe_init_function_ptrs_dev(&pdata->hw_if);
	xgbe_init_function_ptrs_phy(&pdata->phy_if);
	xgbe_init_function_ptrs_i2c(&pdata->i2c_if);
	xgbe_init_function_ptrs_desc(&pdata->desc_if);

	pdata->vdata->init_function_ptrs_phy_impl(&pdata->phy_if);
}

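/* Allocate the net_device with its embedded private data and initialize
 * the locks, mutexes, completions and initial device state.
 */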
struct xgbe_prv_data *xgbe_alloc_pdata(struct device *dev)
{
	struct xgbe_prv_data *pdata;
	struct net_device *netdev;

	netdev = alloc_etherdev_mq(sizeof(struct xgbe_prv_data),
				   XGBE_MAX_DMA_CHANNELS);
	if (!netdev) {
		dev_err(dev, "alloc_etherdev_mq failed\n");
		return ERR_PTR(-ENOMEM);
	}
	SET_NETDEV_DEV(netdev, dev);
	pdata = netdev_priv(netdev);
	pdata->netdev = netdev;
	pdata->dev = dev;

	spin_lock_init(&pdata->lock);
	spin_lock_init(&pdata->xpcs_lock);
	mutex_init(&pdata->rss_mutex);
	spin_lock_init(&pdata->tstamp_lock);
	mutex_init(&pdata->i2c_mutex);
	init_completion(&pdata->i2c_complete);
	init_completion(&pdata->mdio_complete);

	pdata->msg_enable = netif_msg_init(debug, default_msg_level);

	set_bit(XGBE_DOWN, &pdata->dev_state);
	set_bit(XGBE_STOPPED, &pdata->dev_state);

	return pdata;
}

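/* Counterpart of xgbe_alloc_pdata(): releasing the net_device also frees
 * the embedded private data.
 */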
void xgbe_free_pdata(struct xgbe_prv_data *pdata)
{
	struct net_device *netdev = pdata->netdev;

	free_netdev(netdev);
}

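/* Initialize the service interfaces, read the hardware feature set and
 * derive the Tx/Rx channel, ring and queue counts from the features, the
 * online CPU count and any bus-imposed maximums.
 */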
void xgbe_set_counts(struct xgbe_prv_data *pdata)
{
	/* Set all the function pointers */
	xgbe_init_all_fptrs(pdata);

	/* Populate the hardware features */
	xgbe_get_all_hw_features(pdata);

	/* Set default max values if not provided */
	if (!pdata->tx_max_channel_count)
		pdata->tx_max_channel_count = pdata->hw_feat.tx_ch_cnt;
	if (!pdata->rx_max_channel_count)
		pdata->rx_max_channel_count = pdata->hw_feat.rx_ch_cnt;

	if (!pdata->tx_max_q_count)
		pdata->tx_max_q_count = pdata->hw_feat.tx_q_cnt;
	if (!pdata->rx_max_q_count)
		pdata->rx_max_q_count = pdata->hw_feat.rx_q_cnt;

	/* Calculate the number of Tx and Rx rings to be created
	 *  -Tx (DMA) Channels map 1-to-1 to Tx Queues so set
	 *   the number of Tx queues to the number of Tx channels
	 *   enabled
	 *  -Rx (DMA) Channels do not map 1-to-1 so use the actual
	 *   number of Rx queues or maximum allowed
	 */
	pdata->tx_ring_count = min_t(unsigned int, num_online_cpus(),
				     pdata->hw_feat.tx_ch_cnt);
	pdata->tx_ring_count = min_t(unsigned int, pdata->tx_ring_count,
				     pdata->tx_max_channel_count);
	pdata->tx_ring_count = min_t(unsigned int, pdata->tx_ring_count,
				     pdata->tx_max_q_count);

	pdata->tx_q_count = pdata->tx_ring_count;

	pdata->rx_ring_count = min_t(unsigned int, num_online_cpus(),
				     pdata->hw_feat.rx_ch_cnt);
	pdata->rx_ring_count = min_t(unsigned int, pdata->rx_ring_count,
				     pdata->rx_max_channel_count);

	pdata->rx_q_count = min_t(unsigned int, pdata->hw_feat.rx_q_cnt,
				  pdata->rx_max_q_count);

	if (netif_msg_probe(pdata)) {
		dev_dbg(pdata->dev, "TX/RX DMA channel count = %u/%u\n",
			pdata->tx_ring_count, pdata->rx_ring_count);
		dev_dbg(pdata->dev, "TX/RX hardware queue count = %u/%u\n",
			pdata->tx_q_count, pdata->rx_q_count);
	}
}

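/* Perform the remaining one-time device setup: software reset, default
 * configuration, DMA mask, ring sizing, netdev ops/features, MTU limits
 * and registration of the network device.
 */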
int xgbe_config_netdev(struct xgbe_prv_data *pdata)
{
	struct net_device *netdev = pdata->netdev;
	struct device *dev = pdata->dev;
	int ret;

	netdev->irq = pdata->dev_irq;
	netdev->base_addr = (unsigned long)pdata->xgmac_regs;
	eth_hw_addr_set(netdev, pdata->mac_addr);

	/* Initialize ECC timestamps */
	pdata->tx_sec_period = jiffies;
	pdata->tx_ded_period = jiffies;
	pdata->rx_sec_period = jiffies;
	pdata->rx_ded_period = jiffies;
	pdata->desc_sec_period = jiffies;
	pdata->desc_ded_period = jiffies;

	/* Issue software reset to device */
	ret = pdata->hw_if.exit(pdata);
	if (ret) {
		dev_err(dev, "software reset failed\n");
		return ret;
	}

	/* Set default configuration data */
	xgbe_default_config(pdata);

	/* Set the DMA mask */
	ret = dma_set_mask_and_coherent(dev,
					DMA_BIT_MASK(pdata->hw_feat.dma_width));
	if (ret) {
		dev_err(dev, "dma_set_mask_and_coherent failed\n");
		return ret;
	}

	/* Set default max values if not provided */
	if (!pdata->tx_max_fifo_size)
		pdata->tx_max_fifo_size = pdata->hw_feat.tx_fifo_size;
	if (!pdata->rx_max_fifo_size)
		pdata->rx_max_fifo_size = pdata->hw_feat.rx_fifo_size;

	/* Set and validate the number of descriptors for a ring */
	BUILD_BUG_ON_NOT_POWER_OF_2(XGBE_TX_DESC_CNT);
	pdata->tx_desc_count = XGBE_TX_DESC_CNT;

	BUILD_BUG_ON_NOT_POWER_OF_2(XGBE_RX_DESC_CNT);
	pdata->rx_desc_count = XGBE_RX_DESC_CNT;

	/* Adjust the number of queues based on interrupts assigned */
	if (pdata->channel_irq_count) {
		pdata->tx_ring_count = min_t(unsigned int, pdata->tx_ring_count,
					     pdata->channel_irq_count);
		pdata->rx_ring_count = min_t(unsigned int, pdata->rx_ring_count,
					     pdata->channel_irq_count);

		if (netif_msg_probe(pdata))
			dev_dbg(pdata->dev,
				"adjusted TX/RX DMA channel count = %u/%u\n",
				pdata->tx_ring_count, pdata->rx_ring_count);
	}

	/* Initialize RSS hash key */
	netdev_rss_key_fill(pdata->rss_key, sizeof(pdata->rss_key));

	XGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, IP2TE, 1);
	XGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, TCP4TE, 1);
	XGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, UDP4TE, 1);

	/* Call MDIO/PHY initialization routine */
	pdata->debugfs_an_cdr_workaround = pdata->vdata->an_cdr_workaround;
	ret = pdata->phy_if.phy_init(pdata);
	if (ret)
		return ret;

	/* Set device operations */
	netdev->netdev_ops = xgbe_get_netdev_ops();
	netdev->ethtool_ops = xgbe_get_ethtool_ops();
#ifdef CONFIG_AMD_XGBE_DCB
	netdev->dcbnl_ops = xgbe_get_dcbnl_ops();
#endif

	/* Set device features */
	netdev->hw_features = NETIF_F_SG |
			      NETIF_F_IP_CSUM |
			      NETIF_F_IPV6_CSUM |
			      NETIF_F_RXCSUM |
			      NETIF_F_TSO |
			      NETIF_F_TSO6 |
			      NETIF_F_GRO |
			      NETIF_F_HW_VLAN_CTAG_RX |
			      NETIF_F_HW_VLAN_CTAG_TX |
			      NETIF_F_HW_VLAN_CTAG_FILTER;

	if (pdata->hw_feat.rss)
		netdev->hw_features |= NETIF_F_RXHASH;

	if (pdata->hw_feat.vxn) {
		netdev->hw_enc_features = NETIF_F_SG |
					  NETIF_F_IP_CSUM |
					  NETIF_F_IPV6_CSUM |
					  NETIF_F_RXCSUM |
					  NETIF_F_TSO |
					  NETIF_F_TSO6 |
					  NETIF_F_GRO |
					  NETIF_F_GSO_UDP_TUNNEL |
					  NETIF_F_GSO_UDP_TUNNEL_CSUM;

		netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL |
				       NETIF_F_GSO_UDP_TUNNEL_CSUM;

		netdev->udp_tunnel_nic_info = xgbe_get_udp_tunnel_info();
	}

	netdev->vlan_features |= NETIF_F_SG |
				 NETIF_F_IP_CSUM |
				 NETIF_F_IPV6_CSUM |
				 NETIF_F_TSO |
				 NETIF_F_TSO6;

	netdev->features |= netdev->hw_features;
	pdata->netdev_features = netdev->features;

	netdev->priv_flags |= IFF_UNICAST_FLT;
	netdev->min_mtu = 0;
	netdev->max_mtu = XGMAC_GIANT_PACKET_MTU - XGBE_ETH_FRAME_HDR;

	/* Use default watchdog timeout */
	netdev->watchdog_timeo = 0;

	xgbe_init_rx_coalesce(pdata);
	xgbe_init_tx_coalesce(pdata);

	netif_carrier_off(netdev);
	ret = register_netdev(netdev);
	if (ret) {
		dev_err(dev, "net device registration failed\n");
		return ret;
	}

	if (IS_REACHABLE(CONFIG_PTP_1588_CLOCK))
		xgbe_ptp_register(pdata);

	xgbe_debugfs_init(pdata);

	netif_dbg(pdata, drv, pdata->netdev, "%u Tx software queues\n",
		  pdata->tx_ring_count);
	netif_dbg(pdata, drv, pdata->netdev, "%u Rx software queues\n",
		  pdata->rx_ring_count);

	return 0;
}

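/* Reverse xgbe_config_netdev(): tear down debugfs and PTP, unregister the
 * net_device and shut down the PHY.
 */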
void xgbe_deconfig_netdev(struct xgbe_prv_data *pdata)
{
	struct net_device *netdev = pdata->netdev;

	xgbe_debugfs_exit(pdata);

	if (IS_REACHABLE(CONFIG_PTP_1588_CLOCK))
		xgbe_ptp_unregister(pdata);

	unregister_netdev(netdev);

	pdata->phy_if.phy_exit(pdata);
}

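/* Netdev notifier callback: keep the debugfs directory name in sync when
 * one of this driver's interfaces is renamed; other devices are ignored.
 */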
static int xgbe_netdev_event(struct notifier_block *nb, unsigned long event,
			     void *data)
{
	struct net_device *netdev = netdev_notifier_info_to_dev(data);
	struct xgbe_prv_data *pdata = netdev_priv(netdev);

	if (netdev->netdev_ops != xgbe_get_netdev_ops())
		goto out;

	switch (event) {
	case NETDEV_CHANGENAME:
		xgbe_debugfs_rename(pdata);
		break;

	default:
		break;
	}

out:
	return NOTIFY_DONE;
}

static struct notifier_block xgbe_netdev_notifier = {
	.notifier_call = xgbe_netdev_event,
};

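/* Module init: register the netdev notifier, then the platform and PCI
 * front ends, unwinding on failure.
 */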
static int __init xgbe_mod_init(void)
{
	int ret;

	ret = register_netdevice_notifier(&xgbe_netdev_notifier);
	if (ret)
		return ret;

	ret = xgbe_platform_init();
	if (ret)
		goto err_platform_init;

	ret = xgbe_pci_init();
	if (ret)
		goto err_pci_init;

	return 0;

err_pci_init:
	xgbe_platform_exit();
err_platform_init:
	unregister_netdevice_notifier(&xgbe_netdev_notifier);
	return ret;
}

static void __exit xgbe_mod_exit(void)
{
	xgbe_pci_exit();

	xgbe_platform_exit();

	unregister_netdevice_notifier(&xgbe_netdev_notifier);
}

module_init(xgbe_mod_init);
module_exit(xgbe_mod_exit);