// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2015-2019 Netronome Systems, Inc. */

/*
 * nfp_net_common.c
 * Netronome network device driver: Common functions between PF and VF
 * Authors: Jakub Kicinski <jakub.kicinski@netronome.com>
 *          Jason McMullan <jason.mcmullan@netronome.com>
 *          Rolf Neugebauer <rolf.neugebauer@netronome.com>
 *          Brad Petrus <brad.petrus@netronome.com>
 *          Chris Telfer <chris.telfer@netronome.com>
 */

#include <linux/bitfield.h>
#include <linux/bpf.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/mm.h>
#include <linux/overflow.h>
#include <linux/page_ref.h>
#include <linux/pci.h>
#include <linux/pci_regs.h>
#include <linux/ethtool.h>
#include <linux/log2.h>
#include <linux/if_vlan.h>
#include <linux/if_bridge.h>
#include <linux/random.h>
#include <linux/vmalloc.h>
#include <linux/ktime.h>

#include <net/tls.h>
#include <net/vxlan.h>
#include <net/xdp_sock_drv.h>
#include <net/xfrm.h>

#include "nfpcore/nfp_dev.h"
#include "nfpcore/nfp_nsp.h"
#include "ccm.h"
#include "nfp_app.h"
#include "nfp_net_ctrl.h"
#include "nfp_net.h"
#include "nfp_net_dp.h"
#include "nfp_net_sriov.h"
#include "nfp_net_xsk.h"
#include "nfp_port.h"
#include "crypto/crypto.h"
#include "crypto/fw.h"

static int nfp_net_mc_unsync(struct net_device *netdev, const unsigned char *addr);

/**
 * nfp_net_get_fw_version() - Read and parse the FW version
 * @fw_ver:   Output fw_version structure to read to
 * @ctrl_bar: Mapped address of the control BAR
 */
void nfp_net_get_fw_version(struct nfp_net_fw_version *fw_ver,
			    void __iomem *ctrl_bar)
{
	u32 reg;

	reg = readl(ctrl_bar + NFP_NET_CFG_VERSION);
	put_unaligned_le32(reg, fw_ver);
}

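/**
 * nfp_qcp_queue_offset() - Compute a queue's offset within the QCP BAR
 * @dev_info:	NFP device information
 * @queue:	Queue index
 *
 * The index is masked to the device's valid queue range before the
 * per-queue register stride is applied.
 *
 * Return: Byte offset of the queue controller registers for @queue.
 */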
u32 nfp_qcp_queue_offset(const struct nfp_dev_info *dev_info, u16 queue)
{
	queue &= dev_info->qc_idx_mask;
	return dev_info->qc_addr_offset + NFP_QCP_QUEUE_ADDR_SZ * queue;
}

/* Firmware reconfig
 *
 * Firmware reconfig may take a while so we have two versions of it -
 * synchronous and asynchronous (posted).  All synchronous callers are holding
 * RTNL so we don't have to worry about serializing them.
 */
static void nfp_net_reconfig_start(struct nfp_net *nn, u32 update)
{
	nn_writel(nn, NFP_NET_CFG_UPDATE, update);
	/* ensure update is written before pinging HW */
	nn_pci_flush(nn);
	nfp_qcp_wr_ptr_add(nn->qcp_cfg, 1);
	nn->reconfig_in_progress_update = update;
}

/* Pass 0 as update to run posted reconfigs. */
static void nfp_net_reconfig_start_async(struct nfp_net *nn, u32 update)
{
	update |= nn->reconfig_posted;
	nn->reconfig_posted = 0;

	nfp_net_reconfig_start(nn, update);

	nn->reconfig_timer_active = true;
	mod_timer(&nn->reconfig_timer, jiffies + NFP_NET_POLL_TIMEOUT * HZ);
}

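/* Check whether the firmware has processed the last update request.
 * Returns true once the update word has been zeroed (success) or its
 * error bit is set; on error, and on the final check before a timeout,
 * the status, update and ctrl words are logged for debugging.
 */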
static bool nfp_net_reconfig_check_done(struct nfp_net *nn, bool last_check)
{
	u32 reg;

	reg = nn_readl(nn, NFP_NET_CFG_UPDATE);
	if (reg == 0)
		return true;
	if (reg & NFP_NET_CFG_UPDATE_ERR) {
		nn_err(nn, "Reconfig error (status: 0x%08x update: 0x%08x ctrl: 0x%08x)\n",
		       reg, nn->reconfig_in_progress_update,
		       nn_readl(nn, NFP_NET_CFG_CTRL));
		return true;
	} else if (last_check) {
		nn_err(nn, "Reconfig timeout (status: 0x%08x update: 0x%08x ctrl: 0x%08x)\n",
		       reg, nn->reconfig_in_progress_update,
		       nn_readl(nn, NFP_NET_CFG_CTRL));
		return true;
	}

	return false;
}

static bool __nfp_net_reconfig_wait(struct nfp_net *nn, unsigned long deadline)
{
	bool timed_out = false;
	int i;

	/* Poll update field, waiting for NFP to ack the config.
	 * Do an opportunistic wait-busy loop, afterward sleep.
	 */
	for (i = 0; i < 50; i++) {
		if (nfp_net_reconfig_check_done(nn, false))
			return false;
		udelay(4);
	}

	while (!nfp_net_reconfig_check_done(nn, timed_out)) {
		usleep_range(250, 500);
		timed_out = time_is_before_eq_jiffies(deadline);
	}

	return timed_out;
}

static int nfp_net_reconfig_wait(struct nfp_net *nn, unsigned long deadline)
{
	if (__nfp_net_reconfig_wait(nn, deadline))
		return -EIO;

	if (nn_readl(nn, NFP_NET_CFG_UPDATE) & NFP_NET_CFG_UPDATE_ERR)
		return -EIO;

	return 0;
}

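/* Timer callback for the posted (async) reconfig path: report the result
 * of the update that was in flight and kick off any requests posted in
 * the meantime, unless a synchronous caller has taken over.
 */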
static void nfp_net_reconfig_timer(struct timer_list *t)
{
	struct nfp_net *nn = timer_container_of(nn, t, reconfig_timer);

	spin_lock_bh(&nn->reconfig_lock);

	nn->reconfig_timer_active = false;

	/* If sync caller is present it will take over from us */
	if (nn->reconfig_sync_present)
		goto done;

	/* Read reconfig status and report errors */
	nfp_net_reconfig_check_done(nn, true);

	if (nn->reconfig_posted)
		nfp_net_reconfig_start_async(nn, 0);
done:
	spin_unlock_bh(&nn->reconfig_lock);
}

/**
 * nfp_net_reconfig_post() - Post async reconfig request
 * @nn:      NFP Net device to reconfigure
 * @update:  The value for the update field in the BAR config
 *
 * Record FW reconfiguration request.  Reconfiguration will be kicked off
 * whenever reconfiguration machinery is idle.  Multiple requests can be
 * merged together!
 */
static void nfp_net_reconfig_post(struct nfp_net *nn, u32 update)
{
	spin_lock_bh(&nn->reconfig_lock);

	/* Sync caller will kick off async reconf when it's done, just post */
	if (nn->reconfig_sync_present) {
		nn->reconfig_posted |= update;
		goto done;
	}

	/* Opportunistically check if the previous command is done */
	if (!nn->reconfig_timer_active ||
	    nfp_net_reconfig_check_done(nn, false))
		nfp_net_reconfig_start_async(nn, update);
	else
		nn->reconfig_posted |= update;
done:
	spin_unlock_bh(&nn->reconfig_lock);
}

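/* Take over the reconfig machinery from the async (posted) path: stop a
 * pending timer, wait out any update that is already in flight and run
 * reconfigs which were posted before the synchronous caller arrived.
 */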
static void nfp_net_reconfig_sync_enter(struct nfp_net *nn)
{
	bool cancelled_timer = false;
	u32 pre_posted_requests;

	spin_lock_bh(&nn->reconfig_lock);

	WARN_ON(nn->reconfig_sync_present);
	nn->reconfig_sync_present = true;

	if (nn->reconfig_timer_active) {
		nn->reconfig_timer_active = false;
		cancelled_timer = true;
	}
	pre_posted_requests = nn->reconfig_posted;
	nn->reconfig_posted = 0;

	spin_unlock_bh(&nn->reconfig_lock);

	if (cancelled_timer) {
		timer_delete_sync(&nn->reconfig_timer);
		nfp_net_reconfig_wait(nn, nn->reconfig_timer.expires);
	}

	/* Run the posted reconfigs which were issued before we started */
	if (pre_posted_requests) {
		nfp_net_reconfig_start(nn, pre_posted_requests);
		nfp_net_reconfig_wait(nn, jiffies + HZ * NFP_NET_POLL_TIMEOUT);
	}
}

static void nfp_net_reconfig_wait_posted(struct nfp_net *nn)
{
	nfp_net_reconfig_sync_enter(nn);

	spin_lock_bh(&nn->reconfig_lock);
	nn->reconfig_sync_present = false;
	spin_unlock_bh(&nn->reconfig_lock);
}

/**
 * __nfp_net_reconfig() - Reconfigure the firmware
 * @nn:      NFP Net device to reconfigure
 * @update:  The value for the update field in the BAR config
 *
 * Write the update word to the BAR and ping the reconfig queue.  Then
 * poll until the firmware has acknowledged the update by zeroing the
 * update word.
 *
 * Return: Negative errno on error, 0 on success
 */
int __nfp_net_reconfig(struct nfp_net *nn, u32 update)
{
	int ret;

	nfp_net_reconfig_sync_enter(nn);

	nfp_net_reconfig_start(nn, update);
	ret = nfp_net_reconfig_wait(nn, jiffies + HZ * NFP_NET_POLL_TIMEOUT);

	spin_lock_bh(&nn->reconfig_lock);

	if (nn->reconfig_posted)
		nfp_net_reconfig_start_async(nn, 0);

	nn->reconfig_sync_present = false;

	spin_unlock_bh(&nn->reconfig_lock);

	return ret;
}

int nfp_net_reconfig(struct nfp_net *nn, u32 update)
{
	int ret;

	nn_ctrl_bar_lock(nn);
	ret = __nfp_net_reconfig(nn, update);
	nn_ctrl_bar_unlock(nn);

	return ret;
}

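/* Validate that the advertised mailbox area can hold @data_size bytes of
 * command data, then take the control BAR lock.  Callers must release it
 * with nn_ctrl_bar_unlock() (or nfp_net_mbox_reconfig_and_unlock()).
 */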
int nfp_net_mbox_lock(struct nfp_net *nn, unsigned int data_size)
{
	if (nn->tlv_caps.mbox_len < NFP_NET_CFG_MBOX_SIMPLE_VAL + data_size) {
		nn_err(nn, "mailbox too small for %u of data (%u)\n",
		       data_size, nn->tlv_caps.mbox_len);
		return -EIO;
	}

	nn_ctrl_bar_lock(nn);
	return 0;
}

/**
 * nfp_net_mbox_reconfig() - Reconfigure the firmware via the mailbox
 * @nn:        NFP Net device to reconfigure
 * @mbox_cmd:  The value for the mailbox command
 *
 * Helper function for mailbox updates
 *
 * Return: Negative errno on error, 0 on success
 */
int nfp_net_mbox_reconfig(struct nfp_net *nn, u32 mbox_cmd)
{
	u32 mbox = nn->tlv_caps.mbox_off;
	int ret;

	nn_writeq(nn, mbox + NFP_NET_CFG_MBOX_SIMPLE_CMD, mbox_cmd);

	ret = __nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_MBOX);
	if (ret) {
		nn_err(nn, "Mailbox update error\n");
		return ret;
	}

	return -nn_readl(nn, mbox + NFP_NET_CFG_MBOX_SIMPLE_RET);
}

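/* Posted flavour of the simple-mailbox reconfig: the command is written
 * and the update merely posted, callers collect the mailbox return value
 * later via nfp_net_mbox_reconfig_wait_posted().
 */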
void nfp_net_mbox_reconfig_post(struct nfp_net *nn, u32 mbox_cmd)
{
	u32 mbox = nn->tlv_caps.mbox_off;

	nn_writeq(nn, mbox + NFP_NET_CFG_MBOX_SIMPLE_CMD, mbox_cmd);

	nfp_net_reconfig_post(nn, NFP_NET_CFG_UPDATE_MBOX);
}

int nfp_net_mbox_reconfig_wait_posted(struct nfp_net *nn)
{
	u32 mbox = nn->tlv_caps.mbox_off;

	nfp_net_reconfig_wait_posted(nn);

	return -nn_readl(nn, mbox + NFP_NET_CFG_MBOX_SIMPLE_RET);
}

int nfp_net_mbox_reconfig_and_unlock(struct nfp_net *nn, u32 mbox_cmd)
{
	int ret;

	ret = nfp_net_mbox_reconfig(nn, mbox_cmd);
	nn_ctrl_bar_unlock(nn);
	return ret;
}

/* Interrupt configuration and handling
 */

/**
 * nfp_net_irqs_alloc() - allocates MSI-X irqs
 * @pdev:        PCI device structure
 * @irq_entries: Array to be initialized and used to hold the irq entries
 * @min_irqs:    Minimal acceptable number of interrupts
 * @wanted_irqs: Target number of interrupts to allocate
 *
 * Return: Number of irqs obtained or 0 on error.
 */
unsigned int
nfp_net_irqs_alloc(struct pci_dev *pdev, struct msix_entry *irq_entries,
		   unsigned int min_irqs, unsigned int wanted_irqs)
{
	unsigned int i;
	int got_irqs;

	for (i = 0; i < wanted_irqs; i++)
		irq_entries[i].entry = i;

	got_irqs = pci_enable_msix_range(pdev, irq_entries,
					 min_irqs, wanted_irqs);
	if (got_irqs < 0) {
		dev_err(&pdev->dev, "Failed to enable %d-%d MSI-X (err=%d)\n",
			min_irqs, wanted_irqs, got_irqs);
		return 0;
	}

	if (got_irqs < wanted_irqs)
		dev_warn(&pdev->dev, "Unable to allocate %d IRQs got only %d\n",
			 wanted_irqs, got_irqs);

	return got_irqs;
}

/**
 * nfp_net_irqs_assign() - Assign interrupts allocated externally to netdev
 * @nn:		 NFP Network structure
 * @irq_entries: Table of allocated interrupts
 * @n:		 Size of @irq_entries (number of entries to grab)
 *
 * After interrupts are allocated with nfp_net_irqs_alloc() this function
 * should be called to assign them to a specific netdev (port).
 */
void
nfp_net_irqs_assign(struct nfp_net *nn, struct msix_entry *irq_entries,
		    unsigned int n)
{
	struct nfp_net_dp *dp = &nn->dp;

	nn->max_r_vecs = n - NFP_NET_NON_Q_VECTORS;
	dp->num_r_vecs = nn->max_r_vecs;

	memcpy(nn->irq_entries, irq_entries, sizeof(*irq_entries) * n);

	if (dp->num_rx_rings > dp->num_r_vecs ||
	    dp->num_tx_rings > dp->num_r_vecs)
		dev_warn(nn->dp.dev, "More rings (%d,%d) than vectors (%d).\n",
			 dp->num_rx_rings, dp->num_tx_rings,
			 dp->num_r_vecs);

	dp->num_rx_rings = min(dp->num_r_vecs, dp->num_rx_rings);
	dp->num_tx_rings = min(dp->num_r_vecs, dp->num_tx_rings);
	dp->num_stack_tx_rings = dp->num_tx_rings;
}

/**
 * nfp_net_irqs_disable() - Disable interrupts
 * @pdev:        PCI device structure
 *
 * Undoes what @nfp_net_irqs_alloc() does.
 */
void nfp_net_irqs_disable(struct pci_dev *pdev)
{
	pci_disable_msix(pdev);
}

/**
 * nfp_net_irq_rxtx() - Interrupt service routine for RX/TX rings.
 * @irq:      Interrupt
 * @data:     Opaque data structure
 *
 * Return: Indicate if the interrupt has been handled.
 */
static irqreturn_t nfp_net_irq_rxtx(int irq, void *data)
{
	struct nfp_net_r_vector *r_vec = data;

	/* Currently we cannot tell if it's a rx or tx interrupt,
	 * since dim does not need accurate event_ctr to calculate,
	 * we just use this counter for both rx and tx dim.
	 */
	r_vec->event_ctr++;

	napi_schedule_irqoff(&r_vec->napi);

	/* The FW auto-masks any interrupt, either via the MASK bit in
	 * the MSI-X table or via the per entry ICR field.  So there
	 * is no need to disable interrupts here.
	 */
	return IRQ_HANDLED;
}

static irqreturn_t nfp_ctrl_irq_rxtx(int irq, void *data)
{
	struct nfp_net_r_vector *r_vec = data;

	tasklet_schedule(&r_vec->tasklet);

	return IRQ_HANDLED;
}

/**
 * nfp_net_read_link_status() - Reread link status from control BAR
 * @nn:       NFP Network structure
 */
static void nfp_net_read_link_status(struct nfp_net *nn)
{
	unsigned long flags;
	bool link_up;
	u16 sts;

	spin_lock_irqsave(&nn->link_status_lock, flags);

	sts = nn_readw(nn, NFP_NET_CFG_STS);
	link_up = !!(sts & NFP_NET_CFG_STS_LINK);

	if (nn->link_up == link_up)
		goto out;

	nn->link_up = link_up;
	if (nn->port) {
		set_bit(NFP_PORT_CHANGED, &nn->port->flags);
		if (nn->port->link_cb)
			nn->port->link_cb(nn->port);
	}

	if (nn->link_up) {
		netif_carrier_on(nn->dp.netdev);
		netdev_info(nn->dp.netdev, "NIC Link is Up\n");
	} else {
		netif_carrier_off(nn->dp.netdev);
		netdev_info(nn->dp.netdev, "NIC Link is Down\n");
	}
out:
	spin_unlock_irqrestore(&nn->link_status_lock, flags);
}

/**
 * nfp_net_irq_lsc() - Interrupt service routine for link state changes
 * @irq:      Interrupt
 * @data:     Opaque data structure
 *
 * Return: Indicate if the interrupt has been handled.
 */
static irqreturn_t nfp_net_irq_lsc(int irq, void *data)
{
	struct nfp_net *nn = data;
	struct msix_entry *entry;

	entry = &nn->irq_entries[NFP_NET_IRQ_LSC_IDX];

	nfp_net_read_link_status(nn);

	nfp_net_irq_unmask(nn, entry->entry);

	return IRQ_HANDLED;
}

/**
 * nfp_net_irq_exn() - Interrupt service routine for exceptions
 * @irq:      Interrupt
 * @data:     Opaque data structure
 *
 * Return: Indicate if the interrupt has been handled.
 */
static irqreturn_t nfp_net_irq_exn(int irq, void *data)
{
	struct nfp_net *nn = data;

	nn_err(nn, "%s: UNIMPLEMENTED.\n", __func__);
	/* XXX TO BE IMPLEMENTED */
	return IRQ_HANDLED;
}

/**
 * nfp_net_aux_irq_request() - Request an auxiliary interrupt (LSC or EXN)
 * @nn:		NFP Network structure
 * @ctrl_offset: Control BAR offset where IRQ configuration should be written
 * @format:	printf-style format to construct the interrupt name
 * @name:	Pointer to allocated space for interrupt name
 * @name_sz:	Size of space for interrupt name
 * @vector_idx:	Index of MSI-X vector used for this interrupt
 * @handler:	IRQ handler to register for this interrupt
 *
 * Return: 0 on success, negative errno on failure.
 */
static int
nfp_net_aux_irq_request(struct nfp_net *nn, u32 ctrl_offset,
			const char *format, char *name, size_t name_sz,
			unsigned int vector_idx, irq_handler_t handler)
{
	struct msix_entry *entry;
	int err;

	entry = &nn->irq_entries[vector_idx];

	snprintf(name, name_sz, format, nfp_net_name(nn));
	err = request_irq(entry->vector, handler, 0, name, nn);
	if (err) {
		nn_err(nn, "Failed to request IRQ %d (err=%d).\n",
		       entry->vector, err);
		return err;
	}
	nn_writeb(nn, ctrl_offset, entry->entry);
	nfp_net_irq_unmask(nn, entry->entry);

	return 0;
}

/**
 * nfp_net_aux_irq_free() - Free an auxiliary interrupt (LSC or EXN)
 * @nn:		NFP Network structure
 * @ctrl_offset: Control BAR offset where IRQ configuration should be written
 * @vector_idx:	Index of MSI-X vector used for this interrupt
 */
static void nfp_net_aux_irq_free(struct nfp_net *nn, u32 ctrl_offset,
				 unsigned int vector_idx)
{
	nn_writeb(nn, ctrl_offset, 0xff);
	nn_pci_flush(nn);
	free_irq(nn->irq_entries[vector_idx].vector, nn);
}

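/* TX kTLS offload helper.  While the socket's record state matches the
 * skb being sent, hand the firmware connection handle to the datapath so
 * the crypto can be done inline.  On a sequence number mismatch or a
 * pending resync, fall back to software encryption via tls_encrypt_skb()
 * and, when it looks like a TX was lost, request a resync from the stack.
 */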
struct sk_buff *
nfp_net_tls_tx(struct nfp_net_dp *dp, struct nfp_net_r_vector *r_vec,
	       struct sk_buff *skb, u64 *tls_handle, int *nr_frags)
{
#ifdef CONFIG_TLS_DEVICE
	struct nfp_net_tls_offload_ctx *ntls;
	struct sk_buff *nskb;
	bool resync_pending;
	u32 datalen, seq;

	if (likely(!dp->ktls_tx))
		return skb;
	if (!tls_is_skb_tx_device_offloaded(skb))
		return skb;

	datalen = skb->len - skb_tcp_all_headers(skb);
	seq = ntohl(tcp_hdr(skb)->seq);
	ntls = tls_driver_ctx(skb->sk, TLS_OFFLOAD_CTX_DIR_TX);
	resync_pending = tls_offload_tx_resync_pending(skb->sk);
	if (unlikely(resync_pending || ntls->next_seq != seq)) {
		/* Pure ACK out of order already */
		if (!datalen)
			return skb;

		u64_stats_update_begin(&r_vec->tx_sync);
		r_vec->tls_tx_fallback++;
		u64_stats_update_end(&r_vec->tx_sync);

		nskb = tls_encrypt_skb(skb);
		if (!nskb) {
			u64_stats_update_begin(&r_vec->tx_sync);
			r_vec->tls_tx_no_fallback++;
			u64_stats_update_end(&r_vec->tx_sync);
			return NULL;
		}
		/* encryption wasn't necessary */
		if (nskb == skb)
			return skb;
		/* we don't re-check ring space */
		if (unlikely(skb_is_nonlinear(nskb))) {
			nn_dp_warn(dp, "tls_encrypt_skb() produced fragmented frame\n");
			u64_stats_update_begin(&r_vec->tx_sync);
			r_vec->tx_errors++;
			u64_stats_update_end(&r_vec->tx_sync);
			dev_kfree_skb_any(nskb);
			return NULL;
		}

		/* jump forward, a TX may have gotten lost, need to sync TX */
		if (!resync_pending && seq - ntls->next_seq < U32_MAX / 4)
			tls_offload_tx_resync_request(nskb->sk, seq,
						      ntls->next_seq);

		*nr_frags = 0;
		return nskb;
	}

	if (datalen) {
		u64_stats_update_begin(&r_vec->tx_sync);
		if (!skb_is_gso(skb))
			r_vec->hw_tls_tx++;
		else
			r_vec->hw_tls_tx += skb_shinfo(skb)->gso_segs;
		u64_stats_update_end(&r_vec->tx_sync);
	}

	memcpy(tls_handle, ntls->fw_handle, sizeof(ntls->fw_handle));
	ntls->next_seq += datalen;
#endif
	return skb;
}

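/* Undo the TX sequence tracking advanced by nfp_net_tls_tx() for an skb
 * which ended up not being transmitted.
 */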
void nfp_net_tls_tx_undo(struct sk_buff *skb, u64 tls_handle)
{
#ifdef CONFIG_TLS_DEVICE
	struct nfp_net_tls_offload_ctx *ntls;
	u32 datalen, seq;

	if (!tls_handle)
		return;
	if (WARN_ON_ONCE(!tls_is_skb_tx_device_offloaded(skb)))
		return;

	datalen = skb->len - skb_tcp_all_headers(skb);
	seq = ntohl(tcp_hdr(skb)->seq);

	ntls = tls_driver_ctx(skb->sk, TLS_OFFLOAD_CTX_DIR_TX);
	if (ntls->next_seq == seq + datalen)
		ntls->next_seq = seq;
	else
		WARN_ON_ONCE(1);
#endif
}

static void nfp_net_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
	struct nfp_net *nn = netdev_priv(netdev);

	nn_warn(nn, "TX watchdog timeout on ring: %u\n", txqueue);
}

/* Receive processing */

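/* Packet-data portion of a freelist buffer: worst-case RX prepend (when
 * the offset is dynamic), Ethernet header, two VLAN tags and the MTU.
 */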
static unsigned int
nfp_net_calc_fl_bufsz_data(struct nfp_net_dp *dp)
{
	unsigned int fl_bufsz = 0;

	if (dp->rx_offset == NFP_NET_CFG_RX_OFFSET_DYNAMIC)
		fl_bufsz += NFP_NET_MAX_PREPEND;
	else
		fl_bufsz += dp->rx_offset;
	fl_bufsz += ETH_HLEN + VLAN_HLEN * 2 + dp->mtu;

	return fl_bufsz;
}

static unsigned int nfp_net_calc_fl_bufsz(struct nfp_net_dp *dp)
{
	unsigned int fl_bufsz;

	fl_bufsz = NFP_NET_RX_BUF_HEADROOM;
	fl_bufsz += dp->rx_dma_off;
	fl_bufsz += nfp_net_calc_fl_bufsz_data(dp);

	fl_bufsz = SKB_DATA_ALIGN(fl_bufsz);
	fl_bufsz += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	return fl_bufsz;
}

static unsigned int nfp_net_calc_fl_bufsz_xsk(struct nfp_net_dp *dp)
{
	unsigned int fl_bufsz;

	fl_bufsz = XDP_PACKET_HEADROOM;
	fl_bufsz += nfp_net_calc_fl_bufsz_data(dp);

	return fl_bufsz;
}

/* Setup and Configuration
 */

/**
 * nfp_net_vecs_init() - Assign IRQs and setup rvecs.
 * @nn:		NFP Network structure
 */
static void nfp_net_vecs_init(struct nfp_net *nn)
{
	int numa_node = dev_to_node(&nn->pdev->dev);
	struct nfp_net_r_vector *r_vec;
	unsigned int r;

	nn->lsc_handler = nfp_net_irq_lsc;
	nn->exn_handler = nfp_net_irq_exn;

	for (r = 0; r < nn->max_r_vecs; r++) {
		struct msix_entry *entry;

		entry = &nn->irq_entries[NFP_NET_NON_Q_VECTORS + r];

		r_vec = &nn->r_vecs[r];
		r_vec->nfp_net = nn;
		r_vec->irq_entry = entry->entry;
		r_vec->irq_vector = entry->vector;

		if (nn->dp.netdev) {
			r_vec->handler = nfp_net_irq_rxtx;
		} else {
			r_vec->handler = nfp_ctrl_irq_rxtx;

			__skb_queue_head_init(&r_vec->queue);
			spin_lock_init(&r_vec->lock);
			tasklet_setup(&r_vec->tasklet, nn->dp.ops->ctrl_poll);
			tasklet_disable(&r_vec->tasklet);
		}

		cpumask_set_cpu(cpumask_local_spread(r, numa_node), &r_vec->affinity_mask);
	}
}

static void
nfp_net_napi_add(struct nfp_net_dp *dp, struct nfp_net_r_vector *r_vec, int idx)
{
	if (dp->netdev)
		netif_napi_add(dp->netdev, &r_vec->napi,
			       nfp_net_has_xsk_pool_slow(dp, idx) ? dp->ops->xsk_poll : dp->ops->poll);
	else
		tasklet_enable(&r_vec->tasklet);
}

static void
nfp_net_napi_del(struct nfp_net_dp *dp, struct nfp_net_r_vector *r_vec)
{
	if (dp->netdev)
		netif_napi_del(&r_vec->napi);
	else
		tasklet_disable(&r_vec->tasklet);
}

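/* Distribute RX, TX and XDP TX rings across a ring vector and, when an
 * AF_XDP pool is (or was) bound to this queue index, attach or detach
 * the pool and re-register NAPI so the right poll callback is used.
 */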
static void
nfp_net_vector_assign_rings(struct nfp_net_dp *dp,
			    struct nfp_net_r_vector *r_vec, int idx)
{
	r_vec->rx_ring = idx < dp->num_rx_rings ? &dp->rx_rings[idx] : NULL;
	r_vec->tx_ring =
		idx < dp->num_stack_tx_rings ? &dp->tx_rings[idx] : NULL;

	r_vec->xdp_ring = idx < dp->num_tx_rings - dp->num_stack_tx_rings ?
		&dp->tx_rings[dp->num_stack_tx_rings + idx] : NULL;

	if (nfp_net_has_xsk_pool_slow(dp, idx) || r_vec->xsk_pool) {
		r_vec->xsk_pool = dp->xdp_prog ? dp->xsk_pools[idx] : NULL;

		if (r_vec->xsk_pool)
			xsk_pool_set_rxq_info(r_vec->xsk_pool,
					      &r_vec->rx_ring->xdp_rxq);

		nfp_net_napi_del(dp, r_vec);
		nfp_net_napi_add(dp, r_vec, idx);
	}
}

static int
nfp_net_prepare_vector(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
		       int idx)
{
	int err;

	nfp_net_napi_add(&nn->dp, r_vec, idx);

	snprintf(r_vec->name, sizeof(r_vec->name),
		 "%s-rxtx-%d", nfp_net_name(nn), idx);
	err = request_irq(r_vec->irq_vector, r_vec->handler, IRQF_NO_AUTOEN,
			  r_vec->name, r_vec);
	if (err) {
		nfp_net_napi_del(&nn->dp, r_vec);
		nn_err(nn, "Error requesting IRQ %d\n", r_vec->irq_vector);
		return err;
	}

	irq_update_affinity_hint(r_vec->irq_vector, &r_vec->affinity_mask);

	nn_dbg(nn, "RV%02d: irq=%03d/%03d\n", idx, r_vec->irq_vector,
	       r_vec->irq_entry);

	return 0;
}

static void
nfp_net_cleanup_vector(struct nfp_net *nn, struct nfp_net_r_vector *r_vec)
{
	irq_update_affinity_hint(r_vec->irq_vector, NULL);
	nfp_net_napi_del(&nn->dp, r_vec);
	free_irq(r_vec->irq_vector, r_vec);
}

|
|
|
/**
|
|
|
|
* nfp_net_rss_write_itbl() - Write RSS indirection table to device
|
|
|
|
* @nn: NFP Net device to reconfigure
|
|
|
|
*/
|
|
|
|
void nfp_net_rss_write_itbl(struct nfp_net *nn)
|
|
|
|
{
|
|
|
|
int i;
|
|
|
|
|
|
|
|
for (i = 0; i < NFP_NET_CFG_RSS_ITBL_SZ; i += 4)
|
|
|
|
nn_writel(nn, NFP_NET_CFG_RSS_ITBL + i,
|
|
|
|
get_unaligned_le32(nn->rss_itbl + i));
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* nfp_net_rss_write_key() - Write RSS hash key to device
|
|
|
|
* @nn: NFP Net device to reconfigure
|
|
|
|
*/
|
|
|
|
void nfp_net_rss_write_key(struct nfp_net *nn)
|
|
|
|
{
|
|
|
|
int i;
|
|
|
|
|
2017-03-08 08:57:01 -08:00
|
|
|
for (i = 0; i < nfp_net_rss_key_sz(nn); i += 4)
|
2015-12-01 14:55:22 +00:00
|
|
|
nn_writel(nn, NFP_NET_CFG_RSS_KEY + i,
|
|
|
|
get_unaligned_le32(nn->rss_key + i));
|
|
|
|
}

/**
 * nfp_net_coalesce_write_cfg() - Write irq coalescence configuration to HW
 * @nn: NFP Net device to reconfigure
 */
void nfp_net_coalesce_write_cfg(struct nfp_net *nn)
{
	u8 i;
	u32 factor;
	u32 value;

	/* Compute factor used to convert coalesce '_usecs' parameters to
	 * ME timestamp ticks. There are 16 ME clock cycles for each timestamp
	 * count.
	 */
	factor = nn->tlv_caps.me_freq_mhz / 16;

	/* copy RX interrupt coalesce parameters */
	value = (nn->rx_coalesce_max_frames << 16) |
		(factor * nn->rx_coalesce_usecs);
	for (i = 0; i < nn->dp.num_rx_rings; i++)
		nn_writel(nn, NFP_NET_CFG_RXR_IRQ_MOD(i), value);

	/* copy TX interrupt coalesce parameters */
	value = (nn->tx_coalesce_max_frames << 16) |
		(factor * nn->tx_coalesce_usecs);
	for (i = 0; i < nn->dp.num_tx_rings; i++)
		nn_writel(nn, NFP_NET_CFG_TXR_IRQ_MOD(i), value);
}

/**
 * nfp_net_write_mac_addr() - Write mac address to the device control BAR
 * @nn:   NFP Net device to reconfigure
 * @addr: MAC address to write
 *
 * Writes the MAC address from the netdev to the device control BAR. Does not
 * perform the required reconfig. We do a bit of byte swapping dance because
 * firmware is LE.
 */
static void nfp_net_write_mac_addr(struct nfp_net *nn, const u8 *addr)
{
	nn_writel(nn, NFP_NET_CFG_MACADDR + 0, get_unaligned_be32(addr));
	nn_writew(nn, NFP_NET_CFG_MACADDR + 6, get_unaligned_be16(addr + 4));
}

/**
 * nfp_net_clear_config_and_disable() - Clear control BAR and disable NFP
 * @nn: NFP Net device to reconfigure
 *
 * Warning: must be fully idempotent.
 */
static void nfp_net_clear_config_and_disable(struct nfp_net *nn)
{
	u32 new_ctrl, new_ctrl_w1, update;
	unsigned int r;
	int err;

	new_ctrl = nn->dp.ctrl;
	new_ctrl &= ~NFP_NET_CFG_CTRL_ENABLE;
	update = NFP_NET_CFG_UPDATE_GEN;
	update |= NFP_NET_CFG_UPDATE_MSIX;
	update |= NFP_NET_CFG_UPDATE_RING;

	if (nn->cap & NFP_NET_CFG_CTRL_RINGCFG)
		new_ctrl &= ~NFP_NET_CFG_CTRL_RINGCFG;

	if (!(nn->cap_w1 & NFP_NET_CFG_CTRL_FREELIST_EN)) {
		nn_writeq(nn, NFP_NET_CFG_TXRS_ENABLE, 0);
		nn_writeq(nn, NFP_NET_CFG_RXRS_ENABLE, 0);
	}

	nn_writel(nn, NFP_NET_CFG_CTRL, new_ctrl);
	err = nfp_net_reconfig(nn, update);
	if (err)
		nn_err(nn, "Could not disable device: %d\n", err);

	if (nn->cap_w1 & NFP_NET_CFG_CTRL_FREELIST_EN) {
		new_ctrl_w1 = nn->dp.ctrl_w1;
		new_ctrl_w1 &= ~NFP_NET_CFG_CTRL_FREELIST_EN;
		nn_writeq(nn, NFP_NET_CFG_TXRS_ENABLE, 0);
		nn_writeq(nn, NFP_NET_CFG_RXRS_ENABLE, 0);

		nn_writel(nn, NFP_NET_CFG_CTRL_WORD1, new_ctrl_w1);
		err = nfp_net_reconfig(nn, update);
		if (err)
			nn_err(nn, "Could not disable FREELIST_EN: %d\n", err);
		nn->dp.ctrl_w1 = new_ctrl_w1;
	}

	for (r = 0; r < nn->dp.num_rx_rings; r++) {
		nfp_net_rx_ring_reset(&nn->dp.rx_rings[r]);
		if (nfp_net_has_xsk_pool_slow(&nn->dp, nn->dp.rx_rings[r].idx))
			nfp_net_xsk_rx_bufs_free(&nn->dp.rx_rings[r]);
	}
	for (r = 0; r < nn->dp.num_tx_rings; r++)
		nfp_net_tx_ring_reset(&nn->dp, &nn->dp.tx_rings[r]);
	for (r = 0; r < nn->dp.num_r_vecs; r++)
		nfp_net_vec_clear_ring_data(nn, r);

	nn->dp.ctrl = new_ctrl;
}

/**
 * nfp_net_set_config_and_enable() - Write control BAR and enable NFP
 * @nn: NFP Net device to reconfigure
 */
static int nfp_net_set_config_and_enable(struct nfp_net *nn)
{
	u32 bufsz, new_ctrl, new_ctrl_w1, update = 0;
	unsigned int r;
	int err;

	new_ctrl = nn->dp.ctrl;
	new_ctrl_w1 = nn->dp.ctrl_w1;

	if (nn->dp.ctrl & NFP_NET_CFG_CTRL_RSS_ANY) {
		nfp_net_rss_write_key(nn);
		nfp_net_rss_write_itbl(nn);
		nn_writel(nn, NFP_NET_CFG_RSS_CTRL, nn->rss_cfg);
		update |= NFP_NET_CFG_UPDATE_RSS;
	}

	if (nn->dp.ctrl & NFP_NET_CFG_CTRL_IRQMOD) {
		nfp_net_coalesce_write_cfg(nn);
		update |= NFP_NET_CFG_UPDATE_IRQMOD;
	}

	for (r = 0; r < nn->dp.num_tx_rings; r++)
		nfp_net_tx_ring_hw_cfg_write(nn, &nn->dp.tx_rings[r], r);
	for (r = 0; r < nn->dp.num_rx_rings; r++)
		nfp_net_rx_ring_hw_cfg_write(nn, &nn->dp.rx_rings[r], r);

	nn_writeq(nn, NFP_NET_CFG_TXRS_ENABLE,
		  U64_MAX >> (64 - nn->dp.num_tx_rings));

	nn_writeq(nn, NFP_NET_CFG_RXRS_ENABLE,
		  U64_MAX >> (64 - nn->dp.num_rx_rings));

	if (nn->dp.netdev)
		nfp_net_write_mac_addr(nn, nn->dp.netdev->dev_addr);

	nn_writel(nn, NFP_NET_CFG_MTU, nn->dp.mtu);

	bufsz = nn->dp.fl_bufsz - nn->dp.rx_dma_off - NFP_NET_RX_BUF_NON_DATA;
	nn_writel(nn, NFP_NET_CFG_FLBUFSZ, bufsz);

	/* Enable device
	 * Step 1: Replace the CTRL_ENABLE by NFP_NET_CFG_CTRL_FREELIST_EN if
	 * FREELIST_EN exists.
	 */
	if (nn->cap_w1 & NFP_NET_CFG_CTRL_FREELIST_EN)
		new_ctrl_w1 |= NFP_NET_CFG_CTRL_FREELIST_EN;
	else
		new_ctrl |= NFP_NET_CFG_CTRL_ENABLE;
	update |= NFP_NET_CFG_UPDATE_GEN;
	update |= NFP_NET_CFG_UPDATE_MSIX;
	update |= NFP_NET_CFG_UPDATE_RING;
	if (nn->cap & NFP_NET_CFG_CTRL_RINGCFG)
		new_ctrl |= NFP_NET_CFG_CTRL_RINGCFG;

	/* Step 2: Send the configuration and write the freelist.
	 * - The freelist only needs to be written once.
	 */
	nn_writel(nn, NFP_NET_CFG_CTRL, new_ctrl);
	nn_writel(nn, NFP_NET_CFG_CTRL_WORD1, new_ctrl_w1);
	err = nfp_net_reconfig(nn, update);
	if (err) {
		nfp_net_clear_config_and_disable(nn);
		return err;
	}

	nn->dp.ctrl = new_ctrl;
	nn->dp.ctrl_w1 = new_ctrl_w1;

	for (r = 0; r < nn->dp.num_rx_rings; r++)
		nfp_net_rx_ring_fill_freelist(&nn->dp, &nn->dp.rx_rings[r]);

	/* Step 3: Do the NFP_NET_CFG_CTRL_ENABLE. Send the configuration.
	 */
	if (nn->cap_w1 & NFP_NET_CFG_CTRL_FREELIST_EN) {
		new_ctrl |= NFP_NET_CFG_CTRL_ENABLE;
		nn_writel(nn, NFP_NET_CFG_CTRL, new_ctrl);

		err = nfp_net_reconfig(nn, update);
		if (err) {
			nfp_net_clear_config_and_disable(nn);
			return err;
		}
		nn->dp.ctrl = new_ctrl;
	}

	return 0;
}
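
/* The ring enable writes in nfp_net_set_config_and_enable() build their
 * masks as U64_MAX >> (64 - n), i.e. the low n bits set; for example with
 * four TX rings the value written to NFP_NET_CFG_TXRS_ENABLE is 0xf,
 * enabling rings 0..3.
 */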

/**
 * nfp_net_close_stack() - Quiesce the stack (part of close)
 * @nn: NFP Net device to reconfigure
 */
static void nfp_net_close_stack(struct nfp_net *nn)
{
	struct nfp_net_r_vector *r_vec;
	unsigned int r;

	disable_irq(nn->irq_entries[NFP_NET_IRQ_LSC_IDX].vector);
	netif_carrier_off(nn->dp.netdev);
	nn->link_up = false;

	for (r = 0; r < nn->dp.num_r_vecs; r++) {
		r_vec = &nn->r_vecs[r];

		disable_irq(r_vec->irq_vector);
		napi_disable(&r_vec->napi);

		if (r_vec->rx_ring)
			cancel_work_sync(&r_vec->rx_dim.work);

		if (r_vec->tx_ring)
			cancel_work_sync(&r_vec->tx_dim.work);
	}

	netif_tx_disable(nn->dp.netdev);
}

/**
 * nfp_net_close_free_all() - Free all runtime resources
 * @nn: NFP Net device to reconfigure
 */
static void nfp_net_close_free_all(struct nfp_net *nn)
{
	unsigned int r;

	nfp_net_tx_rings_free(&nn->dp);
	nfp_net_rx_rings_free(&nn->dp);

	for (r = 0; r < nn->dp.num_r_vecs; r++)
		nfp_net_cleanup_vector(nn, &nn->r_vecs[r]);

	nfp_net_aux_irq_free(nn, NFP_NET_CFG_LSC, NFP_NET_IRQ_LSC_IDX);
	nfp_net_aux_irq_free(nn, NFP_NET_CFG_EXN, NFP_NET_IRQ_EXN_IDX);
}

/**
 * nfp_net_netdev_close() - Called when the device is downed
 * @netdev: netdev structure
 */
static int nfp_net_netdev_close(struct net_device *netdev)
{
	struct nfp_net *nn = netdev_priv(netdev);

	/* Step 1: Disable RX and TX rings from the Linux kernel perspective
	 */
	nfp_net_close_stack(nn);

	/* Step 2: Tell NFP
	 */
	if (nn->cap_w1 & NFP_NET_CFG_CTRL_MCAST_FILTER)
		__dev_mc_unsync(netdev, nfp_net_mc_unsync);

	nfp_net_clear_config_and_disable(nn);
	nfp_port_configure(netdev, false);

	/* Step 3: Free resources
	 */
	nfp_net_close_free_all(nn);

	nn_dbg(nn, "%s down", netdev->name);
	return 0;
}

void nfp_ctrl_close(struct nfp_net *nn)
{
	int r;

	rtnl_lock();

	for (r = 0; r < nn->dp.num_r_vecs; r++) {
		disable_irq(nn->r_vecs[r].irq_vector);
		tasklet_disable(&nn->r_vecs[r].tasklet);
	}

	nfp_net_clear_config_and_disable(nn);

	nfp_net_close_free_all(nn);

	rtnl_unlock();
}

static void nfp_net_rx_dim_work(struct work_struct *work)
{
	struct nfp_net_r_vector *r_vec;
	unsigned int factor, value;
	struct dim_cq_moder moder;
	struct nfp_net *nn;
	struct dim *dim;

	dim = container_of(work, struct dim, work);
	moder = net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
	r_vec = container_of(dim, struct nfp_net_r_vector, rx_dim);
	nn = r_vec->nfp_net;

	/* Compute factor used to convert coalesce '_usecs' parameters to
	 * ME timestamp ticks. There are 16 ME clock cycles for each timestamp
	 * count.
	 */
	factor = nn->tlv_caps.me_freq_mhz / 16;
	if (nfp_net_coalesce_para_check(factor * moder.usec) ||
	    nfp_net_coalesce_para_check(moder.pkts))
		return;

	/* copy RX interrupt coalesce parameters */
	value = (moder.pkts << 16) | (factor * moder.usec);
	nn_writel(nn, NFP_NET_CFG_RXR_IRQ_MOD(r_vec->rx_ring->idx), value);
	(void)nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_IRQMOD);

	dim->state = DIM_START_MEASURE;
}
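
/* nfp_net_rx_dim_work() is scheduled by the net_dim library whenever it
 * selects a new RX moderation profile; the handler converts the profile's
 * usec value to ME ticks with the same factor as
 * nfp_net_coalesce_write_cfg() and reprograms only the ring owned by this
 * vector before re-arming DIM with DIM_START_MEASURE.
 */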

static void nfp_net_tx_dim_work(struct work_struct *work)
{
	struct nfp_net_r_vector *r_vec;
	unsigned int factor, value;
	struct dim_cq_moder moder;
	struct nfp_net *nn;
	struct dim *dim;

	dim = container_of(work, struct dim, work);
	moder = net_dim_get_tx_moderation(dim->mode, dim->profile_ix);
	r_vec = container_of(dim, struct nfp_net_r_vector, tx_dim);
	nn = r_vec->nfp_net;

	/* Compute factor used to convert coalesce '_usecs' parameters to
	 * ME timestamp ticks. There are 16 ME clock cycles for each timestamp
	 * count.
	 */
	factor = nn->tlv_caps.me_freq_mhz / 16;
	if (nfp_net_coalesce_para_check(factor * moder.usec) ||
	    nfp_net_coalesce_para_check(moder.pkts))
		return;

	/* copy TX interrupt coalesce parameters */
	value = (moder.pkts << 16) | (factor * moder.usec);
	nn_writel(nn, NFP_NET_CFG_TXR_IRQ_MOD(r_vec->tx_ring->idx), value);
	(void)nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_IRQMOD);

	dim->state = DIM_START_MEASURE;
}

/**
 * nfp_net_open_stack() - Start the device from stack's perspective
 * @nn: NFP Net device to reconfigure
 */
static void nfp_net_open_stack(struct nfp_net *nn)
{
	struct nfp_net_r_vector *r_vec;
	unsigned int r;

	for (r = 0; r < nn->dp.num_r_vecs; r++) {
		r_vec = &nn->r_vecs[r];

		if (r_vec->rx_ring) {
			INIT_WORK(&r_vec->rx_dim.work, nfp_net_rx_dim_work);
			r_vec->rx_dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
		}

		if (r_vec->tx_ring) {
			INIT_WORK(&r_vec->tx_dim.work, nfp_net_tx_dim_work);
			r_vec->tx_dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
		}

		napi_enable(&r_vec->napi);
		enable_irq(r_vec->irq_vector);
	}

	netif_tx_wake_all_queues(nn->dp.netdev);

	enable_irq(nn->irq_entries[NFP_NET_IRQ_LSC_IDX].vector);
	nfp_net_read_link_status(nn);
}

static int nfp_net_open_alloc_all(struct nfp_net *nn)
{
	int err, r;

	err = nfp_net_aux_irq_request(nn, NFP_NET_CFG_EXN, "%s-exn",
				      nn->exn_name, sizeof(nn->exn_name),
				      NFP_NET_IRQ_EXN_IDX, nn->exn_handler);
	if (err)
		return err;
	err = nfp_net_aux_irq_request(nn, NFP_NET_CFG_LSC, "%s-lsc",
				      nn->lsc_name, sizeof(nn->lsc_name),
				      NFP_NET_IRQ_LSC_IDX, nn->lsc_handler);
	if (err)
		goto err_free_exn;
	disable_irq(nn->irq_entries[NFP_NET_IRQ_LSC_IDX].vector);

	for (r = 0; r < nn->dp.num_r_vecs; r++) {
		err = nfp_net_prepare_vector(nn, &nn->r_vecs[r], r);
		if (err)
			goto err_cleanup_vec_p;
	}

	err = nfp_net_rx_rings_prepare(nn, &nn->dp);
	if (err)
		goto err_cleanup_vec;

	err = nfp_net_tx_rings_prepare(nn, &nn->dp);
	if (err)
		goto err_free_rx_rings;

	for (r = 0; r < nn->max_r_vecs; r++)
		nfp_net_vector_assign_rings(&nn->dp, &nn->r_vecs[r], r);

	return 0;

err_free_rx_rings:
	nfp_net_rx_rings_free(&nn->dp);
err_cleanup_vec:
	r = nn->dp.num_r_vecs;
err_cleanup_vec_p:
	while (r--)
		nfp_net_cleanup_vector(nn, &nn->r_vecs[r]);
	nfp_net_aux_irq_free(nn, NFP_NET_CFG_LSC, NFP_NET_IRQ_LSC_IDX);
err_free_exn:
	nfp_net_aux_irq_free(nn, NFP_NET_CFG_EXN, NFP_NET_IRQ_EXN_IDX);
	return err;
}

static int nfp_net_netdev_open(struct net_device *netdev)
{
	struct nfp_net *nn = netdev_priv(netdev);
	int err;

	/* Step 1: Allocate resources for rings and the like
	 * - Request interrupts
	 * - Allocate RX and TX ring resources
	 * - Setup initial RSS table
	 */
	err = nfp_net_open_alloc_all(nn);
	if (err)
		return err;

	err = netif_set_real_num_tx_queues(netdev, nn->dp.num_stack_tx_rings);
	if (err)
		goto err_free_all;

	err = netif_set_real_num_rx_queues(netdev, nn->dp.num_rx_rings);
	if (err)
		goto err_free_all;

	/* Step 2: Configure the NFP
	 * - Ifup the physical interface if it exists
	 * - Enable rings from 0 to tx_rings/rx_rings - 1.
	 * - Write MAC address (in case it changed)
	 * - Set the MTU
	 * - Set the Freelist buffer size
	 * - Enable the FW
	 */
	err = nfp_port_configure(netdev, true);
	if (err)
		goto err_free_all;

	err = nfp_net_set_config_and_enable(nn);
	if (err)
		goto err_port_disable;

	/* Step 3: Enable for kernel
	 * - put some freelist descriptors on each RX ring
	 * - enable NAPI on each ring
	 * - enable all TX queues
	 * - set link state
	 */
	nfp_net_open_stack(nn);

	return 0;

err_port_disable:
	nfp_port_configure(netdev, false);
err_free_all:
	nfp_net_close_free_all(nn);
	return err;
}
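
/* Note the unwind order in nfp_net_netdev_open(): a failure after
 * nfp_port_configure(netdev, true) must first undo the port state
 * (err_port_disable) before freeing rings and IRQs, mirroring the steps of
 * nfp_net_netdev_close() in reverse.
 */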

int nfp_ctrl_open(struct nfp_net *nn)
{
	int err, r;

	/* ring dumping depends on vNICs being opened/closed under rtnl */
	rtnl_lock();

	err = nfp_net_open_alloc_all(nn);
	if (err)
		goto err_unlock;

	err = nfp_net_set_config_and_enable(nn);
	if (err)
		goto err_free_all;

	for (r = 0; r < nn->dp.num_r_vecs; r++)
		enable_irq(nn->r_vecs[r].irq_vector);

	rtnl_unlock();

	return 0;

err_free_all:
	nfp_net_close_free_all(nn);
err_unlock:
	rtnl_unlock();
	return err;
}

int nfp_net_sched_mbox_amsg_work(struct nfp_net *nn, u32 cmd, const void *data, size_t len,
				 int (*cb)(struct nfp_net *, struct nfp_mbox_amsg_entry *))
{
	struct nfp_mbox_amsg_entry *entry;

	entry = kmalloc(sizeof(*entry) + len, GFP_ATOMIC);
	if (!entry)
		return -ENOMEM;

	memcpy(entry->msg, data, len);
	entry->cmd = cmd;
	entry->cfg = cb;

	spin_lock_bh(&nn->mbox_amsg.lock);
	list_add_tail(&entry->list, &nn->mbox_amsg.list);
	spin_unlock_bh(&nn->mbox_amsg.lock);

	schedule_work(&nn->mbox_amsg.work);

	return 0;
}
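
/* The GFP_ATOMIC allocation above is presumably chosen because this helper
 * can be invoked from non-sleeping contexts, e.g. the address list sync
 * done under the netdev address lock in nfp_net_set_rx_mode(); the actual
 * mailbox traffic is deferred to nfp_net_mbox_amsg_work() below, which
 * runs from process context.
 */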

static void nfp_net_mbox_amsg_work(struct work_struct *work)
{
	struct nfp_net *nn = container_of(work, struct nfp_net, mbox_amsg.work);
	struct nfp_mbox_amsg_entry *entry, *tmp;
	struct list_head tmp_list;

	INIT_LIST_HEAD(&tmp_list);

	spin_lock_bh(&nn->mbox_amsg.lock);
	list_splice_init(&nn->mbox_amsg.list, &tmp_list);
	spin_unlock_bh(&nn->mbox_amsg.lock);

	list_for_each_entry_safe(entry, tmp, &tmp_list, list) {
		int err = entry->cfg(nn, entry);

		if (err)
			nn_err(nn, "Config cmd %d to HW failed %d.\n", entry->cmd, err);

		list_del(&entry->list);
		kfree(entry);
	}
}

static int nfp_net_mc_cfg(struct nfp_net *nn, struct nfp_mbox_amsg_entry *entry)
{
	unsigned char *addr = entry->msg;
	int ret;

	ret = nfp_net_mbox_lock(nn, NFP_NET_CFG_MULTICAST_SZ);
	if (ret)
		return ret;

	nn_writel(nn, nn->tlv_caps.mbox_off + NFP_NET_CFG_MULTICAST_MAC_HI,
		  get_unaligned_be32(addr));
	nn_writew(nn, nn->tlv_caps.mbox_off + NFP_NET_CFG_MULTICAST_MAC_LO,
		  get_unaligned_be16(addr + 4));

	return nfp_net_mbox_reconfig_and_unlock(nn, entry->cmd);
}
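
/* A 6-byte multicast address is pushed through the mailbox as a 32-bit
 * high word (bytes 0-3) plus a 16-bit low word (bytes 4-5), the same split
 * nfp_net_write_mac_addr() uses for the control BAR MAC registers.
 */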

static int nfp_net_mc_sync(struct net_device *netdev, const unsigned char *addr)
{
	struct nfp_net *nn = netdev_priv(netdev);

	if (netdev_mc_count(netdev) > NFP_NET_CFG_MAC_MC_MAX) {
		nn_err(nn, "Requested number of MC addresses (%d) exceeds maximum (%d).\n",
		       netdev_mc_count(netdev), NFP_NET_CFG_MAC_MC_MAX);
		return -EINVAL;
	}

	return nfp_net_sched_mbox_amsg_work(nn, NFP_NET_CFG_MBOX_CMD_MULTICAST_ADD, addr,
					    NFP_NET_CFG_MULTICAST_SZ, nfp_net_mc_cfg);
}

static int nfp_net_mc_unsync(struct net_device *netdev, const unsigned char *addr)
{
	struct nfp_net *nn = netdev_priv(netdev);

	return nfp_net_sched_mbox_amsg_work(nn, NFP_NET_CFG_MBOX_CMD_MULTICAST_DEL, addr,
					    NFP_NET_CFG_MULTICAST_SZ, nfp_net_mc_cfg);
}

static void nfp_net_set_rx_mode(struct net_device *netdev)
{
	struct nfp_net *nn = netdev_priv(netdev);
	u32 new_ctrl, new_ctrl_w1;

	new_ctrl = nn->dp.ctrl;
	new_ctrl_w1 = nn->dp.ctrl_w1;

	if (!netdev_mc_empty(netdev) || netdev->flags & IFF_ALLMULTI)
		new_ctrl |= nn->cap & NFP_NET_CFG_CTRL_L2MC;
	else
		new_ctrl &= ~NFP_NET_CFG_CTRL_L2MC;

	if (netdev->flags & IFF_ALLMULTI)
		new_ctrl_w1 &= ~NFP_NET_CFG_CTRL_MCAST_FILTER;
	else
		new_ctrl_w1 |= nn->cap_w1 & NFP_NET_CFG_CTRL_MCAST_FILTER;

	if (netdev->flags & IFF_PROMISC) {
		if (nn->cap & NFP_NET_CFG_CTRL_PROMISC)
			new_ctrl |= NFP_NET_CFG_CTRL_PROMISC;
		else
			nn_warn(nn, "FW does not support promiscuous mode\n");
	} else {
		new_ctrl &= ~NFP_NET_CFG_CTRL_PROMISC;
	}

	if ((nn->cap_w1 & NFP_NET_CFG_CTRL_MCAST_FILTER) &&
	    __dev_mc_sync(netdev, nfp_net_mc_sync, nfp_net_mc_unsync))
		netdev_err(netdev, "Sync mc address failed\n");

	if (new_ctrl == nn->dp.ctrl && new_ctrl_w1 == nn->dp.ctrl_w1)
		return;

	if (new_ctrl != nn->dp.ctrl)
		nn_writel(nn, NFP_NET_CFG_CTRL, new_ctrl);
	if (new_ctrl_w1 != nn->dp.ctrl_w1)
		nn_writel(nn, NFP_NET_CFG_CTRL_WORD1, new_ctrl_w1);
	nfp_net_reconfig_post(nn, NFP_NET_CFG_UPDATE_GEN);

	nn->dp.ctrl = new_ctrl;
	nn->dp.ctrl_w1 = new_ctrl_w1;
}

static void nfp_net_rss_init_itbl(struct nfp_net *nn)
{
	int i;

	for (i = 0; i < sizeof(nn->rss_itbl); i++)
		nn->rss_itbl[i] =
			ethtool_rxfh_indir_default(i, nn->dp.num_rx_rings);
}

static void nfp_net_dp_swap(struct nfp_net *nn, struct nfp_net_dp *dp)
{
	struct nfp_net_dp new_dp = *dp;

	*dp = nn->dp;
	nn->dp = new_dp;

	WRITE_ONCE(nn->dp.netdev->mtu, new_dp.mtu);

	if (!netif_is_rxfh_configured(nn->dp.netdev))
		nfp_net_rss_init_itbl(nn);
}
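
/* nfp_net_dp_swap() exchanges the whole datapath state in one shot: on
 * return @dp holds the previous parameters (so the caller can tear down
 * the old rings) while nn->dp holds the new ones.
 */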

static int nfp_net_dp_swap_enable(struct nfp_net *nn, struct nfp_net_dp *dp)
{
	unsigned int r;
	int err;

	nfp_net_dp_swap(nn, dp);

	for (r = 0; r < nn->max_r_vecs; r++)
		nfp_net_vector_assign_rings(&nn->dp, &nn->r_vecs[r], r);

	err = netif_set_real_num_queues(nn->dp.netdev,
					nn->dp.num_stack_tx_rings,
					nn->dp.num_rx_rings);
	if (err)
		return err;

	return nfp_net_set_config_and_enable(nn);
}

struct nfp_net_dp *nfp_net_clone_dp(struct nfp_net *nn)
{
	struct nfp_net_dp *new;

	new = kmalloc(sizeof(*new), GFP_KERNEL);
	if (!new)
		return NULL;

	*new = nn->dp;

	new->xsk_pools = kmemdup(new->xsk_pools,
				 array_size(nn->max_r_vecs,
					    sizeof(new->xsk_pools)),
				 GFP_KERNEL);
	if (!new->xsk_pools) {
		kfree(new);
		return NULL;
	}

	/* Clear things which need to be recomputed */
	new->fl_bufsz = 0;
	new->tx_rings = NULL;
	new->rx_rings = NULL;
	new->num_r_vecs = 0;
	new->num_stack_tx_rings = 0;
	new->txrwb = NULL;
	new->txrwb_dma = 0;

	return new;
}

static void nfp_net_free_dp(struct nfp_net_dp *dp)
{
	kfree(dp->xsk_pools);
	kfree(dp);
}

static int
nfp_net_check_config(struct nfp_net *nn, struct nfp_net_dp *dp,
		     struct netlink_ext_ack *extack)
{
	unsigned int r, xsk_min_fl_bufsz;

	/* XDP-enabled tests */
	if (!dp->xdp_prog)
		return 0;
	if (dp->fl_bufsz > PAGE_SIZE) {
		NL_SET_ERR_MSG_MOD(extack, "MTU too large w/ XDP enabled");
		return -EINVAL;
	}
	if (dp->num_tx_rings > nn->max_tx_rings) {
		NL_SET_ERR_MSG_MOD(extack, "Insufficient number of TX rings w/ XDP enabled");
		return -EINVAL;
	}

	xsk_min_fl_bufsz = nfp_net_calc_fl_bufsz_xsk(dp);
	for (r = 0; r < nn->max_r_vecs; r++) {
		if (!dp->xsk_pools[r])
			continue;

		if (xsk_pool_get_rx_frame_size(dp->xsk_pools[r]) < xsk_min_fl_bufsz) {
			NL_SET_ERR_MSG_MOD(extack,
					   "XSK buffer pool chunk size too small");
			return -EINVAL;
		}
	}

	return 0;
}

int nfp_net_ring_reconfig(struct nfp_net *nn, struct nfp_net_dp *dp,
			  struct netlink_ext_ack *extack)
{
	int r, err;

	dp->fl_bufsz = nfp_net_calc_fl_bufsz(dp);

	dp->num_stack_tx_rings = dp->num_tx_rings;
	if (dp->xdp_prog)
		dp->num_stack_tx_rings -= dp->num_rx_rings;

	dp->num_r_vecs = max(dp->num_rx_rings, dp->num_stack_tx_rings);

	err = nfp_net_check_config(nn, dp, extack);
	if (err)
		goto exit_free_dp;

	if (!netif_running(dp->netdev)) {
		nfp_net_dp_swap(nn, dp);
		err = 0;
		goto exit_free_dp;
	}

	/* Prepare new rings */
	for (r = nn->dp.num_r_vecs; r < dp->num_r_vecs; r++) {
		err = nfp_net_prepare_vector(nn, &nn->r_vecs[r], r);
		if (err) {
			dp->num_r_vecs = r;
			goto err_cleanup_vecs;
		}
	}

	err = nfp_net_rx_rings_prepare(nn, dp);
	if (err)
		goto err_cleanup_vecs;

	err = nfp_net_tx_rings_prepare(nn, dp);
	if (err)
		goto err_free_rx;

	/* Stop device, swap in new rings, try to start the firmware */
	nfp_net_close_stack(nn);
	nfp_net_clear_config_and_disable(nn);

	err = nfp_net_dp_swap_enable(nn, dp);
	if (err) {
		int err2;

		nfp_net_clear_config_and_disable(nn);

		/* Try with old configuration and old rings */
		err2 = nfp_net_dp_swap_enable(nn, dp);
		if (err2)
			nn_err(nn, "Can't restore ring config - FW communication failed (%d,%d)\n",
			       err, err2);
	}
	for (r = dp->num_r_vecs - 1; r >= nn->dp.num_r_vecs; r--)
		nfp_net_cleanup_vector(nn, &nn->r_vecs[r]);

	nfp_net_rx_rings_free(dp);
	nfp_net_tx_rings_free(dp);

	nfp_net_open_stack(nn);
exit_free_dp:
	nfp_net_free_dp(dp);

	return err;

err_free_rx:
	nfp_net_rx_rings_free(dp);
err_cleanup_vecs:
	for (r = dp->num_r_vecs - 1; r >= nn->dp.num_r_vecs; r--)
		nfp_net_cleanup_vector(nn, &nn->r_vecs[r]);
	nfp_net_free_dp(dp);
	return err;
}
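
/* nfp_net_ring_reconfig() consumes @dp: every exit path frees the clone
 * via nfp_net_free_dp(), so callers allocate with nfp_net_clone_dp(),
 * tweak the fields they care about and hand the clone off without freeing
 * it themselves (see nfp_net_change_mtu() below for the pattern).
 */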

static int nfp_net_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct nfp_net *nn = netdev_priv(netdev);
	struct nfp_net_dp *dp;
	int err;

	err = nfp_app_check_mtu(nn->app, netdev, new_mtu);
	if (err)
		return err;

	dp = nfp_net_clone_dp(nn);
	if (!dp)
		return -ENOMEM;

	dp->mtu = new_mtu;

	return nfp_net_ring_reconfig(nn, dp, NULL);
}

static int
nfp_net_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
	const u32 cmd = NFP_NET_CFG_MBOX_CMD_CTAG_FILTER_ADD;
	struct nfp_net *nn = netdev_priv(netdev);
	int err;

	/* Priority tagged packets with vlan id 0 are processed by the
	 * NFP as untagged packets
	 */
	if (!vid)
		return 0;

	err = nfp_net_mbox_lock(nn, NFP_NET_CFG_VLAN_FILTER_SZ);
	if (err)
		return err;

	nn_writew(nn, nn->tlv_caps.mbox_off + NFP_NET_CFG_VLAN_FILTER_VID, vid);
	nn_writew(nn, nn->tlv_caps.mbox_off + NFP_NET_CFG_VLAN_FILTER_PROTO,
		  ETH_P_8021Q);

	return nfp_net_mbox_reconfig_and_unlock(nn, cmd);
}

static int
nfp_net_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
	const u32 cmd = NFP_NET_CFG_MBOX_CMD_CTAG_FILTER_KILL;
	struct nfp_net *nn = netdev_priv(netdev);
	int err;

	/* Priority tagged packets with vlan id 0 are processed by the
	 * NFP as untagged packets
	 */
	if (!vid)
		return 0;

	err = nfp_net_mbox_lock(nn, NFP_NET_CFG_VLAN_FILTER_SZ);
	if (err)
		return err;

	nn_writew(nn, nn->tlv_caps.mbox_off + NFP_NET_CFG_VLAN_FILTER_VID, vid);
	nn_writew(nn, nn->tlv_caps.mbox_off + NFP_NET_CFG_VLAN_FILTER_PROTO,
		  ETH_P_8021Q);

	return nfp_net_mbox_reconfig_and_unlock(nn, cmd);
}

static void
nfp_net_fs_fill_v4(struct nfp_net *nn, struct nfp_fs_entry *entry, u32 op, u32 *addr)
{
	unsigned int i;

	union {
		struct {
			__be16 loc;
			u8 k_proto, m_proto;
			__be32 k_sip, m_sip, k_dip, m_dip;
			__be16 k_sport, m_sport, k_dport, m_dport;
		};
		__be32 val[7];
	} v4_rule;

	nn_writel(nn, *addr, op);
	*addr += sizeof(u32);

	v4_rule.loc = cpu_to_be16(entry->loc);
	v4_rule.k_proto = entry->key.l4_proto;
	v4_rule.m_proto = entry->msk.l4_proto;
	v4_rule.k_sip = entry->key.sip4;
	v4_rule.m_sip = entry->msk.sip4;
	v4_rule.k_dip = entry->key.dip4;
	v4_rule.m_dip = entry->msk.dip4;
	v4_rule.k_sport = entry->key.sport;
	v4_rule.m_sport = entry->msk.sport;
	v4_rule.k_dport = entry->key.dport;
	v4_rule.m_dport = entry->msk.dport;

	for (i = 0; i < ARRAY_SIZE(v4_rule.val); i++, *addr += sizeof(__be32))
		nn_writel(nn, *addr, be32_to_cpu(v4_rule.val[i]));
}

static void
nfp_net_fs_fill_v6(struct nfp_net *nn, struct nfp_fs_entry *entry, u32 op, u32 *addr)
{
	unsigned int i;

	union {
		struct {
			__be16 loc;
			u8 k_proto, m_proto;
			__be32 k_sip[4], m_sip[4], k_dip[4], m_dip[4];
			__be16 k_sport, m_sport, k_dport, m_dport;
		};
		__be32 val[19];
	} v6_rule;

	nn_writel(nn, *addr, op);
	*addr += sizeof(u32);

	v6_rule.loc = cpu_to_be16(entry->loc);
	v6_rule.k_proto = entry->key.l4_proto;
	v6_rule.m_proto = entry->msk.l4_proto;
	for (i = 0; i < 4; i++) {
		v6_rule.k_sip[i] = entry->key.sip6[i];
		v6_rule.m_sip[i] = entry->msk.sip6[i];
		v6_rule.k_dip[i] = entry->key.dip6[i];
		v6_rule.m_dip[i] = entry->msk.dip6[i];
	}
	v6_rule.k_sport = entry->key.sport;
	v6_rule.m_sport = entry->msk.sport;
	v6_rule.k_dport = entry->key.dport;
	v6_rule.m_dport = entry->msk.dport;

	for (i = 0; i < ARRAY_SIZE(v6_rule.val); i++, *addr += sizeof(__be32))
		nn_writel(nn, *addr, be32_to_cpu(v6_rule.val[i]));
}
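
/* Both fill helpers overlay the packed key/mask fields with a __be32 view
 * so a rule can be streamed to the mailbox as a flat array of 32-bit
 * words; the v6 variant only differs by widening each IP field to four
 * words.
 */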

#define NFP_FS_QUEUE_ID	GENMASK(22, 16)
#define NFP_FS_ACT	GENMASK(15, 0)
#define NFP_FS_ACT_DROP	BIT(0)
#define NFP_FS_ACT_Q	BIT(1)
static void
nfp_net_fs_fill_act(struct nfp_net *nn, struct nfp_fs_entry *entry, u32 addr)
{
	u32 action = 0; /* 0 means default passthrough */

	if (entry->action == RX_CLS_FLOW_DISC)
		action = NFP_FS_ACT_DROP;
	else if (!(entry->flow_type & FLOW_RSS))
		action = FIELD_PREP(NFP_FS_QUEUE_ID, entry->action) | NFP_FS_ACT_Q;

	nn_writel(nn, addr, action);
}
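
/* The action word packs the destination queue into bits 22:16 and the
 * action flags into bits 15:0; for example, steering a flow to queue 3
 * encodes as FIELD_PREP(NFP_FS_QUEUE_ID, 3) | NFP_FS_ACT_Q == 0x30002.
 */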

int nfp_net_fs_add_hw(struct nfp_net *nn, struct nfp_fs_entry *entry)
{
	u32 addr = nn->tlv_caps.mbox_off + NFP_NET_CFG_MBOX_SIMPLE_VAL;
	int err;

	err = nfp_net_mbox_lock(nn, NFP_NET_CFG_FS_SZ);
	if (err)
		return err;

	switch (entry->flow_type & ~FLOW_RSS) {
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
	case SCTP_V4_FLOW:
	case IPV4_USER_FLOW:
		nfp_net_fs_fill_v4(nn, entry, NFP_NET_CFG_MBOX_CMD_FS_ADD_V4, &addr);
		break;
	case TCP_V6_FLOW:
	case UDP_V6_FLOW:
	case SCTP_V6_FLOW:
	case IPV6_USER_FLOW:
		nfp_net_fs_fill_v6(nn, entry, NFP_NET_CFG_MBOX_CMD_FS_ADD_V6, &addr);
		break;
	case ETHER_FLOW:
		nn_writel(nn, addr, NFP_NET_CFG_MBOX_CMD_FS_ADD_ETHTYPE);
		addr += sizeof(u32);
		nn_writew(nn, addr, be16_to_cpu(entry->key.l3_proto));
		addr += sizeof(u32);
		break;
	}

	nfp_net_fs_fill_act(nn, entry, addr);

	err = nfp_net_mbox_reconfig_and_unlock(nn, NFP_NET_CFG_MBOX_CMD_FLOW_STEER);
	if (err) {
		nn_err(nn, "Add new fs rule failed with %d\n", err);
		return -EIO;
	}

	return 0;
}

int nfp_net_fs_del_hw(struct nfp_net *nn, struct nfp_fs_entry *entry)
{
	u32 addr = nn->tlv_caps.mbox_off + NFP_NET_CFG_MBOX_SIMPLE_VAL;
	int err;

	err = nfp_net_mbox_lock(nn, NFP_NET_CFG_FS_SZ);
	if (err)
		return err;

	switch (entry->flow_type & ~FLOW_RSS) {
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
	case SCTP_V4_FLOW:
	case IPV4_USER_FLOW:
		nfp_net_fs_fill_v4(nn, entry, NFP_NET_CFG_MBOX_CMD_FS_DEL_V4, &addr);
		break;
	case TCP_V6_FLOW:
	case UDP_V6_FLOW:
	case SCTP_V6_FLOW:
	case IPV6_USER_FLOW:
		nfp_net_fs_fill_v6(nn, entry, NFP_NET_CFG_MBOX_CMD_FS_DEL_V6, &addr);
		break;
	case ETHER_FLOW:
		nn_writel(nn, addr, NFP_NET_CFG_MBOX_CMD_FS_DEL_ETHTYPE);
		addr += sizeof(u32);
		nn_writew(nn, addr, be16_to_cpu(entry->key.l3_proto));
		addr += sizeof(u32);
		break;
	}

	nfp_net_fs_fill_act(nn, entry, addr);

	err = nfp_net_mbox_reconfig_and_unlock(nn, NFP_NET_CFG_MBOX_CMD_FLOW_STEER);
	if (err) {
		nn_err(nn, "Delete fs rule failed with %d\n", err);
		return -EIO;
	}

	return 0;
}

static void nfp_net_fs_clean(struct nfp_net *nn)
{
	struct nfp_fs_entry *entry, *tmp;

	list_for_each_entry_safe(entry, tmp, &nn->fs.list, node) {
		nfp_net_fs_del_hw(nn, entry);
		list_del(&entry->node);
		kfree(entry);
	}
}

static void nfp_net_stat64(struct net_device *netdev,
			   struct rtnl_link_stats64 *stats)
{
	struct nfp_net *nn = netdev_priv(netdev);
	int r;

	/* Collect software stats */
	for (r = 0; r < nn->max_r_vecs; r++) {
		struct nfp_net_r_vector *r_vec = &nn->r_vecs[r];
		u64 data[3];
		unsigned int start;

		do {
			start = u64_stats_fetch_begin(&r_vec->rx_sync);
			data[0] = r_vec->rx_pkts;
			data[1] = r_vec->rx_bytes;
			data[2] = r_vec->rx_drops;
		} while (u64_stats_fetch_retry(&r_vec->rx_sync, start));
		stats->rx_packets += data[0];
		stats->rx_bytes += data[1];
		stats->rx_dropped += data[2];

		do {
			start = u64_stats_fetch_begin(&r_vec->tx_sync);
			data[0] = r_vec->tx_pkts;
			data[1] = r_vec->tx_bytes;
			data[2] = r_vec->tx_errors;
		} while (u64_stats_fetch_retry(&r_vec->tx_sync, start));
		stats->tx_packets += data[0];
		stats->tx_bytes += data[1];
		stats->tx_errors += data[2];
	}

	/* Add in device stats */
	stats->multicast += nn_readq(nn, NFP_NET_CFG_STATS_RX_MC_FRAMES);
	stats->rx_dropped += nn_readq(nn, NFP_NET_CFG_STATS_RX_DISCARDS);
	stats->rx_errors += nn_readq(nn, NFP_NET_CFG_STATS_RX_ERRORS);

	stats->tx_dropped += nn_readq(nn, NFP_NET_CFG_STATS_TX_DISCARDS);
	stats->tx_errors += nn_readq(nn, NFP_NET_CFG_STATS_TX_ERRORS);
}
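
/* The u64_stats_fetch_begin()/u64_stats_fetch_retry() loops above make the
 * 64-bit per-ring counters safe to snapshot on 32-bit hosts: the copy is
 * retried if the datapath updated the counters mid-read.
 */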

static int nfp_net_set_features(struct net_device *netdev,
				netdev_features_t features)
{
	netdev_features_t changed = netdev->features ^ features;
	struct nfp_net *nn = netdev_priv(netdev);
	u32 new_ctrl;
	int err;

	/* Assume this is not called with features we have not advertised */

	new_ctrl = nn->dp.ctrl;

	if (changed & NETIF_F_RXCSUM) {
		if (features & NETIF_F_RXCSUM)
			new_ctrl |= nn->cap & NFP_NET_CFG_CTRL_RXCSUM_ANY;
		else
			new_ctrl &= ~NFP_NET_CFG_CTRL_RXCSUM_ANY;
	}

	if (changed & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) {
		if (features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))
			new_ctrl |= NFP_NET_CFG_CTRL_TXCSUM;
		else
			new_ctrl &= ~NFP_NET_CFG_CTRL_TXCSUM;
	}

	if (changed & (NETIF_F_TSO | NETIF_F_TSO6)) {
		if (features & (NETIF_F_TSO | NETIF_F_TSO6))
			new_ctrl |= nn->cap & NFP_NET_CFG_CTRL_LSO2 ?:
					      NFP_NET_CFG_CTRL_LSO;
		else
			new_ctrl &= ~NFP_NET_CFG_CTRL_LSO_ANY;
	}

	if (changed & NETIF_F_HW_VLAN_CTAG_RX) {
		if (features & NETIF_F_HW_VLAN_CTAG_RX)
			new_ctrl |= nn->cap & NFP_NET_CFG_CTRL_RXVLAN_V2 ?:
				    NFP_NET_CFG_CTRL_RXVLAN;
		else
			new_ctrl &= ~NFP_NET_CFG_CTRL_RXVLAN_ANY;
	}

	if (changed & NETIF_F_HW_VLAN_CTAG_TX) {
		if (features & NETIF_F_HW_VLAN_CTAG_TX)
			new_ctrl |= nn->cap & NFP_NET_CFG_CTRL_TXVLAN_V2 ?:
				    NFP_NET_CFG_CTRL_TXVLAN;
		else
			new_ctrl &= ~NFP_NET_CFG_CTRL_TXVLAN_ANY;
	}

	if (changed & NETIF_F_HW_VLAN_CTAG_FILTER) {
		if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
			new_ctrl |= NFP_NET_CFG_CTRL_CTAG_FILTER;
		else
			new_ctrl &= ~NFP_NET_CFG_CTRL_CTAG_FILTER;
	}

	if (changed & NETIF_F_HW_VLAN_STAG_RX) {
		if (features & NETIF_F_HW_VLAN_STAG_RX)
			new_ctrl |= NFP_NET_CFG_CTRL_RXQINQ;
		else
			new_ctrl &= ~NFP_NET_CFG_CTRL_RXQINQ;
	}

	if (changed & NETIF_F_SG) {
		if (features & NETIF_F_SG)
			new_ctrl |= NFP_NET_CFG_CTRL_GATHER;
		else
			new_ctrl &= ~NFP_NET_CFG_CTRL_GATHER;
	}

	err = nfp_port_set_features(netdev, features);
	if (err)
		return err;

	nn_dbg(nn, "Feature change 0x%llx -> 0x%llx (changed=0x%llx)\n",
	       netdev->features, features, changed);

	if (new_ctrl == nn->dp.ctrl)
		return 0;

	nn_dbg(nn, "NIC ctrl: 0x%x -> 0x%x\n", nn->dp.ctrl, new_ctrl);
	nn_writel(nn, NFP_NET_CFG_CTRL, new_ctrl);
	err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_GEN);
	if (err)
		return err;

	nn->dp.ctrl = new_ctrl;

	return 0;
}

static netdev_features_t
nfp_net_fix_features(struct net_device *netdev,
		     netdev_features_t features)
{
	if ((features & NETIF_F_HW_VLAN_CTAG_RX) &&
	    (features & NETIF_F_HW_VLAN_STAG_RX)) {
		if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) {
			features &= ~NETIF_F_HW_VLAN_CTAG_RX;
			netdev->wanted_features &= ~NETIF_F_HW_VLAN_CTAG_RX;
			netdev_warn(netdev,
				    "S-tag and C-tag stripping can't be enabled at the same time. Enabling S-tag stripping and disabling C-tag stripping\n");
		} else if (netdev->features & NETIF_F_HW_VLAN_STAG_RX) {
			features &= ~NETIF_F_HW_VLAN_STAG_RX;
			netdev->wanted_features &= ~NETIF_F_HW_VLAN_STAG_RX;
			netdev_warn(netdev,
				    "S-tag and C-tag stripping can't be enabled at the same time. Enabling C-tag stripping and disabling S-tag stripping\n");
		}
	}
	return features;
}

static netdev_features_t
nfp_net_features_check(struct sk_buff *skb, struct net_device *dev,
		       netdev_features_t features)
{
	u8 l4_hdr;

	/* We can't do TSO over double tagged packets (802.1AD) */
	features &= vlan_features_check(skb, features);

	if (!skb->encapsulation)
		return features;

	/* Ensure that inner L4 header offset fits into TX descriptor field */
	if (skb_is_gso(skb)) {
		u32 hdrlen;

		if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4)
			hdrlen = skb_inner_transport_offset(skb) + sizeof(struct udphdr);
		else
			hdrlen = skb_inner_tcp_all_headers(skb);

		/* Assume worst case scenario of having longest possible
		 * metadata prepend - 8B
		 */
		if (unlikely(hdrlen > NFP_NET_LSO_MAX_HDR_SZ - 8))
			features &= ~NETIF_F_GSO_MASK;
	}

	if (xfrm_offload(skb))
		return features;

	/* VXLAN/GRE check */
	switch (vlan_get_protocol(skb)) {
	case htons(ETH_P_IP):
		l4_hdr = ip_hdr(skb)->protocol;
		break;
	case htons(ETH_P_IPV6):
		l4_hdr = ipv6_hdr(skb)->nexthdr;
		break;
	default:
		return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
	}

	if (skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
	    skb->inner_protocol != htons(ETH_P_TEB) ||
	    (l4_hdr != IPPROTO_UDP && l4_hdr != IPPROTO_GRE) ||
	    (l4_hdr == IPPROTO_UDP &&
	     (skb_inner_mac_header(skb) - skb_transport_header(skb) !=
	      sizeof(struct udphdr) + sizeof(struct vxlanhdr))))
		return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);

	return features;
}
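
/* The UDP tunnel test in nfp_net_features_check() only keeps checksum and
 * GSO offloads when the inner MAC header starts exactly
 * sizeof(struct udphdr) + sizeof(struct vxlanhdr) bytes past the transport
 * header, i.e. a plain VXLAN encapsulation with no optional extensions.
 */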

static int
nfp_net_get_phys_port_name(struct net_device *netdev, char *name, size_t len)
{
	struct nfp_net *nn = netdev_priv(netdev);
	int n;

	/* If port is defined, devlink_port is registered and devlink core
	 * is taking care of name formatting.
	 */
	if (nn->port)
		return -EOPNOTSUPP;

	if (nn->dp.is_vf || nn->vnic_no_name)
		return -EOPNOTSUPP;

	n = snprintf(name, len, "n%d", nn->id);
	if (n >= len)
		return -EINVAL;

	return 0;
}

static int nfp_net_xdp_setup_drv(struct nfp_net *nn, struct netdev_bpf *bpf)
{
	struct bpf_prog *prog = bpf->prog;
	struct nfp_net_dp *dp;
	int err;

	if (!prog == !nn->dp.xdp_prog) {
		WRITE_ONCE(nn->dp.xdp_prog, prog);
		xdp_attachment_setup(&nn->xdp, bpf);
		return 0;
	}

	dp = nfp_net_clone_dp(nn);
	if (!dp)
		return -ENOMEM;

	dp->xdp_prog = prog;
	dp->num_tx_rings += prog ? nn->dp.num_rx_rings : -nn->dp.num_rx_rings;
	dp->rx_dma_dir = prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
	dp->rx_dma_off = prog ? XDP_PACKET_HEADROOM - nn->dp.rx_offset : 0;

	/* We need RX reconfig to remap the buffers (BIDIR vs FROM_DEV) */
	err = nfp_net_ring_reconfig(nn, dp, bpf->extack);
	if (err)
		return err;

	xdp_attachment_setup(&nn->xdp, bpf);
	return 0;
}
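
/* In nfp_net_xdp_setup_drv(), replacing one XDP program with another (or
 * clearing an already-empty slot) takes the early path: the ring layout is
 * unchanged, so only the prog pointer is swapped.  A full
 * nfp_net_ring_reconfig() is needed only when toggling between "no prog"
 * and "some prog", since that changes the TX ring count and the RX buffer
 * DMA direction.
 */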

static int nfp_net_xdp_setup_hw(struct nfp_net *nn, struct netdev_bpf *bpf)
{
	int err;

	err = nfp_app_xdp_offload(nn->app, nn, bpf->prog, bpf->extack);
	if (err)
		return err;

	xdp_attachment_setup(&nn->xdp_hw, bpf);
	return 0;
}

static int nfp_net_xdp(struct net_device *netdev, struct netdev_bpf *xdp)
{
	struct nfp_net *nn = netdev_priv(netdev);

	switch (xdp->command) {
	case XDP_SETUP_PROG:
		return nfp_net_xdp_setup_drv(nn, xdp);
	case XDP_SETUP_PROG_HW:
		return nfp_net_xdp_setup_hw(nn, xdp);
	case XDP_SETUP_XSK_POOL:
		return nfp_net_xsk_setup_pool(netdev, xdp->xsk.pool,
					      xdp->xsk.queue_id);
	default:
		return nfp_app_bpf(nn->app, nn, xdp);
	}
}

static int nfp_net_set_mac_address(struct net_device *netdev, void *addr)
{
	struct nfp_net *nn = netdev_priv(netdev);
	struct sockaddr *saddr = addr;
	int err;

	err = eth_prepare_mac_addr_change(netdev, addr);
	if (err)
		return err;

	nfp_net_write_mac_addr(nn, saddr->sa_data);

	err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_MACADDR);
	if (err)
		return err;

	eth_commit_mac_addr_change(netdev, addr);

	return 0;
}

static int nfp_net_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
				  struct net_device *dev, u32 filter_mask,
				  int nlflags)
{
	struct nfp_net *nn = netdev_priv(dev);
	u16 mode;

	if (!(nn->cap & NFP_NET_CFG_CTRL_VEPA))
		return -EOPNOTSUPP;

	mode = (nn->dp.ctrl & NFP_NET_CFG_CTRL_VEPA) ?
	       BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB;

	return ndo_dflt_bridge_getlink(skb, pid, seq, dev, mode, 0, 0,
				       nlflags, filter_mask, NULL);
}

static int nfp_net_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
				  u16 flags, struct netlink_ext_ack *extack)
{
	struct nfp_net *nn = netdev_priv(dev);
	struct nlattr *attr, *br_spec;
	int rem, err;
	u32 new_ctrl;
	u16 mode;

	if (!(nn->cap & NFP_NET_CFG_CTRL_VEPA))
		return -EOPNOTSUPP;

	br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
	if (!br_spec)
		return -EINVAL;
	nla_for_each_nested_type(attr, IFLA_BRIDGE_MODE, br_spec, rem) {
		new_ctrl = nn->dp.ctrl;
		mode = nla_get_u16(attr);
		if (mode == BRIDGE_MODE_VEPA)
			new_ctrl |= NFP_NET_CFG_CTRL_VEPA;
		else if (mode == BRIDGE_MODE_VEB)
			new_ctrl &= ~NFP_NET_CFG_CTRL_VEPA;
		else
			return -EOPNOTSUPP;

		if (new_ctrl == nn->dp.ctrl)
			return 0;

		nn_writel(nn, NFP_NET_CFG_CTRL, new_ctrl);
		err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_GEN);
		if (!err)
			nn->dp.ctrl = new_ctrl;

		return err;
	}

	return -EINVAL;
}
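
/* Usage note (illustrative): the loop above reacts to RTM_SETLINK
 * messages carrying IFLA_BRIDGE_MODE, which iproute2 generates with,
 * for example:
 *
 *	bridge link set dev <ifname> hwmode vepa
 *	bridge link set dev <ifname> hwmode veb
 *
 * and the current mode is reported back through the getlink handler
 * above ("bridge -d link show dev <ifname>").
 */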

const struct net_device_ops nfp_nfd3_netdev_ops = {
	.ndo_init		= nfp_app_ndo_init,
	.ndo_uninit		= nfp_app_ndo_uninit,
	.ndo_open		= nfp_net_netdev_open,
	.ndo_stop		= nfp_net_netdev_close,
	.ndo_start_xmit		= nfp_net_tx,
	.ndo_get_stats64	= nfp_net_stat64,
	.ndo_vlan_rx_add_vid	= nfp_net_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= nfp_net_vlan_rx_kill_vid,
	.ndo_set_vf_mac		= nfp_app_set_vf_mac,
	.ndo_set_vf_vlan	= nfp_app_set_vf_vlan,
	.ndo_set_vf_rate	= nfp_app_set_vf_rate,
	.ndo_set_vf_spoofchk	= nfp_app_set_vf_spoofchk,
	.ndo_set_vf_trust	= nfp_app_set_vf_trust,
	.ndo_get_vf_config	= nfp_app_get_vf_config,
	.ndo_set_vf_link_state	= nfp_app_set_vf_link_state,
	.ndo_setup_tc		= nfp_port_setup_tc,
	.ndo_tx_timeout		= nfp_net_tx_timeout,
	.ndo_set_rx_mode	= nfp_net_set_rx_mode,
	.ndo_change_mtu		= nfp_net_change_mtu,
	.ndo_set_mac_address	= nfp_net_set_mac_address,
	.ndo_set_features	= nfp_net_set_features,
	.ndo_fix_features	= nfp_net_fix_features,
	.ndo_features_check	= nfp_net_features_check,
	.ndo_get_phys_port_name	= nfp_net_get_phys_port_name,
	.ndo_bpf		= nfp_net_xdp,
	.ndo_xsk_wakeup		= nfp_net_xsk_wakeup,
	.ndo_bridge_getlink	= nfp_net_bridge_getlink,
	.ndo_bridge_setlink	= nfp_net_bridge_setlink,
};

const struct net_device_ops nfp_nfdk_netdev_ops = {
	.ndo_init		= nfp_app_ndo_init,
	.ndo_uninit		= nfp_app_ndo_uninit,
	.ndo_open		= nfp_net_netdev_open,
	.ndo_stop		= nfp_net_netdev_close,
	.ndo_start_xmit		= nfp_net_tx,
	.ndo_get_stats64	= nfp_net_stat64,
	.ndo_vlan_rx_add_vid	= nfp_net_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= nfp_net_vlan_rx_kill_vid,
	.ndo_set_vf_mac		= nfp_app_set_vf_mac,
	.ndo_set_vf_vlan	= nfp_app_set_vf_vlan,
	.ndo_set_vf_rate	= nfp_app_set_vf_rate,
	.ndo_set_vf_spoofchk	= nfp_app_set_vf_spoofchk,
	.ndo_set_vf_trust	= nfp_app_set_vf_trust,
	.ndo_get_vf_config	= nfp_app_get_vf_config,
	.ndo_set_vf_link_state	= nfp_app_set_vf_link_state,
	.ndo_setup_tc		= nfp_port_setup_tc,
	.ndo_tx_timeout		= nfp_net_tx_timeout,
	.ndo_set_rx_mode	= nfp_net_set_rx_mode,
	.ndo_change_mtu		= nfp_net_change_mtu,
	.ndo_set_mac_address	= nfp_net_set_mac_address,
	.ndo_set_features	= nfp_net_set_features,
	.ndo_fix_features	= nfp_net_fix_features,
	.ndo_features_check	= nfp_net_features_check,
	.ndo_get_phys_port_name	= nfp_net_get_phys_port_name,
	.ndo_bpf		= nfp_net_xdp,
	.ndo_bridge_getlink	= nfp_net_bridge_getlink,
	.ndo_bridge_setlink	= nfp_net_bridge_setlink,
};

static int nfp_udp_tunnel_sync(struct net_device *netdev, unsigned int table)
{
	struct nfp_net *nn = netdev_priv(netdev);
	int i;

	BUILD_BUG_ON(NFP_NET_N_VXLAN_PORTS & 1);
	for (i = 0; i < NFP_NET_N_VXLAN_PORTS; i += 2) {
		struct udp_tunnel_info ti0, ti1;

		udp_tunnel_nic_get_port(netdev, table, i, &ti0);
		udp_tunnel_nic_get_port(netdev, table, i + 1, &ti1);

		nn_writel(nn, NFP_NET_CFG_VXLAN_PORT + i * sizeof(ti0.port),
			  be16_to_cpu(ti1.port) << 16 | be16_to_cpu(ti0.port));
	}

	return nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_VXLAN);
}
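
/* Worked example (illustrative): ports are written to the BAR two at a
 * time, the even-indexed entry in the low 16 bits and the odd-indexed
 * one in the high 16 bits.  With the IANA VXLAN port 4789 (0x12b5) in
 * slot 0 and, hypothetically, 4790 (0x12b6) in slot 1, the write to
 * NFP_NET_CFG_VXLAN_PORT + 0 would be:
 *
 *	0x12b6 << 16 | 0x12b5 == 0x12b612b5
 */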

static const struct udp_tunnel_nic_info nfp_udp_tunnels = {
	.sync_table	= nfp_udp_tunnel_sync,
	.flags		= UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
	.tables		= {
		{
			.n_entries	= NFP_NET_N_VXLAN_PORTS,
			.tunnel_types	= UDP_TUNNEL_TYPE_VXLAN,
		},
	},
};

/**
 * nfp_net_info() - Print general info about the NIC
 * @nn:      NFP Net device to show info about
 */
void nfp_net_info(struct nfp_net *nn)
{
	nn_info(nn, "NFP-6xxx %sNetdev: TxQs=%d/%d RxQs=%d/%d\n",
		nn->dp.is_vf ? "VF " : "",
		nn->dp.num_tx_rings, nn->max_tx_rings,
		nn->dp.num_rx_rings, nn->max_rx_rings);
	nn_info(nn, "VER: %d.%d.%d.%d, Maximum supported MTU: %d\n",
		nn->fw_ver.extend, nn->fw_ver.class,
		nn->fw_ver.major, nn->fw_ver.minor,
		nn->max_mtu);
	nn_info(nn, "CAP: %#x %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n",
		nn->cap,
		nn->cap & NFP_NET_CFG_CTRL_PROMISC  ? "PROMISC "  : "",
		nn->cap & NFP_NET_CFG_CTRL_L2BC     ? "L2BCFILT " : "",
		nn->cap & NFP_NET_CFG_CTRL_L2MC     ? "L2MCFILT " : "",
		nn->cap & NFP_NET_CFG_CTRL_RXCSUM   ? "RXCSUM "   : "",
		nn->cap & NFP_NET_CFG_CTRL_TXCSUM   ? "TXCSUM "   : "",
		nn->cap & NFP_NET_CFG_CTRL_RXVLAN   ? "RXVLAN "   : "",
		nn->cap & NFP_NET_CFG_CTRL_TXVLAN   ? "TXVLAN "   : "",
		nn->cap & NFP_NET_CFG_CTRL_RXQINQ   ? "RXQINQ "   : "",
		nn->cap & NFP_NET_CFG_CTRL_RXVLAN_V2 ? "RXVLANv2 " : "",
		nn->cap & NFP_NET_CFG_CTRL_TXVLAN_V2 ? "TXVLANv2 " : "",
		nn->cap & NFP_NET_CFG_CTRL_SCATTER  ? "SCATTER "  : "",
		nn->cap & NFP_NET_CFG_CTRL_GATHER   ? "GATHER "   : "",
		nn->cap & NFP_NET_CFG_CTRL_LSO      ? "TSO1 "     : "",
		nn->cap & NFP_NET_CFG_CTRL_LSO2     ? "TSO2 "     : "",
		nn->cap & NFP_NET_CFG_CTRL_RSS      ? "RSS1 "     : "",
		nn->cap & NFP_NET_CFG_CTRL_RSS2     ? "RSS2 "     : "",
		nn->cap & NFP_NET_CFG_CTRL_CTAG_FILTER ? "CTAG_FILTER " : "",
		nn->cap & NFP_NET_CFG_CTRL_MSIXAUTO ? "AUTOMASK " : "",
		nn->cap & NFP_NET_CFG_CTRL_IRQMOD   ? "IRQMOD "   : "",
		nn->cap & NFP_NET_CFG_CTRL_TXRWB    ? "TXRWB "    : "",
		nn->cap & NFP_NET_CFG_CTRL_VEPA     ? "VEPA "     : "",
		nn->cap & NFP_NET_CFG_CTRL_VXLAN    ? "VXLAN "    : "",
		nn->cap & NFP_NET_CFG_CTRL_NVGRE    ? "NVGRE "    : "",
		nn->cap & NFP_NET_CFG_CTRL_CSUM_COMPLETE ?
						      "RXCSUM_COMPLETE " : "",
		nn->cap & NFP_NET_CFG_CTRL_LIVE_ADDR ? "LIVE_ADDR " : "",
		nn->cap_w1 & NFP_NET_CFG_CTRL_MCAST_FILTER ? "MULTICAST_FILTER " : "",
		nn->cap_w1 & NFP_NET_CFG_CTRL_USO ? "USO " : "",
		nfp_app_extra_cap(nn->app, nn));
}

/**
 * nfp_net_alloc() - Allocate netdev and related structure
 * @pdev:         PCI device
 * @dev_info:     NFP ASIC params
 * @ctrl_bar:     PCI IOMEM with vNIC config memory
 * @needs_netdev: Whether to allocate a netdev for this vNIC
 * @max_tx_rings: Maximum number of TX rings supported by device
 * @max_rx_rings: Maximum number of RX rings supported by device
 *
 * This function allocates a netdev device and fills in the initial
 * part of the @struct nfp_net structure.  In case of a control device
 * the nfp_net structure is allocated without the netdev.
 *
 * Return: NFP Net device structure, or ERR_PTR on error.
 */
struct nfp_net *
nfp_net_alloc(struct pci_dev *pdev, const struct nfp_dev_info *dev_info,
	      void __iomem *ctrl_bar, bool needs_netdev,
	      unsigned int max_tx_rings, unsigned int max_rx_rings)
{
	u64 dma_mask = dma_get_mask(&pdev->dev);
	struct nfp_net *nn;
	int err;

	if (needs_netdev) {
		struct net_device *netdev;

		netdev = alloc_etherdev_mqs(sizeof(struct nfp_net),
					    max_tx_rings, max_rx_rings);
		if (!netdev)
			return ERR_PTR(-ENOMEM);

		SET_NETDEV_DEV(netdev, &pdev->dev);
		nn = netdev_priv(netdev);
		nn->dp.netdev = netdev;
	} else {
		nn = vzalloc(sizeof(*nn));
		if (!nn)
			return ERR_PTR(-ENOMEM);
	}

	nn->dp.dev = &pdev->dev;
	nn->dp.ctrl_bar = ctrl_bar;
	nn->dev_info = dev_info;
	nn->pdev = pdev;
	nfp_net_get_fw_version(&nn->fw_ver, ctrl_bar);

	switch (FIELD_GET(NFP_NET_CFG_VERSION_DP_MASK, nn->fw_ver.extend)) {
	case NFP_NET_CFG_VERSION_DP_NFD3:
		nn->dp.ops = &nfp_nfd3_ops;
		break;
	case NFP_NET_CFG_VERSION_DP_NFDK:
		if (nn->fw_ver.major < 5) {
			dev_err(&pdev->dev,
				"NFDK must use ABI 5 or newer, found: %d\n",
				nn->fw_ver.major);
			err = -EINVAL;
			goto err_free_nn;
		}
		nn->dp.ops = &nfp_nfdk_ops;
		break;
	default:
		err = -EINVAL;
		goto err_free_nn;
	}

	if ((dma_mask & nn->dp.ops->dma_mask) != dma_mask) {
		dev_err(&pdev->dev,
			"DMA mask of loaded firmware: %llx, required DMA mask: %llx\n",
			nn->dp.ops->dma_mask, dma_mask);
		err = -EINVAL;
		goto err_free_nn;
	}

	nn->max_tx_rings = max_tx_rings;
	nn->max_rx_rings = max_rx_rings;

	nn->dp.num_tx_rings = min_t(unsigned int,
				    max_tx_rings, num_online_cpus());
	nn->dp.num_rx_rings = min_t(unsigned int, max_rx_rings,
				    netif_get_num_default_rss_queues());

	nn->dp.num_r_vecs = max(nn->dp.num_tx_rings, nn->dp.num_rx_rings);
	nn->dp.num_r_vecs = min_t(unsigned int,
				  nn->dp.num_r_vecs, num_online_cpus());
	nn->max_r_vecs = nn->dp.num_r_vecs;

	nn->dp.xsk_pools = kcalloc(nn->max_r_vecs, sizeof(*nn->dp.xsk_pools),
				   GFP_KERNEL);
	if (!nn->dp.xsk_pools) {
		err = -ENOMEM;
		goto err_free_nn;
	}

	nn->dp.txd_cnt = NFP_NET_TX_DESCS_DEFAULT;
	nn->dp.rxd_cnt = NFP_NET_RX_DESCS_DEFAULT;

	sema_init(&nn->bar_lock, 1);

	spin_lock_init(&nn->reconfig_lock);
	spin_lock_init(&nn->link_status_lock);

	timer_setup(&nn->reconfig_timer, nfp_net_reconfig_timer, 0);

	err = nfp_net_tlv_caps_parse(&nn->pdev->dev, nn->dp.ctrl_bar,
				     &nn->tlv_caps);
	if (err)
		goto err_free_nn;

	err = nfp_ccm_mbox_alloc(nn);
	if (err)
		goto err_free_nn;

	return nn;

err_free_nn:
	if (nn->dp.netdev)
		free_netdev(nn->dp.netdev);
	else
		vfree(nn);
	return ERR_PTR(err);
}
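
/* Illustrative sketch (not part of the driver): the expected vNIC
 * lifecycle pairs the helpers in this file; error handling is elided
 * and the parameters below are placeholders.
 *
 *	struct nfp_net *nn;
 *	int err;
 *
 *	nn = nfp_net_alloc(pdev, dev_info, ctrl_bar, true, 8, 8);
 *	if (IS_ERR(nn))
 *		return PTR_ERR(nn);
 *	err = nfp_net_init(nn);		(registers the netdev)
 *	...
 *	nfp_net_clean(nn);		(unregisters the netdev)
 *	nfp_net_free(nn);		(undoes nfp_net_alloc())
 */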

/**
 * nfp_net_free() - Undo what @nfp_net_alloc() did
 * @nn:      NFP Net device to free
 */
void nfp_net_free(struct nfp_net *nn)
{
	WARN_ON(timer_pending(&nn->reconfig_timer) || nn->reconfig_posted);
	nfp_ccm_mbox_free(nn);

	kfree(nn->dp.xsk_pools);
	if (nn->dp.netdev)
		free_netdev(nn->dp.netdev);
	else
		vfree(nn);
}

/**
 * nfp_net_rss_key_sz() - Get current size of the RSS key
 * @nn:   NFP Net device instance
 *
 * Return: size of the RSS key for currently selected hash function.
 */
unsigned int nfp_net_rss_key_sz(struct nfp_net *nn)
{
	switch (nn->rss_hfunc) {
	case ETH_RSS_HASH_TOP:
		return NFP_NET_CFG_RSS_KEY_SZ;
	case ETH_RSS_HASH_XOR:
		return 0;
	case ETH_RSS_HASH_CRC32:
		return 4;
	}

	nn_warn(nn, "Unknown hash function: %u\n", nn->rss_hfunc);
	return 0;
}
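
/* Usage note (illustrative): the size returned here dimensions the key
 * handed to netdev_rss_key_fill() in nfp_net_rss_init() below, and is
 * what shows up as the hash key in "ethtool -x <ifname>".  A
 * Toeplitz-capable device reports NFP_NET_CFG_RSS_KEY_SZ bytes; the XOR
 * hash needs no key at all.
 */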

/**
 * nfp_net_rss_init() - Set the initial RSS parameters
 * @nn:	     NFP Net device to reconfigure
 */
static void nfp_net_rss_init(struct nfp_net *nn)
{
	unsigned long func_bit, rss_cap_hfunc;
	u32 reg;

	/* Read the RSS function capability and select first supported func */
	reg = nn_readl(nn, NFP_NET_CFG_RSS_CAP);
	rss_cap_hfunc = FIELD_GET(NFP_NET_CFG_RSS_CAP_HFUNC, reg);
	if (!rss_cap_hfunc)
		rss_cap_hfunc = FIELD_GET(NFP_NET_CFG_RSS_CAP_HFUNC,
					  NFP_NET_CFG_RSS_TOEPLITZ);

	func_bit = find_first_bit(&rss_cap_hfunc, NFP_NET_CFG_RSS_HFUNCS);
	if (func_bit == NFP_NET_CFG_RSS_HFUNCS) {
		dev_warn(nn->dp.dev,
			 "Bad RSS config, defaulting to Toeplitz hash\n");
		func_bit = ETH_RSS_HASH_TOP_BIT;
	}
	nn->rss_hfunc = 1 << func_bit;

	netdev_rss_key_fill(nn->rss_key, nfp_net_rss_key_sz(nn));

	nfp_net_rss_init_itbl(nn);

	/* Enable IPv4/IPv6 TCP by default */
	nn->rss_cfg = NFP_NET_CFG_RSS_IPV4_TCP |
		      NFP_NET_CFG_RSS_IPV6_TCP |
		      NFP_NET_CFG_RSS_IPV4_UDP |
		      NFP_NET_CFG_RSS_IPV6_UDP |
		      FIELD_PREP(NFP_NET_CFG_RSS_HFUNC, nn->rss_hfunc) |
		      NFP_NET_CFG_RSS_MASK;
}
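
/* Worked example (illustrative, assuming the HFUNC field occupies the
 * top byte of the RSS_CAP word as defined in nfp_net_ctrl.h): a device
 * whose RSS_CAP register reads 0x01000000 yields
 *
 *	rss_cap_hfunc = FIELD_GET(0xff000000, 0x01000000) = 0x1
 *	find_first_bit(&rss_cap_hfunc, ...)               = 0 (ETH_RSS_HASH_TOP_BIT)
 *	nn->rss_hfunc                                     = 1 << 0 = ETH_RSS_HASH_TOP
 *
 * i.e. Toeplitz is selected, matching the fallback taken above when the
 * capability word advertises nothing.
 */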

/**
 * nfp_net_irqmod_init() - Set the initial IRQ moderation parameters
 * @nn:	     NFP Net device to reconfigure
 */
static void nfp_net_irqmod_init(struct nfp_net *nn)
{
	nn->rx_coalesce_usecs      = 50;
	nn->rx_coalesce_max_frames = 64;
	nn->tx_coalesce_usecs      = 50;
	nn->tx_coalesce_max_frames = 64;

	nn->rx_coalesce_adapt_on   = true;
	nn->tx_coalesce_adapt_on   = true;
}
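
/* Usage note (illustrative): these defaults are what "ethtool -c
 * <ifname>" reports after probe, and they can be overridden at runtime,
 * for example:
 *
 *	ethtool -C <ifname> rx-usecs 50 rx-frames 64 adaptive-rx on
 *
 * provided the IRQMOD capability was advertised by the firmware.
 */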

static void nfp_net_netdev_init(struct nfp_net *nn)
{
	struct net_device *netdev = nn->dp.netdev;

	nfp_net_write_mac_addr(nn, nn->dp.netdev->dev_addr);

	netdev->mtu = nn->dp.mtu;

	/* Advertise/enable offloads based on capabilities
	 *
	 * Note: netdev->features show the currently enabled features
	 * and netdev->hw_features advertises which features are
	 * supported.  By default we enable most features.
	 */
	if (nn->cap & NFP_NET_CFG_CTRL_LIVE_ADDR)
		netdev->priv_flags |= IFF_LIVE_ADDR_CHANGE;

	netdev->hw_features = NETIF_F_HIGHDMA;
	if (nn->cap & NFP_NET_CFG_CTRL_RXCSUM_ANY) {
		netdev->hw_features |= NETIF_F_RXCSUM;
		nn->dp.ctrl |= nn->cap & NFP_NET_CFG_CTRL_RXCSUM_ANY;
	}
	if (nn->cap & NFP_NET_CFG_CTRL_TXCSUM) {
		netdev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
		nn->dp.ctrl |= NFP_NET_CFG_CTRL_TXCSUM;
	}
	if (nn->cap & NFP_NET_CFG_CTRL_GATHER) {
		netdev->hw_features |= NETIF_F_SG;
		nn->dp.ctrl |= NFP_NET_CFG_CTRL_GATHER;
	}
	if ((nn->cap & NFP_NET_CFG_CTRL_LSO && nn->fw_ver.major > 2) ||
	    nn->cap & NFP_NET_CFG_CTRL_LSO2) {
		netdev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
		if (nn->cap_w1 & NFP_NET_CFG_CTRL_USO)
			netdev->hw_features |= NETIF_F_GSO_UDP_L4;
		nn->dp.ctrl |= nn->cap & NFP_NET_CFG_CTRL_LSO2 ?:
					 NFP_NET_CFG_CTRL_LSO;
	}
	if (nn->cap & NFP_NET_CFG_CTRL_RSS_ANY)
		netdev->hw_features |= NETIF_F_RXHASH;

#ifdef CONFIG_NFP_NET_IPSEC
	if (nn->cap_w1 & NFP_NET_CFG_CTRL_IPSEC)
		netdev->hw_features |= NETIF_F_HW_ESP | NETIF_F_HW_ESP_TX_CSUM;
#endif

	if (nn->cap & NFP_NET_CFG_CTRL_VXLAN) {
		if (nn->cap & NFP_NET_CFG_CTRL_LSO) {
			netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL |
					       NETIF_F_GSO_UDP_TUNNEL_CSUM |
					       NETIF_F_GSO_PARTIAL;
			netdev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM;
		}
		netdev->udp_tunnel_nic_info = &nfp_udp_tunnels;
		nn->dp.ctrl |= NFP_NET_CFG_CTRL_VXLAN;
	}
	if (nn->cap & NFP_NET_CFG_CTRL_NVGRE) {
		if (nn->cap & NFP_NET_CFG_CTRL_LSO)
			netdev->hw_features |= NETIF_F_GSO_GRE;
		nn->dp.ctrl |= NFP_NET_CFG_CTRL_NVGRE;
	}
	if (nn->cap & (NFP_NET_CFG_CTRL_VXLAN | NFP_NET_CFG_CTRL_NVGRE))
		netdev->hw_enc_features = netdev->hw_features;

	netdev->vlan_features = netdev->hw_features;

	if (nn->cap & NFP_NET_CFG_CTRL_RXVLAN_ANY) {
		netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
		nn->dp.ctrl |= nn->cap & NFP_NET_CFG_CTRL_RXVLAN_V2 ?:
			       NFP_NET_CFG_CTRL_RXVLAN;
	}
	if (nn->cap & NFP_NET_CFG_CTRL_TXVLAN_ANY) {
		if (nn->cap & NFP_NET_CFG_CTRL_LSO2) {
			nn_warn(nn, "Device advertises both TSO2 and TXVLAN. Refusing to enable TXVLAN.\n");
		} else {
			netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
			nn->dp.ctrl |= nn->cap & NFP_NET_CFG_CTRL_TXVLAN_V2 ?:
				       NFP_NET_CFG_CTRL_TXVLAN;
		}
	}
	if (nn->cap & NFP_NET_CFG_CTRL_CTAG_FILTER) {
		netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
		nn->dp.ctrl |= NFP_NET_CFG_CTRL_CTAG_FILTER;
	}
	if (nn->cap & NFP_NET_CFG_CTRL_RXQINQ) {
		netdev->hw_features |= NETIF_F_HW_VLAN_STAG_RX;
		nn->dp.ctrl |= NFP_NET_CFG_CTRL_RXQINQ;
	}

	netdev->features = netdev->hw_features;

	if (nfp_app_has_tc(nn->app) && nn->port)
		netdev->hw_features |= NETIF_F_HW_TC;

	/* C-Tag strip and S-Tag strip can't be supported simultaneously,
	 * so enable C-Tag strip and disable S-Tag strip by default.
	 */
	netdev->features &= ~NETIF_F_HW_VLAN_STAG_RX;
	nn->dp.ctrl &= ~NFP_NET_CFG_CTRL_RXQINQ;

	netdev->xdp_features = NETDEV_XDP_ACT_BASIC;
	if (nn->app && nn->app->type->id == NFP_APP_BPF_NIC)
		netdev->xdp_features |= NETDEV_XDP_ACT_HW_OFFLOAD;

	/* Finalise the netdev setup */
	switch (nn->dp.ops->version) {
	case NFP_NFD_VER_NFD3:
		netdev->netdev_ops = &nfp_nfd3_netdev_ops;
		netdev->xdp_features |= NETDEV_XDP_ACT_XSK_ZEROCOPY;
		netdev->xdp_features |= NETDEV_XDP_ACT_REDIRECT;
		break;
	case NFP_NFD_VER_NFDK:
		netdev->netdev_ops = &nfp_nfdk_netdev_ops;
		break;
	}

	netdev->watchdog_timeo = secs_to_jiffies(5);

	/* MTU range: 68 - hw-specific max */
	netdev->min_mtu = ETH_MIN_MTU;
	netdev->max_mtu = nn->max_mtu;

	netif_set_tso_max_segs(netdev, NFP_NET_LSO_MAX_SEGS);

	netif_carrier_off(netdev);

	nfp_net_set_ethtool_ops(netdev);
}

static int nfp_net_read_caps(struct nfp_net *nn)
{
	/* Get some of the read-only fields from the BAR */
	nn->cap = nn_readl(nn, NFP_NET_CFG_CAP);
	nn->cap_w1 = nn_readl(nn, NFP_NET_CFG_CAP_WORD1);
	nn->max_mtu = nn_readl(nn, NFP_NET_CFG_MAX_MTU);

	/* ABI 4.x and ctrl vNIC always use chained metadata, in other cases
	 * we allow use of non-chained metadata if RSS(v1) is the only
	 * advertised capability requiring metadata.
	 */
	nn->dp.chained_metadata_format = nn->fw_ver.major == 4 ||
					 !nn->dp.netdev ||
					 !(nn->cap & NFP_NET_CFG_CTRL_RSS) ||
					 nn->cap & NFP_NET_CFG_CTRL_CHAIN_META;
	/* RSS(v1) uses non-chained metadata format, except in ABI 4.x where
	 * it has the same meaning as RSSv2.
	 */
	if (nn->dp.chained_metadata_format && nn->fw_ver.major != 4)
		nn->cap &= ~NFP_NET_CFG_CTRL_RSS;

	/* Determine RX packet/metadata boundary offset */
	if (nn->fw_ver.major >= 2) {
		u32 reg;

		reg = nn_readl(nn, NFP_NET_CFG_RX_OFFSET);
		if (reg > NFP_NET_MAX_PREPEND) {
			nn_err(nn, "Invalid rx offset: %u\n", reg);
			return -EINVAL;
		}
		nn->dp.rx_offset = reg;
	} else {
		nn->dp.rx_offset = NFP_NET_RX_OFFSET;
	}

	/* Mask out NFD-version-specific features */
	nn->cap &= nn->dp.ops->cap_mask;

	/* For control vNICs mask out the capabilities app doesn't want. */
	if (!nn->dp.netdev)
		nn->cap &= nn->app->type->ctrl_cap_mask;

	return 0;
}

/**
 * nfp_net_init() - Initialise/finalise the nfp_net structure
 * @nn:		NFP Net device structure
 *
 * Return: 0 on success or negative errno on error.
 */
int nfp_net_init(struct nfp_net *nn)
{
	int err;

	nn->dp.rx_dma_dir = DMA_FROM_DEVICE;

	err = nfp_net_read_caps(nn);
	if (err)
		return err;

	/* Set default MTU and Freelist buffer size */
	if (!nfp_net_is_data_vnic(nn) && nn->app->ctrl_mtu) {
		nn->dp.mtu = min(nn->app->ctrl_mtu, nn->max_mtu);
	} else if (nn->max_mtu < NFP_NET_DEFAULT_MTU) {
		nn->dp.mtu = nn->max_mtu;
	} else {
		nn->dp.mtu = NFP_NET_DEFAULT_MTU;
	}
	nn->dp.fl_bufsz = nfp_net_calc_fl_bufsz(&nn->dp);

	if (nfp_app_ctrl_uses_data_vnics(nn->app))
		nn->dp.ctrl |= nn->cap & NFP_NET_CFG_CTRL_CMSG_DATA;

	if (nn->cap & NFP_NET_CFG_CTRL_RSS_ANY) {
		nfp_net_rss_init(nn);
		nn->dp.ctrl |= nn->cap & NFP_NET_CFG_CTRL_RSS2 ?:
					 NFP_NET_CFG_CTRL_RSS;
	}

	/* Allow L2 Broadcast and Multicast through by default, if supported */
	if (nn->cap & NFP_NET_CFG_CTRL_L2BC)
		nn->dp.ctrl |= NFP_NET_CFG_CTRL_L2BC;

	/* Allow IRQ moderation, if supported */
	if (nn->cap & NFP_NET_CFG_CTRL_IRQMOD) {
		nfp_net_irqmod_init(nn);
		nn->dp.ctrl |= NFP_NET_CFG_CTRL_IRQMOD;
	}

	/* Enable TX pointer writeback, if supported */
	if (nn->cap & NFP_NET_CFG_CTRL_TXRWB)
		nn->dp.ctrl |= NFP_NET_CFG_CTRL_TXRWB;

	if (nn->cap_w1 & NFP_NET_CFG_CTRL_MCAST_FILTER)
		nn->dp.ctrl_w1 |= NFP_NET_CFG_CTRL_MCAST_FILTER;

	/* Stash the re-configuration queue away.  First odd queue in TX Bar */
	nn->qcp_cfg = nn->tx_bar + NFP_QCP_QUEUE_ADDR_SZ;

	/* Make sure the FW knows the netdev is supposed to be disabled here */
	nn_writel(nn, NFP_NET_CFG_CTRL, 0);
	nn_writeq(nn, NFP_NET_CFG_TXRS_ENABLE, 0);
	nn_writeq(nn, NFP_NET_CFG_RXRS_ENABLE, 0);
	nn_writel(nn, NFP_NET_CFG_CTRL_WORD1, 0);
	err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_RING |
				   NFP_NET_CFG_UPDATE_GEN);
	if (err)
		return err;

	if (nn->dp.netdev) {
		nfp_net_netdev_init(nn);

		err = nfp_ccm_mbox_init(nn);
		if (err)
			return err;

		err = nfp_net_tls_init(nn);
		if (err)
			goto err_clean_mbox;

		nfp_net_ipsec_init(nn);
	}

	nfp_net_vecs_init(nn);

	if (!nn->dp.netdev)
		return 0;

	spin_lock_init(&nn->mbox_amsg.lock);
	INIT_LIST_HEAD(&nn->mbox_amsg.list);
	INIT_WORK(&nn->mbox_amsg.work, nfp_net_mbox_amsg_work);

	INIT_LIST_HEAD(&nn->fs.list);

	return register_netdev(nn->dp.netdev);

err_clean_mbox:
	nfp_ccm_mbox_clean(nn);
	return err;
}

/**
 * nfp_net_clean() - Undo what nfp_net_init() did.
 * @nn:		NFP Net device structure
 */
void nfp_net_clean(struct nfp_net *nn)
{
	if (!nn->dp.netdev)
		return;

	unregister_netdev(nn->dp.netdev);
	nfp_net_ipsec_clean(nn);
	nfp_ccm_mbox_clean(nn);
	nfp_net_fs_clean(nn);
	flush_work(&nn->mbox_amsg.work);
	nfp_net_reconfig_wait_posted(nn);
}