// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2013 - 2018 Intel Corporation. */

#include "iavf.h"
#include "iavf_prototype.h"
#include "iavf_client.h"
/* All iavf tracepoints are defined by the include below, which must
 * be included exactly once across the whole kernel with
 * CREATE_TRACE_POINTS defined
 */
#define CREATE_TRACE_POINTS
#include "iavf_trace.h"

static int iavf_setup_all_tx_resources(struct iavf_adapter *adapter);
static int iavf_setup_all_rx_resources(struct iavf_adapter *adapter);
static int iavf_close(struct net_device *netdev);
static void iavf_init_get_resources(struct iavf_adapter *adapter);
static int iavf_check_reset_complete(struct iavf_hw *hw);

char iavf_driver_name[] = "iavf";
static const char iavf_driver_string[] =
	"Intel(R) Ethernet Adaptive Virtual Function Network Driver";

static const char iavf_copyright[] =
	"Copyright (c) 2013 - 2018 Intel Corporation.";

/* iavf_pci_tbl - PCI Device ID Table
 *
 * Wildcard entries (PCI_ANY_ID) should come last
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static const struct pci_device_id iavf_pci_tbl[] = {
	{PCI_VDEVICE(INTEL, IAVF_DEV_ID_VF), 0},
	{PCI_VDEVICE(INTEL, IAVF_DEV_ID_VF_HV), 0},
	{PCI_VDEVICE(INTEL, IAVF_DEV_ID_X722_VF), 0},
	{PCI_VDEVICE(INTEL, IAVF_DEV_ID_ADAPTIVE_VF), 0},
	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, iavf_pci_tbl);

MODULE_ALIAS("i40evf");
MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) Ethernet Adaptive Virtual Function Network Driver");
MODULE_LICENSE("GPL v2");

static const struct net_device_ops iavf_netdev_ops;
struct workqueue_struct *iavf_wq;

/**
 * iavf_pdev_to_adapter - go from pci_dev to adapter
 * @pdev: pci_dev pointer
 */
static struct iavf_adapter *iavf_pdev_to_adapter(struct pci_dev *pdev)
{
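	/* the netdev is stored as this device's PCI driver data (set at
	 * probe), so the adapter can be recovered from any pci_dev handle
	 */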
	return netdev_priv(pci_get_drvdata(pdev));
}

/**
 * iavf_allocate_dma_mem_d - OS specific memory alloc for shared code
 * @hw: pointer to the HW structure
 * @mem: ptr to mem struct to fill out
 * @size: size of memory requested
 * @alignment: what to align the allocation to
 **/
enum iavf_status iavf_allocate_dma_mem_d(struct iavf_hw *hw,
					 struct iavf_dma_mem *mem,
					 u64 size, u32 alignment)
{
	struct iavf_adapter *adapter = (struct iavf_adapter *)hw->back;

	if (!mem)
		return IAVF_ERR_PARAM;

	mem->size = ALIGN(size, alignment);
	mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size,
				     (dma_addr_t *)&mem->pa, GFP_KERNEL);
	if (mem->va)
		return 0;
	else
		return IAVF_ERR_NO_MEMORY;
}

/**
 * iavf_free_dma_mem_d - OS specific memory free for shared code
 * @hw: pointer to the HW structure
 * @mem: ptr to mem struct to free
 **/
enum iavf_status iavf_free_dma_mem_d(struct iavf_hw *hw,
				     struct iavf_dma_mem *mem)
{
	struct iavf_adapter *adapter = (struct iavf_adapter *)hw->back;

	if (!mem || !mem->va)
		return IAVF_ERR_PARAM;
	dma_free_coherent(&adapter->pdev->dev, mem->size,
			  mem->va, (dma_addr_t)mem->pa);
	return 0;
}

/**
 * iavf_allocate_virt_mem_d - OS specific memory alloc for shared code
 * @hw: pointer to the HW structure
 * @mem: ptr to mem struct to fill out
 * @size: size of memory requested
 **/
enum iavf_status iavf_allocate_virt_mem_d(struct iavf_hw *hw,
					  struct iavf_virt_mem *mem, u32 size)
{
	if (!mem)
		return IAVF_ERR_PARAM;

	mem->size = size;
	mem->va = kzalloc(size, GFP_KERNEL);
	if (mem->va)
		return 0;
	else
		return IAVF_ERR_NO_MEMORY;
}

/**
 * iavf_free_virt_mem_d - OS specific memory free for shared code
 * @hw: pointer to the HW structure
 * @mem: ptr to mem struct to free
 **/
enum iavf_status iavf_free_virt_mem_d(struct iavf_hw *hw,
				      struct iavf_virt_mem *mem)
{
	if (!mem)
		return IAVF_ERR_PARAM;

	/* it's ok to kfree a NULL pointer */
	kfree(mem->va);
	return 0;
}

/**
 * iavf_lock_timeout - try to lock mutex but give up after timeout
 * @lock: mutex that should be locked
 * @msecs: timeout in msecs
 *
 * Returns 0 on success, negative on failure
 **/
int iavf_lock_timeout(struct mutex *lock, unsigned int msecs)
{
	unsigned int wait, delay = 10;
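
	/* poll every 10 ms until the mutex becomes available or the
	 * requested timeout expires
	 */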
	for (wait = 0; wait < msecs; wait += delay) {
		if (mutex_trylock(lock))
			return 0;
		msleep(delay);
	}

	return -1;
}

/**
 * iavf_schedule_reset - Set the flags and schedule a reset event
 * @adapter: board private structure
 **/
void iavf_schedule_reset(struct iavf_adapter *adapter)
{
	if (!(adapter->flags &
	      (IAVF_FLAG_RESET_PENDING | IAVF_FLAG_RESET_NEEDED))) {
		adapter->flags |= IAVF_FLAG_RESET_NEEDED;
		queue_work(iavf_wq, &adapter->reset_task);
	}
}

/**
 * iavf_schedule_request_stats - Set the flags and schedule statistics request
 * @adapter: board private structure
 *
 * Sets IAVF_FLAG_AQ_REQUEST_STATS flag so iavf_watchdog_task() will explicitly
 * request and refresh ethtool stats
 **/
void iavf_schedule_request_stats(struct iavf_adapter *adapter)
{
	adapter->aq_required |= IAVF_FLAG_AQ_REQUEST_STATS;
	mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0);
}

/**
 * iavf_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 * @txqueue: queue number that is timing out
 **/
static void iavf_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);

	adapter->tx_timeout_count++;
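	/* recover from the hang by scheduling the driver's reset task */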
	iavf_schedule_reset(adapter);
}

/**
 * iavf_misc_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static void iavf_misc_irq_disable(struct iavf_adapter *adapter)
{
	struct iavf_hw *hw = &adapter->hw;

	if (!adapter->msix_entries)
		return;

	wr32(hw, IAVF_VFINT_DYN_CTL01, 0);

	iavf_flush(hw);

	synchronize_irq(adapter->msix_entries[0].vector);
}

/**
 * iavf_misc_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/
static void iavf_misc_irq_enable(struct iavf_adapter *adapter)
{
	struct iavf_hw *hw = &adapter->hw;

	wr32(hw, IAVF_VFINT_DYN_CTL01, IAVF_VFINT_DYN_CTL01_INTENA_MASK |
				       IAVF_VFINT_DYN_CTL01_ITR_INDX_MASK);
	wr32(hw, IAVF_VFINT_ICR0_ENA1, IAVF_VFINT_ICR0_ENA1_ADMINQ_MASK);

	iavf_flush(hw);
}

/**
 * iavf_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static void iavf_irq_disable(struct iavf_adapter *adapter)
{
	int i;
	struct iavf_hw *hw = &adapter->hw;

	if (!adapter->msix_entries)
		return;

	for (i = 1; i < adapter->num_msix_vectors; i++) {
		wr32(hw, IAVF_VFINT_DYN_CTLN1(i - 1), 0);
		synchronize_irq(adapter->msix_entries[i].vector);
	}

	iavf_flush(hw);
}

/**
 * iavf_irq_enable_queues - Enable interrupt for specified queues
 * @adapter: board private structure
 * @mask: bitmap of queues to enable
 **/
void iavf_irq_enable_queues(struct iavf_adapter *adapter, u32 mask)
{
	struct iavf_hw *hw = &adapter->hw;
	int i;
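
	/* vector 0 is reserved for the misc/admin queue interrupt, so
	 * queue vectors start at index 1 (mask bit 0 maps to vector 1)
	 */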
	for (i = 1; i < adapter->num_msix_vectors; i++) {
		if (mask & BIT(i - 1)) {
			wr32(hw, IAVF_VFINT_DYN_CTLN1(i - 1),
			     IAVF_VFINT_DYN_CTLN1_INTENA_MASK |
			     IAVF_VFINT_DYN_CTLN1_ITR_INDX_MASK);
		}
	}
}

/**
 * iavf_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 * @flush: boolean value whether to run rd32()
 **/
void iavf_irq_enable(struct iavf_adapter *adapter, bool flush)
{
	struct iavf_hw *hw = &adapter->hw;

	iavf_misc_irq_enable(adapter);
	iavf_irq_enable_queues(adapter, ~0);

	if (flush)
		iavf_flush(hw);
}

/**
 * iavf_msix_aq - Interrupt handler for vector 0
 * @irq: interrupt number
 * @data: pointer to netdev
 **/
static irqreturn_t iavf_msix_aq(int irq, void *data)
{
	struct net_device *netdev = data;
	struct iavf_adapter *adapter = netdev_priv(netdev);
	struct iavf_hw *hw = &adapter->hw;

	/* handle non-queue interrupts, these reads clear the registers */
	rd32(hw, IAVF_VFINT_ICR01);
	rd32(hw, IAVF_VFINT_ICR0_ENA1);

	/* schedule work on the private workqueue */
	queue_work(iavf_wq, &adapter->adminq_task);

	return IRQ_HANDLED;
}

/**
 * iavf_msix_clean_rings - MSIX mode Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a q_vector
 **/
static irqreturn_t iavf_msix_clean_rings(int irq, void *data)
{
	struct iavf_q_vector *q_vector = data;

	if (!q_vector->tx.ring && !q_vector->rx.ring)
		return IRQ_HANDLED;

	napi_schedule_irqoff(&q_vector->napi);

	return IRQ_HANDLED;
}

/**
 * iavf_map_vector_to_rxq - associate irqs with rx queues
 * @adapter: board private structure
 * @v_idx: interrupt number
 * @r_idx: queue number
 **/
static void
iavf_map_vector_to_rxq(struct iavf_adapter *adapter, int v_idx, int r_idx)
{
	struct iavf_q_vector *q_vector = &adapter->q_vectors[v_idx];
	struct iavf_ring *rx_ring = &adapter->rx_rings[r_idx];
	struct iavf_hw *hw = &adapter->hw;

	rx_ring->q_vector = q_vector;
	rx_ring->next = q_vector->rx.ring;
	rx_ring->vsi = &adapter->vsi;
	q_vector->rx.ring = rx_ring;
	q_vector->rx.count++;
	q_vector->rx.next_update = jiffies + 1;
	q_vector->rx.target_itr = ITR_TO_REG(rx_ring->itr_setting);
	q_vector->ring_mask |= BIT(r_idx);
	wr32(hw, IAVF_VFINT_ITRN1(IAVF_RX_ITR, q_vector->reg_idx),
	     q_vector->rx.current_itr >> 1);
	q_vector->rx.current_itr = q_vector->rx.target_itr;
}

/**
 * iavf_map_vector_to_txq - associate irqs with tx queues
 * @adapter: board private structure
 * @v_idx: interrupt number
 * @t_idx: queue number
 **/
static void
iavf_map_vector_to_txq(struct iavf_adapter *adapter, int v_idx, int t_idx)
{
	struct iavf_q_vector *q_vector = &adapter->q_vectors[v_idx];
	struct iavf_ring *tx_ring = &adapter->tx_rings[t_idx];
	struct iavf_hw *hw = &adapter->hw;

	tx_ring->q_vector = q_vector;
	tx_ring->next = q_vector->tx.ring;
	tx_ring->vsi = &adapter->vsi;
	q_vector->tx.ring = tx_ring;
	q_vector->tx.count++;
	q_vector->tx.next_update = jiffies + 1;
	q_vector->tx.target_itr = ITR_TO_REG(tx_ring->itr_setting);
	q_vector->num_ringpairs++;
	wr32(hw, IAVF_VFINT_ITRN1(IAVF_TX_ITR, q_vector->reg_idx),
	     q_vector->tx.target_itr >> 1);
	q_vector->tx.current_itr = q_vector->tx.target_itr;
}

/**
 * iavf_map_rings_to_vectors - Maps descriptor rings to vectors
 * @adapter: board private structure to initialize
 *
 * This function maps descriptor rings to the queue-specific vectors
 * we were allotted through the MSI-X enabling code. Ideally, we'd have
 * one vector per ring/queue, but on a constrained vector budget, we
 * group the rings as "efficiently" as possible. You would add new
 * mapping configurations in here.
 **/
static void iavf_map_rings_to_vectors(struct iavf_adapter *adapter)
{
	int rings_remaining = adapter->num_active_queues;
	int ridx = 0, vidx = 0;
	int q_vectors;

	q_vectors = adapter->num_msix_vectors - NONQ_VECS;

	for (; ridx < rings_remaining; ridx++) {
		iavf_map_vector_to_rxq(adapter, vidx, ridx);
		iavf_map_vector_to_txq(adapter, vidx, ridx);

		/* In the case where we have more queues than vectors, continue
		 * round-robin on vectors until all queues are mapped.
		 */
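		/* e.g. with 8 queue pairs and 4 queue vectors, ring pairs
		 * 0-7 end up on vectors 0, 1, 2, 3, 0, 1, 2, 3
		 */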
		if (++vidx >= q_vectors)
			vidx = 0;
	}

	adapter->aq_required |= IAVF_FLAG_AQ_MAP_VECTORS;
}

/**
 * iavf_irq_affinity_notify - Callback for affinity changes
 * @notify: context as to what irq was changed
 * @mask: the new affinity mask
 *
 * This is a callback function used by the irq_set_affinity_notifier function
 * so that we may register to receive changes to the irq affinity masks.
 **/
static void iavf_irq_affinity_notify(struct irq_affinity_notify *notify,
				     const cpumask_t *mask)
{
	struct iavf_q_vector *q_vector =
		container_of(notify, struct iavf_q_vector, affinity_notify);
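
	/* cache the new mask; the ring-clean/poll path checks it so it can
	 * stop polling when it finds itself running on the wrong CPU
	 */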
	cpumask_copy(&q_vector->affinity_mask, mask);
}

/**
 * iavf_irq_affinity_release - Callback for affinity notifier release
 * @ref: internal core kernel usage
 *
 * This is a callback function used by the irq_set_affinity_notifier function
 * to inform the current notification subscriber that they will no longer
 * receive notifications.
 **/
static void iavf_irq_affinity_release(struct kref *ref) {}
/**
 * iavf_request_traffic_irqs - Initialize MSI-X interrupts
 * @adapter: board private structure
 * @basename: device basename
 *
 * Allocates MSI-X vectors for tx and rx handling, and requests
 * interrupts from the kernel.
 **/
static int
iavf_request_traffic_irqs(struct iavf_adapter *adapter, char *basename)
{
        unsigned int vector, q_vectors;
        unsigned int rx_int_idx = 0, tx_int_idx = 0;
        int irq_num, err;
        int cpu;

        iavf_irq_disable(adapter);
        /* Decrement for Other and TCP Timer vectors */
        q_vectors = adapter->num_msix_vectors - NONQ_VECS;

        for (vector = 0; vector < q_vectors; vector++) {
                struct iavf_q_vector *q_vector = &adapter->q_vectors[vector];

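                /* Traffic vectors come after the non-queue vectors; MSI-X
                 * entry 0 is reserved for the admin queue interrupt.
                 */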
                irq_num = adapter->msix_entries[vector + NONQ_VECS].vector;

                if (q_vector->tx.ring && q_vector->rx.ring) {
                        snprintf(q_vector->name, sizeof(q_vector->name),
                                 "iavf-%s-TxRx-%u", basename, rx_int_idx++);
                        tx_int_idx++;
                } else if (q_vector->rx.ring) {
                        snprintf(q_vector->name, sizeof(q_vector->name),
                                 "iavf-%s-rx-%u", basename, rx_int_idx++);
                } else if (q_vector->tx.ring) {
                        snprintf(q_vector->name, sizeof(q_vector->name),
                                 "iavf-%s-tx-%u", basename, tx_int_idx++);
                } else {
                        /* skip this unused q_vector */
                        continue;
                }
                err = request_irq(irq_num,
                                  iavf_msix_clean_rings,
                                  0, q_vector->name, q_vector);
                if (err) {
                        dev_info(&adapter->pdev->dev,
                                 "Request_irq failed, error: %d\n", err);
                        goto free_queue_irqs;
                }
                /* register for affinity change notifications */
                q_vector->affinity_notify.notify = iavf_irq_affinity_notify;
                q_vector->affinity_notify.release = iavf_irq_affinity_release;
                irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify);
                /* Spread the IRQ affinity hints across online CPUs. Note that
                 * get_cpu_mask returns a mask with a permanent lifetime so
                 * it's safe to use as a hint for irq_set_affinity_hint.
                 */
                cpu = cpumask_local_spread(q_vector->v_idx, -1);
                irq_set_affinity_hint(irq_num, get_cpu_mask(cpu));
        }

        return 0;

free_queue_irqs:
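        /* Unwind what has been requested so far: clear the affinity notifier
         * and hint, then free the IRQ for every vector already set up.
         */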
        while (vector) {
                vector--;
                irq_num = adapter->msix_entries[vector + NONQ_VECS].vector;
                irq_set_affinity_notifier(irq_num, NULL);
                irq_set_affinity_hint(irq_num, NULL);
                free_irq(irq_num, &adapter->q_vectors[vector]);
        }
        return err;
}
/**
 * iavf_request_misc_irq - Initialize MSI-X interrupts
 * @adapter: board private structure
 *
 * Allocates MSI-X vector 0 and requests interrupts from the kernel. This
 * vector is only for the admin queue, and stays active even when the netdev
 * is closed.
 **/
static int iavf_request_misc_irq(struct iavf_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;
        int err;

        snprintf(adapter->misc_vector_name,
                 sizeof(adapter->misc_vector_name) - 1, "iavf-%s:mbx",
                 dev_name(&adapter->pdev->dev));
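        /* The ":mbx" suffix marks this as the mailbox (admin queue) vector. */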
        err = request_irq(adapter->msix_entries[0].vector,
                          &iavf_msix_aq, 0,
                          adapter->misc_vector_name, netdev);
        if (err) {
                dev_err(&adapter->pdev->dev,
                        "request_irq for %s failed: %d\n",
                        adapter->misc_vector_name, err);
                free_irq(adapter->msix_entries[0].vector, netdev);
        }
        return err;
}
/**
 * iavf_free_traffic_irqs - Free MSI-X interrupts
 * @adapter: board private structure
 *
 * Frees all MSI-X vectors other than 0.
 **/
static void iavf_free_traffic_irqs(struct iavf_adapter *adapter)
{
        int vector, irq_num, q_vectors;

        if (!adapter->msix_entries)
                return;

        q_vectors = adapter->num_msix_vectors - NONQ_VECS;
        for (vector = 0; vector < q_vectors; vector++) {
                irq_num = adapter->msix_entries[vector + NONQ_VECS].vector;
                irq_set_affinity_notifier(irq_num, NULL);
                irq_set_affinity_hint(irq_num, NULL);
                free_irq(irq_num, &adapter->q_vectors[vector]);
        }
}
/**
 * iavf_free_misc_irq - Free MSI-X miscellaneous vector
 * @adapter: board private structure
 *
 * Frees MSI-X vector 0.
 **/
static void iavf_free_misc_irq(struct iavf_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;

        if (!adapter->msix_entries)
                return;

        free_irq(adapter->msix_entries[0].vector, netdev);
}
/**
 * iavf_configure_tx - Configure Transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/
static void iavf_configure_tx(struct iavf_adapter *adapter)
{
        struct iavf_hw *hw = &adapter->hw;
        int i;

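        /* Cache the Tx tail register address for each active ring so the
         * transmit path can update the hardware tail directly.
         */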
        for (i = 0; i < adapter->num_active_queues; i++)
                adapter->tx_rings[i].tail = hw->hw_addr + IAVF_QTX_TAIL1(i);
}
/**
 * iavf_configure_rx - Configure Receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/
static void iavf_configure_rx(struct iavf_adapter *adapter)
{
        unsigned int rx_buf_len = IAVF_RXBUFFER_2048;
        struct iavf_hw *hw = &adapter->hw;
        int i;

        /* Legacy Rx will always default to a 2048 buffer size. */
#if (PAGE_SIZE < 8192)
        if (!(adapter->flags & IAVF_FLAG_LEGACY_RX)) {
                struct net_device *netdev = adapter->netdev;

                /* For jumbo frames on systems with 4K pages we have to use
                 * an order 1 page, so we might as well increase the size
                 * of our Rx buffer to make better use of the available space
                 */
                rx_buf_len = IAVF_RXBUFFER_3072;

                /* We use a 1536 buffer size for configurations with
                 * standard Ethernet mtu. On x86 this gives us enough room
                 * for shared info and 192 bytes of padding.
                 */
                if (!IAVF_2K_TOO_SMALL_WITH_PADDING &&
                    (netdev->mtu <= ETH_DATA_LEN))
                        rx_buf_len = IAVF_RXBUFFER_1536 - NET_IP_ALIGN;
        }
#endif

        for (i = 0; i < adapter->num_active_queues; i++) {
                adapter->rx_rings[i].tail = hw->hw_addr + IAVF_QRX_TAIL1(i);
                adapter->rx_rings[i].rx_buf_len = rx_buf_len;

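                /* Use the build_skb receive path unless legacy Rx was
                 * requested via the IAVF_FLAG_LEGACY_RX flag.
                 */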
                if (adapter->flags & IAVF_FLAG_LEGACY_RX)
                        clear_ring_build_skb_enabled(&adapter->rx_rings[i]);
                else
                        set_ring_build_skb_enabled(&adapter->rx_rings[i]);
        }
}
/**
 * iavf_find_vlan - Search filter list for specific vlan filter
 * @adapter: board private structure
 * @vlan: vlan tag
 *
 * Returns ptr to the filter object or NULL. Must be called while holding the
 * mac_vlan_list_lock.
 **/
static struct
iavf_vlan_filter *iavf_find_vlan(struct iavf_adapter *adapter,
                                 struct iavf_vlan vlan)
{
        struct iavf_vlan_filter *f;

        list_for_each_entry(f, &adapter->vlan_filter_list, list) {
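                /* A filter matches only when both the VID and the TPID agree,
                 * so 802.1Q and 802.1ad entries with the same VID stay distinct.
                 */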
                if (f->vlan.vid == vlan.vid &&
                    f->vlan.tpid == vlan.tpid)
                        return f;
        }

        return NULL;
}
/**
 * iavf_add_vlan - Add a vlan filter to the list
 * @adapter: board private structure
 * @vlan: VLAN tag
 *
 * Returns ptr to the filter object or NULL when no memory available.
 **/
static struct
iavf_vlan_filter *iavf_add_vlan(struct iavf_adapter *adapter,
                                struct iavf_vlan vlan)
{
        struct iavf_vlan_filter *f = NULL;

        spin_lock_bh(&adapter->mac_vlan_list_lock);

        f = iavf_find_vlan(adapter, vlan);
        if (!f) {
                f = kzalloc(sizeof(*f), GFP_ATOMIC);
                if (!f)
                        goto clearout;

                f->vlan = vlan;
                list_add_tail(&f->list, &adapter->vlan_filter_list);
                f->add = true;
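                /* Schedule a virtchnl request so the filter is also added
                 * on the PF side.
                 */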
                adapter->aq_required |= IAVF_FLAG_AQ_ADD_VLAN_FILTER;
        }

clearout:
        spin_unlock_bh(&adapter->mac_vlan_list_lock);
        return f;
}
/**
 * iavf_del_vlan - Remove a vlan filter from the list
 * @adapter: board private structure
 * @vlan: VLAN tag
 **/
static void iavf_del_vlan(struct iavf_adapter *adapter, struct iavf_vlan vlan)
{
        struct iavf_vlan_filter *f;

        spin_lock_bh(&adapter->mac_vlan_list_lock);

        f = iavf_find_vlan(adapter, vlan);
        if (f) {
                f->remove = true;
                adapter->aq_required |= IAVF_FLAG_AQ_DEL_VLAN_FILTER;
        }

        spin_unlock_bh(&adapter->mac_vlan_list_lock);
}
/**
 * iavf_restore_filters
 * @adapter: board private structure
 *
 * Restore existing non-MAC filters when VF netdev comes back up.
 **/
static void iavf_restore_filters(struct iavf_adapter *adapter)
{
        u16 vid;

        /* re-add all VLAN filters */
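        /* active_cvlans tracks 802.1Q (CTAG) filters; active_svlans tracks
         * 802.1ad (STAG) filters.
         */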
        for_each_set_bit(vid, adapter->vsi.active_cvlans, VLAN_N_VID)
                iavf_add_vlan(adapter, IAVF_VLAN(vid, ETH_P_8021Q));
        for_each_set_bit(vid, adapter->vsi.active_svlans, VLAN_N_VID)
                iavf_add_vlan(adapter, IAVF_VLAN(vid, ETH_P_8021AD));
}
/**
 * iavf_get_num_vlans_added - get number of VLANs added
 * @adapter: board private structure
 */
static u16 iavf_get_num_vlans_added(struct iavf_adapter *adapter)
{
        return bitmap_weight(adapter->vsi.active_cvlans, VLAN_N_VID) +
               bitmap_weight(adapter->vsi.active_svlans, VLAN_N_VID);
}
/**
 * iavf_get_max_vlans_allowed - get maximum VLANs allowed for this VF
 * @adapter: board private structure
 *
 * This depends on the negotiated VLAN capability. For VIRTCHNL_VF_OFFLOAD_VLAN,
 * do not impose a limit as that maintains current behavior and for
 * VIRTCHNL_VF_OFFLOAD_VLAN_V2, use the maximum allowed sent from the PF.
 **/
static u16 iavf_get_max_vlans_allowed(struct iavf_adapter *adapter)
{
        /* don't impose any limit for VIRTCHNL_VF_OFFLOAD_VLAN since there has
         * never been a limit on the VF driver side
         */
        if (VLAN_ALLOWED(adapter))
                return VLAN_N_VID;
        else if (VLAN_V2_ALLOWED(adapter))
                return adapter->vlan_v2_caps.filtering.max_filters;

        return 0;
}
/**
 * iavf_max_vlans_added - check if maximum VLANs allowed already exist
 * @adapter: board private structure
 **/
static bool iavf_max_vlans_added(struct iavf_adapter *adapter)
{
        if (iavf_get_num_vlans_added(adapter) <
            iavf_get_max_vlans_allowed(adapter))
                return false;

        return true;
}
/**
 * iavf_vlan_rx_add_vid - Add a VLAN filter to a device
 * @netdev: network device struct
 * @proto: unused protocol data
 * @vid: VLAN tag
 **/
static int iavf_vlan_rx_add_vid(struct net_device *netdev,
                                __always_unused __be16 proto, u16 vid)
{
        struct iavf_adapter *adapter = netdev_priv(netdev);

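        /* VLAN filtering may not have been negotiated with the PF. */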
if ( ! VLAN_FILTERING_ALLOWED ( adapter ) )
2015-08-28 17:55:57 -04:00
return - EIO ;
2021-06-04 09:53:27 -07:00
2021-11-29 16:16:04 -08:00
if ( iavf_max_vlans_added ( adapter ) ) {
netdev_err ( netdev , " Max allowed VLAN filters %u. Remove existing VLANs or disable filtering via Ethtool if supported. \n " ,
iavf_get_max_vlans_allowed ( adapter ) ) ;
return - EIO ;
}
2021-11-29 16:16:01 -08:00
if ( ! iavf_add_vlan ( adapter , IAVF_VLAN ( vid , be16_to_cpu ( proto ) ) ) )
2013-12-21 06:12:45 +00:00
return - ENOMEM ;
2021-06-04 09:53:27 -07:00
2021-11-29 16:16:01 -08:00
if ( proto = = cpu_to_be16 ( ETH_P_8021Q ) )
set_bit ( vid , adapter - > vsi . active_cvlans ) ;
else
set_bit ( vid , adapter - > vsi . active_svlans ) ;
2013-12-21 06:12:45 +00:00
return 0 ;
}
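/* Usage note (illustrative, not part of the driver): the VLAN core reaches
 * this NDO through vlan_vid_add(), for example when a VLAN sub-interface is
 * created on top of the VF:
 *
 *	ip link add link <vf-netdev> name <vf-netdev>.100 type vlan id 100
 *
 * Only the software filter is queued here; the watchdog later sends the
 * corresponding virtchnl add-VLAN request because iavf_add_vlan() sets
 * IAVF_FLAG_AQ_ADD_VLAN_FILTER in aq_required.
 */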
/**
2018-09-14 17:37:46 -07:00
* iavf_vlan_rx_kill_vid - Remove a VLAN filter from a device
2013-12-21 06:12:45 +00:00
* @ netdev : network device struct
2018-04-20 01:41:33 -07:00
* @ proto : unused protocol data
2013-12-21 06:12:45 +00:00
* @ vid : VLAN tag
* */
2018-09-14 17:37:46 -07:00
static int iavf_vlan_rx_kill_vid ( struct net_device * netdev ,
__always_unused __be16 proto , u16 vid )
2013-12-21 06:12:45 +00:00
{
2018-09-14 17:37:46 -07:00
struct iavf_adapter * adapter = netdev_priv ( netdev ) ;
2013-12-21 06:12:45 +00:00
2021-11-29 16:16:01 -08:00
iavf_del_vlan ( adapter , IAVF_VLAN ( vid , be16_to_cpu ( proto ) ) ) ;
if ( proto = = cpu_to_be16 ( ETH_P_8021Q ) )
clear_bit ( vid , adapter - > vsi . active_cvlans ) ;
else
clear_bit ( vid , adapter - > vsi . active_svlans ) ;
2021-06-04 09:53:27 -07:00
return 0 ;
2013-12-21 06:12:45 +00:00
}
/**
2018-09-14 17:37:46 -07:00
* iavf_find_filter - Search filter list for specific mac filter
2013-12-21 06:12:45 +00:00
* @ adapter : board private structure
* @ macaddr : the MAC address
*
2017-10-27 11:06:50 -04:00
* Returns ptr to the filter object or NULL . Must be called while holding the
* mac_vlan_list_lock .
2013-12-21 06:12:45 +00:00
* */
static struct
2018-09-14 17:37:46 -07:00
iavf_mac_filter * iavf_find_filter ( struct iavf_adapter * adapter ,
const u8 * macaddr )
2013-12-21 06:12:45 +00:00
{
2018-09-14 17:37:46 -07:00
struct iavf_mac_filter * f ;
2013-12-21 06:12:45 +00:00
if ( ! macaddr )
return NULL ;
list_for_each_entry ( f , & adapter - > mac_filter_list , list ) {
if ( ether_addr_equal ( macaddr , f - > macaddr ) )
return f ;
}
return NULL ;
}
/**
2018-09-14 17:37:55 -07:00
* iavf_add_filter - Add a mac filter to the filter list
2013-12-21 06:12:45 +00:00
* @ adapter : board private structure
* @ macaddr : the MAC address
*
* Returns ptr to the filter object or NULL when no memory available .
* */
2019-12-17 11:29:23 +01:00
struct iavf_mac_filter * iavf_add_filter ( struct iavf_adapter * adapter ,
const u8 * macaddr )
2013-12-21 06:12:45 +00:00
{
2018-09-14 17:37:46 -07:00
struct iavf_mac_filter * f ;
2013-12-21 06:12:45 +00:00
if ( ! macaddr )
return NULL ;
2018-09-14 17:37:46 -07:00
f = iavf_find_filter ( adapter , macaddr ) ;
2014-11-11 20:02:52 +00:00
if ( ! f ) {
2013-12-21 06:12:45 +00:00
f = kzalloc ( sizeof ( * f ) , GFP_ATOMIC ) ;
2017-10-27 11:06:50 -04:00
if ( ! f )
2018-02-05 13:03:36 -08:00
return f ;
2013-12-21 06:12:45 +00:00
2014-05-22 06:32:02 +00:00
ether_addr_copy ( f - > macaddr , macaddr ) ;
2013-12-21 06:12:45 +00:00
2016-05-16 10:26:42 -07:00
list_add_tail ( & f - > list , & adapter - > mac_filter_list ) ;
2013-12-21 06:12:45 +00:00
f - > add = true ;
2021-08-18 10:42:17 -07:00
f - > is_new_mac = true ;
2018-09-14 17:37:46 -07:00
adapter - > aq_required | = IAVF_FLAG_AQ_ADD_MAC_FILTER ;
2017-09-07 08:05:47 -04:00
} else {
f - > remove = false ;
2013-12-21 06:12:45 +00:00
}
return f ;
}
/**
2018-09-14 17:37:46 -07:00
* iavf_set_mac - NDO callback to set port mac address
2013-12-21 06:12:45 +00:00
* @ netdev : network interface device structure
* @ p : pointer to an address structure
*
* Returns 0 on success , negative on failure
* */
2018-09-14 17:37:46 -07:00
static int iavf_set_mac ( struct net_device * netdev , void * p )
2013-12-21 06:12:45 +00:00
{
2018-09-14 17:37:46 -07:00
struct iavf_adapter * adapter = netdev_priv ( netdev ) ;
2018-09-14 17:37:52 -07:00
struct iavf_hw * hw = & adapter - > hw ;
2018-09-14 17:37:46 -07:00
struct iavf_mac_filter * f ;
2013-12-21 06:12:45 +00:00
struct sockaddr * addr = p ;
if ( ! is_valid_ether_addr ( addr - > sa_data ) )
return - EADDRNOTAVAIL ;
if ( ether_addr_equal ( netdev - > dev_addr , addr - > sa_data ) )
return 0 ;
2017-10-27 11:06:50 -04:00
spin_lock_bh ( & adapter - > mac_vlan_list_lock ) ;
2018-09-14 17:37:46 -07:00
f = iavf_find_filter ( adapter , hw - > mac . addr ) ;
2015-08-31 19:54:44 -04:00
if ( f ) {
f - > remove = true ;
2018-09-14 17:37:46 -07:00
adapter - > aq_required | = IAVF_FLAG_AQ_DEL_MAC_FILTER ;
2015-08-31 19:54:44 -04:00
}
2018-09-14 17:37:46 -07:00
f = iavf_add_filter ( adapter , addr - > sa_data ) ;
2018-02-05 13:03:36 -08:00
2017-10-27 11:06:50 -04:00
spin_unlock_bh ( & adapter - > mac_vlan_list_lock ) ;
2013-12-21 06:12:45 +00:00
if ( f ) {
2014-05-22 06:32:02 +00:00
ether_addr_copy ( hw - > mac . addr , addr - > sa_data ) ;
2013-12-21 06:12:45 +00:00
}
return ( f = = NULL ) ? - ENOMEM : 0 ;
}
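/* Usage note (illustrative): this NDO is reached through
 * dev_set_mac_address(), e.g. "ip link set dev <vf-netdev> address
 * 02:11:22:33:44:55". The previous hardware address is flagged for removal
 * and the new one queued as a filter; the actual virtchnl messages go out
 * later when the watchdog handles IAVF_FLAG_AQ_DEL_MAC_FILTER and
 * IAVF_FLAG_AQ_ADD_MAC_FILTER.
 */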
/**
2018-09-14 17:37:46 -07:00
* iavf_addr_sync - Callback for dev_ ( mc | uc ) _sync to add address
2018-01-22 12:00:38 -05:00
* @ netdev : the netdevice
* @ addr : address to add
*
* Called by __dev_ ( mc | uc ) _sync when an address needs to be added . We call
* __dev_ ( uc | mc ) _sync from . set_rx_mode and guarantee to hold the hash lock .
*/
2018-09-14 17:37:46 -07:00
static int iavf_addr_sync ( struct net_device * netdev , const u8 * addr )
2013-12-21 06:12:45 +00:00
{
2018-09-14 17:37:46 -07:00
struct iavf_adapter * adapter = netdev_priv ( netdev ) ;
2015-08-26 15:14:20 -04:00
2018-09-14 17:37:46 -07:00
if ( iavf_add_filter ( adapter , addr ) )
2018-01-22 12:00:38 -05:00
return 0 ;
else
return - ENOMEM ;
}
2015-08-26 15:14:20 -04:00
2018-01-22 12:00:38 -05:00
/**
2018-09-14 17:37:46 -07:00
* iavf_addr_unsync - Callback for dev_ ( mc | uc ) _sync to remove address
2018-01-22 12:00:38 -05:00
* @ netdev : the netdevice
* @ addr : address to remove
*
* Called by __dev_ ( mc | uc ) _sync when an address needs to be removed . We call
* __dev_ ( uc | mc ) _sync from . set_rx_mode and guarantee to hold the hash lock .
*/
2018-09-14 17:37:46 -07:00
static int iavf_addr_unsync ( struct net_device * netdev , const u8 * addr )
2018-01-22 12:00:38 -05:00
{
2018-09-14 17:37:46 -07:00
struct iavf_adapter * adapter = netdev_priv ( netdev ) ;
struct iavf_mac_filter * f ;
2015-08-26 15:14:20 -04:00
2018-01-22 12:00:38 -05:00
/* Under some circumstances, we might receive a request to delete
* our own device address from our uc list . Because we store the
* device address in the VSI ' s MAC / VLAN filter list , we need to ignore
* such requests and not delete our device address from this list .
*/
if ( ether_addr_equal ( addr , netdev - > dev_addr ) )
return 0 ;
2015-08-26 15:14:20 -04:00
2018-09-14 17:37:46 -07:00
f = iavf_find_filter ( adapter , addr ) ;
2018-01-22 12:00:38 -05:00
if ( f ) {
2015-08-26 15:14:20 -04:00
f - > remove = true ;
2018-09-14 17:37:46 -07:00
adapter - > aq_required | = IAVF_FLAG_AQ_DEL_MAC_FILTER ;
2013-12-21 06:12:45 +00:00
}
2018-01-22 12:00:38 -05:00
return 0 ;
}
/**
2018-09-14 17:37:46 -07:00
* iavf_set_rx_mode - NDO callback to set the netdev filters
2018-01-22 12:00:38 -05:00
* @ netdev : network interface device structure
* */
2018-09-14 17:37:46 -07:00
static void iavf_set_rx_mode ( struct net_device * netdev )
2018-01-22 12:00:38 -05:00
{
2018-09-14 17:37:46 -07:00
struct iavf_adapter * adapter = netdev_priv ( netdev ) ;
2018-01-22 12:00:38 -05:00
spin_lock_bh ( & adapter - > mac_vlan_list_lock ) ;
2018-09-14 17:37:46 -07:00
__dev_uc_sync ( netdev , iavf_addr_sync , iavf_addr_unsync ) ;
__dev_mc_sync ( netdev , iavf_addr_sync , iavf_addr_unsync ) ;
2018-01-22 12:00:38 -05:00
spin_unlock_bh ( & adapter - > mac_vlan_list_lock ) ;
2016-04-12 08:30:52 -07:00
if ( netdev - > flags & IFF_PROMISC & &
2018-09-14 17:37:46 -07:00
! ( adapter - > flags & IAVF_FLAG_PROMISC_ON ) )
adapter - > aq_required | = IAVF_FLAG_AQ_REQUEST_PROMISC ;
2016-04-12 08:30:52 -07:00
else if ( ! ( netdev - > flags & IFF_PROMISC ) & &
2018-09-14 17:37:46 -07:00
adapter - > flags & IAVF_FLAG_PROMISC_ON )
adapter - > aq_required | = IAVF_FLAG_AQ_RELEASE_PROMISC ;
2016-04-12 08:30:52 -07:00
2016-05-03 15:13:10 -07:00
if ( netdev - > flags & IFF_ALLMULTI & &
2018-09-14 17:37:46 -07:00
! ( adapter - > flags & IAVF_FLAG_ALLMULTI_ON ) )
adapter - > aq_required | = IAVF_FLAG_AQ_REQUEST_ALLMULTI ;
2016-05-03 15:13:10 -07:00
else if ( ! ( netdev - > flags & IFF_ALLMULTI ) & &
2018-09-14 17:37:46 -07:00
adapter - > flags & IAVF_FLAG_ALLMULTI_ON )
adapter - > aq_required | = IAVF_FLAG_AQ_RELEASE_ALLMULTI ;
2013-12-21 06:12:45 +00:00
}
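/* Illustrative example: "ip link set dev <vf-netdev> promisc on" sets
 * IFF_PROMISC, so the next pass through this handler latches
 * IAVF_FLAG_AQ_REQUEST_PROMISC and the watchdog asks the PF for unicast and
 * multicast promiscuous mode via iavf_set_promiscuous(). The PF may still
 * reject the request, e.g. for an untrusted VF, depending on its policy.
 */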
/**
2018-09-14 17:37:46 -07:00
* iavf_napi_enable_all - enable NAPI on all queue vectors
2013-12-21 06:12:45 +00:00
* @ adapter : board private structure
* */
2018-09-14 17:37:46 -07:00
static void iavf_napi_enable_all ( struct iavf_adapter * adapter )
2013-12-21 06:12:45 +00:00
{
int q_idx ;
2018-09-14 17:37:55 -07:00
struct iavf_q_vector * q_vector ;
2013-12-21 06:12:45 +00:00
int q_vectors = adapter - > num_msix_vectors - NONQ_VECS ;
for ( q_idx = 0 ; q_idx < q_vectors ; q_idx + + ) {
struct napi_struct * napi ;
2014-11-11 20:02:42 +00:00
2015-10-26 19:44:39 -04:00
q_vector = & adapter - > q_vectors [ q_idx ] ;
2013-12-21 06:12:45 +00:00
napi = & q_vector - > napi ;
napi_enable ( napi ) ;
}
}
/**
2018-09-14 17:37:46 -07:00
* iavf_napi_disable_all - disable NAPI on all queue vectors
2013-12-21 06:12:45 +00:00
* @ adapter : board private structure
* */
2018-09-14 17:37:46 -07:00
static void iavf_napi_disable_all ( struct iavf_adapter * adapter )
2013-12-21 06:12:45 +00:00
{
int q_idx ;
2018-09-14 17:37:55 -07:00
struct iavf_q_vector * q_vector ;
2013-12-21 06:12:45 +00:00
int q_vectors = adapter - > num_msix_vectors - NONQ_VECS ;
for ( q_idx = 0 ; q_idx < q_vectors ; q_idx + + ) {
2015-10-26 19:44:39 -04:00
q_vector = & adapter - > q_vectors [ q_idx ] ;
2013-12-21 06:12:45 +00:00
napi_disable ( & q_vector - > napi ) ;
}
}
/**
2018-09-14 17:37:46 -07:00
* iavf_configure - set up transmit and receive data structures
2013-12-21 06:12:45 +00:00
* @ adapter : board private structure
* */
2018-09-14 17:37:46 -07:00
static void iavf_configure ( struct iavf_adapter * adapter )
2013-12-21 06:12:45 +00:00
{
struct net_device * netdev = adapter - > netdev ;
int i ;
2018-09-14 17:37:46 -07:00
iavf_set_rx_mode ( netdev ) ;
2013-12-21 06:12:45 +00:00
2018-09-14 17:37:46 -07:00
iavf_configure_tx ( adapter ) ;
iavf_configure_rx ( adapter ) ;
adapter - > aq_required | = IAVF_FLAG_AQ_CONFIGURE_QUEUES ;
2013-12-21 06:12:45 +00:00
2014-10-25 03:24:34 +00:00
for ( i = 0 ; i < adapter - > num_active_queues ; i + + ) {
2018-09-14 17:37:55 -07:00
struct iavf_ring * ring = & adapter - > rx_rings [ i ] ;
2014-11-11 20:02:42 +00:00
2018-09-14 17:37:55 -07:00
iavf_alloc_rx_buffers ( ring , IAVF_DESC_UNUSED ( ring ) ) ;
2013-12-21 06:12:45 +00:00
}
}
/**
2018-09-14 17:37:46 -07:00
* iavf_up_complete - Finish the last steps of bringing up a connection
2013-12-21 06:12:45 +00:00
* @ adapter : board private structure
2017-10-27 11:06:52 -04:00
*
2018-09-14 17:37:46 -07:00
* Expects to be called while holding the __IAVF_IN_CRITICAL_TASK bit lock .
2013-12-21 06:12:45 +00:00
* */
2018-09-14 17:37:46 -07:00
static void iavf_up_complete ( struct iavf_adapter * adapter )
2013-12-21 06:12:45 +00:00
{
2021-08-19 08:47:40 +00:00
iavf_change_state ( adapter , __IAVF_RUNNING ) ;
2018-09-14 17:37:55 -07:00
clear_bit ( __IAVF_VSI_DOWN , adapter - > vsi . state ) ;
2013-12-21 06:12:45 +00:00
2018-09-14 17:37:46 -07:00
iavf_napi_enable_all ( adapter ) ;
2013-12-21 06:12:45 +00:00
2018-09-14 17:37:46 -07:00
adapter - > aq_required | = IAVF_FLAG_AQ_ENABLE_QUEUES ;
2017-01-24 10:23:59 -08:00
if ( CLIENT_ENABLED ( adapter ) )
2018-09-14 17:37:46 -07:00
adapter - > flags | = IAVF_FLAG_CLIENT_NEEDS_OPEN ;
2019-05-14 10:37:05 -07:00
mod_delayed_work ( iavf_wq , & adapter - > watchdog_task , 0 ) ;
2013-12-21 06:12:45 +00:00
}
/**
2018-09-14 17:37:55 -07:00
* iavf_down - Shutdown the connection processing
2013-12-21 06:12:45 +00:00
* @ adapter : board private structure
2017-10-27 11:06:52 -04:00
*
2018-09-14 17:37:46 -07:00
* Expects to be called while holding the __IAVF_IN_CRITICAL_TASK bit lock .
2013-12-21 06:12:45 +00:00
* */
2018-09-14 17:37:46 -07:00
void iavf_down ( struct iavf_adapter * adapter )
2013-12-21 06:12:45 +00:00
{
struct net_device * netdev = adapter - > netdev ;
2018-09-14 17:37:46 -07:00
struct iavf_vlan_filter * vlf ;
struct iavf_cloud_filter * cf ;
2021-03-09 11:08:11 +08:00
struct iavf_fdir_fltr * fdir ;
struct iavf_mac_filter * f ;
2021-04-13 08:48:41 +08:00
struct iavf_adv_rss * rss ;
2013-12-21 06:12:45 +00:00
2018-09-14 17:37:46 -07:00
if ( adapter - > state < = __IAVF_DOWN_PENDING )
2014-05-22 06:31:46 +00:00
return ;
2015-03-27 00:12:10 -07:00
netif_carrier_off ( netdev ) ;
netif_tx_disable ( netdev ) ;
i40evf: Fix link state event handling
Currently disabling the link state from PF via
ip link set enp5s0f0 vf 0 state disable
doesn't disable the CARRIER on the VF.
This patch updates the carrier and starts/stops the tx queues based on the
link state notification from PF.
PF: enp5s0f0, VF: enp5s2
#modprobe i40e
#echo 2 > /sys/class/net/enp5s0f0/device/sriov_numvfs
#ip link set enp5s2 up
#ip -d link show enp5s2
175: enp5s2: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP mode DEFAULT group default qlen 1000
link/ether ea:4d:60:bc:6f:85 brd ff:ff:ff:ff:ff:ff promiscuity 0 addrgenmode eui64
#ip link set enp5s0f0 vf 0 state disable
#ip -d link show enp5s0f0
171: enp5s0f0: <BROADCAST,MULTICAST> mtu 1500 qdisc noop state DOWN mode DEFAULT group default qlen 1000
link/ether 68:05:ca:2e:72:68 brd ff:ff:ff:ff:ff:ff promiscuity 0 addrgenmode eui64 numtxqueues 72 numrxqueues 72 portid 6805ca2e7268
vf 0 MAC 00:00:00:00:00:00, spoof checking on, link-state disable, trust off
vf 1 MAC 00:00:00:00:00:00, spoof checking on, link-state auto, trust off
#ip -d link show enp5s2
175: enp5s2: <NO-CARRIER,BROADCAST,MULTICAST,UP> mtu 1500 qdisc mq state DOWN mode DEFAULT group default qlen 1000
link/ether ea:4d:60:bc:6f:85 brd ff:ff:ff:ff:ff:ff promiscuity 0 addrgenmode eui64 numtxqueues 16 numrxqueues 16
Signed-off-by: Sridhar Samudrala <sridhar.samudrala@intel.com>
Tested-by: Andrew Bowers <andrewx.bowers@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
2016-09-01 22:27:27 +02:00
adapter - > link_up = false ;
2018-09-14 17:37:46 -07:00
iavf_napi_disable_all ( adapter ) ;
iavf_irq_disable ( adapter ) ;
2014-12-09 08:53:04 +00:00
2017-10-27 11:06:50 -04:00
spin_lock_bh ( & adapter - > mac_vlan_list_lock ) ;
2018-01-22 12:00:38 -05:00
/* clear the sync flag on all filters */
__dev_uc_unsync ( adapter - > netdev , NULL ) ;
__dev_mc_unsync ( adapter - > netdev , NULL ) ;
i40evf: refactor reset handling
Respond better to a VF reset event. When a reset is signaled by the
PF, or detected by the watchdog task, prevent the watchdog from
processing admin queue requests, and schedule the reset task.
In the reset task, wait first for the reset to start, then for it to
complete, then reinit the driver.
If the reset never appears to complete after a long, long time (>10
seconds is possible depending on what's going on with the PF driver),
then set a flag to indicate that PF communications have failed.
If this flag is set, check for the reset to complete in the watchdog,
and attempt to do a full reinitialization of the driver from scratch.
With these changes the VF driver correctly handles a PF reset event
while running on bare metal, or in a VM.
Also update copyrights.
Change-ID: I93513efd0b50523a8345e7f6a33a5e4f8a2a5996
Signed-off-by: Mitch Williams <mitch.a.williams@intel.com>
Signed-off-by: Jesse Brandeburg <jesse.brandeburg@intel.com>
Tested-by: Sibai Li <sibai.li@intel.com>
Signed-off-by: Aaron Brown <aaron.f.brown@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2014-02-13 03:48:53 -08:00
/* remove all MAC filters */
2013-12-21 06:12:45 +00:00
list_for_each_entry ( f , & adapter - > mac_filter_list , list ) {
f - > remove = true ;
}
2018-01-22 12:00:38 -05:00
2014-02-20 19:29:06 -08:00
/* remove all VLAN filters */
2018-01-22 12:00:34 -05:00
list_for_each_entry ( vlf , & adapter - > vlan_filter_list , list ) {
2018-01-23 08:51:05 -08:00
vlf - > remove = true ;
2014-02-20 19:29:06 -08:00
}
2017-10-27 11:06:50 -04:00
spin_unlock_bh ( & adapter - > mac_vlan_list_lock ) ;
2018-01-23 08:51:05 -08:00
/* remove all cloud filters */
spin_lock_bh ( & adapter - > cloud_filter_list_lock ) ;
list_for_each_entry ( cf , & adapter - > cloud_filter_list , list ) {
cf - > del = true ;
}
spin_unlock_bh ( & adapter - > cloud_filter_list_lock ) ;
2021-03-09 11:08:11 +08:00
/* remove all Flow Director filters */
spin_lock_bh ( & adapter - > fdir_fltr_lock ) ;
list_for_each_entry ( fdir , & adapter - > fdir_list_head , list ) {
fdir - > state = IAVF_FDIR_FLTR_DEL_REQUEST ;
}
spin_unlock_bh ( & adapter - > fdir_fltr_lock ) ;
2021-04-13 08:48:41 +08:00
/* remove all advanced RSS configuration */
spin_lock_bh ( & adapter - > adv_rss_lock ) ;
list_for_each_entry ( rss , & adapter - > adv_rss_list_head , list )
rss - > state = IAVF_ADV_RSS_DEL_REQUEST ;
spin_unlock_bh ( & adapter - > adv_rss_lock ) ;
2018-09-14 17:37:46 -07:00
if ( ! ( adapter - > flags & IAVF_FLAG_PF_COMMS_FAILED ) & &
adapter - > state ! = __IAVF_RESETTING ) {
2014-12-09 08:53:04 +00:00
/* cancel any current operation */
2017-05-11 11:23:11 -07:00
adapter - > current_op = VIRTCHNL_OP_UNKNOWN ;
2014-12-09 08:53:04 +00:00
/* Schedule operations to close down the HW. Don't wait
* here for this to complete . The watchdog is still running
* and it will take care of this .
*/
2018-09-14 17:37:46 -07:00
adapter - > aq_required = IAVF_FLAG_AQ_DEL_MAC_FILTER ;
adapter - > aq_required | = IAVF_FLAG_AQ_DEL_VLAN_FILTER ;
adapter - > aq_required | = IAVF_FLAG_AQ_DEL_CLOUD_FILTER ;
2021-03-09 11:08:11 +08:00
adapter - > aq_required | = IAVF_FLAG_AQ_DEL_FDIR_FILTER ;
2021-04-13 08:48:41 +08:00
adapter - > aq_required | = IAVF_FLAG_AQ_DEL_ADV_RSS_CFG ;
2018-09-14 17:37:46 -07:00
adapter - > aq_required | = IAVF_FLAG_AQ_DISABLE_QUEUES ;
2014-02-13 03:48:53 -08:00
}
2013-12-21 06:12:45 +00:00
2019-05-14 10:37:05 -07:00
mod_delayed_work ( iavf_wq , & adapter - > watchdog_task , 0 ) ;
2013-12-21 06:12:45 +00:00
}
/**
2018-09-14 17:37:46 -07:00
* iavf_acquire_msix_vectors - Setup the MSIX capability
2013-12-21 06:12:45 +00:00
* @ adapter : board private structure
* @ vectors : number of vectors to request
*
* Work with the OS to set up the MSIX vectors needed .
*
* Returns 0 on success , negative on failure
* */
static int
2018-09-14 17:37:46 -07:00
iavf_acquire_msix_vectors ( struct iavf_adapter * adapter , int vectors )
2013-12-21 06:12:45 +00:00
{
int err , vector_threshold ;
/* We'll want at least 3 (vector_threshold):
* 0 ) Other ( Admin Queue and link , mostly )
* 1 ) TxQ [ 0 ] Cleanup
* 2 ) RxQ [ 0 ] Cleanup
*/
vector_threshold = MIN_MSIX_COUNT ;
/* The more we get, the more we will assign to Tx/Rx Cleanup
* for the separate queues . . . where Rx Cleanup > = Tx Cleanup .
* Right now , we simply care about how many we ' ll get ; we ' ll
* set them up later while requesting irq ' s .
*/
2014-04-28 17:53:16 +00:00
err = pci_enable_msix_range ( adapter - > pdev , adapter - > msix_entries ,
vector_threshold , vectors ) ;
if ( err < 0 ) {
2014-05-10 04:49:06 +00:00
dev_err ( & adapter - > pdev - > dev , " Unable to allocate MSI-X interrupts \n " ) ;
2013-12-21 06:12:45 +00:00
kfree ( adapter - > msix_entries ) ;
adapter - > msix_entries = NULL ;
2014-04-28 17:53:16 +00:00
return err ;
2013-12-21 06:12:45 +00:00
}
2014-04-28 17:53:16 +00:00
/* Adjust for only the vectors we'll use, which is minimum
* of max_msix_q_vectors + NONQ_VECS , or the number of
* vectors we were allocated .
*/
adapter - > num_msix_vectors = err ;
return 0 ;
2013-12-21 06:12:45 +00:00
}
/**
2018-09-14 17:37:46 -07:00
* iavf_free_queues - Free memory for all rings
2013-12-21 06:12:45 +00:00
* @ adapter : board private structure to initialize
*
* Free all of the memory associated with queue pairs .
* */
2018-09-14 17:37:46 -07:00
static void iavf_free_queues ( struct iavf_adapter * adapter )
2013-12-21 06:12:45 +00:00
{
if ( ! adapter - > vsi_res )
return ;
2017-06-07 05:43:01 -04:00
adapter - > num_active_queues = 0 ;
2015-10-26 19:44:40 -04:00
kfree ( adapter - > tx_rings ) ;
2015-12-09 15:50:30 -08:00
adapter - > tx_rings = NULL ;
2015-10-26 19:44:40 -04:00
kfree ( adapter - > rx_rings ) ;
2015-12-09 15:50:30 -08:00
adapter - > rx_rings = NULL ;
2013-12-21 06:12:45 +00:00
}
2021-11-29 16:16:02 -08:00
/**
* iavf_set_queue_vlan_tag_loc - set location for VLAN tag offload
* @ adapter : board private structure
*
* Based on negotiated capabilities , the VLAN tag needs to be inserted and / or
* stripped in certain descriptor fields . Instead of checking the offload
* capability bits in the hot path, cache the location in the ring-specific
* flags.
*/
void iavf_set_queue_vlan_tag_loc ( struct iavf_adapter * adapter )
{
int i ;
for ( i = 0 ; i < adapter - > num_active_queues ; i + + ) {
struct iavf_ring * tx_ring = & adapter - > tx_rings [ i ] ;
struct iavf_ring * rx_ring = & adapter - > rx_rings [ i ] ;
/* prevent multiple L2TAG bits being set after VFR */
tx_ring - > flags & =
~ ( IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1 |
IAVF_TXR_FLAGS_VLAN_TAG_LOC_L2TAG2 ) ;
rx_ring - > flags & =
~ ( IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1 |
IAVF_RXR_FLAGS_VLAN_TAG_LOC_L2TAG2_2 ) ;
if ( VLAN_ALLOWED ( adapter ) ) {
tx_ring - > flags | = IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1 ;
rx_ring - > flags | = IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1 ;
} else if ( VLAN_V2_ALLOWED ( adapter ) ) {
struct virtchnl_vlan_supported_caps * stripping_support ;
struct virtchnl_vlan_supported_caps * insertion_support ;
stripping_support =
& adapter - > vlan_v2_caps . offloads . stripping_support ;
insertion_support =
& adapter - > vlan_v2_caps . offloads . insertion_support ;
if ( stripping_support - > outer ) {
if ( stripping_support - > outer &
VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1 )
rx_ring - > flags | =
IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1 ;
else if ( stripping_support - > outer &
VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2_2 )
rx_ring - > flags | =
IAVF_RXR_FLAGS_VLAN_TAG_LOC_L2TAG2_2 ;
} else if ( stripping_support - > inner ) {
if ( stripping_support - > inner &
VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1 )
rx_ring - > flags | =
IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1 ;
else if ( stripping_support - > inner &
VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2_2 )
rx_ring - > flags | =
IAVF_RXR_FLAGS_VLAN_TAG_LOC_L2TAG2_2 ;
}
if ( insertion_support - > outer ) {
if ( insertion_support - > outer &
VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1 )
tx_ring - > flags | =
IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1 ;
else if ( insertion_support - > outer &
VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2 )
tx_ring - > flags | =
IAVF_TXR_FLAGS_VLAN_TAG_LOC_L2TAG2 ;
} else if ( insertion_support - > inner ) {
if ( insertion_support - > inner &
VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1 )
tx_ring - > flags | =
IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1 ;
else if ( insertion_support - > inner &
VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2 )
tx_ring - > flags | =
IAVF_TXR_FLAGS_VLAN_TAG_LOC_L2TAG2 ;
}
}
}
}
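/* Illustrative sketch of the intent (not part of the driver): with the
 * location cached in the ring flags, the hot path only needs a flag test
 * instead of re-parsing the negotiated capabilities per packet, roughly:
 *
 *	if (tx_ring->flags & IAVF_TXR_FLAGS_VLAN_TAG_LOC_L2TAG2)
 *		place the tag via the context descriptor (L2TAG2)
 *	else
 *		place the tag via the data descriptor (L2TAG1)
 *
 * The Rx path makes the mirror-image choice when reporting a stripped tag.
 */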
2013-12-21 06:12:45 +00:00
/**
2018-09-14 17:37:46 -07:00
* iavf_alloc_queues - Allocate memory for all rings
2013-12-21 06:12:45 +00:00
* @ adapter : board private structure to initialize
*
* We allocate one ring per queue at run - time since we don ' t know the
* number of queues at compile - time . The polling_netdev array is
* intended for Multiqueue , but should work fine with a single queue .
* */
2018-09-14 17:37:46 -07:00
static int iavf_alloc_queues ( struct iavf_adapter * adapter )
2013-12-21 06:12:45 +00:00
{
2017-06-07 05:43:01 -04:00
int i , num_active_queues ;
2017-08-22 06:57:50 -04:00
/* If we're in reset reallocating queues we don't actually know yet for
* certain the PF gave us the number of queues we asked for but we ' ll
* assume it did . Once basic reset is finished we ' ll confirm once we
* start negotiating config with PF .
*/
if ( adapter - > num_req_queues )
num_active_queues = adapter - > num_req_queues ;
2018-01-23 08:50:59 -08:00
else if ( ( adapter - > vf_res - > vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ ) & &
adapter - > num_tc )
num_active_queues = adapter - > ch_config . total_qps ;
2017-08-22 06:57:50 -04:00
else
num_active_queues = min_t ( int ,
adapter - > vsi_res - > num_queue_pairs ,
( int ) ( num_online_cpus ( ) ) ) ;
2013-12-21 06:12:45 +00:00
2017-06-07 05:43:01 -04:00
adapter - > tx_rings = kcalloc ( num_active_queues ,
2018-09-14 17:37:55 -07:00
sizeof ( struct iavf_ring ) , GFP_KERNEL ) ;
2015-10-26 19:44:40 -04:00
if ( ! adapter - > tx_rings )
goto err_out ;
2017-06-07 05:43:01 -04:00
adapter - > rx_rings = kcalloc ( num_active_queues ,
2018-09-14 17:37:55 -07:00
sizeof ( struct iavf_ring ) , GFP_KERNEL ) ;
2015-10-26 19:44:40 -04:00
if ( ! adapter - > rx_rings )
goto err_out ;
2017-06-07 05:43:01 -04:00
for ( i = 0 ; i < num_active_queues ; i + + ) {
2018-09-14 17:37:55 -07:00
struct iavf_ring * tx_ring ;
struct iavf_ring * rx_ring ;
2013-12-21 06:12:45 +00:00
2015-10-26 19:44:40 -04:00
tx_ring = & adapter - > tx_rings [ i ] ;
2013-12-21 06:12:45 +00:00
tx_ring - > queue_index = i ;
tx_ring - > netdev = adapter - > netdev ;
tx_ring - > dev = & adapter - > pdev - > dev ;
2014-04-24 06:41:37 +00:00
tx_ring - > count = adapter - > tx_desc_count ;
2018-09-14 17:37:55 -07:00
tx_ring - > itr_setting = IAVF_ITR_TX_DEF ;
2018-09-14 17:37:46 -07:00
if ( adapter - > flags & IAVF_FLAG_WB_ON_ITR_CAPABLE )
2018-09-14 17:37:55 -07:00
tx_ring - > flags | = IAVF_TXR_FLAGS_WB_ON_ITR ;
2013-12-21 06:12:45 +00:00
2015-10-26 19:44:40 -04:00
rx_ring = & adapter - > rx_rings [ i ] ;
2013-12-21 06:12:45 +00:00
rx_ring - > queue_index = i ;
rx_ring - > netdev = adapter - > netdev ;
rx_ring - > dev = & adapter - > pdev - > dev ;
2014-04-24 06:41:37 +00:00
rx_ring - > count = adapter - > rx_desc_count ;
2018-09-14 17:37:55 -07:00
rx_ring - > itr_setting = IAVF_ITR_RX_DEF ;
2013-12-21 06:12:45 +00:00
}
2017-06-07 05:43:01 -04:00
adapter - > num_active_queues = num_active_queues ;
2021-11-29 16:16:02 -08:00
iavf_set_queue_vlan_tag_loc ( adapter ) ;
2013-12-21 06:12:45 +00:00
return 0 ;
err_out :
2018-09-14 17:37:46 -07:00
iavf_free_queues ( adapter ) ;
2013-12-21 06:12:45 +00:00
return - ENOMEM ;
}
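/* Worked example (derived from the code above): with no pending queue-change
 * request and ADq disabled, a VF granted 16 queue pairs on a host with 8
 * online CPUs ends up with num_active_queues = min(16, 8) = 8, and 8 Tx and
 * 8 Rx ring structures are allocated.
 */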
/**
2018-09-14 17:37:46 -07:00
* iavf_set_interrupt_capability - set MSI - X or FAIL if not supported
2013-12-21 06:12:45 +00:00
* @ adapter : board private structure to initialize
*
* Attempt to configure the interrupts using the best available
* capabilities of the hardware and the kernel .
* */
2018-09-14 17:37:46 -07:00
static int iavf_set_interrupt_capability ( struct iavf_adapter * adapter )
2013-12-21 06:12:45 +00:00
{
int vector , v_budget ;
int pairs = 0 ;
int err = 0 ;
if ( ! adapter - > vsi_res ) {
err = - EIO ;
goto out ;
}
2014-10-25 03:24:34 +00:00
pairs = adapter - > num_active_queues ;
2013-12-21 06:12:45 +00:00
2017-04-19 09:25:56 -04:00
/* It's easy to be greedy for MSI-X vectors, but it really doesn't do
* us much good if we have more vectors than CPUs . However , we already
* limit the total number of queues by the number of CPUs so we do not
* need any further limiting here .
2013-12-21 06:12:45 +00:00
*/
2017-04-19 09:25:56 -04:00
v_budget = min_t ( int , pairs + NONQ_VECS ,
( int ) adapter - > vf_res - > max_vectors ) ;
2013-12-21 06:12:45 +00:00
adapter - > msix_entries = kcalloc ( v_budget ,
sizeof ( struct msix_entry ) , GFP_KERNEL ) ;
if ( ! adapter - > msix_entries ) {
err = - ENOMEM ;
goto out ;
}
for ( vector = 0 ; vector < v_budget ; vector + + )
adapter - > msix_entries [ vector ] . entry = vector ;
2018-09-14 17:37:46 -07:00
err = iavf_acquire_msix_vectors ( adapter , v_budget ) ;
2013-12-21 06:12:45 +00:00
out :
2015-11-06 15:26:00 -08:00
netif_set_real_num_rx_queues ( adapter - > netdev , pairs ) ;
netif_set_real_num_tx_queues ( adapter - > netdev , pairs ) ;
2013-12-21 06:12:45 +00:00
return err ;
}
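/* Worked example (derived from the code above): with 8 active queue pairs
 * and vf_res->max_vectors = 5, v_budget = min(8 + NONQ_VECS, 5) = 5. The
 * driver then asks pci_enable_msix_range() for anywhere between
 * vector_threshold and 5 vectors; that call returns the number actually
 * granted (or a negative errno), which becomes num_msix_vectors.
 */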
2015-06-23 19:00:04 -04:00
/**
2018-09-14 17:37:55 -07:00
* iavf_config_rss_aq - Configure RSS keys and lut by using AQ commands
2016-04-12 08:30:44 -07:00
* @ adapter : board private structure
2015-10-27 16:15:06 -04:00
*
* Return 0 on success , negative on failure
2015-06-23 19:00:04 -04:00
* */
2018-09-14 17:37:46 -07:00
static int iavf_config_rss_aq ( struct iavf_adapter * adapter )
2015-06-23 19:00:04 -04:00
{
2019-04-17 15:17:32 -07:00
struct iavf_aqc_get_set_rss_key_data * rss_key =
( struct iavf_aqc_get_set_rss_key_data * ) adapter - > rss_key ;
2018-09-14 17:37:52 -07:00
struct iavf_hw * hw = & adapter - > hw ;
2015-10-27 16:15:06 -04:00
int ret = 0 ;
2015-06-23 19:00:04 -04:00
2017-05-11 11:23:11 -07:00
if ( adapter - > current_op ! = VIRTCHNL_OP_UNKNOWN ) {
2015-06-23 19:00:04 -04:00
/* bail because we already have a command pending */
2015-10-16 21:14:29 +09:00
dev_err ( & adapter - > pdev - > dev , " Cannot configure RSS, command %d pending \n " ,
2015-06-23 19:00:04 -04:00
adapter - > current_op ) ;
2015-10-27 16:15:06 -04:00
return - EBUSY ;
2015-06-23 19:00:04 -04:00
}
2018-09-14 17:37:46 -07:00
ret = iavf_aq_set_rss_key ( hw , adapter - > vsi . id , rss_key ) ;
2016-04-12 08:30:44 -07:00
if ( ret ) {
dev_err ( & adapter - > pdev - > dev , " Cannot set RSS key, err %s aq_err %s \n " ,
2018-09-14 17:37:46 -07:00
iavf_stat_str ( hw , ret ) ,
iavf_aq_str ( hw , hw - > aq . asq_last_status ) ) ;
2016-04-12 08:30:44 -07:00
return ret ;
2015-10-27 16:15:06 -04:00
}
2015-06-23 19:00:04 -04:00
2018-09-14 17:37:46 -07:00
ret = iavf_aq_set_rss_lut ( hw , adapter - > vsi . id , false ,
adapter - > rss_lut , adapter - > rss_lut_size ) ;
2016-04-12 08:30:44 -07:00
if ( ret ) {
dev_err ( & adapter - > pdev - > dev , " Cannot set RSS lut, err %s aq_err %s \n " ,
2018-09-14 17:37:46 -07:00
iavf_stat_str ( hw , ret ) ,
iavf_aq_str ( hw , hw - > aq . asq_last_status ) ) ;
2015-06-23 19:00:04 -04:00
}
2015-10-27 16:15:06 -04:00
return ret ;
2016-04-12 08:30:44 -07:00
2015-06-23 19:00:04 -04:00
}
/**
2018-09-14 17:37:46 -07:00
* iavf_config_rss_reg - Configure RSS keys and lut by writing registers
2016-04-12 08:30:44 -07:00
* @ adapter : board private structure
2015-10-27 16:15:06 -04:00
*
* Returns 0 on success , negative on failure
2015-06-23 19:00:04 -04:00
* */
2018-09-14 17:37:46 -07:00
static int iavf_config_rss_reg ( struct iavf_adapter * adapter )
2015-06-23 19:00:04 -04:00
{
2018-09-14 17:37:52 -07:00
struct iavf_hw * hw = & adapter - > hw ;
2016-04-12 08:30:44 -07:00
u32 * dw ;
2015-10-27 16:15:06 -04:00
u16 i ;
2015-06-23 19:00:04 -04:00
2016-04-12 08:30:44 -07:00
dw = ( u32 * ) adapter - > rss_key ;
for ( i = 0 ; i < = adapter - > rss_key_size / 4 ; i + + )
2018-09-14 17:37:49 -07:00
wr32 ( hw , IAVF_VFQF_HKEY ( i ) , dw [ i ] ) ;
2015-10-27 16:15:06 -04:00
2016-04-12 08:30:44 -07:00
dw = ( u32 * ) adapter - > rss_lut ;
for ( i = 0 ; i < = adapter - > rss_lut_size / 4 ; i + + )
2018-09-14 17:37:49 -07:00
wr32 ( hw , IAVF_VFQF_HLUT ( i ) , dw [ i ] ) ;
2015-10-27 16:15:06 -04:00
2018-09-14 17:37:49 -07:00
iavf_flush ( hw ) ;
2015-10-27 16:15:06 -04:00
return 0 ;
}
/**
2018-09-14 17:37:46 -07:00
* iavf_config_rss - Configure RSS keys and lut
2016-04-12 08:30:44 -07:00
* @ adapter : board private structure
2015-10-26 19:44:33 -04:00
*
* Returns 0 on success , negative on failure
* */
2018-09-14 17:37:46 -07:00
int iavf_config_rss ( struct iavf_adapter * adapter )
2015-10-26 19:44:33 -04:00
{
2016-04-12 08:30:44 -07:00
if ( RSS_PF ( adapter ) ) {
2018-09-14 17:37:46 -07:00
adapter - > aq_required | = IAVF_FLAG_AQ_SET_RSS_LUT |
IAVF_FLAG_AQ_SET_RSS_KEY ;
2016-04-12 08:30:44 -07:00
return 0 ;
} else if ( RSS_AQ ( adapter ) ) {
2018-09-14 17:37:46 -07:00
return iavf_config_rss_aq ( adapter ) ;
2016-04-12 08:30:44 -07:00
} else {
2018-09-14 17:37:46 -07:00
return iavf_config_rss_reg ( adapter ) ;
2016-04-12 08:30:44 -07:00
}
2015-10-26 19:44:33 -04:00
}
2015-10-27 16:15:06 -04:00
/**
2018-09-14 17:37:46 -07:00
* iavf_fill_rss_lut - Fill the lut with default values
2016-04-12 08:30:44 -07:00
* @ adapter : board private structure
2015-10-27 16:15:06 -04:00
* */
2018-09-14 17:37:46 -07:00
static void iavf_fill_rss_lut ( struct iavf_adapter * adapter )
2015-10-27 16:15:06 -04:00
{
u16 i ;
2016-04-12 08:30:44 -07:00
for ( i = 0 ; i < adapter - > rss_lut_size ; i + + )
adapter - > rss_lut [ i ] = i % adapter - > num_active_queues ;
2015-06-23 19:00:04 -04:00
}
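/* Worked example (derived from the code above): with rss_lut_size = 64 and
 * 4 active queues the default LUT is 0,1,2,3,0,1,2,3,... so hash buckets are
 * spread evenly across the queues until user space overrides the table
 * (e.g. via ethtool -X, where supported).
 */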
/**
2018-09-14 17:37:46 -07:00
* iavf_init_rss - Prepare for RSS
2015-06-23 19:00:04 -04:00
* @ adapter : board private structure
2015-10-27 16:15:06 -04:00
*
* Return 0 on success , negative on failure
2015-06-23 19:00:04 -04:00
* */
2018-09-14 17:37:46 -07:00
static int iavf_init_rss ( struct iavf_adapter * adapter )
2015-06-23 19:00:04 -04:00
{
2018-09-14 17:37:52 -07:00
struct iavf_hw * hw = & adapter - > hw ;
2015-10-27 16:15:06 -04:00
int ret ;
2015-06-23 19:00:04 -04:00
2016-04-12 08:30:44 -07:00
if ( ! RSS_PF ( adapter ) ) {
/* Enable PCTYPES for RSS, TCP/UDP with IPv4/IPv6 */
2017-06-29 15:12:24 +02:00
if ( adapter - > vf_res - > vf_cap_flags &
2017-05-11 11:23:11 -07:00
VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 )
2018-09-14 17:37:55 -07:00
adapter - > hena = IAVF_DEFAULT_RSS_HENA_EXPANDED ;
2016-04-12 08:30:44 -07:00
else
2018-09-14 17:37:55 -07:00
adapter - > hena = IAVF_DEFAULT_RSS_HENA ;
2015-06-23 19:00:04 -04:00
2018-09-14 17:37:49 -07:00
wr32 ( hw , IAVF_VFQF_HENA ( 0 ) , ( u32 ) adapter - > hena ) ;
wr32 ( hw , IAVF_VFQF_HENA ( 1 ) , ( u32 ) ( adapter - > hena > > 32 ) ) ;
2016-04-12 08:30:44 -07:00
}
2015-10-26 19:44:34 -04:00
2018-09-14 17:37:46 -07:00
iavf_fill_rss_lut ( adapter ) ;
2016-04-12 08:30:44 -07:00
netdev_rss_key_fill ( ( void * ) adapter - > rss_key , adapter - > rss_key_size ) ;
2018-09-14 17:37:46 -07:00
ret = iavf_config_rss ( adapter ) ;
2015-10-27 16:15:06 -04:00
return ret ;
2015-06-23 19:00:04 -04:00
}
2013-12-21 06:12:45 +00:00
/**
2018-09-14 17:37:46 -07:00
* iavf_alloc_q_vectors - Allocate memory for interrupt vectors
2013-12-21 06:12:45 +00:00
* @ adapter : board private structure to initialize
*
* We allocate one q_vector per queue interrupt . If allocation fails we
* return - ENOMEM .
* */
2018-09-14 17:37:46 -07:00
static int iavf_alloc_q_vectors ( struct iavf_adapter * adapter )
2013-12-21 06:12:45 +00:00
{
2015-10-26 19:44:39 -04:00
int q_idx = 0 , num_q_vectors ;
2018-09-14 17:37:55 -07:00
struct iavf_q_vector * q_vector ;
2013-12-21 06:12:45 +00:00
num_q_vectors = adapter - > num_msix_vectors - NONQ_VECS ;
2015-10-26 19:44:40 -04:00
adapter - > q_vectors = kcalloc ( num_q_vectors , sizeof ( * q_vector ) ,
2015-10-26 19:44:39 -04:00
GFP_KERNEL ) ;
if ( ! adapter - > q_vectors )
2016-03-01 16:02:15 -08:00
return - ENOMEM ;
2013-12-21 06:12:45 +00:00
for ( q_idx = 0 ; q_idx < num_q_vectors ; q_idx + + ) {
2015-10-26 19:44:39 -04:00
q_vector = & adapter - > q_vectors [ q_idx ] ;
2013-12-21 06:12:45 +00:00
q_vector - > adapter = adapter ;
q_vector - > vsi = & adapter - > vsi ;
q_vector - > v_idx = q_idx ;
2017-12-29 08:48:53 -05:00
q_vector - > reg_idx = q_idx ;
2017-07-14 09:10:10 -04:00
cpumask_copy ( & q_vector - > affinity_mask , cpu_possible_mask ) ;
2013-12-21 06:12:45 +00:00
netif_napi_add ( adapter - > netdev , & q_vector - > napi ,
2018-09-14 17:37:46 -07:00
iavf_napi_poll , NAPI_POLL_WEIGHT ) ;
2013-12-21 06:12:45 +00:00
}
return 0 ;
}
/**
2018-09-14 17:37:46 -07:00
* iavf_free_q_vectors - Free memory allocated for interrupt vectors
2013-12-21 06:12:45 +00:00
* @ adapter : board private structure to initialize
*
* This function frees the memory allocated to the q_vectors . In addition if
* NAPI is enabled it will delete any references to the NAPI struct prior
* to freeing the q_vector .
* */
2018-09-14 17:37:46 -07:00
static void iavf_free_q_vectors ( struct iavf_adapter * adapter )
2013-12-21 06:12:45 +00:00
{
int q_idx , num_q_vectors ;
int napi_vectors ;
2016-11-08 13:05:08 -08:00
if ( ! adapter - > q_vectors )
return ;
2013-12-21 06:12:45 +00:00
num_q_vectors = adapter - > num_msix_vectors - NONQ_VECS ;
2014-10-25 03:24:34 +00:00
napi_vectors = adapter - > num_active_queues ;
2013-12-21 06:12:45 +00:00
for ( q_idx = 0 ; q_idx < num_q_vectors ; q_idx + + ) {
2018-09-14 17:37:55 -07:00
struct iavf_q_vector * q_vector = & adapter - > q_vectors [ q_idx ] ;
2018-09-14 17:37:47 -07:00
2013-12-21 06:12:45 +00:00
if ( q_idx < napi_vectors )
netif_napi_del ( & q_vector - > napi ) ;
}
2015-10-26 19:44:39 -04:00
kfree ( adapter - > q_vectors ) ;
2016-11-08 13:05:08 -08:00
adapter - > q_vectors = NULL ;
2013-12-21 06:12:45 +00:00
}
/**
2018-09-14 17:37:46 -07:00
* iavf_reset_interrupt_capability - Reset MSIX setup
2013-12-21 06:12:45 +00:00
* @ adapter : board private structure
*
* */
2018-09-14 17:37:46 -07:00
void iavf_reset_interrupt_capability ( struct iavf_adapter * adapter )
2013-12-21 06:12:45 +00:00
{
2016-11-08 13:05:05 -08:00
if ( ! adapter - > msix_entries )
return ;
2013-12-21 06:12:45 +00:00
pci_disable_msix ( adapter - > pdev ) ;
kfree ( adapter - > msix_entries ) ;
adapter - > msix_entries = NULL ;
}
/**
2018-09-14 17:37:46 -07:00
* iavf_init_interrupt_scheme - Determine if MSIX is supported and init
2013-12-21 06:12:45 +00:00
* @ adapter : board private structure to initialize
*
* */
2018-09-14 17:37:46 -07:00
int iavf_init_interrupt_scheme ( struct iavf_adapter * adapter )
2013-12-21 06:12:45 +00:00
{
int err ;
2018-09-14 17:37:46 -07:00
err = iavf_alloc_queues ( adapter ) ;
2017-04-19 09:25:59 -04:00
if ( err ) {
dev_err ( & adapter - > pdev - > dev ,
" Unable to allocate memory for queues \n " ) ;
goto err_alloc_queues ;
}
2016-07-27 12:02:33 -07:00
rtnl_lock ( ) ;
2018-09-14 17:37:46 -07:00
err = iavf_set_interrupt_capability ( adapter ) ;
2016-07-27 12:02:33 -07:00
rtnl_unlock ( ) ;
2013-12-21 06:12:45 +00:00
if ( err ) {
dev_err ( & adapter - > pdev - > dev ,
" Unable to setup interrupt capabilities \n " ) ;
goto err_set_interrupt ;
}
2018-09-14 17:37:46 -07:00
err = iavf_alloc_q_vectors ( adapter ) ;
2013-12-21 06:12:45 +00:00
if ( err ) {
dev_err ( & adapter - > pdev - > dev ,
" Unable to allocate memory for queue vectors \n " ) ;
goto err_alloc_q_vectors ;
}
2018-01-23 08:50:59 -08:00
/* If we've made it this far with the ADq flag set, then we haven't
* bailed out anywhere in the middle. ADq isn't just enabled; the actual
* resources have been allocated in the reset path.
* Now we can truly claim that ADq is enabled.
*/
if ( ( adapter - > vf_res - > vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ ) & &
adapter - > num_tc )
dev_info ( & adapter - > pdev - > dev , " ADq Enabled, %u TCs created " ,
adapter - > num_tc ) ;
2013-12-21 06:12:45 +00:00
dev_info ( & adapter - > pdev - > dev , " Multiqueue %s: Queue pair count = %u " ,
2014-11-11 20:02:42 +00:00
( adapter - > num_active_queues > 1 ) ? " Enabled " : " Disabled " ,
adapter - > num_active_queues ) ;
2013-12-21 06:12:45 +00:00
return 0 ;
err_alloc_q_vectors :
2018-09-14 17:37:46 -07:00
iavf_reset_interrupt_capability ( adapter ) ;
2013-12-21 06:12:45 +00:00
err_set_interrupt :
2018-09-14 17:37:46 -07:00
iavf_free_queues ( adapter ) ;
2017-04-19 09:25:59 -04:00
err_alloc_queues :
2013-12-21 06:12:45 +00:00
return err ;
}
2015-10-26 19:44:34 -04:00
/**
2018-09-14 17:37:46 -07:00
* iavf_free_rss - Free memory used by RSS structs
2016-04-12 08:30:44 -07:00
* @ adapter : board private structure
2015-10-26 19:44:34 -04:00
* */
2018-09-14 17:37:46 -07:00
static void iavf_free_rss ( struct iavf_adapter * adapter )
2015-10-26 19:44:34 -04:00
{
2016-04-12 08:30:44 -07:00
kfree ( adapter - > rss_key ) ;
adapter - > rss_key = NULL ;
2015-10-26 19:44:34 -04:00
2016-04-12 08:30:44 -07:00
kfree ( adapter - > rss_lut ) ;
adapter - > rss_lut = NULL ;
2015-10-26 19:44:34 -04:00
}
2017-08-22 06:57:50 -04:00
/**
2018-09-14 17:37:46 -07:00
* iavf_reinit_interrupt_scheme - Reallocate queues and vectors
2017-08-22 06:57:50 -04:00
* @ adapter : board private structure
*
* Returns 0 on success , negative on failure
* */
2018-09-14 17:37:46 -07:00
static int iavf_reinit_interrupt_scheme ( struct iavf_adapter * adapter )
2017-08-22 06:57:50 -04:00
{
struct net_device * netdev = adapter - > netdev ;
int err ;
if ( netif_running ( netdev ) )
2018-09-14 17:37:46 -07:00
iavf_free_traffic_irqs ( adapter ) ;
iavf_free_misc_irq ( adapter ) ;
iavf_reset_interrupt_capability ( adapter ) ;
iavf_free_q_vectors ( adapter ) ;
iavf_free_queues ( adapter ) ;
2017-08-22 06:57:50 -04:00
2018-09-14 17:37:46 -07:00
err = iavf_init_interrupt_scheme ( adapter ) ;
2017-08-22 06:57:50 -04:00
if ( err )
goto err ;
netif_tx_stop_all_queues ( netdev ) ;
2018-09-14 17:37:46 -07:00
err = iavf_request_misc_irq ( adapter ) ;
2017-08-22 06:57:50 -04:00
if ( err )
goto err ;
2018-09-14 17:37:55 -07:00
set_bit ( __IAVF_VSI_DOWN , adapter - > vsi . state ) ;
2017-08-22 06:57:50 -04:00
2018-09-14 17:37:46 -07:00
iavf_map_rings_to_vectors ( adapter ) ;
2017-08-22 06:57:50 -04:00
err :
return err ;
}
2013-12-21 06:12:45 +00:00
/**
2019-05-14 10:37:04 -07:00
* iavf_process_aq_command - process aq_required flags
* and sends aq command
* @ adapter : pointer to iavf adapter structure
*
* Returns 0 on success
* Returns error code if no command was sent
* or error code if the command failed .
2013-12-21 06:12:45 +00:00
* */
2019-05-14 10:37:04 -07:00
static int iavf_process_aq_command ( struct iavf_adapter * adapter )
2013-12-21 06:12:45 +00:00
{
2019-05-14 10:37:04 -07:00
if ( adapter - > aq_required & IAVF_FLAG_AQ_GET_CONFIG )
return iavf_send_vf_config_msg ( adapter ) ;
2021-11-29 16:16:00 -08:00
if ( adapter - > aq_required & IAVF_FLAG_AQ_GET_OFFLOAD_VLAN_V2_CAPS )
return iavf_send_vf_offload_vlan_v2_msg ( adapter ) ;
2018-09-14 17:37:46 -07:00
if ( adapter - > aq_required & IAVF_FLAG_AQ_DISABLE_QUEUES ) {
iavf_disable_queues ( adapter ) ;
2019-05-14 10:37:04 -07:00
return 0 ;
2015-03-27 00:12:09 -07:00
}
2018-09-14 17:37:46 -07:00
if ( adapter - > aq_required & IAVF_FLAG_AQ_MAP_VECTORS ) {
iavf_map_queues ( adapter ) ;
2019-05-14 10:37:04 -07:00
return 0 ;
2013-12-21 06:12:45 +00:00
}
2018-09-14 17:37:46 -07:00
if ( adapter - > aq_required & IAVF_FLAG_AQ_ADD_MAC_FILTER ) {
iavf_add_ether_addrs ( adapter ) ;
2019-05-14 10:37:04 -07:00
return 0 ;
2013-12-21 06:12:45 +00:00
}
2018-09-14 17:37:46 -07:00
if ( adapter - > aq_required & IAVF_FLAG_AQ_ADD_VLAN_FILTER ) {
iavf_add_vlans ( adapter ) ;
2019-05-14 10:37:04 -07:00
return 0 ;
2013-12-21 06:12:45 +00:00
}
2018-09-14 17:37:46 -07:00
if ( adapter - > aq_required & IAVF_FLAG_AQ_DEL_MAC_FILTER ) {
iavf_del_ether_addrs ( adapter ) ;
2019-05-14 10:37:04 -07:00
return 0 ;
2013-12-21 06:12:45 +00:00
}
2018-09-14 17:37:46 -07:00
if ( adapter - > aq_required & IAVF_FLAG_AQ_DEL_VLAN_FILTER ) {
iavf_del_vlans ( adapter ) ;
2019-05-14 10:37:04 -07:00
return 0 ;
2013-12-21 06:12:45 +00:00
}
2018-09-14 17:37:46 -07:00
if ( adapter - > aq_required & IAVF_FLAG_AQ_ENABLE_VLAN_STRIPPING ) {
iavf_enable_vlan_stripping ( adapter ) ;
2019-05-14 10:37:04 -07:00
return 0 ;
2017-07-17 22:09:45 -07:00
}
2018-09-14 17:37:46 -07:00
if ( adapter - > aq_required & IAVF_FLAG_AQ_DISABLE_VLAN_STRIPPING ) {
iavf_disable_vlan_stripping ( adapter ) ;
2019-05-14 10:37:04 -07:00
return 0 ;
2017-07-17 22:09:45 -07:00
}
2018-09-14 17:37:46 -07:00
if ( adapter - > aq_required & IAVF_FLAG_AQ_CONFIGURE_QUEUES ) {
iavf_configure_queues ( adapter ) ;
2019-05-14 10:37:04 -07:00
return 0 ;
2013-12-21 06:12:45 +00:00
}
2018-09-14 17:37:46 -07:00
if ( adapter - > aq_required & IAVF_FLAG_AQ_ENABLE_QUEUES ) {
iavf_enable_queues ( adapter ) ;
2019-05-14 10:37:04 -07:00
return 0 ;
2013-12-21 06:12:45 +00:00
}
2018-09-14 17:37:46 -07:00
if ( adapter - > aq_required & IAVF_FLAG_AQ_CONFIGURE_RSS ) {
2015-06-23 19:00:04 -04:00
/* This message goes straight to the firmware, not the
* PF , so we don ' t have to set current_op as we will
* not get a response through the ARQ .
*/
2018-09-14 17:37:46 -07:00
adapter - > aq_required & = ~ IAVF_FLAG_AQ_CONFIGURE_RSS ;
2019-05-14 10:37:04 -07:00
return 0 ;
2015-06-23 19:00:04 -04:00
}
2018-09-14 17:37:46 -07:00
if ( adapter - > aq_required & IAVF_FLAG_AQ_GET_HENA ) {
iavf_get_hena ( adapter ) ;
2019-05-14 10:37:04 -07:00
return 0 ;
2016-04-12 08:30:44 -07:00
}
2018-09-14 17:37:46 -07:00
if ( adapter - > aq_required & IAVF_FLAG_AQ_SET_HENA ) {
iavf_set_hena ( adapter ) ;
2019-05-14 10:37:04 -07:00
return 0 ;
2016-04-12 08:30:44 -07:00
}
2018-09-14 17:37:46 -07:00
if ( adapter - > aq_required & IAVF_FLAG_AQ_SET_RSS_KEY ) {
iavf_set_rss_key ( adapter ) ;
2019-05-14 10:37:04 -07:00
return 0 ;
2016-04-12 08:30:44 -07:00
}
2018-09-14 17:37:46 -07:00
if ( adapter - > aq_required & IAVF_FLAG_AQ_SET_RSS_LUT ) {
iavf_set_rss_lut ( adapter ) ;
2019-05-14 10:37:04 -07:00
return 0 ;
2016-04-12 08:30:44 -07:00
}
2015-06-23 19:00:04 -04:00
2018-09-14 17:37:46 -07:00
if ( adapter - > aq_required & IAVF_FLAG_AQ_REQUEST_PROMISC ) {
iavf_set_promiscuous ( adapter , FLAG_VF_UNICAST_PROMISC |
2017-05-11 11:23:16 -07:00
FLAG_VF_MULTICAST_PROMISC ) ;
2019-05-14 10:37:04 -07:00
return 0 ;
2016-04-12 08:30:52 -07:00
}
2018-09-14 17:37:46 -07:00
if ( adapter - > aq_required & IAVF_FLAG_AQ_REQUEST_ALLMULTI ) {
iavf_set_promiscuous ( adapter , FLAG_VF_MULTICAST_PROMISC ) ;
2019-05-14 10:37:04 -07:00
return 0 ;
2016-05-03 15:13:10 -07:00
}
2021-06-04 09:48:56 -07:00
if ( ( adapter - > aq_required & IAVF_FLAG_AQ_RELEASE_PROMISC ) | |
2018-09-14 17:37:46 -07:00
( adapter - > aq_required & IAVF_FLAG_AQ_RELEASE_ALLMULTI ) ) {
iavf_set_promiscuous ( adapter , 0 ) ;
2019-05-14 10:37:04 -07:00
return 0 ;
2016-04-12 08:30:52 -07:00
}
2018-01-23 08:50:57 -08:00
2018-09-14 17:37:46 -07:00
if ( adapter - > aq_required & IAVF_FLAG_AQ_ENABLE_CHANNELS ) {
iavf_enable_channels ( adapter ) ;
2019-05-14 10:37:04 -07:00
return 0 ;
2018-01-23 08:50:57 -08:00
}
2018-09-14 17:37:46 -07:00
if ( adapter - > aq_required & IAVF_FLAG_AQ_DISABLE_CHANNELS ) {
iavf_disable_channels ( adapter ) ;
2019-05-14 10:37:04 -07:00
return 0 ;
2018-01-23 08:50:57 -08:00
}
2018-09-14 17:37:46 -07:00
if ( adapter - > aq_required & IAVF_FLAG_AQ_ADD_CLOUD_FILTER ) {
iavf_add_cloud_filter ( adapter ) ;
2019-05-14 10:37:04 -07:00
return 0 ;
2018-01-23 08:51:05 -08:00
}
2018-09-14 17:37:46 -07:00
if ( adapter - > aq_required & IAVF_FLAG_AQ_DEL_CLOUD_FILTER ) {
iavf_del_cloud_filter ( adapter ) ;
2019-05-14 10:37:04 -07:00
return 0 ;
}
2021-03-09 11:08:11 +08:00
if ( adapter - > aq_required & IAVF_FLAG_AQ_ADD_FDIR_FILTER ) {
iavf_add_fdir_filter ( adapter ) ;
return IAVF_SUCCESS ;
}
if ( adapter - > aq_required & IAVF_FLAG_AQ_DEL_FDIR_FILTER ) {
iavf_del_fdir_filter ( adapter ) ;
return IAVF_SUCCESS ;
}
2021-04-13 08:48:41 +08:00
if ( adapter - > aq_required & IAVF_FLAG_AQ_ADD_ADV_RSS_CFG ) {
iavf_add_adv_rss_cfg ( adapter ) ;
return 0 ;
}
if ( adapter - > aq_required & IAVF_FLAG_AQ_DEL_ADV_RSS_CFG ) {
iavf_del_adv_rss_cfg ( adapter ) ;
return 0 ;
}
2021-11-29 16:16:03 -08:00
if ( adapter - > aq_required & IAVF_FLAG_AQ_DISABLE_CTAG_VLAN_STRIPPING ) {
iavf_disable_vlan_stripping_v2 ( adapter , ETH_P_8021Q ) ;
return 0 ;
}
if ( adapter - > aq_required & IAVF_FLAG_AQ_DISABLE_STAG_VLAN_STRIPPING ) {
iavf_disable_vlan_stripping_v2 ( adapter , ETH_P_8021AD ) ;
return 0 ;
}
if ( adapter - > aq_required & IAVF_FLAG_AQ_ENABLE_CTAG_VLAN_STRIPPING ) {
iavf_enable_vlan_stripping_v2 ( adapter , ETH_P_8021Q ) ;
return 0 ;
}
if ( adapter - > aq_required & IAVF_FLAG_AQ_ENABLE_STAG_VLAN_STRIPPING ) {
iavf_enable_vlan_stripping_v2 ( adapter , ETH_P_8021AD ) ;
return 0 ;
}
if ( adapter - > aq_required & IAVF_FLAG_AQ_DISABLE_CTAG_VLAN_INSERTION ) {
iavf_disable_vlan_insertion_v2 ( adapter , ETH_P_8021Q ) ;
return 0 ;
}
if ( adapter - > aq_required & IAVF_FLAG_AQ_DISABLE_STAG_VLAN_INSERTION ) {
iavf_disable_vlan_insertion_v2 ( adapter , ETH_P_8021AD ) ;
return 0 ;
}
if ( adapter - > aq_required & IAVF_FLAG_AQ_ENABLE_CTAG_VLAN_INSERTION ) {
iavf_enable_vlan_insertion_v2 ( adapter , ETH_P_8021Q ) ;
return 0 ;
}
if ( adapter - > aq_required & IAVF_FLAG_AQ_ENABLE_STAG_VLAN_INSERTION ) {
iavf_enable_vlan_insertion_v2 ( adapter , ETH_P_8021AD ) ;
return 0 ;
}
2021-09-15 09:01:00 +00:00
if ( adapter - > aq_required & IAVF_FLAG_AQ_REQUEST_STATS ) {
iavf_request_stats ( adapter ) ;
return 0 ;
}
2019-05-14 10:37:04 -07:00
return - EAGAIN ;
}
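/* Example (illustrative sketch, not taken verbatim from the driver): callers
 * elsewhere in the driver queue work for iavf_process_aq_command() by setting
 * one of the IAVF_FLAG_AQ_* bits and kicking the watchdog, e.g.:
 *
 *	adapter->aq_required |= IAVF_FLAG_AQ_ADD_CLOUD_FILTER;
 *	mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0);
 *
 * The watchdog task then calls this function, which issues at most one
 * virtchnl request per invocation and returns -EAGAIN once nothing is pending.
 */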
2021-11-29 16:16:03 -08:00
/**
* iavf_set_vlan_offload_features - set VLAN offload configuration
* @ adapter : board private structure
* @ prev_features : previous features used for comparison
* @ features : updated features used for configuration
*
* Set the aq_required bit ( s ) based on the requested features passed in to
* configure VLAN stripping and / or VLAN insertion if supported . Also , schedule
* the watchdog if any changes are requested to expedite the request via
* virtchnl .
* */
void
iavf_set_vlan_offload_features ( struct iavf_adapter * adapter ,
netdev_features_t prev_features ,
netdev_features_t features )
{
bool enable_stripping = true , enable_insertion = true ;
u16 vlan_ethertype = 0 ;
u64 aq_required = 0 ;
/* keep cases separate because one ethertype's offloads can be
 * disabled at the same time as another's are enabled, so check for an
 * enabled ethertype first, then check for disabled. Default to
 * ETH_P_8021Q so an ethertype is specified if disabling insertion and
 * stripping.
 */
if ( features & ( NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX ) )
vlan_ethertype = ETH_P_8021AD ;
else if ( features & ( NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX ) )
vlan_ethertype = ETH_P_8021Q ;
else if ( prev_features & ( NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX ) )
vlan_ethertype = ETH_P_8021AD ;
else if ( prev_features & ( NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX ) )
vlan_ethertype = ETH_P_8021Q ;
else
vlan_ethertype = ETH_P_8021Q ;
if ( ! ( features & ( NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_CTAG_RX ) ) )
enable_stripping = false ;
if ( ! ( features & ( NETIF_F_HW_VLAN_STAG_TX | NETIF_F_HW_VLAN_CTAG_TX ) ) )
enable_insertion = false ;
if ( VLAN_ALLOWED ( adapter ) ) {
/* VIRTCHNL_VF_OFFLOAD_VLAN only has support for toggling VLAN
* stripping via virtchnl . VLAN insertion can be toggled on the
* netdev , but it doesn ' t require a virtchnl message
*/
if ( enable_stripping )
aq_required | = IAVF_FLAG_AQ_ENABLE_VLAN_STRIPPING ;
else
aq_required | = IAVF_FLAG_AQ_DISABLE_VLAN_STRIPPING ;
} else if ( VLAN_V2_ALLOWED ( adapter ) ) {
switch ( vlan_ethertype ) {
case ETH_P_8021Q :
if ( enable_stripping )
aq_required | = IAVF_FLAG_AQ_ENABLE_CTAG_VLAN_STRIPPING ;
else
aq_required | = IAVF_FLAG_AQ_DISABLE_CTAG_VLAN_STRIPPING ;
if ( enable_insertion )
aq_required | = IAVF_FLAG_AQ_ENABLE_CTAG_VLAN_INSERTION ;
else
aq_required | = IAVF_FLAG_AQ_DISABLE_CTAG_VLAN_INSERTION ;
break ;
case ETH_P_8021AD :
if ( enable_stripping )
aq_required | = IAVF_FLAG_AQ_ENABLE_STAG_VLAN_STRIPPING ;
else
aq_required | = IAVF_FLAG_AQ_DISABLE_STAG_VLAN_STRIPPING ;
if ( enable_insertion )
aq_required | = IAVF_FLAG_AQ_ENABLE_STAG_VLAN_INSERTION ;
else
aq_required | = IAVF_FLAG_AQ_DISABLE_STAG_VLAN_INSERTION ;
break ;
}
}
if ( aq_required ) {
adapter - > aq_required | = aq_required ;
mod_delayed_work ( iavf_wq , & adapter - > watchdog_task , 0 ) ;
}
}
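/* Example (illustrative sketch): during initialization the driver requests the
 * initial VLAN offload state with no previous features to compare against:
 *
 *	iavf_set_vlan_offload_features(adapter, 0, netdev->features);
 *
 * Runtime feature changes are expected to pass both the old and the new
 * feature sets so that the ethertype of a just-disabled offload can still be
 * resolved.
 */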
2019-05-14 10:37:07 -07:00
/**
* iavf_startup - first step of driver startup
* @adapter: board private structure
*
* Function processes the __IAVF_STARTUP driver state.
* On success the state is changed to __IAVF_INIT_VERSION_CHECK;
* on failure the state is changed to __IAVF_INIT_FAILED.
**/
static void iavf_startup(struct iavf_adapter *adapter)
{
struct pci_dev * pdev = adapter - > pdev ;
struct iavf_hw * hw = & adapter - > hw ;
int err ;
WARN_ON ( adapter - > state ! = __IAVF_STARTUP ) ;
/* driver loaded, probe complete */
adapter - > flags & = ~ IAVF_FLAG_PF_COMMS_FAILED ;
adapter - > flags & = ~ IAVF_FLAG_RESET_PENDING ;
err = iavf_set_mac_type ( hw ) ;
if ( err ) {
dev_err ( & pdev - > dev , " Failed to set MAC type (%d) \n " , err ) ;
goto err ;
}
err = iavf_check_reset_complete ( hw ) ;
if ( err ) {
dev_info ( & pdev - > dev , " Device is still in reset (%d), retrying \n " ,
err ) ;
goto err ;
}
hw - > aq . num_arq_entries = IAVF_AQ_LEN ;
hw - > aq . num_asq_entries = IAVF_AQ_LEN ;
hw - > aq . arq_buf_size = IAVF_MAX_AQ_BUF_SIZE ;
hw - > aq . asq_buf_size = IAVF_MAX_AQ_BUF_SIZE ;
err = iavf_init_adminq ( hw ) ;
if ( err ) {
dev_err ( & pdev - > dev , " Failed to init Admin Queue (%d) \n " , err ) ;
goto err ;
}
err = iavf_send_api_ver ( adapter ) ;
if ( err ) {
dev_err ( & pdev - > dev , " Unable to send to PF (%d) \n " , err ) ;
iavf_shutdown_adminq ( hw ) ;
goto err ;
}
iavf_change_state(adapter, __IAVF_INIT_VERSION_CHECK);
return;
err:
iavf_change_state(adapter, __IAVF_INIT_FAILED);
}
/**
* iavf_init_version_check - second step of driver startup
* @ adapter : board private structure
*
* Function processes the __IAVF_INIT_VERSION_CHECK driver state.
* On success the state is changed to __IAVF_INIT_GET_RESOURCES;
* on failure the state is changed to __IAVF_INIT_FAILED.
**/
static void iavf_init_version_check(struct iavf_adapter *adapter)
{
struct pci_dev * pdev = adapter - > pdev ;
struct iavf_hw * hw = & adapter - > hw ;
int err = - EAGAIN ;
WARN_ON ( adapter - > state ! = __IAVF_INIT_VERSION_CHECK ) ;
if ( ! iavf_asq_done ( hw ) ) {
dev_err ( & pdev - > dev , " Admin queue command never completed \n " ) ;
iavf_shutdown_adminq ( hw ) ;
2021-08-19 08:47:40 +00:00
iavf_change_state ( adapter , __IAVF_STARTUP ) ;
2019-05-14 10:37:07 -07:00
goto err ;
}
/* aq msg sent, awaiting reply */
err = iavf_verify_api_ver ( adapter ) ;
if ( err ) {
if ( err = = IAVF_ERR_ADMIN_QUEUE_NO_WORK )
err = iavf_send_api_ver ( adapter ) ;
else
dev_err ( & pdev - > dev , " Unsupported PF API version %d.%d, expected %d.%d \n " ,
adapter - > pf_version . major ,
adapter - > pf_version . minor ,
VIRTCHNL_VERSION_MAJOR ,
VIRTCHNL_VERSION_MINOR ) ;
goto err ;
}
err = iavf_send_vf_config_msg ( adapter ) ;
if ( err ) {
dev_err ( & pdev - > dev , " Unable to send config request (%d) \n " ,
err ) ;
goto err ;
}
iavf_change_state(adapter, __IAVF_INIT_GET_RESOURCES);
return;
err:
iavf_change_state(adapter, __IAVF_INIT_FAILED);
}
2021-11-29 16:16:00 -08:00
/**
* iavf_parse_vf_resource_msg - parse response from VIRTCHNL_OP_GET_VF_RESOURCES
* @ adapter : board private structure
*/
int iavf_parse_vf_resource_msg ( struct iavf_adapter * adapter )
{
int i , num_req_queues = adapter - > num_req_queues ;
struct iavf_vsi * vsi = & adapter - > vsi ;
for ( i = 0 ; i < adapter - > vf_res - > num_vsis ; i + + ) {
if ( adapter - > vf_res - > vsi_res [ i ] . vsi_type = = VIRTCHNL_VSI_SRIOV )
adapter - > vsi_res = & adapter - > vf_res - > vsi_res [ i ] ;
}
if ( ! adapter - > vsi_res ) {
dev_err ( & adapter - > pdev - > dev , " No LAN VSI found \n " ) ;
return - ENODEV ;
}
if ( num_req_queues & &
num_req_queues > adapter - > vsi_res - > num_queue_pairs ) {
/* Problem. The PF gave us fewer queues than what we had
* negotiated in our request . Need a reset to see if we can ' t
* get back to a working state .
*/
dev_err ( & adapter - > pdev - > dev ,
" Requested %d queues, but PF only gave us %d. \n " ,
num_req_queues ,
adapter - > vsi_res - > num_queue_pairs ) ;
adapter - > flags | = IAVF_FLAG_REINIT_ITR_NEEDED ;
adapter - > num_req_queues = adapter - > vsi_res - > num_queue_pairs ;
iavf_schedule_reset ( adapter ) ;
return - EAGAIN ;
}
adapter - > num_req_queues = 0 ;
adapter - > vsi . id = adapter - > vsi_res - > vsi_id ;
adapter - > vsi . back = adapter ;
adapter - > vsi . base_vector = 1 ;
adapter - > vsi . work_limit = IAVF_DEFAULT_IRQ_WORK ;
vsi - > netdev = adapter - > netdev ;
vsi - > qs_handle = adapter - > vsi_res - > qset_handle ;
if ( adapter - > vf_res - > vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF ) {
adapter - > rss_key_size = adapter - > vf_res - > rss_key_size ;
adapter - > rss_lut_size = adapter - > vf_res - > rss_lut_size ;
} else {
adapter - > rss_key_size = IAVF_HKEY_ARRAY_SIZE ;
adapter - > rss_lut_size = IAVF_HLUT_ARRAY_SIZE ;
}
return 0 ;
}
2019-05-14 10:37:07 -07:00
/**
* iavf_init_get_resources - third step of driver startup
* @ adapter : board private structure
*
* Function processes the __IAVF_INIT_GET_RESOURCES driver state and
* continues the driver initialization procedure.
* On success the state is changed to __IAVF_INIT_GET_OFFLOAD_VLAN_V2_CAPS
* or __IAVF_INIT_CONFIG_ADAPTER;
* on failure the state is changed to __IAVF_INIT_FAILED.
**/
static void iavf_init_get_resources(struct iavf_adapter *adapter)
{
struct pci_dev * pdev = adapter - > pdev ;
struct iavf_hw * hw = & adapter - > hw ;
2020-06-05 10:09:43 -07:00
int err ;
2019-05-14 10:37:07 -07:00
WARN_ON ( adapter - > state ! = __IAVF_INIT_GET_RESOURCES ) ;
/* aq msg sent, awaiting reply */
if (!adapter->vf_res) {
	adapter->vf_res = kzalloc(IAVF_VIRTCHNL_VF_RESOURCE_SIZE,
				  GFP_KERNEL);
	if (!adapter->vf_res) {
		err = -ENOMEM;
		goto err;
	}
}
err = iavf_get_vf_config ( adapter ) ;
if ( err = = IAVF_ERR_ADMIN_QUEUE_NO_WORK ) {
err = iavf_send_vf_config_msg ( adapter ) ;
2021-11-29 16:16:00 -08:00
goto err_alloc ;
2019-05-14 10:37:07 -07:00
} else if ( err = = IAVF_ERR_PARAM ) {
/* We only get ERR_PARAM if the device is in a very bad
* state or if we ' ve been disabled for previous bad
* behavior . Either way , we ' re done now .
*/
iavf_shutdown_adminq ( hw ) ;
dev_err ( & pdev - > dev , " Unable to get VF config due to PF error condition, not retrying \n " ) ;
2021-08-19 08:47:49 +00:00
return ;
2019-05-14 10:37:07 -07:00
}
if ( err ) {
dev_err ( & pdev - > dev , " Unable to get VF config (%d) \n " , err ) ;
goto err_alloc ;
}
2021-11-29 16:16:00 -08:00
err = iavf_parse_vf_resource_msg(adapter);
if (err)
	goto err_alloc;
2021-11-29 16:16:00 -08:00
err = iavf_send_vf_offload_vlan_v2_msg ( adapter ) ;
if ( err = = - EOPNOTSUPP ) {
/* underlying PF doesn't support VIRTCHNL_VF_OFFLOAD_VLAN_V2, so
* go directly to finishing initialization
*/
iavf_change_state ( adapter , __IAVF_INIT_CONFIG_ADAPTER ) ;
return ;
} else if ( err ) {
dev_err ( & pdev - > dev , " Unable to send offload vlan v2 request (%d) \n " ,
err ) ;
goto err_alloc ;
}
/* underlying PF supports VIRTCHNL_VF_OFFLOAD_VLAN_V2, so update the
* state accordingly
*/
iavf_change_state ( adapter , __IAVF_INIT_GET_OFFLOAD_VLAN_V2_CAPS ) ;
return ;
err_alloc :
kfree ( adapter - > vf_res ) ;
adapter - > vf_res = NULL ;
err :
iavf_change_state ( adapter , __IAVF_INIT_FAILED ) ;
}
/**
* iavf_init_get_offload_vlan_v2_caps - part of driver startup
* @ adapter : board private structure
*
* Function processes __IAVF_INIT_GET_OFFLOAD_VLAN_V2_CAPS driver state if the
* VF negotiates VIRTCHNL_VF_OFFLOAD_VLAN_V2 . If VIRTCHNL_VF_OFFLOAD_VLAN_V2 is
* not negotiated , then this state will never be entered .
* */
static void iavf_init_get_offload_vlan_v2_caps ( struct iavf_adapter * adapter )
{
int ret ;
WARN_ON ( adapter - > state ! = __IAVF_INIT_GET_OFFLOAD_VLAN_V2_CAPS ) ;
memset ( & adapter - > vlan_v2_caps , 0 , sizeof ( adapter - > vlan_v2_caps ) ) ;
ret = iavf_get_vf_vlan_v2_caps ( adapter ) ;
if ( ret ) {
if ( ret = = IAVF_ERR_ADMIN_QUEUE_NO_WORK )
iavf_send_vf_offload_vlan_v2_msg ( adapter ) ;
goto err ;
}
iavf_change_state ( adapter , __IAVF_INIT_CONFIG_ADAPTER ) ;
return ;
err :
iavf_change_state ( adapter , __IAVF_INIT_FAILED ) ;
}
/**
* iavf_init_config_adapter - last part of driver startup
* @ adapter : board private structure
*
* After all the supported capabilities are negotiated , then the
* __IAVF_INIT_CONFIG_ADAPTER state will finish driver initialization .
*/
static void iavf_init_config_adapter ( struct iavf_adapter * adapter )
{
struct net_device * netdev = adapter - > netdev ;
struct pci_dev * pdev = adapter - > pdev ;
int err ;
WARN_ON ( adapter - > state ! = __IAVF_INIT_CONFIG_ADAPTER ) ;
if ( iavf_process_config ( adapter ) )
goto err ;
2019-05-14 10:37:07 -07:00
adapter - > current_op = VIRTCHNL_OP_UNKNOWN ;
adapter - > flags | = IAVF_FLAG_RX_CSUM_ENABLED ;
netdev - > netdev_ops = & iavf_netdev_ops ;
iavf_set_ethtool_ops ( netdev ) ;
netdev - > watchdog_timeo = 5 * HZ ;
/* MTU range: 68 - 9710 */
netdev - > min_mtu = ETH_MIN_MTU ;
netdev - > max_mtu = IAVF_MAX_RXBUFFER - IAVF_PACKET_HDR_PAD ;
if ( ! is_valid_ether_addr ( adapter - > hw . mac . addr ) ) {
dev_info ( & pdev - > dev , " Invalid MAC address %pM, using random \n " ,
adapter - > hw . mac . addr ) ;
eth_hw_addr_random ( netdev ) ;
ether_addr_copy ( adapter - > hw . mac . addr , netdev - > dev_addr ) ;
} else {
2021-10-01 14:32:23 -07:00
eth_hw_addr_set ( netdev , adapter - > hw . mac . addr ) ;
2019-05-14 10:37:07 -07:00
ether_addr_copy ( netdev - > perm_addr , adapter - > hw . mac . addr ) ;
}
adapter - > tx_desc_count = IAVF_DEFAULT_TXD ;
adapter - > rx_desc_count = IAVF_DEFAULT_RXD ;
err = iavf_init_interrupt_scheme ( adapter ) ;
if ( err )
goto err_sw_init ;
iavf_map_rings_to_vectors ( adapter ) ;
if ( adapter - > vf_res - > vf_cap_flags &
VIRTCHNL_VF_OFFLOAD_WB_ON_ITR )
adapter - > flags | = IAVF_FLAG_WB_ON_ITR_CAPABLE ;
err = iavf_request_misc_irq ( adapter ) ;
if ( err )
goto err_sw_init ;
netif_carrier_off ( netdev ) ;
adapter - > link_up = false ;
/* set the semaphore to prevent any callbacks after device registration
 * up to the time when the driver state is set to __IAVF_DOWN
 */
rtnl_lock ( ) ;
if ( ! adapter - > netdev_registered ) {
err = register_netdevice ( netdev ) ;
if ( err ) {
rtnl_unlock ( ) ;
goto err_register ;
}
}
adapter - > netdev_registered = true ;
netif_tx_stop_all_queues ( netdev ) ;
if ( CLIENT_ALLOWED ( adapter ) ) {
err = iavf_lan_add_device(adapter);
if (err)
	dev_info(&pdev->dev, "Failed to add VF to client API service list: %d\n",
		 err);
}
dev_info ( & pdev - > dev , " MAC address: %pM \n " , adapter - > hw . mac . addr ) ;
if ( netdev - > features & NETIF_F_GRO )
dev_info ( & pdev - > dev , " GRO is enabled \n " ) ;
2021-08-19 08:47:40 +00:00
iavf_change_state ( adapter , __IAVF_DOWN ) ;
2019-05-14 10:37:07 -07:00
set_bit ( __IAVF_VSI_DOWN , adapter - > vsi . state ) ;
rtnl_unlock ( ) ;
iavf_misc_irq_enable ( adapter ) ;
wake_up ( & adapter - > down_waitqueue ) ;
adapter - > rss_key = kzalloc ( adapter - > rss_key_size , GFP_KERNEL ) ;
adapter - > rss_lut = kzalloc ( adapter - > rss_lut_size , GFP_KERNEL ) ;
2020-06-18 14:19:53 +00:00
if (!adapter->rss_key || !adapter->rss_lut) {
	err = -ENOMEM;
	goto err_mem;
}
2019-05-14 10:37:07 -07:00
if ( RSS_AQ ( adapter ) )
adapter - > aq_required | = IAVF_FLAG_AQ_CONFIGURE_RSS ;
else
iavf_init_rss ( adapter ) ;
2021-11-29 16:16:03 -08:00
if ( VLAN_V2_ALLOWED ( adapter ) )
/* request initial VLAN offload settings */
iavf_set_vlan_offload_features ( adapter , 0 , netdev - > features ) ;
2021-08-19 08:47:49 +00:00
return ;
2019-05-14 10:37:07 -07:00
err_mem :
iavf_free_rss ( adapter ) ;
err_register :
iavf_free_misc_irq ( adapter ) ;
err_sw_init :
iavf_reset_interrupt_capability ( adapter ) ;
err :
2021-08-19 08:47:49 +00:00
iavf_change_state ( adapter , __IAVF_INIT_FAILED ) ;
2019-05-14 10:37:07 -07:00
}
2019-05-14 10:37:04 -07:00
/**
* iavf_watchdog_task - Periodic call-back task
* @ work : pointer to work_struct
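*
* Besides periodic housekeeping, this task drives the driver's init state
* machine: __IAVF_STARTUP -> __IAVF_INIT_VERSION_CHECK ->
* __IAVF_INIT_GET_RESOURCES (-> __IAVF_INIT_GET_OFFLOAD_VLAN_V2_CAPS) ->
* __IAVF_INIT_CONFIG_ADAPTER, and it processes pending admin-queue requests
* via iavf_process_aq_command() once the adapter is up.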
* */
static void iavf_watchdog_task ( struct work_struct * work )
{
struct iavf_adapter *adapter = container_of(work,
					     struct iavf_adapter,
					     watchdog_task.work);
2019-05-14 10:37:04 -07:00
struct iavf_hw * hw = & adapter - > hw ;
u32 reg_val ;
2021-08-04 10:22:24 +02:00
if (!mutex_trylock(&adapter->crit_lock))
	goto restart_watchdog;
2019-05-14 10:37:06 -07:00
if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED)
	iavf_change_state(adapter, __IAVF_COMM_FAILED);
2019-05-14 10:37:06 -07:00
2021-08-19 08:47:58 +00:00
if ( adapter - > flags & IAVF_FLAG_RESET_NEEDED & &
adapter - > state ! = __IAVF_RESETTING ) {
iavf_change_state ( adapter , __IAVF_RESETTING ) ;
adapter - > aq_required = 0 ;
adapter - > current_op = VIRTCHNL_OP_UNKNOWN ;
}
2019-05-14 10:37:06 -07:00
switch ( adapter - > state ) {
2021-08-19 08:47:58 +00:00
case __IAVF_STARTUP :
iavf_startup ( adapter ) ;
mutex_unlock ( & adapter - > crit_lock ) ;
queue_delayed_work ( iavf_wq , & adapter - > watchdog_task ,
msecs_to_jiffies ( 30 ) ) ;
return ;
case __IAVF_INIT_VERSION_CHECK :
iavf_init_version_check ( adapter ) ;
mutex_unlock ( & adapter - > crit_lock ) ;
queue_delayed_work ( iavf_wq , & adapter - > watchdog_task ,
msecs_to_jiffies ( 30 ) ) ;
return ;
case __IAVF_INIT_GET_RESOURCES :
iavf_init_get_resources ( adapter ) ;
mutex_unlock ( & adapter - > crit_lock ) ;
queue_delayed_work ( iavf_wq , & adapter - > watchdog_task ,
msecs_to_jiffies ( 1 ) ) ;
return ;
2021-11-29 16:16:00 -08:00
case __IAVF_INIT_GET_OFFLOAD_VLAN_V2_CAPS :
iavf_init_get_offload_vlan_v2_caps ( adapter ) ;
mutex_unlock ( & adapter - > crit_lock ) ;
queue_delayed_work ( iavf_wq , & adapter - > watchdog_task ,
msecs_to_jiffies ( 1 ) ) ;
return ;
case __IAVF_INIT_CONFIG_ADAPTER :
iavf_init_config_adapter ( adapter ) ;
mutex_unlock ( & adapter - > crit_lock ) ;
queue_delayed_work ( iavf_wq , & adapter - > watchdog_task ,
msecs_to_jiffies ( 1 ) ) ;
return ;
2021-08-19 08:47:58 +00:00
case __IAVF_INIT_FAILED :
if ( + + adapter - > aq_wait_count > IAVF_AQ_MAX_ERR ) {
dev_err ( & adapter - > pdev - > dev ,
" Failed to communicate with PF; waiting before retry \n " ) ;
adapter - > flags | = IAVF_FLAG_PF_COMMS_FAILED ;
iavf_shutdown_adminq ( hw ) ;
mutex_unlock ( & adapter - > crit_lock ) ;
queue_delayed_work ( iavf_wq ,
& adapter - > watchdog_task , ( 5 * HZ ) ) ;
return ;
}
/* Try again from failed step*/
iavf_change_state ( adapter , adapter - > last_state ) ;
mutex_unlock ( & adapter - > crit_lock ) ;
queue_delayed_work ( iavf_wq , & adapter - > watchdog_task , HZ ) ;
return ;
2019-05-14 10:37:06 -07:00
case __IAVF_COMM_FAILED :
2019-05-14 10:37:04 -07:00
reg_val = rd32 ( hw , IAVF_VFGEN_RSTAT ) &
IAVF_VFGEN_RSTAT_VFR_STATE_MASK ;
if ( reg_val = = VIRTCHNL_VFR_VFACTIVE | |
reg_val = = VIRTCHNL_VFR_COMPLETED ) {
/* A chance for redemption! */
2019-05-14 10:37:06 -07:00
dev_err ( & adapter - > pdev - > dev ,
" Hardware came out of reset. Attempting reinit. \n " ) ;
2021-08-19 08:47:58 +00:00
/* When the init task contacts the PF and
 * gets everything set up again, it'll restart the
 * watchdog for us. Down, boy. Sit. Stay. Woof.
 */
2021-08-19 08:47:58 +00:00
iavf_change_state ( adapter , __IAVF_STARTUP ) ;
adapter - > flags & = ~ IAVF_FLAG_PF_COMMS_FAILED ;
2019-05-14 10:37:04 -07:00
}
adapter - > aq_required = 0 ;
adapter - > current_op = VIRTCHNL_OP_UNKNOWN ;
2021-11-10 11:13:50 +03:00
mutex_unlock ( & adapter - > crit_lock ) ;
2019-05-14 10:37:06 -07:00
queue_delayed_work ( iavf_wq ,
& adapter - > watchdog_task ,
msecs_to_jiffies ( 10 ) ) ;
2021-08-19 08:47:58 +00:00
return ;
2019-05-14 10:37:06 -07:00
case __IAVF_RESETTING :
2021-08-04 10:22:24 +02:00
mutex_unlock ( & adapter - > crit_lock ) ;
2019-05-14 10:37:06 -07:00
queue_delayed_work ( iavf_wq , & adapter - > watchdog_task , HZ * 2 ) ;
return ;
case __IAVF_DOWN :
case __IAVF_DOWN_PENDING :
case __IAVF_TESTING :
case __IAVF_RUNNING :
if ( adapter - > current_op ) {
if ( ! iavf_asq_done ( hw ) ) {
dev_dbg ( & adapter - > pdev - > dev ,
" Admin queue timeout \n " ) ;
iavf_send_api_ver ( adapter ) ;
}
} else {
2021-11-29 16:16:00 -08:00
int ret = iavf_process_aq_command(adapter);

/* An error will be returned if no commands were
 * processed; use this opportunity to update stats
 * if the error isn't -EOPNOTSUPP
 */
if (ret && ret != -EOPNOTSUPP &&
    adapter->state == __IAVF_RUNNING)
	iavf_request_stats(adapter);
}
2021-08-19 08:47:58 +00:00
if ( adapter - > state = = __IAVF_RUNNING )
iavf_detect_recover_hung ( & adapter - > vsi ) ;
2019-05-14 10:37:06 -07:00
break ;
case __IAVF_REMOVE :
default :
2021-11-10 11:13:50 +03:00
mutex_unlock ( & adapter - > crit_lock ) ;
2021-08-19 08:47:58 +00:00
return ;
2018-01-23 08:51:05 -08:00
}
2021-08-19 08:47:40 +00:00
/* check for hw reset */
2019-05-14 10:37:04 -07:00
reg_val = rd32 ( hw , IAVF_VF_ARQLEN1 ) & IAVF_VF_ARQLEN1_ARQENABLE_MASK ;
2019-05-14 10:37:06 -07:00
if ( ! reg_val ) {
2019-05-14 10:37:04 -07:00
adapter - > flags | = IAVF_FLAG_RESET_PENDING ;
adapter - > aq_required = 0 ;
adapter - > current_op = VIRTCHNL_OP_UNKNOWN ;
2019-05-14 10:37:06 -07:00
dev_err ( & adapter - > pdev - > dev , " Hardware reset detected \n " ) ;
queue_work ( iavf_wq , & adapter - > reset_task ) ;
2021-08-19 08:47:58 +00:00
mutex_unlock ( & adapter - > crit_lock ) ;
queue_delayed_work ( iavf_wq ,
& adapter - > watchdog_task , HZ * 2 ) ;
return ;
2019-05-14 10:37:04 -07:00
}
schedule_delayed_work ( & adapter - > client_task , msecs_to_jiffies ( 5 ) ) ;
2021-08-04 10:22:24 +02:00
mutex_unlock ( & adapter - > crit_lock ) ;
restart_watchdog :
2021-08-19 08:47:58 +00:00
queue_work ( iavf_wq , & adapter - > adminq_task ) ;
2013-12-21 06:12:45 +00:00
if ( adapter - > aq_required )
2019-05-14 10:37:05 -07:00
queue_delayed_work ( iavf_wq , & adapter - > watchdog_task ,
msecs_to_jiffies ( 20 ) ) ;
2013-12-21 06:12:45 +00:00
else
2019-05-14 10:37:05 -07:00
queue_delayed_work ( iavf_wq , & adapter - > watchdog_task , HZ * 2 ) ;
2013-12-21 06:12:45 +00:00
}
2018-09-14 17:37:46 -07:00
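/**
* iavf_disable_vf - give up on a VF after a failed reset
* @adapter: board private structure
*
* Called from the reset task when the PF never signals that the reset
* completed: tears down the traffic queues, MAC/VLAN/cloud filters and
* interrupts, shuts down the admin queue and leaves the adapter in
* __IAVF_DOWN.
**/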
static void iavf_disable_vf ( struct iavf_adapter * adapter )
2016-11-01 15:35:14 -07:00
{
2018-09-14 17:37:46 -07:00
struct iavf_mac_filter * f , * ftmp ;
struct iavf_vlan_filter * fv , * fvtmp ;
struct iavf_cloud_filter * cf , * cftmp ;
2016-11-01 15:35:14 -07:00
2018-09-14 17:37:46 -07:00
adapter - > flags | = IAVF_FLAG_PF_COMMS_FAILED ;
2016-11-01 15:35:14 -07:00
2017-10-27 11:06:49 -04:00
/* We don't use netif_running() because it may be true prior to
* ndo_open ( ) returning , so we can ' t assume it means all our open
* tasks have finished , since we ' re not holding the rtnl_lock here .
*/
2018-09-14 17:37:46 -07:00
if ( adapter - > state = = __IAVF_RUNNING ) {
2018-09-14 17:37:55 -07:00
set_bit ( __IAVF_VSI_DOWN , adapter - > vsi . state ) ;
2016-11-01 15:35:14 -07:00
netif_carrier_off ( adapter - > netdev ) ;
netif_tx_disable ( adapter - > netdev ) ;
adapter - > link_up = false ;
2018-09-14 17:37:46 -07:00
iavf_napi_disable_all ( adapter ) ;
iavf_irq_disable ( adapter ) ;
iavf_free_traffic_irqs ( adapter ) ;
iavf_free_all_tx_resources ( adapter ) ;
iavf_free_all_rx_resources ( adapter ) ;
2016-11-01 15:35:14 -07:00
}
2017-10-27 11:06:50 -04:00
spin_lock_bh ( & adapter - > mac_vlan_list_lock ) ;
2018-01-23 08:51:05 -08:00
/* Delete all of the filters */
2016-11-01 15:35:14 -07:00
list_for_each_entry_safe ( f , ftmp , & adapter - > mac_filter_list , list ) {
list_del ( & f - > list ) ;
kfree ( f ) ;
}
list_for_each_entry_safe ( fv , fvtmp , & adapter - > vlan_filter_list , list ) {
list_del ( & fv - > list ) ;
kfree ( fv ) ;
}
2017-10-27 11:06:50 -04:00
spin_unlock_bh ( & adapter - > mac_vlan_list_lock ) ;
2018-01-23 08:51:05 -08:00
spin_lock_bh ( & adapter - > cloud_filter_list_lock ) ;
list_for_each_entry_safe ( cf , cftmp , & adapter - > cloud_filter_list , list ) {
list_del ( & cf - > list ) ;
kfree ( cf ) ;
adapter - > num_cloud_filters - - ;
}
spin_unlock_bh ( & adapter - > cloud_filter_list_lock ) ;
2018-09-14 17:37:46 -07:00
iavf_free_misc_irq ( adapter ) ;
iavf_reset_interrupt_capability ( adapter ) ;
iavf_free_q_vectors ( adapter ) ;
2021-06-04 09:48:54 -07:00
iavf_free_queues ( adapter ) ;
2020-06-05 10:09:43 -07:00
memset ( adapter - > vf_res , 0 , IAVF_VIRTCHNL_VF_RESOURCE_SIZE ) ;
2018-09-14 17:37:46 -07:00
iavf_shutdown_adminq ( & adapter - > hw ) ;
2016-11-01 15:35:14 -07:00
adapter - > netdev - > flags & = ~ IFF_UP ;
2021-08-04 10:22:24 +02:00
mutex_unlock ( & adapter - > crit_lock ) ;
2018-09-14 17:37:46 -07:00
adapter - > flags & = ~ IAVF_FLAG_RESET_PENDING ;
2021-08-19 08:47:40 +00:00
iavf_change_state ( adapter , __IAVF_DOWN ) ;
2017-06-23 04:24:44 -04:00
wake_up ( & adapter - > down_waitqueue ) ;
2016-11-01 15:35:14 -07:00
dev_info ( & adapter - > pdev - > dev , " Reset task did not complete, VF disabled \n " ) ;
}
2013-12-21 06:12:45 +00:00
/**
2018-09-14 17:37:46 -07:00
* iavf_reset_task - Call - back task to handle hardware reset
2013-12-21 06:12:45 +00:00
* @ work : pointer to work_struct
*
* During reset we need to shut down and reinitialize the admin queue
* before we can use it to communicate with the PF again . We also clear
* and reinit the rings because that context is lost as well .
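*
* The sequence is: wait for the hardware reset to be signalled, wait for the
* PF to report the VF active again, then rebuild the admin queue, interrupt
* scheme, filters and rings. If the reset never completes, the VF is taken
* down via iavf_disable_vf().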
* */
2018-09-14 17:37:46 -07:00
static void iavf_reset_task ( struct work_struct * work )
2013-12-21 06:12:45 +00:00
{
2018-09-14 17:37:46 -07:00
struct iavf_adapter *adapter = container_of(work,
					     struct iavf_adapter,
					     reset_task);
2018-01-23 08:51:05 -08:00
struct virtchnl_vf_resource * vfres = adapter - > vf_res ;
2015-01-29 07:17:19 +00:00
struct net_device * netdev = adapter - > netdev ;
2018-09-14 17:37:52 -07:00
struct iavf_hw * hw = & adapter - > hw ;
2019-12-17 11:29:23 +01:00
struct iavf_mac_filter * f , * ftmp ;
2018-09-14 17:37:46 -07:00
struct iavf_cloud_filter * cf ;
2015-08-28 17:55:53 -04:00
u32 reg_val ;
2015-01-29 07:17:19 +00:00
int i = 0 , err ;
2017-10-27 11:06:49 -04:00
bool running ;
2013-12-21 06:12:45 +00:00
2017-12-18 05:16:43 -05:00
/* When device is being removed it doesn't make sense to run the reset
* task , just return in such a case .
*/
2021-08-04 10:22:24 +02:00
if ( mutex_is_locked ( & adapter - > remove_lock ) )
2017-12-18 05:16:43 -05:00
return ;
2021-08-04 10:22:24 +02:00
if ( iavf_lock_timeout ( & adapter - > crit_lock , 200 ) ) {
2021-03-16 11:01:41 +01:00
schedule_work ( & adapter - > reset_task ) ;
return ;
}
2021-08-04 10:22:24 +02:00
while ( ! mutex_trylock ( & adapter - > client_lock ) )
2014-09-13 07:40:44 +00:00
usleep_range ( 500 , 1000 ) ;
2017-01-24 10:23:59 -08:00
if ( CLIENT_ENABLED ( adapter ) ) {
2018-09-14 17:37:46 -07:00
adapter - > flags & = ~ ( IAVF_FLAG_CLIENT_NEEDS_OPEN |
IAVF_FLAG_CLIENT_NEEDS_CLOSE |
IAVF_FLAG_CLIENT_NEEDS_L2_PARAMS |
IAVF_FLAG_SERVICE_CLIENT_REQUESTED ) ;
2017-01-24 10:23:59 -08:00
cancel_delayed_work_sync ( & adapter - > client_task ) ;
2018-09-14 17:37:46 -07:00
iavf_notify_client_close ( & adapter - > vsi , true ) ;
2017-01-24 10:23:59 -08:00
}
2018-09-14 17:37:46 -07:00
iavf_misc_irq_disable ( adapter ) ;
if ( adapter - > flags & IAVF_FLAG_RESET_NEEDED ) {
adapter - > flags & = ~ IAVF_FLAG_RESET_NEEDED ;
2015-06-19 08:56:30 -07:00
/* Restart the AQ here. If we have been reset but didn't
* detect it , or if the PF had to reinit , our AQ will be hosed .
*/
2018-09-14 17:37:46 -07:00
iavf_shutdown_adminq ( hw ) ;
iavf_init_adminq ( hw ) ;
iavf_request_reset ( adapter ) ;
2014-03-06 08:59:56 +00:00
}
2018-09-14 17:37:46 -07:00
adapter - > flags | = IAVF_FLAG_RESET_PENDING ;
2014-03-06 08:59:56 +00:00
/* poll until we see the reset actually happen */
2020-06-05 10:09:46 -07:00
for ( i = 0 ; i < IAVF_RESET_WAIT_DETECTED_COUNT ; i + + ) {
2018-09-14 17:37:49 -07:00
reg_val = rd32 ( hw , IAVF_VF_ARQLEN1 ) &
IAVF_VF_ARQLEN1_ARQENABLE_MASK ;
2015-08-28 17:55:53 -04:00
if ( ! reg_val )
break ;
2015-08-28 17:55:53 -04:00
usleep_range ( 5000 , 10000 ) ;
}
2020-06-05 10:09:46 -07:00
if ( i = = IAVF_RESET_WAIT_DETECTED_COUNT ) {
2015-06-19 08:56:30 -07:00
dev_info ( & adapter - > pdev - > dev , " Never saw reset \n " ) ;
goto continue_reset ; /* act like the reset happened */
}
2013-12-21 06:12:45 +00:00
/* wait until the reset is complete and the PF is responding to us */
2020-06-05 10:09:46 -07:00
for ( i = 0 ; i < IAVF_RESET_WAIT_COMPLETE_COUNT ; i + + ) {
2016-10-05 09:30:45 -07:00
/* sleep first to make sure a minimum wait time is met */
2018-09-14 17:37:46 -07:00
msleep ( IAVF_RESET_WAIT_MS ) ;
2016-10-05 09:30:45 -07:00
2018-09-14 17:37:49 -07:00
reg_val = rd32 ( hw , IAVF_VFGEN_RSTAT ) &
IAVF_VFGEN_RSTAT_VFR_STATE_MASK ;
2017-05-11 11:23:11 -07:00
if ( reg_val = = VIRTCHNL_VFR_VFACTIVE )
2013-12-21 06:12:45 +00:00
break ;
}
2016-10-05 09:30:45 -07:00
2015-12-23 12:05:52 -08:00
pci_set_master ( adapter - > pdev ) ;
2021-06-04 09:53:28 -07:00
pci_restore_msi_state ( adapter - > pdev ) ;
2016-10-05 09:30:45 -07:00
2020-06-05 10:09:46 -07:00
if ( i = = IAVF_RESET_WAIT_COMPLETE_COUNT ) {
2014-05-10 04:49:06 +00:00
dev_err ( & adapter - > pdev - > dev , " Reset never finished (%x) \n " ,
2015-08-28 17:55:53 -04:00
reg_val ) ;
2018-09-14 17:37:46 -07:00
iavf_disable_vf ( adapter ) ;
2021-08-04 10:22:24 +02:00
mutex_unlock ( & adapter - > client_lock ) ;
return ; /* Do not attempt to reinit. It's dead, Jim. */
2013-12-21 06:12:45 +00:00
}
continue_reset :
2017-10-27 11:06:49 -04:00
/* We don't use netif_running() because it may be true prior to
* ndo_open ( ) returning , so we can ' t assume it means all our open
* tasks have finished , since we ' re not holding the rtnl_lock here .
*/
2018-09-14 17:37:46 -07:00
running = ( ( adapter - > state = = __IAVF_RUNNING ) | |
( adapter - > state = = __IAVF_RESETTING ) ) ;
2017-10-27 11:06:49 -04:00
if ( running ) {
2021-09-07 09:25:40 +00:00
netdev - > flags & = ~ IFF_UP ;
2015-02-27 09:18:31 +00:00
netif_carrier_off ( netdev ) ;
2015-06-19 08:56:30 -07:00
netif_tx_stop_all_queues ( netdev ) ;
adapter - > link_up = false ;
2018-09-14 17:37:46 -07:00
iavf_napi_disable_all ( adapter ) ;
2015-02-27 09:18:31 +00:00
}
2018-09-14 17:37:46 -07:00
iavf_irq_disable ( adapter ) ;
2015-01-29 07:17:19 +00:00
2021-08-19 08:47:40 +00:00
iavf_change_state ( adapter , __IAVF_RESETTING ) ;
2018-09-14 17:37:46 -07:00
adapter - > flags & = ~ IAVF_FLAG_RESET_PENDING ;
2015-06-19 08:56:30 -07:00
/* free the Tx/Rx rings and descriptors, might be better to just
 * re-use them sometime in the future
 */
2018-09-14 17:37:46 -07:00
iavf_free_all_rx_resources ( adapter ) ;
iavf_free_all_tx_resources ( adapter ) ;
2013-12-21 06:12:45 +00:00
2018-09-14 17:37:46 -07:00
adapter - > flags | = IAVF_FLAG_QUEUES_DISABLED ;
2013-12-21 06:12:45 +00:00
/* kill and reinit the admin queue */
2018-09-14 17:37:46 -07:00
iavf_shutdown_adminq ( hw ) ;
2017-05-11 11:23:11 -07:00
adapter - > current_op = VIRTCHNL_OP_UNKNOWN ;
2018-09-14 17:37:46 -07:00
err = iavf_init_adminq ( hw ) ;
2013-12-21 06:12:45 +00:00
if ( err )
2015-01-29 07:17:19 +00:00
dev_info ( & adapter - > pdev - > dev , " Failed to init adminq: %d \n " ,
err ) ;
2017-08-22 06:57:50 -04:00
adapter - > aq_required = 0 ;
2018-09-14 17:37:46 -07:00
if ( adapter - > flags & IAVF_FLAG_REINIT_ITR_NEEDED ) {
err = iavf_reinit_interrupt_scheme ( adapter ) ;
2017-08-22 06:57:50 -04:00
if ( err )
goto reset_err ;
}
2013-12-21 06:12:45 +00:00
2021-06-04 09:53:33 -07:00
if ( RSS_AQ ( adapter ) ) {
adapter - > aq_required | = IAVF_FLAG_AQ_CONFIGURE_RSS ;
} else {
err = iavf_init_rss ( adapter ) ;
if ( err )
goto reset_err ;
}
2018-09-14 17:37:46 -07:00
adapter - > aq_required | = IAVF_FLAG_AQ_GET_CONFIG ;
2021-11-29 16:16:00 -08:00
/* always set since VIRTCHNL_OP_GET_VF_RESOURCES has not been
 * sent/received yet, so VLAN_V2_ALLOWED() is not reliable here;
 * however, the VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS message won't be sent
 * until VIRTCHNL_OP_GET_VF_RESOURCES and VIRTCHNL_VF_OFFLOAD_VLAN_V2 have
 * been successfully sent and negotiated
 */
adapter - > aq_required | = IAVF_FLAG_AQ_GET_OFFLOAD_VLAN_V2_CAPS ;
2018-09-14 17:37:46 -07:00
adapter - > aq_required | = IAVF_FLAG_AQ_MAP_VECTORS ;
2015-01-29 07:17:19 +00:00
2017-10-27 11:06:50 -04:00
spin_lock_bh ( & adapter - > mac_vlan_list_lock ) ;
2019-12-17 11:29:23 +01:00
/* Delete filter for the current MAC address, it could have
* been changed by the PF via administratively set MAC .
* Will be re - added via VIRTCHNL_OP_GET_VF_RESOURCES .
*/
list_for_each_entry_safe ( f , ftmp , & adapter - > mac_filter_list , list ) {
if ( ether_addr_equal ( f - > macaddr , adapter - > hw . mac . addr ) ) {
list_del ( & f - > list ) ;
kfree ( f ) ;
}
}
2015-01-29 07:17:19 +00:00
/* re-add all MAC filters */
list_for_each_entry ( f , & adapter - > mac_filter_list , list ) {
f - > add = true ;
}
2017-10-27 11:06:50 -04:00
spin_unlock_bh ( & adapter - > mac_vlan_list_lock ) ;
2018-01-23 08:51:05 -08:00
/* check if TCs are running and re-add all cloud filters */
spin_lock_bh ( & adapter - > cloud_filter_list_lock ) ;
if ( ( vfres - > vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ ) & &
adapter - > num_tc ) {
list_for_each_entry ( cf , & adapter - > cloud_filter_list , list ) {
cf - > add = true ;
}
}
spin_unlock_bh ( & adapter - > cloud_filter_list_lock ) ;
2018-09-14 17:37:46 -07:00
adapter - > aq_required | = IAVF_FLAG_AQ_ADD_MAC_FILTER ;
adapter - > aq_required | = IAVF_FLAG_AQ_ADD_CLOUD_FILTER ;
iavf_misc_irq_enable ( adapter ) ;
2013-12-21 06:12:45 +00:00
2019-05-14 10:37:05 -07:00
mod_delayed_work ( iavf_wq , & adapter - > watchdog_task , 2 ) ;
2013-12-21 06:12:45 +00:00
2017-10-27 11:06:49 -04:00
/* We were running when the reset started, so we need to restore some
* state here .
*/
if ( running ) {
2013-12-21 06:12:45 +00:00
/* allocate transmit descriptors */
2018-09-14 17:37:46 -07:00
err = iavf_setup_all_tx_resources ( adapter ) ;
2013-12-21 06:12:45 +00:00
if ( err )
goto reset_err ;
/* allocate receive descriptors */
2018-09-14 17:37:46 -07:00
err = iavf_setup_all_rx_resources ( adapter ) ;
2013-12-21 06:12:45 +00:00
if ( err )
goto reset_err ;
2018-09-14 17:37:46 -07:00
if ( adapter - > flags & IAVF_FLAG_REINIT_ITR_NEEDED ) {
err = iavf_request_traffic_irqs ( adapter , netdev - > name ) ;
2017-08-22 06:57:50 -04:00
if ( err )
goto reset_err ;
2018-09-14 17:37:46 -07:00
adapter - > flags & = ~ IAVF_FLAG_REINIT_ITR_NEEDED ;
2017-08-22 06:57:50 -04:00
}
2018-09-14 17:37:46 -07:00
iavf_configure ( adapter ) ;
2013-12-21 06:12:45 +00:00
2021-08-19 08:47:40 +00:00
/* iavf_up_complete() will switch device back
* to __IAVF_RUNNING
*/
2018-09-14 17:37:46 -07:00
iavf_up_complete ( adapter ) ;
2021-09-07 09:25:40 +00:00
netdev - > flags | = IFF_UP ;
2018-09-14 17:37:46 -07:00
iavf_irq_enable ( adapter , true ) ;
2015-06-19 08:56:30 -07:00
} else {
2021-08-19 08:47:40 +00:00
iavf_change_state ( adapter , __IAVF_DOWN ) ;
2017-06-23 04:24:44 -04:00
wake_up ( & adapter - > down_waitqueue ) ;
2013-12-21 06:12:45 +00:00
}
2021-08-04 10:22:24 +02:00
mutex_unlock ( & adapter - > client_lock ) ;
mutex_unlock ( & adapter - > crit_lock ) ;
2015-06-19 08:56:30 -07:00
2013-12-21 06:12:45 +00:00
return ;
reset_err :
2021-08-04 10:22:24 +02:00
mutex_unlock ( & adapter - > client_lock ) ;
mutex_unlock ( & adapter - > crit_lock ) ;
2021-09-07 09:25:40 +00:00
if ( running ) {
2021-08-19 08:47:58 +00:00
iavf_change_state ( adapter , __IAVF_RUNNING ) ;
2021-09-07 09:25:40 +00:00
netdev - > flags | = IFF_UP ;
}
2014-05-10 04:49:06 +00:00
dev_err ( & adapter - > pdev - > dev , " failed to allocate resources during reinit \n " ) ;
2018-09-14 17:37:46 -07:00
iavf_close ( netdev ) ;
2013-12-21 06:12:45 +00:00
}
/**
2018-09-14 17:37:46 -07:00
* iavf_adminq_task - worker thread to clean the admin queue
2013-12-21 06:12:45 +00:00
* @ work : pointer to work_struct containing our data
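*
* Drains pending messages from the admin receive queue, hands each one to
* iavf_virtchnl_completion(), then checks the ARQ/ASQ length registers for
* error indications before re-enabling the admin queue interrupt.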
* */
2018-09-14 17:37:46 -07:00
static void iavf_adminq_task ( struct work_struct * work )
2013-12-21 06:12:45 +00:00
{
2018-09-14 17:37:46 -07:00
struct iavf_adapter * adapter =
container_of ( work , struct iavf_adapter , adminq_task ) ;
2018-09-14 17:37:52 -07:00
struct iavf_hw * hw = & adapter - > hw ;
2019-04-17 15:17:32 -07:00
struct iavf_arq_event_info event ;
2017-06-22 09:44:32 -07:00
enum virtchnl_ops v_op ;
2019-04-17 15:17:30 -07:00
enum iavf_status ret , v_ret ;
2014-05-22 06:32:07 +00:00
u32 val , oldval ;
2013-12-21 06:12:45 +00:00
u16 pending ;
2018-09-14 17:37:46 -07:00
if ( adapter - > flags & IAVF_FLAG_PF_COMMS_FAILED )
2014-12-09 08:53:07 +00:00
goto out ;
2018-09-14 17:37:46 -07:00
event . buf_len = IAVF_MAX_AQ_BUF_SIZE ;
2014-11-11 20:02:19 +00:00
event . msg_buf = kzalloc ( event . buf_len , GFP_KERNEL ) ;
2014-05-10 04:49:04 +00:00
if ( ! event . msg_buf )
2014-12-09 08:53:07 +00:00
goto out ;
2014-05-10 04:49:04 +00:00
2021-08-04 10:22:24 +02:00
if ( iavf_lock_timeout ( & adapter - > crit_lock , 200 ) )
2021-03-16 11:01:41 +01:00
goto freedom ;
2013-12-21 06:12:45 +00:00
do {
2018-09-14 17:37:46 -07:00
ret = iavf_clean_arq_element ( hw , & event , & pending ) ;
2017-06-22 09:44:32 -07:00
v_op = ( enum virtchnl_ops ) le32_to_cpu ( event . desc . cookie_high ) ;
2019-04-17 15:17:30 -07:00
v_ret = ( enum iavf_status ) le32_to_cpu ( event . desc . cookie_low ) ;
2017-06-22 09:44:32 -07:00
if ( ret | | ! v_op )
2013-12-21 06:12:45 +00:00
break ; /* No event to process or error cleaning ARQ */
2018-09-14 17:37:46 -07:00
iavf_virtchnl_completion ( adapter , v_op , v_ret , event . msg_buf ,
event . msg_len ) ;
2014-11-11 20:02:42 +00:00
if ( pending ! = 0 )
2018-09-14 17:37:46 -07:00
memset ( event . msg_buf , 0 , IAVF_MAX_AQ_BUF_SIZE ) ;
2013-12-21 06:12:45 +00:00
} while ( pending ) ;
2021-08-04 10:22:24 +02:00
mutex_unlock ( & adapter - > crit_lock ) ;
2013-12-21 06:12:45 +00:00
2015-06-19 08:56:30 -07:00
if ((adapter->flags &
     (IAVF_FLAG_RESET_PENDING | IAVF_FLAG_RESET_NEEDED)) ||
    adapter->state == __IAVF_RESETTING)
2015-06-19 08:56:30 -07:00
goto freedom ;
2014-05-22 06:32:07 +00:00
/* check for error indications */
val = rd32 ( hw , hw - > aq . arq . len ) ;
2021-06-04 09:48:59 -07:00
if ( val = = 0xdeadbeef | | val = = 0xffffffff ) /* device in reset */
2016-03-10 14:59:49 -08:00
goto freedom ;
2014-05-22 06:32:07 +00:00
oldval = val ;
2018-09-14 17:37:49 -07:00
if ( val & IAVF_VF_ARQLEN1_ARQVFE_MASK ) {
2014-05-22 06:32:07 +00:00
dev_info ( & adapter - > pdev - > dev , " ARQ VF Error detected \n " ) ;
2018-09-14 17:37:49 -07:00
val & = ~ IAVF_VF_ARQLEN1_ARQVFE_MASK ;
2014-05-22 06:32:07 +00:00
}
2018-09-14 17:37:49 -07:00
if ( val & IAVF_VF_ARQLEN1_ARQOVFL_MASK ) {
2014-05-22 06:32:07 +00:00
dev_info ( & adapter - > pdev - > dev , " ARQ Overflow Error detected \n " ) ;
2018-09-14 17:37:49 -07:00
val & = ~ IAVF_VF_ARQLEN1_ARQOVFL_MASK ;
2014-05-22 06:32:07 +00:00
}
2018-09-14 17:37:49 -07:00
if ( val & IAVF_VF_ARQLEN1_ARQCRIT_MASK ) {
2014-05-22 06:32:07 +00:00
dev_info ( & adapter - > pdev - > dev , " ARQ Critical Error detected \n " ) ;
2018-09-14 17:37:49 -07:00
val & = ~ IAVF_VF_ARQLEN1_ARQCRIT_MASK ;
2014-05-22 06:32:07 +00:00
}
if ( oldval ! = val )
wr32 ( hw , hw - > aq . arq . len , val ) ;
val = rd32 ( hw , hw - > aq . asq . len ) ;
oldval = val ;
2018-09-14 17:37:49 -07:00
if ( val & IAVF_VF_ATQLEN1_ATQVFE_MASK ) {
2014-05-22 06:32:07 +00:00
dev_info ( & adapter - > pdev - > dev , " ASQ VF Error detected \n " ) ;
2018-09-14 17:37:49 -07:00
val & = ~ IAVF_VF_ATQLEN1_ATQVFE_MASK ;
2014-05-22 06:32:07 +00:00
}
2018-09-14 17:37:49 -07:00
if ( val & IAVF_VF_ATQLEN1_ATQOVFL_MASK ) {
2014-05-22 06:32:07 +00:00
dev_info ( & adapter - > pdev - > dev , " ASQ Overflow Error detected \n " ) ;
2018-09-14 17:37:49 -07:00
val & = ~ IAVF_VF_ATQLEN1_ATQOVFL_MASK ;
2014-05-22 06:32:07 +00:00
}
2018-09-14 17:37:49 -07:00
if ( val & IAVF_VF_ATQLEN1_ATQCRIT_MASK ) {
2014-05-22 06:32:07 +00:00
dev_info ( & adapter - > pdev - > dev , " ASQ Critical Error detected \n " ) ;
2018-09-14 17:37:49 -07:00
val & = ~ IAVF_VF_ATQLEN1_ATQCRIT_MASK ;
2014-05-22 06:32:07 +00:00
}
if ( oldval ! = val )
wr32 ( hw , hw - > aq . asq . len , val ) ;
2015-06-19 08:56:30 -07:00
freedom :
2014-12-09 08:53:07 +00:00
kfree ( event . msg_buf ) ;
out :
2013-12-21 06:12:45 +00:00
/* re-enable Admin queue interrupt cause */
2018-09-14 17:37:46 -07:00
iavf_misc_irq_enable ( adapter ) ;
2013-12-21 06:12:45 +00:00
}
2017-01-24 10:23:59 -08:00
/**
2018-09-14 17:37:46 -07:00
* iavf_client_task - worker thread to perform client work
2017-01-24 10:23:59 -08:00
* @ work : pointer to work_struct containing our data
*
* This task handles client interactions . Because client calls can be
* reentrant , we can ' t handle them in the watchdog .
* */
2018-09-14 17:37:46 -07:00
static void iavf_client_task ( struct work_struct * work )
2017-01-24 10:23:59 -08:00
{
2018-09-14 17:37:46 -07:00
struct iavf_adapter * adapter =
container_of ( work , struct iavf_adapter , client_task . work ) ;
2017-01-24 10:23:59 -08:00
/* If we can't get the client bit, just give up. We'll be rescheduled
* later .
*/
2021-08-04 10:22:24 +02:00
if ( ! mutex_trylock ( & adapter - > client_lock ) )
2017-01-24 10:23:59 -08:00
return ;
2018-09-14 17:37:46 -07:00
if ( adapter - > flags & IAVF_FLAG_SERVICE_CLIENT_REQUESTED ) {
iavf_client_subtask ( adapter ) ;
adapter - > flags & = ~ IAVF_FLAG_SERVICE_CLIENT_REQUESTED ;
2017-01-24 10:23:59 -08:00
goto out ;
}
2018-09-14 17:37:46 -07:00
if ( adapter - > flags & IAVF_FLAG_CLIENT_NEEDS_L2_PARAMS ) {
iavf_notify_client_l2_params ( & adapter - > vsi ) ;
adapter - > flags & = ~ IAVF_FLAG_CLIENT_NEEDS_L2_PARAMS ;
2017-11-14 07:00:51 -05:00
goto out ;
}
2018-09-14 17:37:46 -07:00
if ( adapter - > flags & IAVF_FLAG_CLIENT_NEEDS_CLOSE ) {
iavf_notify_client_close ( & adapter - > vsi , false ) ;
adapter - > flags & = ~ IAVF_FLAG_CLIENT_NEEDS_CLOSE ;
2017-01-24 10:23:59 -08:00
goto out ;
}
2018-09-14 17:37:46 -07:00
if ( adapter - > flags & IAVF_FLAG_CLIENT_NEEDS_OPEN ) {
iavf_notify_client_open ( & adapter - > vsi ) ;
adapter - > flags & = ~ IAVF_FLAG_CLIENT_NEEDS_OPEN ;
2017-01-24 10:23:59 -08:00
}
out :
2021-08-04 10:22:24 +02:00
mutex_unlock ( & adapter - > client_lock ) ;
2017-01-24 10:23:59 -08:00
}
2013-12-21 06:12:45 +00:00
/**
2018-09-14 17:37:46 -07:00
* iavf_free_all_tx_resources - Free Tx Resources for All Queues
2013-12-21 06:12:45 +00:00
* @ adapter : board private structure
*
* Free all transmit software resources
* */
2018-09-14 17:37:46 -07:00
void iavf_free_all_tx_resources ( struct iavf_adapter * adapter )
2013-12-21 06:12:45 +00:00
{
int i ;
2015-11-19 11:34:18 -08:00
if ( ! adapter - > tx_rings )
return ;
2014-10-25 03:24:34 +00:00
for ( i = 0 ; i < adapter - > num_active_queues ; i + + )
2015-10-26 19:44:40 -04:00
if ( adapter - > tx_rings [ i ] . desc )
2018-09-14 17:37:46 -07:00
iavf_free_tx_resources ( & adapter - > tx_rings [ i ] ) ;
2013-12-21 06:12:45 +00:00
}
/**
2018-09-14 17:37:46 -07:00
* iavf_setup_all_tx_resources - allocate all queues Tx resources
2013-12-21 06:12:45 +00:00
* @ adapter : board private structure
*
* If this function returns with an error , then it ' s possible one or
* more of the rings is populated ( while the rest are not ) . It is the
* caller's duty to clean those orphaned rings.
*
* Return 0 on success , negative on failure
* */
2018-09-14 17:37:46 -07:00
static int iavf_setup_all_tx_resources ( struct iavf_adapter * adapter )
2013-12-21 06:12:45 +00:00
{
int i , err = 0 ;
2014-10-25 03:24:34 +00:00
for ( i = 0 ; i < adapter - > num_active_queues ; i + + ) {
2015-10-26 19:44:40 -04:00
adapter - > tx_rings [ i ] . count = adapter - > tx_desc_count ;
2018-09-14 17:37:46 -07:00
err = iavf_setup_tx_descriptors ( & adapter - > tx_rings [ i ] ) ;
2013-12-21 06:12:45 +00:00
if ( ! err )
continue ;
dev_err ( & adapter - > pdev - > dev ,
2015-08-26 15:14:17 -04:00
" Allocation for Tx Queue %u failed \n " , i ) ;
2013-12-21 06:12:45 +00:00
break ;
}
return err ;
}
/**
2018-09-14 17:37:46 -07:00
* iavf_setup_all_rx_resources - allocate all queues Rx resources
2013-12-21 06:12:45 +00:00
* @ adapter : board private structure
*
* If this function returns with an error, then it's possible one or
* more of the rings is populated (while the rest are not). It is the
* caller's duty to clean those orphaned rings.
*
* Return 0 on success , negative on failure
* */
2018-09-14 17:37:46 -07:00
static int iavf_setup_all_rx_resources ( struct iavf_adapter * adapter )
2013-12-21 06:12:45 +00:00
{
int i , err = 0 ;
2014-10-25 03:24:34 +00:00
for ( i = 0 ; i < adapter - > num_active_queues ; i + + ) {
2015-10-26 19:44:40 -04:00
adapter - > rx_rings [ i ] . count = adapter - > rx_desc_count ;
2018-09-14 17:37:46 -07:00
err = iavf_setup_rx_descriptors ( & adapter - > rx_rings [ i ] ) ;
2013-12-21 06:12:45 +00:00
if ( ! err )
continue ;
dev_err ( & adapter - > pdev - > dev ,
2015-08-26 15:14:17 -04:00
" Allocation for Rx Queue %u failed \n " , i ) ;
2013-12-21 06:12:45 +00:00
break ;
}
return err ;
}
/**
2018-09-14 17:37:46 -07:00
* iavf_free_all_rx_resources - Free Rx Resources for All Queues
2013-12-21 06:12:45 +00:00
* @ adapter : board private structure
*
* Free all receive software resources
* */
2018-09-14 17:37:46 -07:00
void iavf_free_all_rx_resources ( struct iavf_adapter * adapter )
2013-12-21 06:12:45 +00:00
{
int i ;
2015-11-19 11:34:18 -08:00
if ( ! adapter - > rx_rings )
return ;
2014-10-25 03:24:34 +00:00
for ( i = 0 ; i < adapter - > num_active_queues ; i + + )
2015-10-26 19:44:40 -04:00
if ( adapter - > rx_rings [ i ] . desc )
2018-09-14 17:37:46 -07:00
iavf_free_rx_resources ( & adapter - > rx_rings [ i ] ) ;
2013-12-21 06:12:45 +00:00
}
2018-01-23 08:51:01 -08:00
/**
2018-09-14 17:37:46 -07:00
* iavf_validate_tx_bandwidth - validate the max Tx bandwidth
2018-01-23 08:51:01 -08:00
* @ adapter : board private structure
* @ max_tx_rate : max Tx bw for a tc
* */
2018-09-14 17:37:46 -07:00
static int iavf_validate_tx_bandwidth ( struct iavf_adapter * adapter ,
u64 max_tx_rate )
2018-01-23 08:51:01 -08:00
{
int speed = 0 , ret = 0 ;
2020-06-05 10:09:43 -07:00
if ( ADV_LINK_SUPPORT ( adapter ) ) {
if ( adapter - > link_speed_mbps < U32_MAX ) {
speed = adapter - > link_speed_mbps ;
goto validate_bw ;
} else {
dev_err ( & adapter - > pdev - > dev , " Unknown link speed \n " ) ;
return - EINVAL ;
}
}
2018-01-23 08:51:01 -08:00
switch ( adapter - > link_speed ) {
2020-06-05 10:09:44 -07:00
case VIRTCHNL_LINK_SPEED_40GB :
2020-06-05 10:09:45 -07:00
speed = SPEED_40000 ;
2018-01-23 08:51:01 -08:00
break ;
2020-06-05 10:09:44 -07:00
case VIRTCHNL_LINK_SPEED_25GB :
2020-06-05 10:09:45 -07:00
speed = SPEED_25000 ;
2018-01-23 08:51:01 -08:00
break ;
2020-06-05 10:09:44 -07:00
case VIRTCHNL_LINK_SPEED_20GB :
2020-06-05 10:09:45 -07:00
speed = SPEED_20000 ;
2018-01-23 08:51:01 -08:00
break ;
2020-06-05 10:09:44 -07:00
case VIRTCHNL_LINK_SPEED_10GB :
2020-06-05 10:09:45 -07:00
speed = SPEED_10000 ;
break ;
case VIRTCHNL_LINK_SPEED_5GB :
speed = SPEED_5000 ;
break ;
case VIRTCHNL_LINK_SPEED_2_5GB :
speed = SPEED_2500 ;
2018-01-23 08:51:01 -08:00
break ;
2020-06-05 10:09:44 -07:00
case VIRTCHNL_LINK_SPEED_1GB :
2020-06-05 10:09:45 -07:00
speed = SPEED_1000 ;
2018-01-23 08:51:01 -08:00
break ;
2020-06-05 10:09:44 -07:00
case VIRTCHNL_LINK_SPEED_100MB :
2020-06-05 10:09:45 -07:00
speed = SPEED_100 ;
2018-01-23 08:51:01 -08:00
break ;
default :
break ;
}
2020-06-05 10:09:43 -07:00
validate_bw :
2018-01-23 08:51:01 -08:00
if ( max_tx_rate > speed ) {
dev_err ( & adapter - > pdev - > dev ,
" Invalid tx rate specified \n " ) ;
ret = - EINVAL ;
}
return ret ;
}
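/* Worked example for the check above (hypothetical numbers): on a VF
 * reporting link_speed_mbps = 10000, an aggregate request of
 * 4000 Mbps + 2000 Mbps = 6000 Mbps passes, while a request totalling
 * 12000 Mbps exceeds the link speed and fails with -EINVAL.
 */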
2018-01-23 08:50:57 -08:00
/**
2021-03-18 16:18:52 -07:00
* iavf_validate_ch_config - validate queue mapping info
2018-01-23 08:50:57 -08:00
* @ adapter : board private structure
* @ mqprio_qopt : queue parameters
*
* This function validates if the config provided by the user to
* configure queue channels is valid or not . Returns 0 on a valid
* config .
* */
2018-09-14 17:37:46 -07:00
static int iavf_validate_ch_config ( struct iavf_adapter * adapter ,
struct tc_mqprio_qopt_offload * mqprio_qopt )
2018-01-23 08:50:57 -08:00
{
2018-01-23 08:51:01 -08:00
u64 total_max_rate = 0 ;
2018-01-23 08:50:57 -08:00
int i , num_qps = 0 ;
2018-01-23 08:51:01 -08:00
u64 tx_rate = 0 ;
int ret = 0 ;
2018-01-23 08:50:57 -08:00
2018-09-14 17:37:46 -07:00
if ( mqprio_qopt - > qopt . num_tc > IAVF_MAX_TRAFFIC_CLASS | |
2018-01-23 08:50:57 -08:00
mqprio_qopt - > qopt . num_tc < 1 )
return - EINVAL ;
for ( i = 0 ; i < = mqprio_qopt - > qopt . num_tc - 1 ; i + + ) {
if ( ! mqprio_qopt - > qopt . count [ i ] | |
mqprio_qopt - > qopt . offset [ i ] ! = num_qps )
return - EINVAL ;
2018-01-23 08:51:01 -08:00
if ( mqprio_qopt - > min_rate [ i ] ) {
dev_err ( & adapter - > pdev - > dev ,
" Invalid min tx rate (greater than 0) specified \n " ) ;
return - EINVAL ;
}
/* convert to Mbps */
tx_rate = div_u64 ( mqprio_qopt - > max_rate [ i ] ,
2018-09-14 17:37:46 -07:00
IAVF_MBPS_DIVISOR ) ;
2018-01-23 08:51:01 -08:00
total_max_rate + = tx_rate ;
2018-01-23 08:50:57 -08:00
num_qps + = mqprio_qopt - > qopt . count [ i ] ;
}
2018-09-14 17:37:46 -07:00
if ( num_qps > IAVF_MAX_REQ_QUEUES )
2018-01-23 08:50:57 -08:00
return - EINVAL ;
2018-09-14 17:37:46 -07:00
ret = iavf_validate_tx_bandwidth ( adapter , total_max_rate ) ;
2018-01-23 08:51:01 -08:00
return ret ;
2018-01-23 08:50:57 -08:00
}
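/* Sketch of the queue layout rule enforced above (illustrative values):
 * with num_tc = 2, count = {4, 4} and offset = {0, 4} is accepted
 * because each TC starts exactly where the previous one ended;
 * offset = {0, 8} would leave a gap and fail the offset != num_qps
 * test. The summed count must also stay within IAVF_MAX_REQ_QUEUES,
 * and a non-zero per-TC min_rate is rejected.
 */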
2018-01-23 08:51:05 -08:00
/**
2020-09-25 15:24:37 -07:00
* iavf_del_all_cloud_filters - delete all cloud filters on the traffic classes
* @ adapter : board private structure
2018-01-23 08:51:05 -08:00
* */
2018-09-14 17:37:46 -07:00
static void iavf_del_all_cloud_filters ( struct iavf_adapter * adapter )
2018-01-23 08:51:05 -08:00
{
2018-09-14 17:37:46 -07:00
struct iavf_cloud_filter * cf , * cftmp ;
2018-01-23 08:51:05 -08:00
spin_lock_bh ( & adapter - > cloud_filter_list_lock ) ;
list_for_each_entry_safe ( cf , cftmp , & adapter - > cloud_filter_list ,
list ) {
list_del ( & cf - > list ) ;
kfree ( cf ) ;
adapter - > num_cloud_filters - - ;
}
spin_unlock_bh ( & adapter - > cloud_filter_list_lock ) ;
}
2018-01-23 08:50:57 -08:00
/**
2018-09-14 17:37:46 -07:00
* __iavf_setup_tc - configure multiple traffic classes
2018-01-23 08:50:57 -08:00
* @ netdev : network interface device structure
2020-09-25 15:24:37 -07:00
* @ type_data : tc offload data
2018-01-23 08:50:57 -08:00
*
* This function processes the config information provided by the
* user to configure traffic classes / queue channels and packages the
* information to request the PF to setup traffic classes .
*
* Returns 0 on success .
* */
2018-09-14 17:37:46 -07:00
static int __iavf_setup_tc ( struct net_device * netdev , void * type_data )
2018-01-23 08:50:57 -08:00
{
struct tc_mqprio_qopt_offload * mqprio_qopt = type_data ;
2018-09-14 17:37:46 -07:00
struct iavf_adapter * adapter = netdev_priv ( netdev ) ;
2018-01-23 08:50:57 -08:00
struct virtchnl_vf_resource * vfres = adapter - > vf_res ;
u8 num_tc = 0 , total_qps = 0 ;
int ret = 0 , netdev_tc = 0 ;
2018-01-23 08:51:01 -08:00
u64 max_tx_rate ;
2018-01-23 08:50:57 -08:00
u16 mode ;
int i ;
num_tc = mqprio_qopt - > qopt . num_tc ;
mode = mqprio_qopt - > mode ;
/* delete queue_channel */
if ( ! mqprio_qopt - > qopt . hw ) {
2018-09-14 17:37:46 -07:00
if ( adapter - > ch_config . state = = __IAVF_TC_RUNNING ) {
2018-01-23 08:50:57 -08:00
/* reset the tc configuration */
netdev_reset_tc ( netdev ) ;
adapter - > num_tc = 0 ;
netif_tx_stop_all_queues ( netdev ) ;
netif_tx_disable ( netdev ) ;
2018-09-14 17:37:46 -07:00
iavf_del_all_cloud_filters ( adapter ) ;
adapter - > aq_required = IAVF_FLAG_AQ_DISABLE_CHANNELS ;
2018-01-23 08:50:57 -08:00
goto exit ;
} else {
return - EINVAL ;
}
}
/* add queue channel */
if ( mode = = TC_MQPRIO_MODE_CHANNEL ) {
if ( ! ( vfres - > vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ ) ) {
dev_err ( & adapter - > pdev - > dev , " ADq not supported \n " ) ;
return - EOPNOTSUPP ;
}
2018-09-14 17:37:46 -07:00
if ( adapter - > ch_config . state ! = __IAVF_TC_INVALID ) {
2018-01-23 08:50:57 -08:00
dev_err ( & adapter - > pdev - > dev , " TC configuration already exists \n " ) ;
return - EINVAL ;
}
2018-09-14 17:37:46 -07:00
ret = iavf_validate_ch_config ( adapter , mqprio_qopt ) ;
2018-01-23 08:50:57 -08:00
if ( ret )
return ret ;
/* Return if same TC config is requested */
if ( adapter - > num_tc = = num_tc )
return 0 ;
adapter - > num_tc = num_tc ;
2018-09-14 17:37:46 -07:00
for ( i = 0 ; i < IAVF_MAX_TRAFFIC_CLASS ; i + + ) {
2018-01-23 08:50:57 -08:00
if ( i < num_tc ) {
adapter - > ch_config . ch_info [ i ] . count =
mqprio_qopt - > qopt . count [ i ] ;
adapter - > ch_config . ch_info [ i ] . offset =
mqprio_qopt - > qopt . offset [ i ] ;
total_qps + = mqprio_qopt - > qopt . count [ i ] ;
2018-01-23 08:51:01 -08:00
max_tx_rate = mqprio_qopt - > max_rate [ i ] ;
/* convert to Mbps */
max_tx_rate = div_u64 ( max_tx_rate ,
2018-09-14 17:37:46 -07:00
IAVF_MBPS_DIVISOR ) ;
2018-01-23 08:51:01 -08:00
adapter - > ch_config . ch_info [ i ] . max_tx_rate =
max_tx_rate ;
2018-01-23 08:50:57 -08:00
} else {
adapter - > ch_config . ch_info [ i ] . count = 1 ;
adapter - > ch_config . ch_info [ i ] . offset = 0 ;
}
}
adapter - > ch_config . total_qps = total_qps ;
netif_tx_stop_all_queues ( netdev ) ;
netif_tx_disable ( netdev ) ;
2018-09-14 17:37:46 -07:00
adapter - > aq_required | = IAVF_FLAG_AQ_ENABLE_CHANNELS ;
2018-01-23 08:50:57 -08:00
netdev_reset_tc ( netdev ) ;
/* Report the tc mapping up the stack */
netdev_set_num_tc ( adapter - > netdev , num_tc ) ;
2018-09-14 17:37:46 -07:00
for ( i = 0 ; i < IAVF_MAX_TRAFFIC_CLASS ; i + + ) {
2018-01-23 08:50:57 -08:00
u16 qcount = mqprio_qopt - > qopt . count [ i ] ;
u16 qoffset = mqprio_qopt - > qopt . offset [ i ] ;
if ( i < num_tc )
netdev_set_tc_queue ( netdev , netdev_tc + + , qcount ,
qoffset ) ;
}
}
exit :
return ret ;
}
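/* Illustrative configuration that reaches this function (hypothetical
 * interface name and rates; syntax per iproute2's mqprio channel mode,
 * treat as a sketch):
 *   tc qdisc add dev <vf-netdev> root mqprio num_tc 2 \
 *      map 0 0 0 0 1 1 1 1 queues 4@0 4@4 hw 1 mode channel \
 *      shaper bw_rlimit max_rate 4Gbit 2Gbit
 * arrives with qopt.hw set and mode == TC_MQPRIO_MODE_CHANNEL; deleting
 * the qdisc arrives with qopt.hw == 0 and takes the DISABLE_CHANNELS
 * path above.
 */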
2018-01-23 08:51:05 -08:00
/**
2018-09-14 17:37:46 -07:00
* iavf_parse_cls_flower - Parse tc flower filters provided by kernel
2018-01-23 08:51:05 -08:00
* @ adapter : board private structure
2020-09-25 15:24:37 -07:00
* @ f : pointer to struct flow_cls_offload
2018-01-23 08:51:05 -08:00
* @ filter : pointer to cloud filter structure
*/
2018-09-14 17:37:46 -07:00
static int iavf_parse_cls_flower ( struct iavf_adapter * adapter ,
2019-07-09 22:55:49 +02:00
struct flow_cls_offload * f ,
2018-09-14 17:37:46 -07:00
struct iavf_cloud_filter * filter )
2018-01-23 08:51:05 -08:00
{
2019-07-09 22:55:49 +02:00
struct flow_rule * rule = flow_cls_offload_flow_rule ( f ) ;
2019-02-02 12:50:43 +01:00
struct flow_dissector * dissector = rule - > match . dissector ;
2018-01-23 08:51:05 -08:00
u16 n_proto_mask = 0 ;
u16 n_proto_key = 0 ;
u8 field_flags = 0 ;
u16 addr_type = 0 ;
u16 n_proto = 0 ;
int i = 0 ;
2018-02-19 10:23:30 +00:00
struct virtchnl_filter * vf = & filter - > f ;
2018-01-23 08:51:05 -08:00
2019-02-02 12:50:43 +01:00
if ( dissector - > used_keys &
2018-01-23 08:51:05 -08:00
~ ( BIT ( FLOW_DISSECTOR_KEY_CONTROL ) |
BIT ( FLOW_DISSECTOR_KEY_BASIC ) |
BIT ( FLOW_DISSECTOR_KEY_ETH_ADDRS ) |
BIT ( FLOW_DISSECTOR_KEY_VLAN ) |
BIT ( FLOW_DISSECTOR_KEY_IPV4_ADDRS ) |
BIT ( FLOW_DISSECTOR_KEY_IPV6_ADDRS ) |
BIT ( FLOW_DISSECTOR_KEY_PORTS ) |
BIT ( FLOW_DISSECTOR_KEY_ENC_KEYID ) ) ) {
dev_err ( & adapter - > pdev - > dev , " Unsupported key used: 0x%x \n " ,
2019-02-02 12:50:43 +01:00
dissector - > used_keys ) ;
2018-01-23 08:51:05 -08:00
return - EOPNOTSUPP ;
}
2019-02-02 12:50:43 +01:00
if ( flow_rule_match_key ( rule , FLOW_DISSECTOR_KEY_ENC_KEYID ) ) {
struct flow_match_enc_keyid match ;
2018-01-23 08:51:05 -08:00
2019-02-02 12:50:43 +01:00
flow_rule_match_enc_keyid ( rule , & match ) ;
if ( match . mask - > keyid ! = 0 )
2018-09-14 17:37:46 -07:00
field_flags | = IAVF_CLOUD_FIELD_TEN_ID ;
2018-01-23 08:51:05 -08:00
}
2019-02-02 12:50:43 +01:00
if ( flow_rule_match_key ( rule , FLOW_DISSECTOR_KEY_BASIC ) ) {
struct flow_match_basic match ;
2018-01-23 08:51:05 -08:00
2019-02-02 12:50:43 +01:00
flow_rule_match_basic ( rule , & match ) ;
n_proto_key = ntohs ( match . key - > n_proto ) ;
n_proto_mask = ntohs ( match . mask - > n_proto ) ;
2018-01-23 08:51:05 -08:00
if ( n_proto_key = = ETH_P_ALL ) {
n_proto_key = 0 ;
n_proto_mask = 0 ;
}
n_proto = n_proto_key & n_proto_mask ;
if ( n_proto ! = ETH_P_IP & & n_proto ! = ETH_P_IPV6 )
return - EINVAL ;
if ( n_proto = = ETH_P_IPV6 ) {
/* specify flow type as TCP IPv6 */
2018-02-19 10:23:30 +00:00
vf - > flow_type = VIRTCHNL_TCP_V6_FLOW ;
2018-01-23 08:51:05 -08:00
}
2019-02-02 12:50:43 +01:00
if ( match . key - > ip_proto ! = IPPROTO_TCP ) {
2018-01-23 08:51:05 -08:00
dev_info ( & adapter - > pdev - > dev , " Only TCP transport is supported \n " ) ;
return - EINVAL ;
}
}
2019-02-02 12:50:43 +01:00
if ( flow_rule_match_key ( rule , FLOW_DISSECTOR_KEY_ETH_ADDRS ) ) {
struct flow_match_eth_addrs match ;
flow_rule_match_eth_addrs ( rule , & match ) ;
2018-01-23 08:51:05 -08:00
/* use is_broadcast and is_zero to check for all 0xff or all 0 */
2019-02-02 12:50:43 +01:00
if ( ! is_zero_ether_addr ( match . mask - > dst ) ) {
if ( is_broadcast_ether_addr ( match . mask - > dst ) ) {
2018-09-14 17:37:46 -07:00
field_flags | = IAVF_CLOUD_FIELD_OMAC ;
2018-01-23 08:51:05 -08:00
} else {
dev_err ( & adapter - > pdev - > dev , " Bad ether dest mask %pM \n " ,
2019-02-02 12:50:43 +01:00
match . mask - > dst ) ;
2021-06-04 09:53:34 -07:00
return - EINVAL ;
2018-01-23 08:51:05 -08:00
}
}
2019-02-02 12:50:43 +01:00
if ( ! is_zero_ether_addr ( match . mask - > src ) ) {
if ( is_broadcast_ether_addr ( match . mask - > src ) ) {
2018-09-14 17:37:46 -07:00
field_flags | = IAVF_CLOUD_FIELD_IMAC ;
2018-01-23 08:51:05 -08:00
} else {
dev_err ( & adapter - > pdev - > dev , " Bad ether src mask %pM \n " ,
2019-02-02 12:50:43 +01:00
match . mask - > src ) ;
2021-06-04 09:53:34 -07:00
return - EINVAL ;
2018-01-23 08:51:05 -08:00
}
}
2019-02-02 12:50:43 +01:00
if ( ! is_zero_ether_addr ( match . key - > dst ) )
if ( is_valid_ether_addr ( match . key - > dst ) | |
is_multicast_ether_addr ( match . key - > dst ) ) {
2018-01-23 08:51:05 -08:00
/* set the mask if a valid dst_mac address */
for ( i = 0 ; i < ETH_ALEN ; i + + )
2018-02-19 10:23:30 +00:00
vf - > mask . tcp_spec . dst_mac [ i ] | = 0xff ;
ether_addr_copy ( vf - > data . tcp_spec . dst_mac ,
2019-02-02 12:50:43 +01:00
match . key - > dst ) ;
2018-01-23 08:51:05 -08:00
}
2019-02-02 12:50:43 +01:00
if ( ! is_zero_ether_addr ( match . key - > src ) )
if ( is_valid_ether_addr ( match . key - > src ) | |
is_multicast_ether_addr ( match . key - > src ) ) {
2018-01-23 08:51:05 -08:00
/* set the mask if a valid src_mac address */
for ( i = 0 ; i < ETH_ALEN ; i + + )
2018-02-19 10:23:30 +00:00
vf - > mask . tcp_spec . src_mac [ i ] | = 0xff ;
ether_addr_copy ( vf - > data . tcp_spec . src_mac ,
2019-02-02 12:50:43 +01:00
match . key - > src ) ;
2018-01-23 08:51:05 -08:00
}
}
2019-02-02 12:50:43 +01:00
if ( flow_rule_match_key ( rule , FLOW_DISSECTOR_KEY_VLAN ) ) {
struct flow_match_vlan match ;
2018-01-23 08:51:05 -08:00
2019-02-02 12:50:43 +01:00
flow_rule_match_vlan ( rule , & match ) ;
if ( match . mask - > vlan_id ) {
if ( match . mask - > vlan_id = = VLAN_VID_MASK ) {
2018-09-14 17:37:46 -07:00
field_flags | = IAVF_CLOUD_FIELD_IVLAN ;
2018-01-23 08:51:05 -08:00
} else {
dev_err ( & adapter - > pdev - > dev , " Bad vlan mask %u \n " ,
2019-02-02 12:50:43 +01:00
match . mask - > vlan_id ) ;
2021-06-04 09:53:34 -07:00
return - EINVAL ;
2018-01-23 08:51:05 -08:00
}
}
2018-02-19 10:23:30 +00:00
vf - > mask . tcp_spec . vlan_id | = cpu_to_be16 ( 0xffff ) ;
2019-02-02 12:50:43 +01:00
vf - > data . tcp_spec . vlan_id = cpu_to_be16 ( match . key - > vlan_id ) ;
2018-01-23 08:51:05 -08:00
}
2019-02-02 12:50:43 +01:00
if ( flow_rule_match_key ( rule , FLOW_DISSECTOR_KEY_CONTROL ) ) {
struct flow_match_control match ;
2018-01-23 08:51:05 -08:00
2019-02-02 12:50:43 +01:00
flow_rule_match_control ( rule , & match ) ;
addr_type = match . key - > addr_type ;
2018-01-23 08:51:05 -08:00
}
if ( addr_type = = FLOW_DISSECTOR_KEY_IPV4_ADDRS ) {
2019-02-02 12:50:43 +01:00
struct flow_match_ipv4_addrs match ;
flow_rule_match_ipv4_addrs ( rule , & match ) ;
if ( match . mask - > dst ) {
if ( match . mask - > dst = = cpu_to_be32 ( 0xffffffff ) ) {
2018-09-14 17:37:46 -07:00
field_flags | = IAVF_CLOUD_FIELD_IIP ;
2018-01-23 08:51:05 -08:00
} else {
dev_err ( & adapter - > pdev - > dev , " Bad ip dst mask 0x%08x \n " ,
2019-02-02 12:50:43 +01:00
be32_to_cpu ( match . mask - > dst ) ) ;
2021-06-04 09:53:34 -07:00
return - EINVAL ;
2018-01-23 08:51:05 -08:00
}
}
2019-02-02 12:50:43 +01:00
if ( match . mask - > src ) {
if ( match . mask - > src = = cpu_to_be32 ( 0xffffffff ) ) {
2018-09-14 17:37:46 -07:00
field_flags | = IAVF_CLOUD_FIELD_IIP ;
2018-01-23 08:51:05 -08:00
} else {
dev_err ( & adapter - > pdev - > dev , " Bad ip src mask 0x%08x \n " ,
2019-02-02 12:50:43 +01:00
be32_to_cpu(match.mask->src));
2021-06-04 09:53:34 -07:00
return - EINVAL ;
2018-01-23 08:51:05 -08:00
}
}
2018-09-14 17:37:46 -07:00
if ( field_flags & IAVF_CLOUD_FIELD_TEN_ID ) {
2018-01-23 08:51:05 -08:00
dev_info ( & adapter - > pdev - > dev , " Tenant id not allowed for ip filter \n " ) ;
2021-06-04 09:53:34 -07:00
return - EINVAL ;
2018-01-23 08:51:05 -08:00
}
2019-02-02 12:50:43 +01:00
if ( match . key - > dst ) {
2018-02-19 10:23:30 +00:00
vf - > mask . tcp_spec . dst_ip [ 0 ] | = cpu_to_be32 ( 0xffffffff ) ;
2019-02-02 12:50:43 +01:00
vf - > data . tcp_spec . dst_ip [ 0 ] = match . key - > dst ;
2018-01-23 08:51:05 -08:00
}
2019-02-02 12:50:43 +01:00
if ( match . key - > src ) {
2018-02-19 10:23:30 +00:00
vf - > mask . tcp_spec . src_ip [ 0 ] | = cpu_to_be32 ( 0xffffffff ) ;
2019-02-02 12:50:43 +01:00
vf - > data . tcp_spec . src_ip [ 0 ] = match . key - > src ;
2018-01-23 08:51:05 -08:00
}
}
if ( addr_type = = FLOW_DISSECTOR_KEY_IPV6_ADDRS ) {
2019-02-02 12:50:43 +01:00
struct flow_match_ipv6_addrs match ;
flow_rule_match_ipv6_addrs ( rule , & match ) ;
2018-01-23 08:51:05 -08:00
/* validate mask, make sure it is not IPV6_ADDR_ANY */
2019-02-02 12:50:43 +01:00
if ( ipv6_addr_any ( & match . mask - > dst ) ) {
2018-01-23 08:51:05 -08:00
dev_err ( & adapter - > pdev - > dev , " Bad ipv6 dst mask 0x%02x \n " ,
IPV6_ADDR_ANY ) ;
2021-06-04 09:53:34 -07:00
return - EINVAL ;
2018-01-23 08:51:05 -08:00
}
/* src and dest IPv6 address should not be LOOPBACK
* (0:0:0:0:0:0:0:1), which can be represented as ::1
*/
2019-02-02 12:50:43 +01:00
if ( ipv6_addr_loopback ( & match . key - > dst ) | |
ipv6_addr_loopback ( & match . key - > src ) ) {
2018-01-23 08:51:05 -08:00
dev_err ( & adapter - > pdev - > dev ,
" ipv6 addr should not be loopback \n " ) ;
2021-06-04 09:53:34 -07:00
return - EINVAL ;
2018-01-23 08:51:05 -08:00
}
2019-02-02 12:50:43 +01:00
if ( ! ipv6_addr_any ( & match . mask - > dst ) | |
! ipv6_addr_any ( & match . mask - > src ) )
2018-09-14 17:37:46 -07:00
field_flags | = IAVF_CLOUD_FIELD_IIP ;
2018-01-23 08:51:05 -08:00
2018-02-19 10:23:30 +00:00
for ( i = 0 ; i < 4 ; i + + )
vf - > mask . tcp_spec . dst_ip [ i ] | = cpu_to_be32 ( 0xffffffff ) ;
2019-02-02 12:50:43 +01:00
memcpy ( & vf - > data . tcp_spec . dst_ip , & match . key - > dst . s6_addr32 ,
2018-02-19 10:23:30 +00:00
sizeof ( vf - > data . tcp_spec . dst_ip ) ) ;
for ( i = 0 ; i < 4 ; i + + )
vf - > mask . tcp_spec . src_ip [ i ] | = cpu_to_be32 ( 0xffffffff ) ;
2019-02-02 12:50:43 +01:00
memcpy ( & vf - > data . tcp_spec . src_ip , & match . key - > src . s6_addr32 ,
2018-02-19 10:23:30 +00:00
sizeof ( vf - > data . tcp_spec . src_ip ) ) ;
2018-01-23 08:51:05 -08:00
}
2019-02-02 12:50:43 +01:00
if ( flow_rule_match_key ( rule , FLOW_DISSECTOR_KEY_PORTS ) ) {
struct flow_match_ports match ;
flow_rule_match_ports ( rule , & match ) ;
if ( match . mask - > src ) {
if ( match . mask - > src = = cpu_to_be16 ( 0xffff ) ) {
2018-09-14 17:37:46 -07:00
field_flags | = IAVF_CLOUD_FIELD_IIP ;
2018-01-23 08:51:05 -08:00
} else {
dev_err ( & adapter - > pdev - > dev , " Bad src port mask %u \n " ,
2019-02-02 12:50:43 +01:00
be16_to_cpu ( match . mask - > src ) ) ;
2021-06-04 09:53:34 -07:00
return - EINVAL ;
2018-01-23 08:51:05 -08:00
}
}
2019-02-02 12:50:43 +01:00
if ( match . mask - > dst ) {
if ( match . mask - > dst = = cpu_to_be16 ( 0xffff ) ) {
2018-09-14 17:37:46 -07:00
field_flags | = IAVF_CLOUD_FIELD_IIP ;
2018-01-23 08:51:05 -08:00
} else {
dev_err ( & adapter - > pdev - > dev , " Bad dst port mask %u \n " ,
2019-02-02 12:50:43 +01:00
be16_to_cpu ( match . mask - > dst ) ) ;
2021-06-04 09:53:34 -07:00
return - EINVAL ;
2018-01-23 08:51:05 -08:00
}
}
2019-02-02 12:50:43 +01:00
if ( match . key - > dst ) {
2018-02-19 10:23:30 +00:00
vf - > mask . tcp_spec . dst_port | = cpu_to_be16 ( 0xffff ) ;
2019-02-02 12:50:43 +01:00
vf - > data . tcp_spec . dst_port = match . key - > dst ;
2018-01-23 08:51:05 -08:00
}
2019-02-02 12:50:43 +01:00
if ( match . key - > src ) {
2018-02-19 10:23:30 +00:00
vf - > mask . tcp_spec . src_port | = cpu_to_be16 ( 0xffff ) ;
2019-02-02 12:50:43 +01:00
vf - > data . tcp_spec . src_port = match . key - > src ;
2018-01-23 08:51:05 -08:00
}
}
2018-02-19 10:23:30 +00:00
vf - > field_flags = field_flags ;
2018-01-23 08:51:05 -08:00
return 0 ;
}
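/* Illustrative tc-flower filter that this parser accepts (hypothetical
 * addresses; standard tc-flower syntax, shown only as a sketch):
 *   tc filter add dev <vf-netdev> protocol ip parent ffff: prio 1 \
 *      flower ip_proto tcp dst_ip 192.168.10.2 dst_port 80 \
 *      skip_sw hw_tc 1
 * Only TCP over IPv4/IPv6 is supported, and each matched field must use
 * an all-ones or all-zeroes mask, as enforced above.
 */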
/**
2018-09-14 17:37:46 -07:00
* iavf_handle_tclass - Forward to a traffic class on the device
2018-01-23 08:51:05 -08:00
* @ adapter : board private structure
* @ tc : traffic class index on the device
* @ filter : pointer to cloud filter structure
*/
2018-09-14 17:37:46 -07:00
static int iavf_handle_tclass ( struct iavf_adapter * adapter , u32 tc ,
struct iavf_cloud_filter * filter )
2018-01-23 08:51:05 -08:00
{
if ( tc = = 0 )
return 0 ;
if ( tc < adapter - > num_tc ) {
if ( ! filter - > f . data . tcp_spec . dst_port ) {
dev_err ( & adapter - > pdev - > dev ,
" Specify destination port to redirect to traffic class other than TC0 \n " ) ;
return - EINVAL ;
}
}
/* redirect to a traffic class on the same device */
filter - > f . action = VIRTCHNL_ACTION_TC_REDIRECT ;
filter - > f . action_meta = tc ;
return 0 ;
}
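/* Steering to TC 0 is a no-op (the default class). For any other TC the
 * action is encoded as VIRTCHNL_ACTION_TC_REDIRECT with action_meta
 * holding the target TC index; the check above insists on a destination
 * port so the redirect describes a concrete L4 flow.
 */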
/**
2018-09-14 17:37:46 -07:00
* iavf_configure_clsflower - Add tc flower filters
2018-01-23 08:51:05 -08:00
* @ adapter : board private structure
2019-07-09 22:55:49 +02:00
* @ cls_flower : Pointer to struct flow_cls_offload
2018-01-23 08:51:05 -08:00
*/
2018-09-14 17:37:46 -07:00
static int iavf_configure_clsflower ( struct iavf_adapter * adapter ,
2019-07-09 22:55:49 +02:00
struct flow_cls_offload * cls_flower )
2018-01-23 08:51:05 -08:00
{
int tc = tc_classid_to_hwtc ( adapter - > netdev , cls_flower - > classid ) ;
2018-09-14 17:37:46 -07:00
struct iavf_cloud_filter * filter = NULL ;
2018-03-19 09:28:03 -07:00
int err = - EINVAL , count = 50 ;
2018-01-23 08:51:05 -08:00
if ( tc < 0 ) {
dev_err ( & adapter - > pdev - > dev , " Invalid traffic class \n " ) ;
return - EINVAL ;
}
filter = kzalloc ( sizeof ( * filter ) , GFP_KERNEL ) ;
2018-03-19 09:28:03 -07:00
if ( ! filter )
return - ENOMEM ;
2021-08-04 10:22:24 +02:00
while ( ! mutex_trylock ( & adapter - > crit_lock ) ) {
2021-06-04 09:48:55 -07:00
if ( - - count = = 0 ) {
kfree ( filter ) ;
return err ;
}
2018-03-19 09:28:03 -07:00
udelay ( 1 ) ;
2018-01-23 08:51:05 -08:00
}
2018-03-19 09:28:03 -07:00
2018-01-23 08:51:05 -08:00
filter - > cookie = cls_flower - > cookie ;
/* set the mask to all zeroes to begin with */
memset ( & filter - > f . mask . tcp_spec , 0 , sizeof ( struct virtchnl_l4_spec ) ) ;
/* start out with flow type and eth type IPv4 */
filter - > f . flow_type = VIRTCHNL_TCP_V4_FLOW ;
2018-09-14 17:37:46 -07:00
err = iavf_parse_cls_flower ( adapter , cls_flower , filter ) ;
2021-06-04 09:48:57 -07:00
if ( err )
2018-01-23 08:51:05 -08:00
goto err ;
2018-09-14 17:37:46 -07:00
err = iavf_handle_tclass ( adapter , tc , filter ) ;
2021-06-04 09:48:57 -07:00
if ( err )
2018-01-23 08:51:05 -08:00
goto err ;
/* add filter to the list */
spin_lock_bh ( & adapter - > cloud_filter_list_lock ) ;
list_add_tail ( & filter - > list , & adapter - > cloud_filter_list ) ;
adapter - > num_cloud_filters + + ;
filter - > add = true ;
2018-09-14 17:37:46 -07:00
adapter - > aq_required | = IAVF_FLAG_AQ_ADD_CLOUD_FILTER ;
2018-01-23 08:51:05 -08:00
spin_unlock_bh ( & adapter - > cloud_filter_list_lock ) ;
err :
if ( err )
kfree ( filter ) ;
2018-03-19 09:28:03 -07:00
2021-08-04 10:22:24 +02:00
mutex_unlock ( & adapter - > crit_lock ) ;
2018-01-23 08:51:05 -08:00
return err ;
}
2018-09-14 17:37:46 -07:00
/* iavf_find_cf - Find the cloud filter in the list
2018-01-23 08:51:05 -08:00
* @ adapter : Board private structure
* @ cookie : filter specific cookie
*
* Returns ptr to the filter object or NULL . Must be called while holding the
* cloud_filter_list_lock .
*/
2018-09-14 17:37:46 -07:00
static struct iavf_cloud_filter * iavf_find_cf ( struct iavf_adapter * adapter ,
unsigned long * cookie )
2018-01-23 08:51:05 -08:00
{
2018-09-14 17:37:46 -07:00
struct iavf_cloud_filter * filter = NULL ;
2018-01-23 08:51:05 -08:00
if ( ! cookie )
return NULL ;
list_for_each_entry ( filter , & adapter - > cloud_filter_list , list ) {
if ( ! memcmp ( cookie , & filter - > cookie , sizeof ( filter - > cookie ) ) )
return filter ;
}
return NULL ;
}
/**
2018-09-14 17:37:46 -07:00
* iavf_delete_clsflower - Remove tc flower filters
2018-01-23 08:51:05 -08:00
* @ adapter : board private structure
2019-07-09 22:55:49 +02:00
* @ cls_flower : Pointer to struct flow_cls_offload
2018-01-23 08:51:05 -08:00
*/
2018-09-14 17:37:46 -07:00
static int iavf_delete_clsflower ( struct iavf_adapter * adapter ,
2019-07-09 22:55:49 +02:00
struct flow_cls_offload * cls_flower )
2018-01-23 08:51:05 -08:00
{
2018-09-14 17:37:46 -07:00
struct iavf_cloud_filter * filter = NULL ;
2018-01-23 08:51:05 -08:00
int err = 0 ;
spin_lock_bh ( & adapter - > cloud_filter_list_lock ) ;
2018-09-14 17:37:46 -07:00
filter = iavf_find_cf ( adapter , & cls_flower - > cookie ) ;
2018-01-23 08:51:05 -08:00
if ( filter ) {
filter - > del = true ;
2018-09-14 17:37:46 -07:00
adapter - > aq_required | = IAVF_FLAG_AQ_DEL_CLOUD_FILTER ;
2018-01-23 08:51:05 -08:00
} else {
err = - EINVAL ;
}
spin_unlock_bh ( & adapter - > cloud_filter_list_lock ) ;
return err ;
}
/**
2018-09-14 17:37:46 -07:00
* iavf_setup_tc_cls_flower - flower classifier offloads
2020-09-25 15:24:37 -07:00
* @ adapter : board private structure
* @ cls_flower : pointer to flow_cls_offload struct with flow info
2018-01-23 08:51:05 -08:00
*/
2018-09-14 17:37:46 -07:00
static int iavf_setup_tc_cls_flower ( struct iavf_adapter * adapter ,
2019-07-09 22:55:49 +02:00
struct flow_cls_offload * cls_flower )
2018-01-23 08:51:05 -08:00
{
switch ( cls_flower - > command ) {
2019-07-09 22:55:49 +02:00
case FLOW_CLS_REPLACE :
2018-09-14 17:37:46 -07:00
return iavf_configure_clsflower ( adapter , cls_flower ) ;
2019-07-09 22:55:49 +02:00
case FLOW_CLS_DESTROY :
2018-09-14 17:37:46 -07:00
return iavf_delete_clsflower ( adapter , cls_flower ) ;
2019-07-09 22:55:49 +02:00
case FLOW_CLS_STATS :
2018-01-23 08:51:05 -08:00
return - EOPNOTSUPP ;
default :
2018-06-24 10:38:39 +02:00
return - EOPNOTSUPP ;
2018-01-23 08:51:05 -08:00
}
}
/**
2018-09-14 17:37:46 -07:00
* iavf_setup_tc_block_cb - block callback for tc
2018-01-23 08:51:05 -08:00
* @type: type of offload
* @type_data: offload data
* @cb_priv: board private structure (the iavf adapter)
*
* This function is the block callback for traffic classes
* */
2018-09-14 17:37:46 -07:00
static int iavf_setup_tc_block_cb ( enum tc_setup_type type , void * type_data ,
void * cb_priv )
2018-01-23 08:51:05 -08:00
{
2020-02-26 09:21:57 +01:00
struct iavf_adapter * adapter = cb_priv ;
if ( ! tc_cls_can_offload_and_chain0 ( adapter - > netdev , type_data ) )
return - EOPNOTSUPP ;
2018-01-23 08:51:05 -08:00
switch ( type ) {
case TC_SETUP_CLSFLOWER :
2018-09-14 17:37:46 -07:00
return iavf_setup_tc_cls_flower ( cb_priv , type_data ) ;
2018-01-23 08:51:05 -08:00
default :
return - EOPNOTSUPP ;
}
}
2019-07-09 22:55:46 +02:00
static LIST_HEAD ( iavf_block_cb_list ) ;
2018-01-23 08:50:57 -08:00
/**
2018-09-14 17:37:46 -07:00
* iavf_setup_tc - configure multiple traffic classes
2018-01-23 08:50:57 -08:00
* @ netdev : network interface device structure
* @ type : type of offload
2020-09-25 15:24:37 -07:00
* @ type_data : tc offload data
2018-01-23 08:50:57 -08:00
*
* This function is the callback to ndo_setup_tc in the
* netdev_ops .
*
* Returns 0 on success
* */
2018-09-14 17:37:46 -07:00
static int iavf_setup_tc ( struct net_device * netdev , enum tc_setup_type type ,
void * type_data )
2018-01-23 08:50:57 -08:00
{
2019-07-09 22:55:39 +02:00
struct iavf_adapter * adapter = netdev_priv ( netdev ) ;
2018-01-23 08:51:05 -08:00
switch ( type ) {
case TC_SETUP_QDISC_MQPRIO :
2018-09-14 17:37:46 -07:00
return __iavf_setup_tc ( netdev , type_data ) ;
2018-01-23 08:51:05 -08:00
case TC_SETUP_BLOCK :
2019-07-09 22:55:46 +02:00
return flow_block_cb_setup_simple ( type_data ,
& iavf_block_cb_list ,
2019-07-09 22:55:39 +02:00
iavf_setup_tc_block_cb ,
adapter , adapter , true ) ;
2018-01-23 08:51:05 -08:00
default :
2018-01-23 08:50:57 -08:00
return - EOPNOTSUPP ;
2018-01-23 08:51:05 -08:00
}
2018-01-23 08:50:57 -08:00
}
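/* ndo_setup_tc dispatch: TC_SETUP_QDISC_MQPRIO carries the mqprio (ADq
 * channel) configuration handled by __iavf_setup_tc(), while
 * TC_SETUP_BLOCK registers iavf_setup_tc_block_cb() so that tc-flower
 * filters are delivered to this driver as flow_cls_offload requests.
 */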
2013-12-21 06:12:45 +00:00
/**
2018-09-14 17:37:46 -07:00
* iavf_open - Called when a network interface is made active
2013-12-21 06:12:45 +00:00
* @ netdev : network interface device structure
*
* Returns 0 on success , negative value on failure
*
* The open entry point is called when a network interface is made
* active by the system ( IFF_UP ) . At this point all resources needed
* for transmit and receive operations are allocated , the interrupt
2019-05-14 10:37:05 -07:00
* handler is registered with the OS , the watchdog is started ,
2013-12-21 06:12:45 +00:00
* and the stack is notified that the interface is ready .
* */
2018-09-14 17:37:46 -07:00
static int iavf_open ( struct net_device * netdev )
2013-12-21 06:12:45 +00:00
{
2018-09-14 17:37:46 -07:00
struct iavf_adapter * adapter = netdev_priv ( netdev ) ;
2013-12-21 06:12:45 +00:00
int err ;
2018-09-14 17:37:46 -07:00
if ( adapter - > flags & IAVF_FLAG_PF_COMMS_FAILED ) {
2014-02-13 03:48:53 -08:00
dev_err ( & adapter - > pdev - > dev , " Unable to open device due to PF driver failure. \n " ) ;
return - EIO ;
}
2015-12-09 15:50:27 -08:00
2021-08-04 10:22:24 +02:00
while ( ! mutex_trylock ( & adapter - > crit_lock ) )
2017-10-27 11:06:52 -04:00
usleep_range ( 500 , 1000 ) ;
2018-09-14 17:37:46 -07:00
if ( adapter - > state ! = __IAVF_DOWN ) {
2017-10-27 11:06:52 -04:00
err = - EBUSY ;
goto err_unlock ;
}
2013-12-21 06:12:45 +00:00
2021-10-22 10:30:14 +02:00
if ( adapter - > state = = __IAVF_RUNNING & &
! test_bit ( __IAVF_VSI_DOWN , adapter - > vsi . state ) ) {
dev_dbg ( & adapter - > pdev - > dev , " VF is already open. \n " ) ;
err = 0 ;
goto err_unlock ;
}
2013-12-21 06:12:45 +00:00
/* allocate transmit descriptors */
2018-09-14 17:37:46 -07:00
err = iavf_setup_all_tx_resources ( adapter ) ;
2013-12-21 06:12:45 +00:00
if ( err )
goto err_setup_tx ;
/* allocate receive descriptors */
2018-09-14 17:37:46 -07:00
err = iavf_setup_all_rx_resources ( adapter ) ;
2013-12-21 06:12:45 +00:00
if ( err )
goto err_setup_rx ;
/* clear any pending interrupts, may auto mask */
2018-09-14 17:37:46 -07:00
err = iavf_request_traffic_irqs ( adapter , netdev - > name ) ;
2013-12-21 06:12:45 +00:00
if ( err )
goto err_req_irq ;
2018-02-05 13:03:36 -08:00
spin_lock_bh ( & adapter - > mac_vlan_list_lock ) ;
2018-09-14 17:37:46 -07:00
iavf_add_filter ( adapter , adapter - > hw . mac . addr ) ;
2018-02-05 13:03:36 -08:00
spin_unlock_bh ( & adapter - > mac_vlan_list_lock ) ;
2021-06-04 09:53:27 -07:00
/* Restore VLAN filters that were removed with IFF_DOWN */
iavf_restore_filters ( adapter ) ;
2018-09-14 17:37:46 -07:00
iavf_configure ( adapter ) ;
2013-12-21 06:12:45 +00:00
2018-09-14 17:37:46 -07:00
iavf_up_complete ( adapter ) ;
2013-12-21 06:12:45 +00:00
2018-09-14 17:37:46 -07:00
iavf_irq_enable ( adapter , true ) ;
2013-12-21 06:12:45 +00:00
2021-08-04 10:22:24 +02:00
mutex_unlock ( & adapter - > crit_lock ) ;
2017-10-27 11:06:52 -04:00
2013-12-21 06:12:45 +00:00
return 0 ;
err_req_irq :
2018-09-14 17:37:46 -07:00
iavf_down ( adapter ) ;
iavf_free_traffic_irqs ( adapter ) ;
2013-12-21 06:12:45 +00:00
err_setup_rx :
2018-09-14 17:37:46 -07:00
iavf_free_all_rx_resources ( adapter ) ;
2013-12-21 06:12:45 +00:00
err_setup_tx :
2018-09-14 17:37:46 -07:00
iavf_free_all_tx_resources ( adapter ) ;
2017-10-27 11:06:52 -04:00
err_unlock :
2021-08-04 10:22:24 +02:00
mutex_unlock ( & adapter - > crit_lock ) ;
2013-12-21 06:12:45 +00:00
return err ;
}
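/* Note on the unwind order above: resources are released in reverse
 * order of allocation (traffic IRQs, then Rx rings, then Tx rings), and
 * crit_lock is dropped last so the whole open attempt appears atomic to
 * the reset and remove paths that take the same lock.
 */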
/**
2018-09-14 17:37:46 -07:00
* iavf_close - Disables a network interface
2013-12-21 06:12:45 +00:00
* @ netdev : network interface device structure
*
* Returns 0 , this is not allowed to fail
*
* The close entry point is called when an interface is de - activated
* by the OS . The hardware is still under the drivers control , but
* needs to be disabled . All IRQs except vector 0 ( reserved for admin queue )
* are freed , along with all transmit and receive resources .
* */
2018-09-14 17:37:46 -07:00
static int iavf_close ( struct net_device * netdev )
2013-12-21 06:12:45 +00:00
{
2018-09-14 17:37:46 -07:00
struct iavf_adapter * adapter = netdev_priv ( netdev ) ;
2017-06-23 04:24:44 -04:00
int status ;
2013-12-21 06:12:45 +00:00
2018-09-14 17:37:46 -07:00
if ( adapter - > state < = __IAVF_DOWN_PENDING )
2014-02-13 03:48:53 -08:00
return 0 ;
2021-08-04 10:22:24 +02:00
while ( ! mutex_trylock ( & adapter - > crit_lock ) )
2017-10-27 11:06:52 -04:00
usleep_range ( 500 , 1000 ) ;
2014-02-13 03:48:53 -08:00
2018-09-14 17:37:55 -07:00
set_bit ( __IAVF_VSI_DOWN , adapter - > vsi . state ) ;
2017-01-24 10:23:59 -08:00
if ( CLIENT_ENABLED ( adapter ) )
2018-09-14 17:37:46 -07:00
adapter - > flags | = IAVF_FLAG_CLIENT_NEEDS_CLOSE ;
2013-12-21 06:12:45 +00:00
2018-09-14 17:37:46 -07:00
iavf_down ( adapter ) ;
2021-08-19 08:47:40 +00:00
iavf_change_state ( adapter , __IAVF_DOWN_PENDING ) ;
2018-09-14 17:37:46 -07:00
iavf_free_traffic_irqs ( adapter ) ;
2013-12-21 06:12:45 +00:00
2021-08-04 10:22:24 +02:00
mutex_unlock ( & adapter - > crit_lock ) ;
2017-10-27 11:06:52 -04:00
2016-12-12 15:44:11 -08:00
/* We explicitly don't free resources here because the hardware is
* still active and can DMA into memory . Resources are cleared in
2018-09-14 17:37:46 -07:00
* iavf_virtchnl_completion ( ) after we get confirmation from the PF
2016-12-12 15:44:11 -08:00
* driver that the rings have been stopped .
2017-06-23 04:24:44 -04:00
*
2018-09-14 17:37:46 -07:00
* Also , we wait for state to transition to __IAVF_DOWN before
* returning . State change occurs in iavf_virtchnl_completion ( ) after
2017-06-23 04:24:44 -04:00
* VF resources are released ( which occurs after PF driver processes and
* responds to admin queue commands ) .
2016-12-12 15:44:11 -08:00
*/
2017-06-23 04:24:44 -04:00
status = wait_event_timeout ( adapter - > down_waitqueue ,
2018-09-14 17:37:46 -07:00
adapter - > state = = __IAVF_DOWN ,
2019-05-14 10:37:01 -07:00
msecs_to_jiffies ( 500 ) ) ;
2017-06-23 04:24:44 -04:00
if ( ! status )
netdev_warn ( netdev , " Device resources not yet released \n " ) ;
2013-12-21 06:12:45 +00:00
return 0 ;
}
/**
2018-09-14 17:37:46 -07:00
* iavf_change_mtu - Change the Maximum Transfer Unit
2013-12-21 06:12:45 +00:00
* @ netdev : network interface device structure
* @ new_mtu : new value for maximum frame size
*
* Returns 0 on success , negative on failure
* */
2018-09-14 17:37:46 -07:00
static int iavf_change_mtu ( struct net_device * netdev , int new_mtu )
2013-12-21 06:12:45 +00:00
{
2018-09-14 17:37:46 -07:00
struct iavf_adapter * adapter = netdev_priv ( netdev ) ;
2013-12-21 06:12:45 +00:00
2021-06-04 09:53:30 -07:00
netdev_dbg ( netdev , " changing MTU from %d to %d \n " ,
netdev - > mtu , new_mtu ) ;
2013-12-21 06:12:45 +00:00
netdev - > mtu = new_mtu ;
2017-01-24 10:23:59 -08:00
if ( CLIENT_ENABLED ( adapter ) ) {
2018-09-14 17:37:46 -07:00
iavf_notify_client_l2_params ( & adapter - > vsi ) ;
adapter - > flags | = IAVF_FLAG_SERVICE_CLIENT_REQUESTED ;
2017-01-24 10:23:59 -08:00
}
2018-09-14 17:37:46 -07:00
adapter - > flags | = IAVF_FLAG_RESET_NEEDED ;
2019-05-14 10:37:05 -07:00
queue_work ( iavf_wq , & adapter - > reset_task ) ;
2015-06-19 08:56:30 -07:00
2013-12-21 06:12:45 +00:00
return 0 ;
}
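/* Changing the MTU only records the new value and flags a reset here;
 * the actual re-sizing of the data path happens in the reset task
 * queued on iavf_wq.
 */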
2021-11-29 16:16:03 -08:00
# define NETIF_VLAN_OFFLOAD_FEATURES (NETIF_F_HW_VLAN_CTAG_RX | \
NETIF_F_HW_VLAN_CTAG_TX | \
NETIF_F_HW_VLAN_STAG_RX | \
NETIF_F_HW_VLAN_STAG_TX )
2017-07-17 22:09:45 -07:00
/**
2018-09-14 17:37:55 -07:00
* iavf_set_features - set the netdev feature flags
2017-07-17 22:09:45 -07:00
* @ netdev : ptr to the netdev being adjusted
* @ features : the feature set that the stack is suggesting
* Note : expects to be called while under rtnl_lock ( )
* */
2018-09-14 17:37:46 -07:00
static int iavf_set_features ( struct net_device * netdev ,
netdev_features_t features )
2017-07-17 22:09:45 -07:00
{
2018-09-14 17:37:46 -07:00
struct iavf_adapter * adapter = netdev_priv ( netdev ) ;
2017-07-17 22:09:45 -07:00
2021-11-29 16:16:03 -08:00
/* trigger update on any VLAN feature change */
if ( ( netdev - > features & NETIF_VLAN_OFFLOAD_FEATURES ) ^
( features & NETIF_VLAN_OFFLOAD_FEATURES ) )
iavf_set_vlan_offload_features ( adapter , netdev - > features ,
features ) ;
2017-07-17 22:09:45 -07:00
return 0 ;
}
2016-10-25 16:08:47 -07:00
/**
2018-09-14 17:37:46 -07:00
* iavf_features_check - Validate encapsulated packet conforms to limits
2016-10-25 16:08:47 -07:00
* @ skb : skb buff
2018-04-20 01:41:33 -07:00
* @ dev : This physical port ' s netdev
2016-10-25 16:08:47 -07:00
* @ features : Offload features that the stack believes apply
* */
2018-09-14 17:37:46 -07:00
static netdev_features_t iavf_features_check ( struct sk_buff * skb ,
struct net_device * dev ,
netdev_features_t features )
2016-10-25 16:08:47 -07:00
{
size_t len ;
/* No point in doing any of this if neither checksum nor GSO are
* being requested for this frame . We can rule out both by just
* checking for CHECKSUM_PARTIAL
*/
if ( skb - > ip_summed ! = CHECKSUM_PARTIAL )
return features ;
/* We cannot support GSO if the MSS is going to be less than
* 64 bytes . If it is then we need to drop support for GSO .
*/
if ( skb_is_gso ( skb ) & & ( skb_shinfo ( skb ) - > gso_size < 64 ) )
features & = ~ NETIF_F_GSO_MASK ;
/* MACLEN can support at most 63 words */
len = skb_network_header ( skb ) - skb - > data ;
if ( len & ~ ( 63 * 2 ) )
goto out_err ;
/* IPLEN and EIPLEN can support at most 127 dwords */
len = skb_transport_header ( skb ) - skb_network_header ( skb ) ;
if ( len & ~ ( 127 * 4 ) )
goto out_err ;
if ( skb - > encapsulation ) {
/* L4TUNLEN can support 127 words */
len = skb_inner_network_header ( skb ) - skb_transport_header ( skb ) ;
if ( len & ~ ( 127 * 2 ) )
goto out_err ;
/* IPLEN can support at most 127 dwords */
len = skb_inner_transport_header ( skb ) -
skb_inner_network_header ( skb ) ;
if ( len & ~ ( 127 * 4 ) )
goto out_err ;
}
/* No need to validate L4LEN as TCP is the only protocol with a
* flexible value and we support all possible values supported
* by TCP, which is at most 15 dwords
*/
return features ;
out_err :
return features & ~ ( NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK ) ;
}
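/* Worked example for the MACLEN check above (hypothetical frame): a
 * 14-byte Ethernet header plus two VLAN tags gives len = 22, which fits
 * because the hardware counts MACLEN in 2-byte words up to 63 (126
 * bytes); len & ~(63 * 2) is non-zero only when the L2 header is longer
 * than that or not 2-byte aligned, and the frame then loses checksum
 * and GSO offload.
 */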
2021-11-29 16:16:01 -08:00
/**
* iavf_get_netdev_vlan_hw_features - get NETDEV VLAN features that can be toggled on/off
* @adapter: board private structure
*
* Depending on whether VIRTCHNL_VF_OFFLOAD_VLAN or VIRTCHNL_VF_OFFLOAD_VLAN_V2
* was negotiated, determine the VLAN features that can be toggled on and off.
* */
static netdev_features_t
iavf_get_netdev_vlan_hw_features ( struct iavf_adapter * adapter )
{
netdev_features_t hw_features = 0 ;
if ( ! adapter - > vf_res | | ! adapter - > vf_res - > vf_cap_flags )
return hw_features ;
/* Enable VLAN features if supported */
if ( VLAN_ALLOWED ( adapter ) ) {
hw_features | = ( NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_CTAG_RX ) ;
} else if ( VLAN_V2_ALLOWED ( adapter ) ) {
struct virtchnl_vlan_caps * vlan_v2_caps =
& adapter - > vlan_v2_caps ;
struct virtchnl_vlan_supported_caps * stripping_support =
& vlan_v2_caps - > offloads . stripping_support ;
struct virtchnl_vlan_supported_caps * insertion_support =
& vlan_v2_caps - > offloads . insertion_support ;
if ( stripping_support - > outer ! = VIRTCHNL_VLAN_UNSUPPORTED & &
stripping_support - > outer & VIRTCHNL_VLAN_TOGGLE ) {
if ( stripping_support - > outer &
VIRTCHNL_VLAN_ETHERTYPE_8100 )
hw_features | = NETIF_F_HW_VLAN_CTAG_RX ;
if ( stripping_support - > outer &
VIRTCHNL_VLAN_ETHERTYPE_88A8 )
hw_features | = NETIF_F_HW_VLAN_STAG_RX ;
} else if ( stripping_support - > inner ! =
VIRTCHNL_VLAN_UNSUPPORTED & &
stripping_support - > inner & VIRTCHNL_VLAN_TOGGLE ) {
if ( stripping_support - > inner &
VIRTCHNL_VLAN_ETHERTYPE_8100 )
hw_features | = NETIF_F_HW_VLAN_CTAG_RX ;
}
if ( insertion_support - > outer ! = VIRTCHNL_VLAN_UNSUPPORTED & &
insertion_support - > outer & VIRTCHNL_VLAN_TOGGLE ) {
if ( insertion_support - > outer &
VIRTCHNL_VLAN_ETHERTYPE_8100 )
hw_features | = NETIF_F_HW_VLAN_CTAG_TX ;
if ( insertion_support - > outer &
VIRTCHNL_VLAN_ETHERTYPE_88A8 )
hw_features | = NETIF_F_HW_VLAN_STAG_TX ;
} else if ( insertion_support - > inner & &
insertion_support - > inner & VIRTCHNL_VLAN_TOGGLE ) {
if ( insertion_support - > inner &
VIRTCHNL_VLAN_ETHERTYPE_8100 )
hw_features | = NETIF_F_HW_VLAN_CTAG_TX ;
}
}
return hw_features ;
}
/**
* iavf_get_netdev_vlan_features - get the enabled NETDEV VLAN features
* @adapter: board private structure
*
* Depending on whether VIRTCHNL_VF_OFFLOAD_VLAN or VIRTCHNL_VF_OFFLOAD_VLAN_V2
* was negotiated, determine the VLAN features that are enabled by default.
* */
static netdev_features_t
iavf_get_netdev_vlan_features ( struct iavf_adapter * adapter )
{
netdev_features_t features = 0 ;
if ( ! adapter - > vf_res | | ! adapter - > vf_res - > vf_cap_flags )
return features ;
if ( VLAN_ALLOWED ( adapter ) ) {
features | = NETIF_F_HW_VLAN_CTAG_FILTER |
NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX ;
} else if ( VLAN_V2_ALLOWED ( adapter ) ) {
struct virtchnl_vlan_caps * vlan_v2_caps =
& adapter - > vlan_v2_caps ;
struct virtchnl_vlan_supported_caps * filtering_support =
& vlan_v2_caps - > filtering . filtering_support ;
struct virtchnl_vlan_supported_caps * stripping_support =
& vlan_v2_caps - > offloads . stripping_support ;
struct virtchnl_vlan_supported_caps * insertion_support =
& vlan_v2_caps - > offloads . insertion_support ;
u32 ethertype_init ;
/* give priority to outer stripping and don't support both outer
* and inner stripping
*/
ethertype_init = vlan_v2_caps - > offloads . ethertype_init ;
if ( stripping_support - > outer ! = VIRTCHNL_VLAN_UNSUPPORTED ) {
if ( stripping_support - > outer &
VIRTCHNL_VLAN_ETHERTYPE_8100 & &
ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_8100 )
features | = NETIF_F_HW_VLAN_CTAG_RX ;
else if ( stripping_support - > outer &
VIRTCHNL_VLAN_ETHERTYPE_88A8 & &
ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_88A8 )
features | = NETIF_F_HW_VLAN_STAG_RX ;
} else if ( stripping_support - > inner ! =
VIRTCHNL_VLAN_UNSUPPORTED ) {
if ( stripping_support - > inner &
VIRTCHNL_VLAN_ETHERTYPE_8100 & &
ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_8100 )
features | = NETIF_F_HW_VLAN_CTAG_RX ;
}
/* give priority to outer insertion and don't support both outer
* and inner insertion
*/
if ( insertion_support - > outer ! = VIRTCHNL_VLAN_UNSUPPORTED ) {
if ( insertion_support - > outer &
VIRTCHNL_VLAN_ETHERTYPE_8100 & &
ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_8100 )
features | = NETIF_F_HW_VLAN_CTAG_TX ;
else if ( insertion_support - > outer &
VIRTCHNL_VLAN_ETHERTYPE_88A8 & &
ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_88A8 )
features | = NETIF_F_HW_VLAN_STAG_TX ;
} else if ( insertion_support - > inner ! =
VIRTCHNL_VLAN_UNSUPPORTED ) {
if ( insertion_support - > inner &
VIRTCHNL_VLAN_ETHERTYPE_8100 & &
ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_8100 )
features | = NETIF_F_HW_VLAN_CTAG_TX ;
}
/* give priority to outer filtering and don't bother if both
* outer and inner filtering are enabled
*/
ethertype_init = vlan_v2_caps - > filtering . ethertype_init ;
if ( filtering_support - > outer ! = VIRTCHNL_VLAN_UNSUPPORTED ) {
if ( filtering_support - > outer &
VIRTCHNL_VLAN_ETHERTYPE_8100 & &
ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_8100 )
features | = NETIF_F_HW_VLAN_CTAG_FILTER ;
if ( filtering_support - > outer &
VIRTCHNL_VLAN_ETHERTYPE_88A8 & &
ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_88A8 )
features | = NETIF_F_HW_VLAN_STAG_FILTER ;
} else if ( filtering_support - > inner ! =
VIRTCHNL_VLAN_UNSUPPORTED ) {
if ( filtering_support - > inner &
VIRTCHNL_VLAN_ETHERTYPE_8100 & &
ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_8100 )
features | = NETIF_F_HW_VLAN_CTAG_FILTER ;
if ( filtering_support - > inner &
VIRTCHNL_VLAN_ETHERTYPE_88A8 & &
ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_88A8 )
features | = NETIF_F_HW_VLAN_STAG_FILTER ;
}
}
return features ;
}
# define IAVF_NETDEV_VLAN_FEATURE_ALLOWED(requested, allowed, feature_bit) \
( ! ( ( ( requested ) & ( feature_bit ) ) & & \
! ( ( allowed ) & ( feature_bit ) ) ) )
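/* IAVF_NETDEV_VLAN_FEATURE_ALLOWED() is false only when the stack
 * requests feature_bit but the negotiated capabilities do not allow it;
 * not requesting the bit, or requesting an allowed bit, both evaluate
 * to true.
 */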
/**
* iavf_fix_netdev_vlan_features - fix NETDEV VLAN features based on support
* @ adapter : board private structure
* @ requested_features : stack requested NETDEV features
* */
static netdev_features_t
iavf_fix_netdev_vlan_features ( struct iavf_adapter * adapter ,
netdev_features_t requested_features )
{
netdev_features_t allowed_features ;
allowed_features = iavf_get_netdev_vlan_hw_features ( adapter ) |
iavf_get_netdev_vlan_features ( adapter ) ;
if ( ! IAVF_NETDEV_VLAN_FEATURE_ALLOWED ( requested_features ,
allowed_features ,
NETIF_F_HW_VLAN_CTAG_TX ) )
requested_features & = ~ NETIF_F_HW_VLAN_CTAG_TX ;
if ( ! IAVF_NETDEV_VLAN_FEATURE_ALLOWED ( requested_features ,
allowed_features ,
NETIF_F_HW_VLAN_CTAG_RX ) )
requested_features & = ~ NETIF_F_HW_VLAN_CTAG_RX ;
if ( ! IAVF_NETDEV_VLAN_FEATURE_ALLOWED ( requested_features ,
allowed_features ,
NETIF_F_HW_VLAN_STAG_TX ) )
requested_features & = ~ NETIF_F_HW_VLAN_STAG_TX ;
if ( ! IAVF_NETDEV_VLAN_FEATURE_ALLOWED ( requested_features ,
allowed_features ,
NETIF_F_HW_VLAN_STAG_RX ) )
requested_features & = ~ NETIF_F_HW_VLAN_STAG_RX ;
if ( ! IAVF_NETDEV_VLAN_FEATURE_ALLOWED ( requested_features ,
allowed_features ,
NETIF_F_HW_VLAN_CTAG_FILTER ) )
requested_features & = ~ NETIF_F_HW_VLAN_CTAG_FILTER ;
if ( ! IAVF_NETDEV_VLAN_FEATURE_ALLOWED ( requested_features ,
allowed_features ,
NETIF_F_HW_VLAN_STAG_FILTER ) )
requested_features & = ~ NETIF_F_HW_VLAN_STAG_FILTER ;
if ( ( requested_features &
( NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX ) ) & &
( requested_features &
( NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX ) ) & &
adapter - > vlan_v2_caps . offloads . ethertype_match = =
VIRTCHNL_ETHERTYPE_STRIPPING_MATCHES_INSERTION ) {
netdev_warn ( adapter - > netdev , " cannot support CTAG and STAG VLAN stripping and/or insertion simultaneously since CTAG and STAG offloads are mutually exclusive, clearing STAG offload settings \n " ) ;
requested_features & = ~ ( NETIF_F_HW_VLAN_STAG_RX |
NETIF_F_HW_VLAN_STAG_TX ) ;
}
return requested_features ;
}
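/* Example of the mutual-exclusion rule above (hypothetical request): if
 * the stack asks for both CTAG and STAG stripping/insertion while the
 * PF reported VIRTCHNL_ETHERTYPE_STRIPPING_MATCHES_INSERTION, the STAG
 * bits are cleared and only the 802.1Q (CTAG) offloads remain set.
 */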
2016-03-18 12:18:07 -07:00
/**
2018-09-14 17:37:46 -07:00
* iavf_fix_features - fix up the netdev feature bits
2016-03-18 12:18:07 -07:00
* @ netdev : our net device
* @ features : desired feature bits
*
* Returns fixed - up features bits
* */
2018-09-14 17:37:46 -07:00
static netdev_features_t iavf_fix_features ( struct net_device * netdev ,
netdev_features_t features )
2016-03-18 12:18:07 -07:00
{
2018-09-14 17:37:46 -07:00
struct iavf_adapter * adapter = netdev_priv ( netdev ) ;
2016-03-18 12:18:07 -07:00
2021-11-29 16:16:01 -08:00
return iavf_fix_netdev_vlan_features ( adapter , features ) ;
2016-03-18 12:18:07 -07:00
}
2018-09-14 17:37:46 -07:00
static const struct net_device_ops iavf_netdev_ops = {
. ndo_open = iavf_open ,
. ndo_stop = iavf_close ,
. ndo_start_xmit = iavf_xmit_frame ,
. ndo_set_rx_mode = iavf_set_rx_mode ,
2013-12-21 06:12:45 +00:00
. ndo_validate_addr = eth_validate_addr ,
2018-09-14 17:37:46 -07:00
. ndo_set_mac_address = iavf_set_mac ,
. ndo_change_mtu = iavf_change_mtu ,
. ndo_tx_timeout = iavf_tx_timeout ,
. ndo_vlan_rx_add_vid = iavf_vlan_rx_add_vid ,
. ndo_vlan_rx_kill_vid = iavf_vlan_rx_kill_vid ,
. ndo_features_check = iavf_features_check ,
. ndo_fix_features = iavf_fix_features ,
. ndo_set_features = iavf_set_features ,
. ndo_setup_tc = iavf_setup_tc ,
2013-12-21 06:12:45 +00:00
} ;
/**
2018-09-14 17:37:46 -07:00
* iavf_check_reset_complete - check that VF reset is complete
2013-12-21 06:12:45 +00:00
* @ hw : pointer to hw struct
*
* Returns 0 if device is ready to use , or - EBUSY if it ' s in reset .
* */
2018-09-14 17:37:52 -07:00
static int iavf_check_reset_complete ( struct iavf_hw * hw )
2013-12-21 06:12:45 +00:00
{
u32 rstat ;
int i ;
2020-06-05 10:09:46 -07:00
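/* Poll the VF reset status register until the PF reports this VF as
 * active or the reset as completed, giving up after
 * IAVF_RESET_WAIT_COMPLETE_COUNT attempts.
 */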
for ( i = 0 ; i < IAVF_RESET_WAIT_COMPLETE_COUNT ; i + + ) {
2018-09-14 17:37:49 -07:00
rstat = rd32 ( hw , IAVF_VFGEN_RSTAT ) &
IAVF_VFGEN_RSTAT_VFR_STATE_MASK ;
2017-05-11 11:23:11 -07:00
if ( ( rstat = = VIRTCHNL_VFR_VFACTIVE ) | |
( rstat = = VIRTCHNL_VFR_COMPLETED ) )
2013-12-21 06:12:45 +00:00
return 0 ;
2014-09-13 07:40:44 +00:00
usleep_range ( 10 , 20 ) ;
2013-12-21 06:12:45 +00:00
}
return - EBUSY ;
}
2015-06-04 16:23:58 -04:00
/**
2018-09-14 17:37:46 -07:00
* iavf_process_config - Process the config information we got from the PF
2015-06-04 16:23:58 -04:00
* @ adapter : board private structure
*
* Verify that we have a valid config struct , and set up our netdev features
* and our VSI struct .
* */
2018-09-14 17:37:46 -07:00
int iavf_process_config ( struct iavf_adapter * adapter )
2015-06-04 16:23:58 -04:00
{
2017-05-11 11:23:11 -07:00
struct virtchnl_vf_resource * vfres = adapter - > vf_res ;
2021-11-29 16:16:01 -08:00
netdev_features_t hw_vlan_features , vlan_features ;
2015-06-04 16:23:58 -04:00
struct net_device * netdev = adapter - > netdev ;
2017-03-27 14:43:18 -07:00
netdev_features_t hw_enc_features ;
netdev_features_t hw_features ;
2015-06-04 16:23:58 -04:00
2017-03-27 14:43:18 -07:00
hw_enc_features = NETIF_F_SG |
NETIF_F_IP_CSUM |
NETIF_F_IPV6_CSUM |
NETIF_F_HIGHDMA |
NETIF_F_SOFT_FEATURES |
NETIF_F_TSO |
NETIF_F_TSO_ECN |
NETIF_F_TSO6 |
NETIF_F_SCTP_CRC |
NETIF_F_RXHASH |
NETIF_F_RXCSUM |
0 ;
/* advertise to stack only if offloads for encapsulated packets are
* supported
*/
2017-06-29 15:12:24 +02:00
if ( vfres - > vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ENCAP ) {
2017-03-27 14:43:18 -07:00
hw_enc_features | = NETIF_F_GSO_UDP_TUNNEL |
2016-04-02 00:05:14 -07:00
NETIF_F_GSO_GRE |
2016-04-14 17:19:25 -04:00
NETIF_F_GSO_GRE_CSUM |
2016-05-18 09:06:10 -07:00
NETIF_F_GSO_IPXIP4 |
2016-05-18 10:44:53 -07:00
NETIF_F_GSO_IPXIP6 |
2016-04-02 00:05:14 -07:00
NETIF_F_GSO_UDP_TUNNEL_CSUM |
2016-04-14 17:19:25 -04:00
NETIF_F_GSO_PARTIAL |
2016-04-02 00:05:14 -07:00
0 ;
2017-06-29 15:12:24 +02:00
if ( ! ( vfres - > vf_cap_flags &
2017-05-11 11:23:11 -07:00
VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM ) )
2017-03-27 14:43:18 -07:00
netdev - > gso_partial_features | =
NETIF_F_GSO_UDP_TUNNEL_CSUM ;
2016-04-02 00:05:14 -07:00
2017-03-27 14:43:18 -07:00
netdev - > gso_partial_features | = NETIF_F_GSO_GRE_CSUM ;
netdev - > hw_enc_features | = NETIF_F_TSO_MANGLEID ;
netdev - > hw_enc_features | = hw_enc_features ;
}
2016-04-02 00:05:14 -07:00
/* record features VLANs can make use of */
2017-03-27 14:43:18 -07:00
netdev - > vlan_features | = hw_enc_features | NETIF_F_TSO_MANGLEID ;
2016-04-02 00:05:14 -07:00
/* Write features and hw_features separately to avoid polluting
2017-03-27 14:43:18 -07:00
* with , or dropping , features that are set when we registered .
2016-04-02 00:05:14 -07:00
*/
2017-03-27 14:43:18 -07:00
hw_features = hw_enc_features ;
2016-04-02 00:05:14 -07:00
2021-11-29 16:16:01 -08:00
/* get HW VLAN features that can be toggled */
hw_vlan_features = iavf_get_netdev_vlan_hw_features ( adapter ) ;
2018-01-23 08:51:05 -08:00
/* Enable cloud filter if ADQ is supported */
if ( vfres - > vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ )
hw_features | = NETIF_F_HW_TC ;
2021-03-02 10:12:13 -08:00
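/* Advertise UDP segmentation offload only when the PF negotiated
 * VIRTCHNL_VF_OFFLOAD_USO.
 */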
if ( vfres - > vf_cap_flags & VIRTCHNL_VF_OFFLOAD_USO )
hw_features | = NETIF_F_GSO_UDP_L4 ;
2017-08-29 05:32:41 -04:00
2021-11-29 16:16:01 -08:00
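/* hw_features carries everything user space may toggle; features carries
 * what is enabled by default, including the default-on VLAN offloads
 * reported by iavf_get_netdev_vlan_features().
 */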
netdev - > hw_features | = hw_features | hw_vlan_features ;
vlan_features = iavf_get_netdev_vlan_features ( adapter ) ;
2016-04-02 00:05:14 -07:00
2021-11-29 16:16:01 -08:00
netdev - > features | = hw_features | vlan_features ;
2017-08-29 05:32:41 -04:00
if ( vfres - > vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN )
netdev - > features | = NETIF_F_HW_VLAN_CTAG_FILTER ;
2015-06-04 16:23:58 -04:00
2018-08-20 08:12:31 -07:00
netdev - > priv_flags | = IFF_UNICAST_FLT ;
2018-04-20 01:41:36 -07:00
/* Do not turn on offloads when they are requested to be turned off.
* TSO needs minimum 576 bytes to work correctly .
*/
if ( netdev - > wanted_features ) {
if ( ! ( netdev - > wanted_features & NETIF_F_TSO ) | |
netdev - > mtu < 576 )
netdev - > features & = ~ NETIF_F_TSO ;
if ( ! ( netdev - > wanted_features & NETIF_F_TSO6 ) | |
netdev - > mtu < 576 )
netdev - > features & = ~ NETIF_F_TSO6 ;
if ( ! ( netdev - > wanted_features & NETIF_F_TSO_ECN ) )
netdev - > features & = ~ NETIF_F_TSO_ECN ;
if ( ! ( netdev - > wanted_features & NETIF_F_GRO ) )
netdev - > features & = ~ NETIF_F_GRO ;
if ( ! ( netdev - > wanted_features & NETIF_F_GSO ) )
netdev - > features & = ~ NETIF_F_GSO ;
}
2015-06-04 16:23:58 -04:00
return 0 ;
}
2013-12-21 06:12:45 +00:00
/**
2018-09-14 17:37:46 -07:00
* iavf_shutdown - Shutdown the device in preparation for a reboot
2013-12-21 06:12:45 +00:00
* @ pdev : pci device structure
* */
2018-09-14 17:37:46 -07:00
static void iavf_shutdown ( struct pci_dev * pdev )
2013-12-21 06:12:45 +00:00
{
2021-09-15 08:41:23 +02:00
struct iavf_adapter * adapter = iavf_pdev_to_adapter ( pdev ) ;
struct net_device * netdev = adapter - > netdev ;
2013-12-21 06:12:45 +00:00
netif_device_detach ( netdev ) ;
if ( netif_running ( netdev ) )
2018-09-14 17:37:46 -07:00
iavf_close ( netdev ) ;
2013-12-21 06:12:45 +00:00
2021-08-04 10:22:24 +02:00
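/* Hold the critical lock so the state change and aq_required update
 * below cannot race with the watchdog or reset work.
 */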
if ( iavf_lock_timeout ( & adapter - > crit_lock , 5000 ) )
dev_warn ( & adapter - > pdev - > dev , " failed to acquire crit_lock in %s \n " , __FUNCTION__ ) ;
2015-01-09 11:18:18 +00:00
/* Prevent the watchdog from running. */
2021-08-19 08:47:40 +00:00
iavf_change_state ( adapter , __IAVF_REMOVE ) ;
2015-01-09 11:18:18 +00:00
adapter - > aq_required = 0 ;
2021-08-04 10:22:24 +02:00
mutex_unlock ( & adapter - > crit_lock ) ;
2015-01-09 11:18:18 +00:00
2013-12-21 06:12:45 +00:00
# ifdef CONFIG_PM
pci_save_state ( pdev ) ;
# endif
pci_disable_device ( pdev ) ;
}
/**
2018-09-14 17:37:46 -07:00
* iavf_probe - Device Initialization Routine
2013-12-21 06:12:45 +00:00
* @ pdev : PCI device information struct
2018-09-14 17:37:46 -07:00
* @ ent : entry in iavf_pci_tbl
2013-12-21 06:12:45 +00:00
*
* Returns 0 on success , negative on failure
*
2018-09-14 17:37:46 -07:00
* iavf_probe initializes an adapter identified by a pci_dev structure .
2013-12-21 06:12:45 +00:00
* The OS initialization , configuring of the adapter private structure ,
* and a hardware reset occur .
* */
2018-09-14 17:37:46 -07:00
static int iavf_probe ( struct pci_dev * pdev , const struct pci_device_id * ent )
2013-12-21 06:12:45 +00:00
{
struct net_device * netdev ;
2018-09-14 17:37:46 -07:00
struct iavf_adapter * adapter = NULL ;
2018-09-14 17:37:52 -07:00
struct iavf_hw * hw = NULL ;
2014-02-20 19:29:08 -08:00
int err ;
2013-12-21 06:12:45 +00:00
err = pci_enable_device ( pdev ) ;
if ( err )
return err ;
2014-02-11 08:26:33 +00:00
err = dma_set_mask_and_coherent ( & pdev - > dev , DMA_BIT_MASK ( 64 ) ) ;
if ( err ) {
2014-03-25 04:30:27 +00:00
err = dma_set_mask_and_coherent ( & pdev - > dev , DMA_BIT_MASK ( 32 ) ) ;
if ( err ) {
dev_err ( & pdev - > dev ,
" DMA configuration failed: 0x%x \n " , err ) ;
goto err_dma ;
}
2013-12-21 06:12:45 +00:00
}
2018-09-14 17:37:46 -07:00
err = pci_request_regions ( pdev , iavf_driver_name ) ;
2013-12-21 06:12:45 +00:00
if ( err ) {
dev_err ( & pdev - > dev ,
" pci_request_regions failed 0x%x \n " , err ) ;
goto err_pci_reg ;
}
pci_enable_pcie_error_reporting ( pdev ) ;
pci_set_master ( pdev ) ;
2018-09-14 17:37:46 -07:00
netdev = alloc_etherdev_mq ( sizeof ( struct iavf_adapter ) ,
IAVF_MAX_REQ_QUEUES ) ;
2013-12-21 06:12:45 +00:00
if ( ! netdev ) {
err = - ENOMEM ;
goto err_alloc_etherdev ;
}
SET_NETDEV_DEV ( netdev , & pdev - > dev ) ;
pci_set_drvdata ( pdev , netdev ) ;
adapter = netdev_priv ( netdev ) ;
adapter - > netdev = netdev ;
adapter - > pdev = pdev ;
hw = & adapter - > hw ;
hw - > back = adapter ;
2015-06-04 16:24:02 -04:00
adapter - > msg_enable = BIT ( DEFAULT_DEBUG_LEVEL_SHIFT ) - 1 ;
2021-08-19 08:47:40 +00:00
iavf_change_state ( adapter , __IAVF_STARTUP ) ;
2013-12-21 06:12:45 +00:00
/* Call save state here because it relies on the adapter struct. */
pci_save_state ( pdev ) ;
hw - > hw_addr = ioremap ( pci_resource_start ( pdev , 0 ) ,
pci_resource_len ( pdev , 0 ) ) ;
if ( ! hw - > hw_addr ) {
err = - EIO ;
goto err_ioremap ;
}
hw - > vendor_id = pdev - > vendor ;
hw - > device_id = pdev - > device ;
pci_read_config_byte ( pdev , PCI_REVISION_ID , & hw - > revision_id ) ;
hw - > subsystem_vendor_id = pdev - > subsystem_vendor ;
hw - > subsystem_device_id = pdev - > subsystem_device ;
hw - > bus . device = PCI_SLOT ( pdev - > devfn ) ;
hw - > bus . func = PCI_FUNC ( pdev - > devfn ) ;
2017-02-09 23:58:22 -08:00
hw - > bus . bus_id = pdev - > bus - > number ;
2013-12-21 06:12:45 +00:00
2015-11-18 15:47:06 -08:00
/* set up the locks for the AQ, do this only once in probe
* and destroy them only once in remove
*/
2021-08-04 10:22:24 +02:00
mutex_init ( & adapter - > crit_lock ) ;
mutex_init ( & adapter - > client_lock ) ;
mutex_init ( & adapter - > remove_lock ) ;
2015-11-18 15:47:06 -08:00
mutex_init ( & hw - > aq . asq_mutex ) ;
mutex_init ( & hw - > aq . arq_mutex ) ;
2017-10-27 11:06:50 -04:00
spin_lock_init ( & adapter - > mac_vlan_list_lock ) ;
2018-01-23 08:51:05 -08:00
spin_lock_init ( & adapter - > cloud_filter_list_lock ) ;
2021-03-09 11:08:11 +08:00
spin_lock_init ( & adapter - > fdir_fltr_lock ) ;
2021-04-13 08:48:41 +08:00
spin_lock_init ( & adapter - > adv_rss_lock ) ;
2017-10-27 11:06:50 -04:00
2014-08-01 13:27:15 -07:00
INIT_LIST_HEAD ( & adapter - > mac_filter_list ) ;
INIT_LIST_HEAD ( & adapter - > vlan_filter_list ) ;
2018-01-23 08:51:05 -08:00
INIT_LIST_HEAD ( & adapter - > cloud_filter_list ) ;
2021-03-09 11:08:11 +08:00
INIT_LIST_HEAD ( & adapter - > fdir_list_head ) ;
2021-04-13 08:48:41 +08:00
INIT_LIST_HEAD ( & adapter - > adv_rss_list_head ) ;
2014-08-01 13:27:15 -07:00
2018-09-14 17:37:46 -07:00
INIT_WORK ( & adapter - > reset_task , iavf_reset_task ) ;
INIT_WORK ( & adapter - > adminq_task , iavf_adminq_task ) ;
2019-05-14 10:37:05 -07:00
INIT_DELAYED_WORK ( & adapter - > watchdog_task , iavf_watchdog_task ) ;
2018-09-14 17:37:46 -07:00
INIT_DELAYED_WORK ( & adapter - > client_task , iavf_client_task ) ;
2021-08-19 08:47:58 +00:00
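/* Stagger the first watchdog run by up to 35 ms based on the PCI
 * function number, presumably so that several VFs probed together do
 * not all poll the PF at the same time.
 */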
queue_delayed_work ( iavf_wq , & adapter - > watchdog_task ,
2019-05-14 10:37:05 -07:00
msecs_to_jiffies ( 5 * ( pdev - > devfn & 0x07 ) ) ) ;
2013-12-21 06:12:45 +00:00
2017-06-23 04:24:44 -04:00
/* Setup the wait queue for indicating transition to down status */
init_waitqueue_head ( & adapter - > down_waitqueue ) ;
2013-12-21 06:12:45 +00:00
return 0 ;
err_ioremap :
free_netdev ( netdev ) ;
err_alloc_etherdev :
2021-06-16 07:53:02 +02:00
pci_disable_pcie_error_reporting ( pdev ) ;
2013-12-21 06:12:45 +00:00
pci_release_regions ( pdev ) ;
err_pci_reg :
err_dma :
pci_disable_device ( pdev ) ;
return err ;
}
/**
2018-09-14 17:37:46 -07:00
* iavf_suspend - Power management suspend routine
2020-09-25 15:24:37 -07:00
* @ dev_d : device info pointer
2013-12-21 06:12:45 +00:00
*
* Called when the system ( VM ) is entering sleep / suspend .
* */
2020-06-29 14:59:39 +05:30
static int __maybe_unused iavf_suspend ( struct device * dev_d )
2013-12-21 06:12:45 +00:00
{
2020-06-29 14:59:39 +05:30
struct net_device * netdev = dev_get_drvdata ( dev_d ) ;
2018-09-14 17:37:46 -07:00
struct iavf_adapter * adapter = netdev_priv ( netdev ) ;
2013-12-21 06:12:45 +00:00
netif_device_detach ( netdev ) ;
2021-08-04 10:22:24 +02:00
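/* Busy-wait for the critical lock so the teardown below does not race
 * with the reset or watchdog work.
 */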
while ( ! mutex_trylock ( & adapter - > crit_lock ) )
2017-10-27 11:06:52 -04:00
usleep_range ( 500 , 1000 ) ;
2013-12-21 06:12:45 +00:00
if ( netif_running ( netdev ) ) {
rtnl_lock ( ) ;
2018-09-14 17:37:46 -07:00
iavf_down ( adapter ) ;
2013-12-21 06:12:45 +00:00
rtnl_unlock ( ) ;
}
2018-09-14 17:37:46 -07:00
iavf_free_misc_irq ( adapter ) ;
iavf_reset_interrupt_capability ( adapter ) ;
2013-12-21 06:12:45 +00:00
2021-08-04 10:22:24 +02:00
mutex_unlock ( & adapter - > crit_lock ) ;
2017-10-27 11:06:52 -04:00
2013-12-21 06:12:45 +00:00
return 0 ;
}
/**
2018-09-14 17:37:46 -07:00
* iavf_resume - Power management resume routine
2020-09-25 15:24:37 -07:00
* @ dev_d : device info pointer
2013-12-21 06:12:45 +00:00
*
* Called when the system ( VM ) is resumed from sleep / suspend .
* */
2020-06-29 14:59:39 +05:30
static int __maybe_unused iavf_resume ( struct device * dev_d )
2013-12-21 06:12:45 +00:00
{
2020-06-29 14:59:39 +05:30
struct pci_dev * pdev = to_pci_dev ( dev_d ) ;
2021-09-15 08:41:23 +02:00
struct iavf_adapter * adapter ;
2013-12-21 06:12:45 +00:00
u32 err ;
2021-09-15 08:41:23 +02:00
adapter = iavf_pdev_to_adapter ( pdev ) ;
2013-12-21 06:12:45 +00:00
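/* Re-enable bus mastering and rebuild the misc (admin queue) interrupt,
 * then schedule a reset task to bring the data path back up.
 */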
pci_set_master ( pdev ) ;
rtnl_lock ( ) ;
2018-09-14 17:37:46 -07:00
err = iavf_set_interrupt_capability ( adapter ) ;
2013-12-21 06:12:45 +00:00
if ( err ) {
2015-07-07 18:53:38 +03:00
rtnl_unlock ( ) ;
2013-12-21 06:12:45 +00:00
dev_err ( & pdev - > dev , " Cannot enable MSI-X interrupts. \n " ) ;
return err ;
}
2018-09-14 17:37:46 -07:00
err = iavf_request_misc_irq ( adapter ) ;
2013-12-21 06:12:45 +00:00
rtnl_unlock ( ) ;
if ( err ) {
dev_err ( & pdev - > dev , " Cannot get interrupt vector. \n " ) ;
return err ;
}
2019-05-14 10:37:05 -07:00
queue_work ( iavf_wq , & adapter - > reset_task ) ;
2013-12-21 06:12:45 +00:00
2021-09-15 08:41:23 +02:00
netif_device_attach ( adapter - > netdev ) ;
2013-12-21 06:12:45 +00:00
return err ;
}
/**
2018-09-14 17:37:46 -07:00
* iavf_remove - Device Removal Routine
2013-12-21 06:12:45 +00:00
* @ pdev : PCI device information struct
*
2018-09-14 17:37:46 -07:00
* iavf_remove is called by the PCI subsystem to alert the driver
2013-12-21 06:12:45 +00:00
* that it should release a PCI device . This could be caused by a
* Hot - Plug event , or because the driver is going to be removed from
* memory .
* */
2018-09-14 17:37:46 -07:00
static void iavf_remove ( struct pci_dev * pdev )
2013-12-21 06:12:45 +00:00
{
2021-09-15 08:41:23 +02:00
struct iavf_adapter * adapter = iavf_pdev_to_adapter ( pdev ) ;
2021-10-22 10:30:14 +02:00
enum iavf_state_t prev_state = adapter - > last_state ;
2021-09-15 08:41:23 +02:00
struct net_device * netdev = adapter - > netdev ;
2021-03-09 11:08:11 +08:00
struct iavf_fdir_fltr * fdir , * fdirtmp ;
2018-09-14 17:37:46 -07:00
struct iavf_vlan_filter * vlf , * vlftmp ;
2021-04-13 08:48:41 +08:00
struct iavf_adv_rss * rss , * rsstmp ;
2018-09-14 17:37:46 -07:00
struct iavf_mac_filter * f , * ftmp ;
struct iavf_cloud_filter * cf , * cftmp ;
2018-09-14 17:37:52 -07:00
struct iavf_hw * hw = & adapter - > hw ;
2017-01-24 10:23:59 -08:00
int err ;
2017-12-18 05:16:43 -05:00
/* Indicate we are in remove and not to run reset_task */
2021-08-04 10:22:24 +02:00
mutex_lock ( & adapter - > remove_lock ) ;
2014-02-13 03:48:53 -08:00
cancel_work_sync ( & adapter - > reset_task ) ;
2021-08-19 08:47:58 +00:00
cancel_delayed_work_sync ( & adapter - > watchdog_task ) ;
2017-01-24 10:23:59 -08:00
cancel_delayed_work_sync ( & adapter - > client_task ) ;
2013-12-21 06:12:45 +00:00
if ( adapter - > netdev_registered ) {
unregister_netdev ( netdev ) ;
adapter - > netdev_registered = false ;
}
2017-01-24 10:23:59 -08:00
if ( CLIENT_ALLOWED ( adapter ) ) {
2018-09-14 17:37:46 -07:00
err = iavf_lan_del_device ( adapter ) ;
2017-01-24 10:23:59 -08:00
if ( err )
dev_warn ( & pdev - > dev , " Failed to delete client device: %d \n " ,
err ) ;
}
2014-12-09 08:53:04 +00:00
2018-09-14 17:37:46 -07:00
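/* Ask the PF to reset this VF so the device is quiesced before its
 * resources are torn down; the request is kicked once more below if
 * the admin queue did not process it.
 */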
iavf_request_reset ( adapter ) ;
2016-03-18 12:18:10 -07:00
msleep ( 50 ) ;
2015-01-09 11:18:16 +00:00
/* If the FW isn't responding, kick it once, but only once. */
2018-09-14 17:37:46 -07:00
if ( ! iavf_asq_done ( hw ) ) {
iavf_request_reset ( adapter ) ;
2016-03-18 12:18:10 -07:00
msleep ( 50 ) ;
2015-01-09 11:18:16 +00:00
}
2021-08-04 10:22:24 +02:00
if ( iavf_lock_timeout ( & adapter - > crit_lock , 5000 ) )
dev_warn ( & adapter - > pdev - > dev , " failed to acquire crit_lock in %s \n " , __FUNCTION__ ) ;
2021-03-16 11:01:41 +01:00
2021-06-22 15:43:48 +02:00
dev_info ( & adapter - > pdev - > dev , " Removing device \n " ) ;
2021-03-16 11:01:41 +01:00
/* Shut down all the garbage mashers on the detention level */
2021-08-19 08:47:40 +00:00
iavf_change_state ( adapter , __IAVF_REMOVE ) ;
2021-03-16 11:01:41 +01:00
adapter - > aq_required = 0 ;
adapter - > flags & = ~ IAVF_FLAG_REINIT_ITR_NEEDED ;
2021-10-22 10:30:14 +02:00
2018-09-14 17:37:46 -07:00
iavf_free_all_tx_resources ( adapter ) ;
iavf_free_all_rx_resources ( adapter ) ;
iavf_misc_irq_disable ( adapter ) ;
iavf_free_misc_irq ( adapter ) ;
2021-10-22 10:30:14 +02:00
/* In case we enter iavf_remove from erroneous state, free traffic irqs
* here so as not to cause a kernel crash when calling
* iavf_reset_interrupt_capability .
*/
if ( ( adapter - > last_state = = __IAVF_RESETTING & &
prev_state ! = __IAVF_DOWN ) | |
( adapter - > last_state = = __IAVF_RUNNING & &
! ( netdev - > flags & IFF_UP ) ) )
iavf_free_traffic_irqs ( adapter ) ;
2018-09-14 17:37:46 -07:00
iavf_reset_interrupt_capability ( adapter ) ;
iavf_free_q_vectors ( adapter ) ;
2013-12-21 06:12:45 +00:00
2019-05-14 10:37:05 -07:00
cancel_delayed_work_sync ( & adapter - > watchdog_task ) ;
2014-06-04 04:22:38 +00:00
2018-08-28 10:16:08 -07:00
cancel_work_sync ( & adapter - > adminq_task ) ;
2018-09-14 17:37:46 -07:00
iavf_free_rss ( adapter ) ;
2015-10-26 19:44:34 -04:00
2013-12-21 06:12:45 +00:00
if ( hw - > aq . asq . count )
2018-09-14 17:37:46 -07:00
iavf_shutdown_adminq ( hw ) ;
2013-12-21 06:12:45 +00:00
2015-11-18 15:47:06 -08:00
/* destroy the locks only once, here */
mutex_destroy ( & hw - > aq . arq_mutex ) ;
mutex_destroy ( & hw - > aq . asq_mutex ) ;
2021-08-04 10:22:24 +02:00
mutex_destroy ( & adapter - > client_lock ) ;
mutex_unlock ( & adapter - > crit_lock ) ;
mutex_destroy ( & adapter - > crit_lock ) ;
mutex_unlock ( & adapter - > remove_lock ) ;
mutex_destroy ( & adapter - > remove_lock ) ;
2015-11-18 15:47:06 -08:00
2013-12-21 06:12:45 +00:00
iounmap ( hw - > hw_addr ) ;
pci_release_regions ( pdev ) ;
2018-09-14 17:37:46 -07:00
iavf_free_queues ( adapter ) ;
2013-12-21 06:12:45 +00:00
kfree ( adapter - > vf_res ) ;
2017-10-27 11:06:50 -04:00
spin_lock_bh ( & adapter - > mac_vlan_list_lock ) ;
2014-08-01 13:27:14 -07:00
/* If we got removed before an up/down sequence, we've got a filter
* hanging out there that we need to get rid of .
*/
list_for_each_entry_safe ( f , ftmp , & adapter - > mac_filter_list , list ) {
list_del ( & f - > list ) ;
kfree ( f ) ;
}
2018-01-22 12:00:34 -05:00
list_for_each_entry_safe ( vlf , vlftmp , & adapter - > vlan_filter_list ,
list ) {
list_del ( & vlf - > list ) ;
kfree ( vlf ) ;
2014-12-09 08:53:05 +00:00
}
2013-12-21 06:12:45 +00:00
2017-10-27 11:06:50 -04:00
spin_unlock_bh ( & adapter - > mac_vlan_list_lock ) ;
2018-01-23 08:51:05 -08:00
spin_lock_bh ( & adapter - > cloud_filter_list_lock ) ;
list_for_each_entry_safe ( cf , cftmp , & adapter - > cloud_filter_list , list ) {
list_del ( & cf - > list ) ;
kfree ( cf ) ;
}
spin_unlock_bh ( & adapter - > cloud_filter_list_lock ) ;
2021-03-09 11:08:11 +08:00
spin_lock_bh ( & adapter - > fdir_fltr_lock ) ;
list_for_each_entry_safe ( fdir , fdirtmp , & adapter - > fdir_list_head , list ) {
list_del ( & fdir - > list ) ;
kfree ( fdir ) ;
}
spin_unlock_bh ( & adapter - > fdir_fltr_lock ) ;
2021-04-13 08:48:41 +08:00
spin_lock_bh ( & adapter - > adv_rss_lock ) ;
list_for_each_entry_safe ( rss , rsstmp , & adapter - > adv_rss_list_head ,
list ) {
list_del ( & rss - > list ) ;
kfree ( rss ) ;
}
spin_unlock_bh ( & adapter - > adv_rss_lock ) ;
2013-12-21 06:12:45 +00:00
free_netdev ( netdev ) ;
pci_disable_pcie_error_reporting ( pdev ) ;
pci_disable_device ( pdev ) ;
}
2020-06-29 14:59:39 +05:30
static SIMPLE_DEV_PM_OPS ( iavf_pm_ops , iavf_suspend , iavf_resume ) ;
2018-09-14 17:37:46 -07:00
static struct pci_driver iavf_driver = {
2020-06-29 14:59:39 +05:30
. name = iavf_driver_name ,
. id_table = iavf_pci_tbl ,
. probe = iavf_probe ,
. remove = iavf_remove ,
. driver . pm = & iavf_pm_ops ,
. shutdown = iavf_shutdown ,
2013-12-21 06:12:45 +00:00
} ;
/**
2018-09-14 17:37:55 -07:00
* iavf_init_module - Driver Registration Routine
2013-12-21 06:12:45 +00:00
*
2018-09-14 17:37:55 -07:00
* iavf_init_module is the first routine called when the driver is
2013-12-21 06:12:45 +00:00
* loaded . All it does is register with the PCI subsystem .
* */
2018-09-14 17:37:46 -07:00
static int __init iavf_init_module ( void )
2013-12-21 06:12:45 +00:00
{
int ret ;
2014-11-11 20:02:42 +00:00
2020-05-29 00:18:33 -07:00
pr_info ( " iavf: %s \n " , iavf_driver_string ) ;
2013-12-21 06:12:45 +00:00
2018-09-14 17:37:46 -07:00
pr_info ( " %s \n " , iavf_copyright ) ;
2013-12-21 06:12:45 +00:00
2018-09-14 17:37:46 -07:00
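/* Use a dedicated unbound workqueue, limited to one in-flight work item
 * and backed by a rescuer thread (WQ_MEM_RECLAIM), so adapter work can
 * still make progress under memory pressure.
 */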
iavf_wq = alloc_workqueue ( " %s " , WQ_UNBOUND | WQ_MEM_RECLAIM , 1 ,
iavf_driver_name ) ;
if ( ! iavf_wq ) {
pr_err ( " %s: Failed to create workqueue \n " , iavf_driver_name ) ;
2015-12-22 14:25:08 -08:00
return - ENOMEM ;
}
2018-09-14 17:37:46 -07:00
ret = pci_register_driver ( & iavf_driver ) ;
2013-12-21 06:12:45 +00:00
return ret ;
}
2018-09-14 17:37:46 -07:00
module_init ( iavf_init_module ) ;
2013-12-21 06:12:45 +00:00
/**
2018-09-14 17:37:55 -07:00
* iavf_exit_module - Driver Exit Cleanup Routine
2013-12-21 06:12:45 +00:00
*
2018-09-14 17:37:55 -07:00
* iavf_exit_module is called just before the driver is removed
2013-12-21 06:12:45 +00:00
* from memory .
* */
2018-09-14 17:37:46 -07:00
static void __exit iavf_exit_module ( void )
2013-12-21 06:12:45 +00:00
{
2018-09-14 17:37:46 -07:00
pci_unregister_driver ( & iavf_driver ) ;
destroy_workqueue ( iavf_wq ) ;
2013-12-21 06:12:45 +00:00
}
2018-09-14 17:37:46 -07:00
module_exit ( iavf_exit_module ) ;
2013-12-21 06:12:45 +00:00
2018-09-14 17:37:46 -07:00
/* iavf_main.c */