linux/drivers/net/ethernet/intel/ice/ice_repr.c

// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2019-2021, Intel Corporation. */
#include "ice.h"
#include "ice_eswitch.h"
#include "devlink/devlink.h"
#include "devlink/devlink_port.h"
#include "ice_sriov.h"
#include "ice_tc_lib.h"
#include "ice_dcb_lib.h"

/**
 * ice_repr_inc_tx_stats - increment Tx statistic by one packet
 * @repr: repr to increment stats on
 * @len: length of the packet
 * @xmit_status: value returned by xmit function
 */
void ice_repr_inc_tx_stats(struct ice_repr *repr, unsigned int len,
                           int xmit_status)
{
        struct ice_repr_pcpu_stats *stats;
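
        /* NET_XMIT_CN only signals congestion, so the packet is still
         * counted as successfully transmitted
         */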
        if (unlikely(xmit_status != NET_XMIT_SUCCESS &&
                     xmit_status != NET_XMIT_CN)) {
                this_cpu_inc(repr->stats->tx_drops);
                return;
        }

        stats = this_cpu_ptr(repr->stats);
        u64_stats_update_begin(&stats->syncp);
        stats->tx_packets++;
        stats->tx_bytes += len;
        u64_stats_update_end(&stats->syncp);
}

/**
 * ice_repr_inc_rx_stats - increment Rx statistic by one packet
 * @netdev: repr netdev to increment stats on
 * @len: length of the packet
 */
void ice_repr_inc_rx_stats(struct net_device *netdev, unsigned int len)
{
        struct ice_repr *repr = ice_netdev_to_repr(netdev);
        struct ice_repr_pcpu_stats *stats;

        stats = this_cpu_ptr(repr->stats);
        u64_stats_update_begin(&stats->syncp);
        stats->rx_packets++;
        stats->rx_bytes += len;
        u64_stats_update_end(&stats->syncp);
}

/**
 * ice_repr_get_stats64 - get VF stats for VFPR use
 * @netdev: pointer to port representor netdev
 * @stats: pointer to struct where stats can be stored
 */
static void
ice_repr_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
{
        struct ice_netdev_priv *np = netdev_priv(netdev);
        struct ice_eth_stats *eth_stats;
        struct ice_vsi *vsi;

        if (ice_is_vf_disabled(np->repr->vf))
                return;
        vsi = np->repr->src_vsi;

        ice_update_vsi_stats(vsi);
        eth_stats = &vsi->eth_stats;

        stats->tx_packets = eth_stats->tx_unicast + eth_stats->tx_broadcast +
                            eth_stats->tx_multicast;
        stats->rx_packets = eth_stats->rx_unicast + eth_stats->rx_broadcast +
                            eth_stats->rx_multicast;
        stats->tx_bytes = eth_stats->tx_bytes;
        stats->rx_bytes = eth_stats->rx_bytes;
        stats->multicast = eth_stats->rx_multicast;
        stats->tx_errors = eth_stats->tx_errors;
        stats->tx_dropped = eth_stats->tx_discards;
        stats->rx_dropped = eth_stats->rx_discards;
}

/**
 * ice_netdev_to_repr - Get port representor for given netdevice
 * @netdev: pointer to port representor netdev
 */
struct ice_repr *ice_netdev_to_repr(const struct net_device *netdev)
{
        struct ice_netdev_priv *np = netdev_priv(netdev);

        return np->repr;
}

/**
 * ice_repr_open - Enable port representor's network interface
 * @netdev: network interface device structure
 *
 * The open entry point is called when a port representor's network
 * interface is made active by the system (IFF_UP). Corresponding
 * VF is notified about link status change.
 *
 * Returns 0 on success
 */
static int ice_repr_open(struct net_device *netdev)
{
        struct ice_repr *repr = ice_netdev_to_repr(netdev);
        struct ice_vf *vf;

        vf = repr->vf;
        vf->link_forced = true;
        vf->link_up = true;
        ice_vc_notify_vf_link_state(vf);

        netif_carrier_on(netdev);
        netif_tx_start_all_queues(netdev);

        return 0;
}

/**
 * ice_repr_stop - Disable port representor's network interface
 * @netdev: network interface device structure
 *
 * The stop entry point is called when a port representor's network
 * interface is de-activated by the system. Corresponding
 * VF is notified about link status change.
 *
 * Returns 0 on success
 */
static int ice_repr_stop(struct net_device *netdev)
{
        struct ice_repr *repr = ice_netdev_to_repr(netdev);
        struct ice_vf *vf;

        vf = repr->vf;
        vf->link_forced = true;
        vf->link_up = false;
        ice_vc_notify_vf_link_state(vf);

        netif_carrier_off(netdev);
        netif_tx_stop_all_queues(netdev);

        return 0;
}

/**
 * ice_repr_sp_stats64 - get slow path stats for port representor
 * @dev: network interface device structure
 * @stats: netlink stats structure
 */
static int
ice_repr_sp_stats64(const struct net_device *dev,
                    struct rtnl_link_stats64 *stats)
{
        struct ice_repr *repr = ice_netdev_to_repr(dev);
        int i;

        for_each_possible_cpu(i) {
                u64 tbytes, tpkts, tdrops, rbytes, rpkts;
                struct ice_repr_pcpu_stats *repr_stats;
                unsigned int start;

                repr_stats = per_cpu_ptr(repr->stats, i);
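                /* read under the seqcount retry loop so a concurrent
                 * writer cannot hand back torn 64-bit counter values
                 */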
                do {
                        start = u64_stats_fetch_begin(&repr_stats->syncp);
                        tbytes = repr_stats->tx_bytes;
                        tpkts = repr_stats->tx_packets;
                        tdrops = repr_stats->tx_drops;
                        rbytes = repr_stats->rx_bytes;
                        rpkts = repr_stats->rx_packets;
                } while (u64_stats_fetch_retry(&repr_stats->syncp, start));

                stats->tx_bytes += tbytes;
                stats->tx_packets += tpkts;
                stats->tx_dropped += tdrops;
                stats->rx_bytes += rbytes;
                stats->rx_packets += rpkts;
        }

        return 0;
}
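
/**
 * ice_repr_ndo_has_offload_stats - check if offload stats are available
 * @dev: pointer to port representor netdev
 * @attr_id: identifier of the requested statistics type
 *
 * Only CPU-hit (slow path) statistics are supported for port representors.
 */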
static bool
ice_repr_ndo_has_offload_stats(const struct net_device *dev, int attr_id)
{
        return attr_id == IFLA_OFFLOAD_XSTATS_CPU_HIT;
}
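
/**
 * ice_repr_ndo_get_offload_stats - fetch offload stats for a representor
 * @attr_id: identifier of the requested statistics type
 * @dev: pointer to port representor netdev
 * @sp: buffer to write the statistics into
 *
 * Returns 0 on success, -EINVAL for unsupported @attr_id values.
 */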
static int
ice_repr_ndo_get_offload_stats(int attr_id, const struct net_device *dev,
                               void *sp)
{
        if (attr_id == IFLA_OFFLOAD_XSTATS_CPU_HIT)
                return ice_repr_sp_stats64(dev, (struct rtnl_link_stats64 *)sp);

        return -EINVAL;
}
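
/**
 * ice_repr_setup_tc_cls_flower - handle flower classifier offload commands
 * @repr: pointer to representor structure
 * @flower: flower classifier offload request
 */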
static int
ice_repr_setup_tc_cls_flower(struct ice_repr *repr,
                             struct flow_cls_offload *flower)
{
        switch (flower->command) {
        case FLOW_CLS_REPLACE:
                return ice_add_cls_flower(repr->netdev, repr->src_vsi, flower);
        case FLOW_CLS_DESTROY:
                return ice_del_cls_flower(repr->src_vsi, flower);
        default:
                return -EINVAL;
        }
}
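
/**
 * ice_repr_setup_tc_block_cb - TC block callback for port representors
 * @type: type of the offload being set up
 * @type_data: offload-specific data
 * @cb_priv: private data passed when the callback was registered
 */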
static int
ice_repr_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
                           void *cb_priv)
{
        struct flow_cls_offload *flower = (struct flow_cls_offload *)type_data;
        struct ice_netdev_priv *np = (struct ice_netdev_priv *)cb_priv;

        switch (type) {
        case TC_SETUP_CLSFLOWER:
                return ice_repr_setup_tc_cls_flower(np->repr, flower);
        default:
                return -EOPNOTSUPP;
        }
}

static LIST_HEAD(ice_repr_block_cb_list);
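
/**
 * ice_repr_setup_tc - ndo_setup_tc callback for port representors
 * @netdev: pointer to port representor netdev
 * @type: type of the offload being set up
 * @type_data: offload-specific data
 */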
static int
ice_repr_setup_tc(struct net_device *netdev, enum tc_setup_type type,
                  void *type_data)
{
        struct ice_netdev_priv *np = netdev_priv(netdev);

        switch (type) {
        case TC_SETUP_BLOCK:
                return flow_block_cb_setup_simple((struct flow_block_offload *)
                                                  type_data,
                                                  &ice_repr_block_cb_list,
                                                  ice_repr_setup_tc_block_cb,
                                                  np, np, true);
        default:
                return -EOPNOTSUPP;
        }
}

static const struct net_device_ops ice_repr_netdev_ops = {
        .ndo_get_stats64 = ice_repr_get_stats64,
        .ndo_open = ice_repr_open,
        .ndo_stop = ice_repr_stop,
        .ndo_start_xmit = ice_eswitch_port_start_xmit,
        .ndo_setup_tc = ice_repr_setup_tc,
        .ndo_has_offload_stats = ice_repr_ndo_has_offload_stats,
        .ndo_get_offload_stats = ice_repr_ndo_get_offload_stats,
};

/**
 * ice_is_port_repr_netdev - Check if a given netdevice is a port representor netdev
 * @netdev: pointer to netdev
 */
bool ice_is_port_repr_netdev(const struct net_device *netdev)
{
        return netdev && (netdev->netdev_ops == &ice_repr_netdev_ops);
}

/**
 * ice_repr_reg_netdev - register port representor netdev
 * @netdev: pointer to port representor netdev
 */
static int
ice_repr_reg_netdev(struct net_device *netdev)
{
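        /* a representor is not backed by a physical port MAC,
         * so a random hardware address is sufficient
         */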
        eth_hw_addr_random(netdev);
        netdev->netdev_ops = &ice_repr_netdev_ops;
        ice_set_ethtool_repr_ops(netdev);

        netdev->hw_features |= NETIF_F_HW_TC;

        netif_carrier_off(netdev);
        netif_tx_stop_all_queues(netdev);

        return register_netdev(netdev);
}
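
/**
 * ice_repr_remove_node - destroy the devlink rate leaf of a representor port
 * @devlink_port: devlink port to remove the rate leaf from
 */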
static void ice_repr_remove_node(struct devlink_port *devlink_port)
{
        devl_lock(devlink_port->devlink);
        devl_rate_leaf_destroy(devlink_port);
        devl_unlock(devlink_port->devlink);
}

/**
 * ice_repr_rem - free memory allocated for a representor
 * @repr: pointer to representor structure
 */
static void ice_repr_rem(struct ice_repr *repr)
{
        free_percpu(repr->stats);
        free_netdev(repr->netdev);
        kfree(repr);
}

/**
 * ice_repr_rem_vf - remove representor from VF
 * @repr: pointer to representor structure
 */
void ice_repr_rem_vf(struct ice_repr *repr)
{
        ice_repr_remove_node(&repr->vf->devlink_port);
        unregister_netdev(repr->netdev);
        ice_devlink_destroy_vf_port(repr->vf);
        ice_virtchnl_set_dflt_ops(repr->vf);
        ice_repr_rem(repr);
}
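
/**
 * ice_repr_set_tx_topology - export Tx scheduler topology over devlink-rate
 * @pf: pointer to PF structure
 */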
static void ice_repr_set_tx_topology(struct ice_pf *pf)
{
        struct devlink *devlink;

        /* only export if ADQ and DCB disabled and eswitch enabled */
        if (ice_is_adq_active(pf) || ice_is_dcb_active(pf) ||
            !ice_is_switchdev_running(pf))
                return;

        devlink = priv_to_devlink(pf);
        ice_devlink_rate_init_tx_topology(devlink, ice_get_main_vsi(pf));
}

/**
 * ice_repr_add - add representor for generic VSI
 * @pf: pointer to PF structure
 * @src_vsi: pointer to VSI structure of device to represent
 * @parent_mac: device MAC address
 */
static struct ice_repr *
ice_repr_add(struct ice_pf *pf, struct ice_vsi *src_vsi, const u8 *parent_mac)
{
        struct ice_netdev_priv *np;
        struct ice_repr *repr;
        int err;

        repr = kzalloc(sizeof(*repr), GFP_KERNEL);
        if (!repr)
                return ERR_PTR(-ENOMEM);

        repr->netdev = alloc_etherdev(sizeof(struct ice_netdev_priv));
        if (!repr->netdev) {
                err = -ENOMEM;
                goto err_alloc;
        }

        repr->stats = netdev_alloc_pcpu_stats(struct ice_repr_pcpu_stats);
        if (!repr->stats) {
                err = -ENOMEM;
                goto err_stats;
        }

        repr->src_vsi = src_vsi;
        repr->id = src_vsi->vsi_num;
        np = netdev_priv(repr->netdev);
        np->repr = repr;

        ether_addr_copy(repr->parent_mac, parent_mac);

        return repr;

err_stats:
        free_netdev(repr->netdev);
err_alloc:
        kfree(repr);
        return ERR_PTR(err);
}
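
/**
 * ice_repr_add_vf - add port representor for a VF
 * @vf: VF to create the port representor for
 */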
struct ice_repr *ice_repr_add_vf(struct ice_vf *vf)
{
        struct ice_repr *repr;
        struct ice_vsi *vsi;
        int err;

        vsi = ice_get_vf_vsi(vf);
        if (!vsi)
                return ERR_PTR(-ENOENT);

        err = ice_devlink_create_vf_port(vf);
        if (err)
                return ERR_PTR(err);

        repr = ice_repr_add(vf->pf, vsi, vf->hw_lan_addr);
        if (IS_ERR(repr)) {
                err = PTR_ERR(repr);
                goto err_repr_add;
        }

        repr->vf = vf;
        repr->netdev->min_mtu = ETH_MIN_MTU;
        repr->netdev->max_mtu = ICE_MAX_MTU;

        SET_NETDEV_DEV(repr->netdev, ice_pf_to_dev(vf->pf));
        SET_NETDEV_DEVLINK_PORT(repr->netdev, &vf->devlink_port);
        err = ice_repr_reg_netdev(repr->netdev);
        if (err)
                goto err_netdev;

        ice_virtchnl_set_repr_ops(vf);
        ice_repr_set_tx_topology(vf->pf);

        return repr;

err_netdev:
        ice_repr_rem(repr);
err_repr_add:
        ice_devlink_destroy_vf_port(vf);
        return ERR_PTR(err);
}
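
/**
 * ice_repr_get_by_vsi - look up the representor attached to a VSI
 * @vsi: VSI to find the representor for
 *
 * Returns the representor registered for @vsi's VF, or NULL if there is none.
 */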
struct ice_repr *ice_repr_get_by_vsi(struct ice_vsi *vsi)
{
        if (!vsi->vf)
                return NULL;

        return xa_load(&vsi->back->eswitch.reprs, vsi->vf->repr_id);
}

/**
 * ice_repr_start_tx_queues - start Tx queues of port representor
 * @repr: pointer to repr structure
 */
void ice_repr_start_tx_queues(struct ice_repr *repr)
{
        netif_carrier_on(repr->netdev);
        netif_tx_start_all_queues(repr->netdev);
}

/**
 * ice_repr_stop_tx_queues - stop Tx queues of port representor
 * @repr: pointer to repr structure
 */
void ice_repr_stop_tx_queues(struct ice_repr *repr)
{
        netif_carrier_off(repr->netdev);
        netif_tx_stop_all_queues(repr->netdev);
}