mirror of
git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
synced 2025-08-04 08:17:46 +00:00
ice: remove eswitch changing queues algorithm
Changing queues used by eswitch will be done through PF netdev. There is no need to reserve queues if the number of used queues is known. Reviewed-by: Wojciech Drewek <wojciech.drewek@intel.com> Reviewed-by: Marcin Szycik <marcin.szycik@linux.intel.com> Signed-off-by: Michal Swiatkowski <michal.swiatkowski@linux.intel.com> Tested-by: Sujai Buvaneswaran <sujai.buvaneswaran@intel.com> Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
This commit is contained in:
parent
537c2e91d3
commit
8c67b7a914
4 changed files with 0 additions and 47 deletions
|
@@ -527,12 +527,6 @@ struct ice_eswitch {
|
|||
struct ice_esw_br_offloads *br_offloads;
|
||||
struct xarray reprs;
|
||||
bool is_running;
|
||||
/* struct to allow cp queues management optimization */
|
||||
struct {
|
||||
int to_reach;
|
||||
int value;
|
||||
bool is_reaching;
|
||||
} qs;
|
||||
};
|
||||
|
||||
struct ice_agg_node {
|
||||
|
|
|
@@ -455,8 +455,6 @@ static int ice_eswitch_enable_switchdev(struct ice_pf *pf)
|
|||
return -ENODEV;
|
||||
|
||||
ctrl_vsi = pf->eswitch.control_vsi;
|
||||
/* cp VSI is createad with 1 queue as default */
|
||||
pf->eswitch.qs.value = 1;
|
||||
pf->eswitch.uplink_vsi = uplink_vsi;
|
||||
|
||||
if (ice_eswitch_setup_env(pf))
|
||||
|
@@ -489,7 +487,6 @@ static void ice_eswitch_disable_switchdev(struct ice_pf *pf)
|
|||
ice_vsi_release(ctrl_vsi);
|
||||
|
||||
pf->eswitch.is_running = false;
|
||||
pf->eswitch.qs.is_reaching = false;
|
||||
}
|
||||
|
||||
/**
|
||||
|
@@ -620,18 +617,6 @@ ice_eswitch_cp_change_queues(struct ice_eswitch *eswitch, int change)
|
|||
struct ice_vsi *cp = eswitch->control_vsi;
|
||||
int queues = 0;
|
||||
|
||||
if (eswitch->qs.is_reaching) {
|
||||
if (eswitch->qs.to_reach >= eswitch->qs.value + change) {
|
||||
queues = eswitch->qs.to_reach;
|
||||
eswitch->qs.is_reaching = false;
|
||||
} else {
|
||||
queues = 0;
|
||||
}
|
||||
} else if ((change > 0 && cp->alloc_txq <= eswitch->qs.value) ||
|
||||
change < 0) {
|
||||
queues = cp->alloc_txq + change;
|
||||
}
|
||||
|
||||
if (queues) {
|
||||
cp->req_txq = queues;
|
||||
cp->req_rxq = queues;
|
||||
|
@@ -643,7 +628,6 @@ ice_eswitch_cp_change_queues(struct ice_eswitch *eswitch, int change)
|
|||
ice_vsi_open(cp);
|
||||
}
|
||||
|
||||
eswitch->qs.value += change;
|
||||
ice_eswitch_remap_rings_to_vectors(eswitch);
|
||||
}
|
||||
|
||||
|
@@ -661,8 +645,6 @@ ice_eswitch_attach(struct ice_pf *pf, struct ice_vf *vf)
|
|||
err = ice_eswitch_enable_switchdev(pf);
|
||||
if (err)
|
||||
return err;
|
||||
/* Control plane VSI is created with 1 queue as default */
|
||||
pf->eswitch.qs.to_reach -= 1;
|
||||
change = 0;
|
||||
}
|
||||
|
||||
|
@@ -756,19 +738,3 @@ int ice_eswitch_rebuild(struct ice_pf *pf)
|
|||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* ice_eswitch_reserve_cp_queues - reserve control plane VSI queues
|
||||
* @pf: pointer to PF structure
|
||||
* @change: how many more (or less) queues is needed
|
||||
*
|
||||
* Remember to call ice_eswitch_attach/detach() the "change" times.
|
||||
*/
|
||||
void ice_eswitch_reserve_cp_queues(struct ice_pf *pf, int change)
|
||||
{
|
||||
if (pf->eswitch.qs.value + change < 0)
|
||||
return;
|
||||
|
||||
pf->eswitch.qs.to_reach = pf->eswitch.qs.value + change;
|
||||
pf->eswitch.qs.is_reaching = true;
|
||||
}
|
||||
|
|
|
@@ -26,7 +26,6 @@ void ice_eswitch_set_target_vsi(struct sk_buff *skb,
|
|||
struct ice_tx_offload_params *off);
|
||||
netdev_tx_t
|
||||
ice_eswitch_port_start_xmit(struct sk_buff *skb, struct net_device *netdev);
|
||||
void ice_eswitch_reserve_cp_queues(struct ice_pf *pf, int change);
|
||||
#else /* CONFIG_ICE_SWITCHDEV */
|
||||
static inline void ice_eswitch_detach(struct ice_pf *pf, struct ice_vf *vf) { }
|
||||
|
||||
|
@@ -77,8 +76,5 @@ ice_eswitch_port_start_xmit(struct sk_buff *skb, struct net_device *netdev)
|
|||
{
|
||||
return NETDEV_TX_BUSY;
|
||||
}
|
||||
|
||||
static inline void
|
||||
ice_eswitch_reserve_cp_queues(struct ice_pf *pf, int change) { }
|
||||
#endif /* CONFIG_ICE_SWITCHDEV */
|
||||
#endif /* _ICE_ESWITCH_H_ */
|
||||
|
|
|
@@ -170,8 +170,6 @@ void ice_free_vfs(struct ice_pf *pf)
|
|||
else
|
||||
dev_warn(dev, "VFs are assigned - not disabling SR-IOV\n");
|
||||
|
||||
ice_eswitch_reserve_cp_queues(pf, -ice_get_num_vfs(pf));
|
||||
|
||||
mutex_lock(&vfs->table_lock);
|
||||
|
||||
ice_for_each_vf(pf, bkt, vf) {
|
||||
|
@@ -897,7 +895,6 @@ static int ice_ena_vfs(struct ice_pf *pf, u16 num_vfs)
|
|||
goto err_unroll_sriov;
|
||||
}
|
||||
|
||||
ice_eswitch_reserve_cp_queues(pf, num_vfs);
|
||||
ret = ice_start_vfs(pf);
|
||||
if (ret) {
|
||||
dev_err(dev, "Failed to start %d VFs, err %d\n", num_vfs, ret);
|
||||
|
|
Loading…
Add table
Reference in a new issue