Mirror of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git (synced 2025-08-05 16:54:27 +00:00)
net/mlx5: Devcom, Infrastructure changes
Update devcom infrastructure to be more generic, without depending on a max supported ports definition or a device guid, and also more encapsulated so callers don't need to pass the registered devcom component id per event call.

Signed-off-by: Eli Cohen <elic@nvidia.com>
Signed-off-by: Roi Dayan <roid@nvidia.com>
Reviewed-by: Shay Drory <shayd@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
parent 02ceda65f0
commit 88d162b479

11 changed files with 375 additions and 343 deletions
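The caller-visible change is easiest to see as a before/after sketch (illustrative only, distilled from the eswitch hunks below, not itself part of the commit): registration now returns a per-component handle keyed by an arbitrary u64 (the eswitch passes the system image guid as the key), and every subsequent call takes just that handle instead of a (devcom, component id) pair.

	/* Old pattern: the component id travels with every call. */
	struct mlx5_devcom *devcom = esw->dev->priv.devcom;

	mlx5_devcom_register_component(devcom, MLX5_DEVCOM_ESW_OFFLOADS,
				       mlx5_esw_offloads_devcom_event, esw);
	mlx5_devcom_send_event(devcom, MLX5_DEVCOM_ESW_OFFLOADS,
			       ESW_OFFLOADS_DEVCOM_PAIR,
			       ESW_OFFLOADS_DEVCOM_UNPAIR, esw);

	/* New pattern: register once with a key, keep the returned handle. */
	esw->devcom = mlx5_devcom_register_component(esw->dev->priv.devc,
						     MLX5_DEVCOM_ESW_OFFLOADS,
						     mlx5_query_nic_system_image_guid(esw->dev),
						     mlx5_esw_offloads_devcom_event, esw);
	if (!IS_ERR_OR_NULL(esw->devcom))
		mlx5_devcom_send_event(esw->devcom,
				       ESW_OFFLOADS_DEVCOM_PAIR,
				       ESW_OFFLOADS_DEVCOM_UNPAIR, esw);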
drivers/net/ethernet/mellanox/mlx5/core/en_rep.c

@@ -399,15 +399,13 @@ static void mlx5e_sqs2vport_stop(struct mlx5_eswitch *esw,
 }
 
 static int mlx5e_sqs2vport_add_peers_rules(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep,
-					   struct mlx5_devcom *devcom,
 					   struct mlx5e_rep_sq *rep_sq, int i)
 {
-	struct mlx5_eswitch *peer_esw = NULL;
 	struct mlx5_flow_handle *flow_rule;
-	int tmp;
+	struct mlx5_devcom_comp_dev *tmp;
+	struct mlx5_eswitch *peer_esw;
 
-	mlx5_devcom_for_each_peer_entry(devcom, MLX5_DEVCOM_ESW_OFFLOADS,
-					peer_esw, tmp) {
+	mlx5_devcom_for_each_peer_entry(esw->devcom, peer_esw, tmp) {
 		u16 peer_rule_idx = MLX5_CAP_GEN(peer_esw->dev, vhca_id);
 		struct mlx5e_rep_sq_peer *sq_peer;
 		int err;
@@ -443,7 +441,6 @@ static int mlx5e_sqs2vport_start(struct mlx5_eswitch *esw,
 	struct mlx5_flow_handle *flow_rule;
 	struct mlx5e_rep_priv *rpriv;
 	struct mlx5e_rep_sq *rep_sq;
-	struct mlx5_devcom *devcom;
 	bool devcom_locked = false;
 	int err;
 	int i;
@@ -451,10 +448,10 @@ static int mlx5e_sqs2vport_start(struct mlx5_eswitch *esw,
 	if (esw->mode != MLX5_ESWITCH_OFFLOADS)
 		return 0;
 
-	devcom = esw->dev->priv.devcom;
 	rpriv = mlx5e_rep_to_rep_priv(rep);
-	if (mlx5_devcom_comp_is_ready(devcom, MLX5_DEVCOM_ESW_OFFLOADS) &&
-	    mlx5_devcom_for_each_peer_begin(devcom, MLX5_DEVCOM_ESW_OFFLOADS))
+
+	if (mlx5_devcom_comp_is_ready(esw->devcom) &&
+	    mlx5_devcom_for_each_peer_begin(esw->devcom))
 		devcom_locked = true;
 
 	for (i = 0; i < sqns_num; i++) {
@@ -477,7 +474,7 @@ static int mlx5e_sqs2vport_start(struct mlx5_eswitch *esw,
 
 		xa_init(&rep_sq->sq_peer);
 		if (devcom_locked) {
-			err = mlx5e_sqs2vport_add_peers_rules(esw, rep, devcom, rep_sq, i);
+			err = mlx5e_sqs2vport_add_peers_rules(esw, rep, rep_sq, i);
 			if (err) {
 				mlx5_eswitch_del_send_to_vport_rule(rep_sq->send_to_vport_rule);
 				xa_destroy(&rep_sq->sq_peer);
@@ -490,7 +487,7 @@ static int mlx5e_sqs2vport_start(struct mlx5_eswitch *esw,
 	}
 
 	if (devcom_locked)
-		mlx5_devcom_for_each_peer_end(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
+		mlx5_devcom_for_each_peer_end(esw->devcom);
 
 	return 0;
 
@@ -498,7 +495,7 @@ out_err:
 	mlx5e_sqs2vport_stop(esw, rep);
 
 	if (devcom_locked)
-		mlx5_devcom_for_each_peer_end(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
+		mlx5_devcom_for_each_peer_end(esw->devcom);
 
 	return err;
 }
drivers/net/ethernet/mellanox/mlx5/core/en_tc.c

@@ -1668,11 +1668,10 @@ int mlx5e_tc_query_route_vport(struct net_device *out_dev, struct net_device *ro
 {
 	struct mlx5e_priv *out_priv, *route_priv;
 	struct mlx5_core_dev *route_mdev;
-	struct mlx5_devcom *devcom;
+	struct mlx5_devcom_comp_dev *pos;
 	struct mlx5_eswitch *esw;
 	u16 vhca_id;
 	int err;
-	int i;
 
 	out_priv = netdev_priv(out_dev);
 	esw = out_priv->mdev->priv.eswitch;
@@ -1688,10 +1687,8 @@ int mlx5e_tc_query_route_vport(struct net_device *out_dev, struct net_device *ro
 		return err;
 
 	rcu_read_lock();
-	devcom = out_priv->mdev->priv.devcom;
 	err = -ENODEV;
-	mlx5_devcom_for_each_peer_entry_rcu(devcom, MLX5_DEVCOM_ESW_OFFLOADS,
-					    esw, i) {
+	mlx5_devcom_for_each_peer_entry_rcu(esw->devcom, esw, pos) {
 		err = mlx5_eswitch_vhca_id_to_vport(esw, vhca_id, vport);
 		if (!err)
 			break;
@@ -2031,15 +2028,15 @@ static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
 			      struct mlx5e_tc_flow *flow)
 {
 	if (mlx5e_is_eswitch_flow(flow)) {
-		struct mlx5_devcom *devcom = flow->priv->mdev->priv.devcom;
+		struct mlx5_devcom_comp_dev *devcom = flow->priv->mdev->priv.eswitch->devcom;
 
-		if (!mlx5_devcom_for_each_peer_begin(devcom, MLX5_DEVCOM_ESW_OFFLOADS)) {
+		if (!mlx5_devcom_for_each_peer_begin(devcom)) {
 			mlx5e_tc_del_fdb_flow(priv, flow);
 			return;
 		}
 
 		mlx5e_tc_del_fdb_peers_flow(flow);
-		mlx5_devcom_for_each_peer_end(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
+		mlx5_devcom_for_each_peer_end(devcom);
 		mlx5e_tc_del_fdb_flow(priv, flow);
 	} else {
 		mlx5e_tc_del_nic_flow(priv, flow);
@@ -4216,8 +4213,7 @@ static bool is_peer_flow_needed(struct mlx5e_tc_flow *flow)
 		      flow_flag_test(flow, INGRESS);
 	bool act_is_encap = !!(attr->action &
 			       MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT);
-	bool esw_paired = mlx5_devcom_comp_is_ready(esw_attr->in_mdev->priv.devcom,
-						    MLX5_DEVCOM_ESW_OFFLOADS);
+	bool esw_paired = mlx5_devcom_comp_is_ready(esw_attr->in_mdev->priv.eswitch->devcom);
 
 	if (!esw_paired)
 		return false;
@@ -4471,14 +4467,13 @@ mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
 		   struct net_device *filter_dev,
 		   struct mlx5e_tc_flow **__flow)
 {
-	struct mlx5_devcom *devcom = priv->mdev->priv.devcom;
+	struct mlx5_devcom_comp_dev *devcom = priv->mdev->priv.eswitch->devcom, *pos;
 	struct mlx5e_rep_priv *rpriv = priv->ppriv;
 	struct mlx5_eswitch_rep *in_rep = rpriv->rep;
 	struct mlx5_core_dev *in_mdev = priv->mdev;
 	struct mlx5_eswitch *peer_esw;
 	struct mlx5e_tc_flow *flow;
 	int err;
-	int i;
 
 	flow = __mlx5e_add_fdb_flow(priv, f, flow_flags, filter_dev, in_rep,
 				    in_mdev);
@@ -4490,27 +4485,25 @@ mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
 		return 0;
 	}
 
-	if (!mlx5_devcom_for_each_peer_begin(devcom, MLX5_DEVCOM_ESW_OFFLOADS)) {
+	if (!mlx5_devcom_for_each_peer_begin(devcom)) {
 		err = -ENODEV;
 		goto clean_flow;
 	}
 
-	mlx5_devcom_for_each_peer_entry(devcom,
-					MLX5_DEVCOM_ESW_OFFLOADS,
-					peer_esw, i) {
+	mlx5_devcom_for_each_peer_entry(devcom, peer_esw, pos) {
 		err = mlx5e_tc_add_fdb_peer_flow(f, flow, flow_flags, peer_esw);
 		if (err)
 			goto peer_clean;
 	}
 
-	mlx5_devcom_for_each_peer_end(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
+	mlx5_devcom_for_each_peer_end(devcom);
 
 	*__flow = flow;
 	return 0;
 
 peer_clean:
 	mlx5e_tc_del_fdb_peers_flow(flow);
-	mlx5_devcom_for_each_peer_end(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
+	mlx5_devcom_for_each_peer_end(devcom);
 clean_flow:
 	mlx5e_tc_del_fdb_flow(priv, flow);
 	return err;
@@ -4728,7 +4721,7 @@ int mlx5e_tc_fill_action_stats(struct mlx5e_priv *priv,
 int mlx5e_stats_flower(struct net_device *dev, struct mlx5e_priv *priv,
 		       struct flow_cls_offload *f, unsigned long flags)
 {
-	struct mlx5_devcom *devcom = priv->mdev->priv.devcom;
+	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
 	struct rhashtable *tc_ht = get_tc_ht(priv, flags);
 	struct mlx5e_tc_flow *flow;
 	struct mlx5_fc *counter;
@@ -4764,7 +4757,7 @@ int mlx5e_stats_flower(struct net_device *dev, struct mlx5e_priv *priv,
 	/* Under multipath it's possible for one rule to be currently
 	 * un-offloaded while the other rule is offloaded.
 	 */
-	if (!mlx5_devcom_for_each_peer_begin(devcom, MLX5_DEVCOM_ESW_OFFLOADS))
+	if (esw && !mlx5_devcom_for_each_peer_begin(esw->devcom))
 		goto out;
 
 	if (flow_flag_test(flow, DUP)) {
@@ -4795,7 +4788,8 @@ int mlx5e_stats_flower(struct net_device *dev, struct mlx5e_priv *priv,
 	}
 
 no_peer_counter:
-	mlx5_devcom_for_each_peer_end(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
+	if (esw)
+		mlx5_devcom_for_each_peer_end(esw->devcom);
 out:
 	flow_stats_update(&f->stats, bytes, packets, 0, lastuse,
 			  FLOW_ACTION_HW_STATS_DELAYED);
drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c

@@ -652,30 +652,30 @@ mlx5_esw_bridge_ingress_flow_peer_create(u16 vport_num, u16 esw_owner_vhca_id,
 					 struct mlx5_esw_bridge_vlan *vlan, u32 counter_id,
 					 struct mlx5_esw_bridge *bridge)
 {
-	struct mlx5_devcom *devcom = bridge->br_offloads->esw->dev->priv.devcom;
+	struct mlx5_devcom_comp_dev *devcom = bridge->br_offloads->esw->devcom, *pos;
 	struct mlx5_eswitch *tmp, *peer_esw = NULL;
 	static struct mlx5_flow_handle *handle;
-	int i;
 
-	if (!mlx5_devcom_for_each_peer_begin(devcom, MLX5_DEVCOM_ESW_OFFLOADS))
+	if (!mlx5_devcom_for_each_peer_begin(devcom))
 		return ERR_PTR(-ENODEV);
 
-	mlx5_devcom_for_each_peer_entry(devcom,
-					MLX5_DEVCOM_ESW_OFFLOADS,
-					tmp, i) {
+	mlx5_devcom_for_each_peer_entry(devcom, tmp, pos) {
 		if (mlx5_esw_is_owner(tmp, vport_num, esw_owner_vhca_id)) {
 			peer_esw = tmp;
 			break;
 		}
 	}
 
 	if (!peer_esw) {
-		mlx5_devcom_for_each_peer_end(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
-		return ERR_PTR(-ENODEV);
+		handle = ERR_PTR(-ENODEV);
+		goto out;
 	}
 
 	handle = mlx5_esw_bridge_ingress_flow_with_esw_create(vport_num, addr, vlan, counter_id,
 							      bridge, peer_esw);
-	mlx5_devcom_for_each_peer_end(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
 
+out:
+	mlx5_devcom_for_each_peer_end(devcom);
 	return handle;
 }
@@ -1391,8 +1391,8 @@ mlx5_esw_bridge_fdb_entry_init(struct net_device *dev, u16 vport_num, u16 esw_ow
 					     mlx5_fc_id(counter), bridge);
 	if (IS_ERR(handle)) {
 		err = PTR_ERR(handle);
-		esw_warn(esw->dev, "Failed to create ingress flow(vport=%u,err=%d)\n",
-			 vport_num, err);
+		esw_warn(esw->dev, "Failed to create ingress flow(vport=%u,err=%d,peer=%d)\n",
+			 vport_num, err, peer);
 		goto err_ingress_flow_create;
 	}
 	entry->ingress_handle = handle;
drivers/net/ethernet/mellanox/mlx5/core/esw/bridge_mcast.c

@@ -539,30 +539,29 @@ mlx5_esw_bridge_mcast_filter_flow_create(struct mlx5_esw_bridge_port *port)
 static struct mlx5_flow_handle *
 mlx5_esw_bridge_mcast_filter_flow_peer_create(struct mlx5_esw_bridge_port *port)
 {
-	struct mlx5_devcom *devcom = port->bridge->br_offloads->esw->dev->priv.devcom;
+	struct mlx5_devcom_comp_dev *devcom = port->bridge->br_offloads->esw->devcom, *pos;
 	struct mlx5_eswitch *tmp, *peer_esw = NULL;
 	static struct mlx5_flow_handle *handle;
-	int i;
 
-	if (!mlx5_devcom_for_each_peer_begin(devcom, MLX5_DEVCOM_ESW_OFFLOADS))
+	if (!mlx5_devcom_for_each_peer_begin(devcom))
 		return ERR_PTR(-ENODEV);
 
-	mlx5_devcom_for_each_peer_entry(devcom,
-					MLX5_DEVCOM_ESW_OFFLOADS,
-					tmp, i) {
+	mlx5_devcom_for_each_peer_entry(devcom, tmp, pos) {
 		if (mlx5_esw_is_owner(tmp, port->vport_num, port->esw_owner_vhca_id)) {
 			peer_esw = tmp;
 			break;
 		}
 	}
 
 	if (!peer_esw) {
-		mlx5_devcom_for_each_peer_end(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
-		return ERR_PTR(-ENODEV);
+		handle = ERR_PTR(-ENODEV);
+		goto out;
 	}
 
 	handle = mlx5_esw_bridge_mcast_flow_with_esw_create(port, peer_esw);
 
-	mlx5_devcom_for_each_peer_end(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
+out:
+	mlx5_devcom_for_each_peer_end(devcom);
 	return handle;
 }
drivers/net/ethernet/mellanox/mlx5/core/eswitch.h

@@ -354,6 +354,7 @@ struct mlx5_eswitch {
 	} params;
 	struct blocking_notifier_head n_head;
 	struct xarray paired;
+	struct mlx5_devcom_comp_dev *devcom;
 };
 
 void esw_offloads_disable(struct mlx5_eswitch *esw);
@@ -383,6 +384,7 @@ void mlx5_eswitch_disable_locked(struct mlx5_eswitch *esw);
 void mlx5_eswitch_disable(struct mlx5_eswitch *esw);
 void mlx5_esw_offloads_devcom_init(struct mlx5_eswitch *esw);
 void mlx5_esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw);
+bool mlx5_esw_offloads_devcom_is_ready(struct mlx5_eswitch *esw);
 int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
 			       u16 vport, const u8 *mac);
 int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw,
@@ -818,6 +820,7 @@ static inline void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw, bool cle
 static inline void mlx5_eswitch_disable(struct mlx5_eswitch *esw) {}
 static inline void mlx5_esw_offloads_devcom_init(struct mlx5_eswitch *esw) {}
 static inline void mlx5_esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw) {}
+static inline bool mlx5_esw_offloads_devcom_is_ready(struct mlx5_eswitch *esw) { return false; }
 static inline bool mlx5_eswitch_is_funcs_handler(struct mlx5_core_dev *dev) { return false; }
 static inline
 int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw, u16 vport, int link_state) { return 0; }
drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c

@@ -2811,7 +2811,6 @@ static int mlx5_esw_offloads_devcom_event(int event,
 					  void *event_data)
 {
 	struct mlx5_eswitch *esw = my_data;
-	struct mlx5_devcom *devcom = esw->dev->priv.devcom;
 	struct mlx5_eswitch *peer_esw = event_data;
 	u16 esw_i, peer_esw_i;
 	bool esw_paired;
@@ -2833,6 +2832,7 @@ static int mlx5_esw_offloads_devcom_event(int event,
 		err = mlx5_esw_offloads_set_ns_peer(esw, peer_esw, true);
 		if (err)
 			goto err_out;
+
 		err = mlx5_esw_offloads_pair(esw, peer_esw);
 		if (err)
 			goto err_peer;
@@ -2851,7 +2851,7 @@ static int mlx5_esw_offloads_devcom_event(int event,
 
 		esw->num_peers++;
 		peer_esw->num_peers++;
-		mlx5_devcom_comp_set_ready(devcom, MLX5_DEVCOM_ESW_OFFLOADS, true);
+		mlx5_devcom_comp_set_ready(esw->devcom, true);
 		break;
 
 	case ESW_OFFLOADS_DEVCOM_UNPAIR:
@@ -2861,7 +2861,7 @@ static int mlx5_esw_offloads_devcom_event(int event,
 		peer_esw->num_peers--;
 		esw->num_peers--;
 		if (!esw->num_peers && !peer_esw->num_peers)
-			mlx5_devcom_comp_set_ready(devcom, MLX5_DEVCOM_ESW_OFFLOADS, false);
+			mlx5_devcom_comp_set_ready(esw->devcom, false);
 		xa_erase(&peer_esw->paired, esw_i);
 		xa_erase(&esw->paired, peer_esw_i);
 		mlx5_esw_offloads_unpair(peer_esw, esw);
@@ -2888,7 +2888,7 @@ err_out:
 
 void mlx5_esw_offloads_devcom_init(struct mlx5_eswitch *esw)
 {
-	struct mlx5_devcom *devcom = esw->dev->priv.devcom;
+	u64 guid;
 	int i;
 
 	for (i = 0; i < MLX5_MAX_PORTS; i++)
@@ -2902,34 +2902,41 @@ void mlx5_esw_offloads_devcom_init(struct mlx5_eswitch *esw)
 		return;
 
 	xa_init(&esw->paired);
-	mlx5_devcom_register_component(devcom,
-				       MLX5_DEVCOM_ESW_OFFLOADS,
-				       mlx5_esw_offloads_devcom_event,
-				       esw);
+	guid = mlx5_query_nic_system_image_guid(esw->dev);
 
 	esw->num_peers = 0;
-	mlx5_devcom_send_event(devcom,
-			       MLX5_DEVCOM_ESW_OFFLOADS,
+	esw->devcom = mlx5_devcom_register_component(esw->dev->priv.devc,
+						     MLX5_DEVCOM_ESW_OFFLOADS,
+						     guid,
+						     mlx5_esw_offloads_devcom_event,
+						     esw);
+	if (IS_ERR_OR_NULL(esw->devcom))
+		return;
+
+	mlx5_devcom_send_event(esw->devcom,
 			       ESW_OFFLOADS_DEVCOM_PAIR,
-			       ESW_OFFLOADS_DEVCOM_UNPAIR, esw);
+			       ESW_OFFLOADS_DEVCOM_UNPAIR,
+			       esw);
 }
 
 void mlx5_esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw)
 {
-	struct mlx5_devcom *devcom = esw->dev->priv.devcom;
-
-	if (!MLX5_CAP_ESW(esw->dev, merged_eswitch))
+	if (IS_ERR_OR_NULL(esw->devcom))
 		return;
 
-	if (!mlx5_lag_is_supported(esw->dev))
-		return;
-
-	mlx5_devcom_send_event(devcom, MLX5_DEVCOM_ESW_OFFLOADS,
+	mlx5_devcom_send_event(esw->devcom,
+			       ESW_OFFLOADS_DEVCOM_UNPAIR,
 			       ESW_OFFLOADS_DEVCOM_UNPAIR,
-			       ESW_OFFLOADS_DEVCOM_UNPAIR, esw);
+			       esw);
 
-	mlx5_devcom_unregister_component(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
+	mlx5_devcom_unregister_component(esw->devcom);
 	xa_destroy(&esw->paired);
+	esw->devcom = NULL;
 }
 
+bool mlx5_esw_offloads_devcom_is_ready(struct mlx5_eswitch *esw)
+{
+	return mlx5_devcom_comp_is_ready(esw->devcom);
+}
+
 bool mlx5_esw_vport_match_metadata_supported(const struct mlx5_eswitch *esw)
drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c

@@ -835,7 +835,7 @@ static bool mlx5_shared_fdb_supported(struct mlx5_lag *ldev)
 	dev = ldev->pf[MLX5_LAG_P1].dev;
 	if (is_mdev_switchdev_mode(dev) &&
 	    mlx5_eswitch_vport_match_metadata_enabled(dev->priv.eswitch) &&
-	    mlx5_devcom_comp_is_ready(dev->priv.devcom, MLX5_DEVCOM_ESW_OFFLOADS) &&
+	    mlx5_esw_offloads_devcom_is_ready(dev->priv.eswitch) &&
 	    MLX5_CAP_ESW(dev, esw_shared_ingress_acl) &&
 	    mlx5_eswitch_get_npeers(dev->priv.eswitch) == MLX5_CAP_GEN(dev, num_lag_ports) - 1)
 		return true;
drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c

@@ -2,214 +2,273 @@
 /* Copyright (c) 2018 Mellanox Technologies */
 
 #include <linux/mlx5/vport.h>
+#include <linux/list.h>
 #include "lib/devcom.h"
 #include "mlx5_core.h"
 
-static LIST_HEAD(devcom_list);
+static LIST_HEAD(devcom_dev_list);
+static LIST_HEAD(devcom_comp_list);
+/* protect device list */
+static DEFINE_MUTEX(dev_list_lock);
+/* protect component list */
+static DEFINE_MUTEX(comp_list_lock);
 
-#define devcom_for_each_component(priv, comp, iter) \
-	for (iter = 0; \
-	     comp = &(priv)->components[iter], iter < MLX5_DEVCOM_NUM_COMPONENTS; \
-	     iter++)
+#define devcom_for_each_component(iter) \
+	list_for_each_entry(iter, &devcom_comp_list, comp_list)
 
-struct mlx5_devcom_component {
-	struct {
-		void __rcu *data;
-	} device[MLX5_DEVCOM_PORTS_SUPPORTED];
+struct mlx5_devcom_dev {
+	struct list_head list;
+	struct mlx5_core_dev *dev;
+	struct kref ref;
+};
 
+struct mlx5_devcom_comp {
+	struct list_head comp_list;
+	enum mlx5_devcom_component id;
+	u64 key;
+	struct list_head comp_dev_list_head;
 	mlx5_devcom_event_handler_t handler;
-	struct rw_semaphore sem;
+	struct kref ref;
 	bool ready;
+	struct rw_semaphore sem;
 };
 
-struct mlx5_devcom_list {
+struct mlx5_devcom_comp_dev {
 	struct list_head list;
-
-	struct mlx5_devcom_component components[MLX5_DEVCOM_NUM_COMPONENTS];
-	struct mlx5_core_dev *devs[MLX5_DEVCOM_PORTS_SUPPORTED];
+	struct mlx5_devcom_comp *comp;
+	struct mlx5_devcom_dev *devc;
+	void __rcu *data;
 };
 
-struct mlx5_devcom {
-	struct mlx5_devcom_list *priv;
-	int idx;
-};
-
-static struct mlx5_devcom_list *mlx5_devcom_list_alloc(void)
+static bool devcom_dev_exists(struct mlx5_core_dev *dev)
 {
-	struct mlx5_devcom_component *comp;
-	struct mlx5_devcom_list *priv;
-	int i;
+	struct mlx5_devcom_dev *iter;
 
-	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
-	if (!priv)
-		return NULL;
+	list_for_each_entry(iter, &devcom_dev_list, list)
+		if (iter->dev == dev)
+			return true;
 
-	devcom_for_each_component(priv, comp, i)
-		init_rwsem(&comp->sem);
-
-	return priv;
+	return false;
 }
 
-static struct mlx5_devcom *mlx5_devcom_alloc(struct mlx5_devcom_list *priv,
-					     u8 idx)
+static struct mlx5_devcom_dev *
+mlx5_devcom_dev_alloc(struct mlx5_core_dev *dev)
 {
-	struct mlx5_devcom *devcom;
+	struct mlx5_devcom_dev *devc;
 
-	devcom = kzalloc(sizeof(*devcom), GFP_KERNEL);
-	if (!devcom)
+	devc = kzalloc(sizeof(*devc), GFP_KERNEL);
+	if (!devc)
 		return NULL;
 
-	devcom->priv = priv;
-	devcom->idx = idx;
-	return devcom;
+	devc->dev = dev;
+	kref_init(&devc->ref);
+	return devc;
 }
 
-/* Must be called with intf_mutex held */
-struct mlx5_devcom *mlx5_devcom_register_device(struct mlx5_core_dev *dev)
+struct mlx5_devcom_dev *
+mlx5_devcom_register_device(struct mlx5_core_dev *dev)
 {
-	struct mlx5_devcom_list *priv = NULL, *iter;
-	struct mlx5_devcom *devcom = NULL;
-	bool new_priv = false;
-	u64 sguid0, sguid1;
-	int idx, i;
+	struct mlx5_devcom_dev *devc;
 
-	if (!mlx5_core_is_pf(dev))
-		return NULL;
-	if (MLX5_CAP_GEN(dev, num_lag_ports) > MLX5_DEVCOM_PORTS_SUPPORTED)
-		return NULL;
+	mutex_lock(&dev_list_lock);
 
-	mlx5_dev_list_lock();
-	sguid0 = mlx5_query_nic_system_image_guid(dev);
-	list_for_each_entry(iter, &devcom_list, list) {
-		/* There is at least one device in iter */
-		struct mlx5_core_dev *tmp_dev;
-
-		idx = -1;
-		for (i = 0; i < MLX5_DEVCOM_PORTS_SUPPORTED; i++) {
-			if (iter->devs[i])
-				tmp_dev = iter->devs[i];
-			else
-				idx = i;
-		}
-
-		if (idx == -1)
-			continue;
-
-		sguid1 = mlx5_query_nic_system_image_guid(tmp_dev);
-		if (sguid0 != sguid1)
-			continue;
-
-		priv = iter;
-		break;
+	if (devcom_dev_exists(dev)) {
+		devc = ERR_PTR(-EEXIST);
+		goto out;
 	}
 
-	if (!priv) {
-		priv = mlx5_devcom_list_alloc();
-		if (!priv) {
-			devcom = ERR_PTR(-ENOMEM);
-			goto out;
-		}
-
-		idx = 0;
-		new_priv = true;
+	devc = mlx5_devcom_dev_alloc(dev);
+	if (!devc) {
+		devc = ERR_PTR(-ENOMEM);
+		goto out;
 	}
 
-	priv->devs[idx] = dev;
-	devcom = mlx5_devcom_alloc(priv, idx);
-	if (!devcom) {
-		if (new_priv)
-			kfree(priv);
-		devcom = ERR_PTR(-ENOMEM);
-		goto out;
-	}
-
-	if (new_priv)
-		list_add(&priv->list, &devcom_list);
+	list_add_tail(&devc->list, &devcom_dev_list);
 out:
-	mlx5_dev_list_unlock();
-	return devcom;
+	mutex_unlock(&dev_list_lock);
+	return devc;
 }
 
-/* Must be called with intf_mutex held */
-void mlx5_devcom_unregister_device(struct mlx5_devcom *devcom)
+static void
+mlx5_devcom_dev_release(struct kref *ref)
 {
-	struct mlx5_devcom_list *priv;
-	int i;
+	struct mlx5_devcom_dev *devc = container_of(ref, struct mlx5_devcom_dev, ref);
 
-	if (IS_ERR_OR_NULL(devcom))
-		return;
+	mutex_lock(&dev_list_lock);
+	list_del(&devc->list);
+	mutex_unlock(&dev_list_lock);
+	kfree(devc);
+}
 
-	mlx5_dev_list_lock();
-	priv = devcom->priv;
-	priv->devs[devcom->idx] = NULL;
+void mlx5_devcom_unregister_device(struct mlx5_devcom_dev *devc)
+{
+	if (!IS_ERR_OR_NULL(devc))
+		kref_put(&devc->ref, mlx5_devcom_dev_release);
+}
 
-	for (i = 0; i < MLX5_DEVCOM_PORTS_SUPPORTED; i++)
-		if (priv->devs[i])
-			break;
+static struct mlx5_devcom_comp *
+mlx5_devcom_comp_alloc(u64 id, u64 key, mlx5_devcom_event_handler_t handler)
+{
+	struct mlx5_devcom_comp *comp;
 
-	if (i != MLX5_DEVCOM_PORTS_SUPPORTED)
-		goto out;
+	comp = kzalloc(sizeof(*comp), GFP_KERNEL);
+	if (!comp)
+		return ERR_PTR(-ENOMEM);
 
-	list_del(&priv->list);
-	kfree(priv);
-out:
-	mlx5_dev_list_unlock();
+	comp->id = id;
+	comp->key = key;
+	comp->handler = handler;
+	init_rwsem(&comp->sem);
+	kref_init(&comp->ref);
+	INIT_LIST_HEAD(&comp->comp_dev_list_head);
+
+	return comp;
 }
 
-void mlx5_devcom_register_component(struct mlx5_devcom *devcom,
-				    enum mlx5_devcom_components id,
-				    mlx5_devcom_event_handler_t handler,
-				    void *data)
+static void
+mlx5_devcom_comp_release(struct kref *ref)
 {
-	struct mlx5_devcom_component *comp;
+	struct mlx5_devcom_comp *comp = container_of(ref, struct mlx5_devcom_comp, ref);
 
-	if (IS_ERR_OR_NULL(devcom))
-		return;
+	mutex_lock(&comp_list_lock);
+	list_del(&comp->comp_list);
+	mutex_unlock(&comp_list_lock);
+	kfree(comp);
+}
 
-	WARN_ON(!data);
+static struct mlx5_devcom_comp_dev *
+devcom_alloc_comp_dev(struct mlx5_devcom_dev *devc,
+		      struct mlx5_devcom_comp *comp,
+		      void *data)
+{
+	struct mlx5_devcom_comp_dev *devcom;
 
-	comp = &devcom->priv->components[id];
-	down_write(&comp->sem);
-	comp->handler = handler;
-	rcu_assign_pointer(comp->device[devcom->idx].data, data);
-	up_write(&comp->sem);
+	devcom = kzalloc(sizeof(*devcom), GFP_KERNEL);
+	if (!devcom)
+		return ERR_PTR(-ENOMEM);
+
+	kref_get(&devc->ref);
+	devcom->devc = devc;
+	devcom->comp = comp;
+	rcu_assign_pointer(devcom->data, data);
+
+	down_write(&comp->sem);
+	list_add_tail(&devcom->list, &comp->comp_dev_list_head);
+	up_write(&comp->sem);
+
+	return devcom;
 }
 
-void mlx5_devcom_unregister_component(struct mlx5_devcom *devcom,
-				      enum mlx5_devcom_components id)
+static void
+devcom_free_comp_dev(struct mlx5_devcom_comp_dev *devcom)
 {
-	struct mlx5_devcom_component *comp;
+	struct mlx5_devcom_comp *comp = devcom->comp;
 
-	if (IS_ERR_OR_NULL(devcom))
-		return;
+	down_write(&comp->sem);
+	list_del(&devcom->list);
+	up_write(&comp->sem);
 
-	comp = &devcom->priv->components[id];
-	down_write(&comp->sem);
-	RCU_INIT_POINTER(comp->device[devcom->idx].data, NULL);
-	up_write(&comp->sem);
-	synchronize_rcu();
+	kref_put(&devcom->devc->ref, mlx5_devcom_dev_release);
+	kfree(devcom);
+	kref_put(&comp->ref, mlx5_devcom_comp_release);
 }
 
-int mlx5_devcom_send_event(struct mlx5_devcom *devcom,
-			   enum mlx5_devcom_components id,
+static bool
+devcom_component_equal(struct mlx5_devcom_comp *devcom,
+		       enum mlx5_devcom_component id,
+		       u64 key)
+{
+	return devcom->id == id && devcom->key == key;
+}
+
+static struct mlx5_devcom_comp *
+devcom_component_get(struct mlx5_devcom_dev *devc,
+		     enum mlx5_devcom_component id,
+		     u64 key,
+		     mlx5_devcom_event_handler_t handler)
+{
+	struct mlx5_devcom_comp *comp;
+
+	devcom_for_each_component(comp) {
+		if (devcom_component_equal(comp, id, key)) {
+			if (handler == comp->handler) {
+				kref_get(&comp->ref);
+				return comp;
+			}
+
+			mlx5_core_err(devc->dev,
+				      "Cannot register existing devcom component with different handler\n");
+			return ERR_PTR(-EINVAL);
+		}
+	}
+
+	return NULL;
+}
+
+struct mlx5_devcom_comp_dev *
+mlx5_devcom_register_component(struct mlx5_devcom_dev *devc,
+			       enum mlx5_devcom_component id,
+			       u64 key,
+			       mlx5_devcom_event_handler_t handler,
+			       void *data)
+{
+	struct mlx5_devcom_comp_dev *devcom;
+	struct mlx5_devcom_comp *comp;
+
+	if (IS_ERR_OR_NULL(devc))
+		return NULL;
+
+	mutex_lock(&comp_list_lock);
+	comp = devcom_component_get(devc, id, key, handler);
+	if (IS_ERR(comp)) {
+		devcom = ERR_PTR(-EINVAL);
+		goto out_unlock;
+	}
+
+	if (!comp) {
+		comp = mlx5_devcom_comp_alloc(id, key, handler);
+		if (IS_ERR(comp)) {
+			devcom = ERR_CAST(comp);
+			goto out_unlock;
+		}
+		list_add_tail(&comp->comp_list, &devcom_comp_list);
+	}
+	mutex_unlock(&comp_list_lock);
+
+	devcom = devcom_alloc_comp_dev(devc, comp, data);
+	if (IS_ERR(devcom))
+		kref_put(&comp->ref, mlx5_devcom_comp_release);
+
+	return devcom;
+
+out_unlock:
+	mutex_unlock(&comp_list_lock);
+	return devcom;
+}
+
+void mlx5_devcom_unregister_component(struct mlx5_devcom_comp_dev *devcom)
+{
+	if (!IS_ERR_OR_NULL(devcom))
+		devcom_free_comp_dev(devcom);
+}
+
+int mlx5_devcom_send_event(struct mlx5_devcom_comp_dev *devcom,
 			   int event, int rollback_event,
 			   void *event_data)
 {
-	struct mlx5_devcom_component *comp;
-	int err = -ENODEV, i;
+	struct mlx5_devcom_comp *comp = devcom->comp;
+	struct mlx5_devcom_comp_dev *pos;
+	int err = 0;
+	void *data;
 
 	if (IS_ERR_OR_NULL(devcom))
-		return err;
+		return -ENODEV;
 
-	comp = &devcom->priv->components[id];
 	down_write(&comp->sem);
-	for (i = 0; i < MLX5_DEVCOM_PORTS_SUPPORTED; i++) {
-		void *data = rcu_dereference_protected(comp->device[i].data,
-						       lockdep_is_held(&comp->sem));
+	list_for_each_entry(pos, &comp->comp_dev_list_head, list) {
+		data = rcu_dereference_protected(pos->data, lockdep_is_held(&comp->sem));
 
-		if (i != devcom->idx && data) {
+		if (pos != devcom && data) {
 			err = comp->handler(event, data, event_data);
 			if (err)
 				goto rollback;
@@ -220,48 +279,43 @@ int mlx5_devcom_send_event(struct mlx5_devcom *devcom,
 	return 0;
 
 rollback:
-	while (i--) {
-		void *data = rcu_dereference_protected(comp->device[i].data,
-						       lockdep_is_held(&comp->sem));
+	if (list_entry_is_head(pos, &comp->comp_dev_list_head, list))
+		goto out;
+	pos = list_prev_entry(pos, list);
+	list_for_each_entry_from_reverse(pos, &comp->comp_dev_list_head, list) {
+		data = rcu_dereference_protected(pos->data, lockdep_is_held(&comp->sem));
 
-		if (i != devcom->idx && data)
+		if (pos != devcom && data)
 			comp->handler(rollback_event, data, event_data);
 	}
 
+out:
 	up_write(&comp->sem);
 	return err;
 }
 
-void mlx5_devcom_comp_set_ready(struct mlx5_devcom *devcom,
-				enum mlx5_devcom_components id,
-				bool ready)
+void mlx5_devcom_comp_set_ready(struct mlx5_devcom_comp_dev *devcom, bool ready)
 {
-	struct mlx5_devcom_component *comp;
+	WARN_ON(!rwsem_is_locked(&devcom->comp->sem));
 
-	comp = &devcom->priv->components[id];
-	WARN_ON(!rwsem_is_locked(&comp->sem));
-
-	WRITE_ONCE(comp->ready, ready);
+	WRITE_ONCE(devcom->comp->ready, ready);
 }
 
-bool mlx5_devcom_comp_is_ready(struct mlx5_devcom *devcom,
-			       enum mlx5_devcom_components id)
+bool mlx5_devcom_comp_is_ready(struct mlx5_devcom_comp_dev *devcom)
 {
 	if (IS_ERR_OR_NULL(devcom))
 		return false;
 
-	return READ_ONCE(devcom->priv->components[id].ready);
+	return READ_ONCE(devcom->comp->ready);
 }
 
-bool mlx5_devcom_for_each_peer_begin(struct mlx5_devcom *devcom,
-				     enum mlx5_devcom_components id)
+bool mlx5_devcom_for_each_peer_begin(struct mlx5_devcom_comp_dev *devcom)
 {
-	struct mlx5_devcom_component *comp;
+	struct mlx5_devcom_comp *comp;
 
 	if (IS_ERR_OR_NULL(devcom))
 		return false;
 
-	comp = &devcom->priv->components[id];
+	comp = devcom->comp;
 	down_read(&comp->sem);
 	if (!READ_ONCE(comp->ready)) {
 		up_read(&comp->sem);
@@ -271,74 +325,60 @@ bool mlx5_devcom_for_each_peer_begin(struct mlx5_devcom *devcom,
 	return true;
 }
 
-void mlx5_devcom_for_each_peer_end(struct mlx5_devcom *devcom,
-				   enum mlx5_devcom_components id)
+void mlx5_devcom_for_each_peer_end(struct mlx5_devcom_comp_dev *devcom)
 {
-	struct mlx5_devcom_component *comp = &devcom->priv->components[id];
-
-	up_read(&comp->sem);
+	up_read(&devcom->comp->sem);
 }
 
-void *mlx5_devcom_get_next_peer_data(struct mlx5_devcom *devcom,
-				     enum mlx5_devcom_components id,
-				     int *i)
+void *mlx5_devcom_get_next_peer_data(struct mlx5_devcom_comp_dev *devcom,
+				     struct mlx5_devcom_comp_dev **pos)
 {
-	struct mlx5_devcom_component *comp;
-	void *ret;
-	int idx;
+	struct mlx5_devcom_comp *comp = devcom->comp;
+	struct mlx5_devcom_comp_dev *tmp;
+	void *data;
 
-	comp = &devcom->priv->components[id];
+	tmp = list_prepare_entry(*pos, &comp->comp_dev_list_head, list);
 
-	if (*i == MLX5_DEVCOM_PORTS_SUPPORTED)
-		return NULL;
-	for (idx = *i; idx < MLX5_DEVCOM_PORTS_SUPPORTED; idx++) {
-		if (idx != devcom->idx) {
-			ret = rcu_dereference_protected(comp->device[idx].data,
-							lockdep_is_held(&comp->sem));
-			if (ret)
+	list_for_each_entry_continue(tmp, &comp->comp_dev_list_head, list) {
+		if (tmp != devcom) {
+			data = rcu_dereference_protected(tmp->data, lockdep_is_held(&comp->sem));
+			if (data)
 				break;
 		}
 	}
 
-	if (idx == MLX5_DEVCOM_PORTS_SUPPORTED) {
-		*i = idx;
+	if (list_entry_is_head(tmp, &comp->comp_dev_list_head, list))
 		return NULL;
-	}
-	*i = idx + 1;
 
-	return ret;
+	*pos = tmp;
+	return data;
 }
 
-void *mlx5_devcom_get_next_peer_data_rcu(struct mlx5_devcom *devcom,
-					 enum mlx5_devcom_components id,
-					 int *i)
+void *mlx5_devcom_get_next_peer_data_rcu(struct mlx5_devcom_comp_dev *devcom,
					 struct mlx5_devcom_comp_dev **pos)
 {
-	struct mlx5_devcom_component *comp;
-	void *ret;
-	int idx;
+	struct mlx5_devcom_comp *comp = devcom->comp;
+	struct mlx5_devcom_comp_dev *tmp;
+	void *data;
 
-	comp = &devcom->priv->components[id];
+	tmp = list_prepare_entry(*pos, &comp->comp_dev_list_head, list);
 
-	if (*i == MLX5_DEVCOM_PORTS_SUPPORTED)
-		return NULL;
-	for (idx = *i; idx < MLX5_DEVCOM_PORTS_SUPPORTED; idx++) {
-		if (idx != devcom->idx) {
+	list_for_each_entry_continue(tmp, &comp->comp_dev_list_head, list) {
+		if (tmp != devcom) {
 			/* This can change concurrently, however 'data' pointer will remain
 			 * valid for the duration of RCU read section.
 			 */
 			if (!READ_ONCE(comp->ready))
 				return NULL;
-			ret = rcu_dereference(comp->device[idx].data);
-			if (ret)
+			data = rcu_dereference(tmp->data);
+			if (data)
 				break;
 		}
 	}
 
-	if (idx == MLX5_DEVCOM_PORTS_SUPPORTED) {
-		*i = idx;
+	if (list_entry_is_head(tmp, &comp->comp_dev_list_head, list))
 		return NULL;
-	}
-	*i = idx + 1;
 
-	return ret;
+	*pos = tmp;
+	return data;
 }
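Worth noting in the rewrite above: peers now live on a per-component list (comp->comp_dev_list_head) guarded by comp->sem, so the walk is no longer bounded by MLX5_DEVCOM_PORTS_SUPPORTED, and a component with the same (id, key) found on the global devcom_comp_list is shared via kref rather than re-allocated. The caller-side locking pattern this implies, as a sketch distilled from the eswitch callers (not part of the patch):

	/* Sketch only: peer_esw/pos names mirror the callers in this commit. */
	struct mlx5_devcom_comp_dev *pos;
	struct mlx5_eswitch *peer_esw;

	/* Takes comp->sem for read and fails if the component isn't ready. */
	if (mlx5_devcom_for_each_peer_begin(esw->devcom)) {
		mlx5_devcom_for_each_peer_entry(esw->devcom, peer_esw, pos) {
			/* peer_esw is the void *data a peer registered. */
		}
		mlx5_devcom_for_each_peer_end(esw->devcom);	/* drop the read lock */
	}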
drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.h

@@ -6,11 +6,8 @@
 
 #include <linux/mlx5/driver.h>
 
-#define MLX5_DEVCOM_PORTS_SUPPORTED 4
-
-enum mlx5_devcom_components {
+enum mlx5_devcom_component {
 	MLX5_DEVCOM_ESW_OFFLOADS,
-
 	MLX5_DEVCOM_NUM_COMPONENTS,
 };
 
@@ -18,45 +15,40 @@ typedef int (*mlx5_devcom_event_handler_t)(int event,
 					   void *my_data,
 					   void *event_data);
 
-struct mlx5_devcom *mlx5_devcom_register_device(struct mlx5_core_dev *dev);
-void mlx5_devcom_unregister_device(struct mlx5_devcom *devcom);
+struct mlx5_devcom_dev *mlx5_devcom_register_device(struct mlx5_core_dev *dev);
+void mlx5_devcom_unregister_device(struct mlx5_devcom_dev *devc);
 
-void mlx5_devcom_register_component(struct mlx5_devcom *devcom,
-				    enum mlx5_devcom_components id,
-				    mlx5_devcom_event_handler_t handler,
-				    void *data);
-void mlx5_devcom_unregister_component(struct mlx5_devcom *devcom,
-				      enum mlx5_devcom_components id);
+struct mlx5_devcom_comp_dev *
+mlx5_devcom_register_component(struct mlx5_devcom_dev *devc,
+			       enum mlx5_devcom_component id,
+			       u64 key,
+			       mlx5_devcom_event_handler_t handler,
+			       void *data);
+void mlx5_devcom_unregister_component(struct mlx5_devcom_comp_dev *devcom);
 
-int mlx5_devcom_send_event(struct mlx5_devcom *devcom,
-			   enum mlx5_devcom_components id,
+int mlx5_devcom_send_event(struct mlx5_devcom_comp_dev *devcom,
 			   int event, int rollback_event,
 			   void *event_data);
 
-void mlx5_devcom_comp_set_ready(struct mlx5_devcom *devcom,
-				enum mlx5_devcom_components id,
-				bool ready);
-bool mlx5_devcom_comp_is_ready(struct mlx5_devcom *devcom,
-			       enum mlx5_devcom_components id);
+void mlx5_devcom_comp_set_ready(struct mlx5_devcom_comp_dev *devcom, bool ready);
+bool mlx5_devcom_comp_is_ready(struct mlx5_devcom_comp_dev *devcom);
 
-bool mlx5_devcom_for_each_peer_begin(struct mlx5_devcom *devcom,
-				     enum mlx5_devcom_components id);
-void mlx5_devcom_for_each_peer_end(struct mlx5_devcom *devcom,
-				   enum mlx5_devcom_components id);
-void *mlx5_devcom_get_next_peer_data(struct mlx5_devcom *devcom,
-				     enum mlx5_devcom_components id, int *i);
+bool mlx5_devcom_for_each_peer_begin(struct mlx5_devcom_comp_dev *devcom);
+void mlx5_devcom_for_each_peer_end(struct mlx5_devcom_comp_dev *devcom);
+void *mlx5_devcom_get_next_peer_data(struct mlx5_devcom_comp_dev *devcom,
+				     struct mlx5_devcom_comp_dev **pos);
 
-#define mlx5_devcom_for_each_peer_entry(devcom, id, data, i) \
-	for (i = 0, data = mlx5_devcom_get_next_peer_data(devcom, id, &i); \
+#define mlx5_devcom_for_each_peer_entry(devcom, data, pos) \
+	for (pos = NULL, data = mlx5_devcom_get_next_peer_data(devcom, &pos); \
 	     data; \
-	     data = mlx5_devcom_get_next_peer_data(devcom, id, &i))
+	     data = mlx5_devcom_get_next_peer_data(devcom, &pos))
 
-void *mlx5_devcom_get_next_peer_data_rcu(struct mlx5_devcom *devcom,
-					 enum mlx5_devcom_components id, int *i);
+void *mlx5_devcom_get_next_peer_data_rcu(struct mlx5_devcom_comp_dev *devcom,
+					 struct mlx5_devcom_comp_dev **pos);
 
-#define mlx5_devcom_for_each_peer_entry_rcu(devcom, id, data, i) \
-	for (i = 0, data = mlx5_devcom_get_next_peer_data_rcu(devcom, id, &i); \
+#define mlx5_devcom_for_each_peer_entry_rcu(devcom, data, pos) \
+	for (pos = NULL, data = mlx5_devcom_get_next_peer_data_rcu(devcom, &pos); \
 	     data; \
-	     data = mlx5_devcom_get_next_peer_data_rcu(devcom, id, &i))
+	     data = mlx5_devcom_get_next_peer_data_rcu(devcom, &pos))
 
-#endif
+#endif /* __LIB_MLX5_DEVCOM_H__ */
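The _rcu iterator variant in this header is for callers already inside an RCU read-side critical section instead of holding the component semaphore, as mlx5e_tc_query_route_vport does in the en_tc.c hunk above. A minimal usage sketch under that assumption (illustrative, not from the patch):

	struct mlx5_devcom_comp_dev *pos;
	struct mlx5_eswitch *peer;

	rcu_read_lock();
	mlx5_devcom_for_each_peer_entry_rcu(esw->devcom, peer, pos) {
		/* 'peer' stays valid only for the RCU read section. */
	}
	rcu_read_unlock();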
drivers/net/ethernet/mellanox/mlx5/core/main.c

@@ -951,10 +951,10 @@ static int mlx5_init_once(struct mlx5_core_dev *dev)
 {
 	int err;
 
-	dev->priv.devcom = mlx5_devcom_register_device(dev);
-	if (IS_ERR(dev->priv.devcom))
-		mlx5_core_err(dev, "failed to register with devcom (0x%p)\n",
-			      dev->priv.devcom);
+	dev->priv.devc = mlx5_devcom_register_device(dev);
+	if (IS_ERR(dev->priv.devc))
+		mlx5_core_warn(dev, "failed to register devcom device %ld\n",
+			       PTR_ERR(dev->priv.devc));
 
 	err = mlx5_query_board_id(dev);
 	if (err) {
@@ -1089,7 +1089,7 @@ err_eq_cleanup:
 err_irq_cleanup:
 	mlx5_irq_table_cleanup(dev);
 err_devcom:
-	mlx5_devcom_unregister_device(dev->priv.devcom);
+	mlx5_devcom_unregister_device(dev->priv.devc);
 
 	return err;
 }
@@ -1118,7 +1118,7 @@ static void mlx5_cleanup_once(struct mlx5_core_dev *dev)
 	mlx5_events_cleanup(dev);
 	mlx5_eq_table_cleanup(dev);
 	mlx5_irq_table_cleanup(dev);
-	mlx5_devcom_unregister_device(dev->priv.devcom);
+	mlx5_devcom_unregister_device(dev->priv.devc);
 }
 
 static int mlx5_function_enable(struct mlx5_core_dev *dev, bool boot, u64 timeout)
include/linux/mlx5/driver.h

@@ -501,7 +501,7 @@ struct mlx5_events;
 struct mlx5_mpfs;
 struct mlx5_eswitch;
 struct mlx5_lag;
-struct mlx5_devcom;
+struct mlx5_devcom_dev;
 struct mlx5_fw_reset;
 struct mlx5_eq_table;
 struct mlx5_irq_table;
@@ -618,7 +618,7 @@ struct mlx5_priv {
 	struct mlx5_core_sriov	sriov;
 	struct mlx5_lag		*lag;
 	u32			flags;
-	struct mlx5_devcom	*devcom;
+	struct mlx5_devcom_dev	*devc;
 	struct mlx5_fw_reset	*fw_reset;
 	struct mlx5_core_roce	roce;
 	struct mlx5_fc_stats	fc_stats;