2020-09-16 19:31:01 +03:00
|
|
|
// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
|
|
|
|
/* Copyright (c) 2019-2020 Marvell International Ltd. All rights reserved */
|
|
|
|
|
|
|
|
#include <linux/if_bridge.h>
|
|
|
|
#include <linux/if_vlan.h>
|
|
|
|
#include <linux/kernel.h>
|
|
|
|
#include <linux/module.h>
|
|
|
|
#include <linux/notifier.h>
|
|
|
|
#include <net/netevent.h>
|
|
|
|
#include <net/switchdev.h>
|
|
|
|
|
|
|
|
#include "prestera.h"
|
|
|
|
#include "prestera_hw.h"
|
|
|
|
#include "prestera_switchdev.h"
|
|
|
|
|
|
|
|
/* Wildcard VID accepted by the HW helpers (per its name, addresses all
 * VLANs at once) - NOTE(review): confirm against prestera_hw API usage.
 */
#define PRESTERA_VID_ALL (0xffff)

/* Bridge FDB ageing time bounds, in milliseconds. */
#define PRESTERA_DEFAULT_AGEING_TIME_MS 300000
#define PRESTERA_MAX_AGEING_TIME_MS 1000000000
#define PRESTERA_MIN_AGEING_TIME_MS 32000
|
|
|
|
|
|
|
|
/* Deferred FDB notification, queued on the switchdev workqueue. */
struct prestera_fdb_event_work {
	struct work_struct work;			/* deferred handler */
	struct switchdev_notifier_fdb_info fdb_info;	/* copied notifier payload */
	struct net_device *dev;				/* originating netdev */
	unsigned long event;				/* switchdev event code */
};
|
|
|
|
|
|
|
|
/* Per-switch switchdev state: tracked bridges and notifier blocks. */
struct prestera_switchdev {
	struct prestera_switch *sw;	/* owning switch */
	struct list_head bridge_list;	/* all tracked prestera_bridge objects */
	bool bridge_8021q_exists;	/* only one VLAN-aware bridge is allowed */
	struct notifier_block swdev_nb_blk;	/* blocking switchdev notifier */
	struct notifier_block swdev_nb;		/* atomic switchdev notifier */
};
|
|
|
|
|
|
|
|
/* SW representation of a Linux bridge offloaded to the switch. */
struct prestera_bridge {
	struct list_head head;			/* node in swdev->bridge_list */
	struct net_device *dev;			/* the bridge netdev */
	struct prestera_switchdev *swdev;	/* backpointer to owner */
	struct list_head port_list;		/* member prestera_bridge_port */
	struct list_head br_mdb_entry_list;	/* SW MDB entries of this bridge */
	bool mrouter_exist;			/* a multicast router port exists */
	bool vlan_enabled;			/* VLAN-aware (802.1Q) bridge */
	bool multicast_enabled;			/* IGMP/MLD snooping enabled */
	u16 bridge_id;				/* HW bridge id (802.1D only) */
};
|
|
|
|
|
|
|
|
/* SW representation of one netdev enslaved to an offloaded bridge. */
struct prestera_bridge_port {
	struct list_head head;			/* node in bridge->port_list */
	struct net_device *dev;			/* the enslaved netdev */
	struct prestera_bridge *bridge;		/* owning bridge */
	struct list_head vlan_list;		/* prestera_bridge_vlan entries */
	struct list_head br_mdb_port_list;	/* MDB port bindings */
	refcount_t ref_count;			/* dropped via prestera_bridge_port_put() */
	unsigned long flags;			/* BR_* brport flags (BR_FLOOD, ...) */
	bool mrouter;				/* port is a multicast router port */
	u8 stp_state;				/* BR_STATE_* spanning tree state */
};
|
|
|
|
|
|
|
|
/* One VLAN configured on a bridge port. */
struct prestera_bridge_vlan {
	struct list_head head;			/* node in br_port->vlan_list */
	struct list_head port_vlan_list;	/* member prestera_port_vlan */
	u16 vid;				/* VLAN id */
};
|
|
|
|
|
|
|
|
/* One (physical port, VID) membership; optionally joined to a bridge. */
struct prestera_port_vlan {
	struct list_head br_vlan_head;	/* node in bridge_vlan->port_vlan_list */
	struct list_head port_head;	/* node in port->vlans_list */
	struct prestera_port *port;	/* owning port */
	struct prestera_bridge_port *br_port;	/* NULL while not bridged */
	u16 vid;			/* VLAN id */
};
|
|
|
|
|
2022-07-11 14:28:22 +03:00
|
|
|
/* Binding of one bridge port to an MDB entry. */
struct prestera_br_mdb_port {
	struct prestera_bridge_port *br_port;	/* bound bridge port */
	struct list_head br_mdb_port_node;	/* node in entry->br_mdb_port_list */
};
|
|
|
|
|
|
|
|
/* Software representation of MDB table. */
struct prestera_br_mdb_entry {
	struct prestera_bridge *bridge;		/* owning bridge */
	struct prestera_mdb_entry *mdb;		/* HW-facing MDB object */
	struct list_head br_mdb_port_list;	/* prestera_br_mdb_port bindings */
	struct list_head br_mdb_entry_node;	/* node in bridge->br_mdb_entry_list */
	bool enabled;				/* entry is active in HW */
};
|
|
|
|
|
2020-09-16 19:31:01 +03:00
|
|
|
static struct workqueue_struct *swdev_wq;
|
|
|
|
|
|
|
|
static void prestera_bridge_port_put(struct prestera_bridge_port *br_port);
|
|
|
|
|
|
|
|
static int prestera_port_vid_stp_set(struct prestera_port *port, u16 vid,
|
|
|
|
u8 state);
|
|
|
|
|
2022-07-11 14:28:22 +03:00
|
|
|
static struct prestera_bridge *
|
|
|
|
prestera_bridge_find(const struct prestera_switch *sw,
|
|
|
|
const struct net_device *br_dev)
|
|
|
|
{
|
|
|
|
struct prestera_bridge *bridge;
|
|
|
|
|
|
|
|
list_for_each_entry(bridge, &sw->swdev->bridge_list, head)
|
|
|
|
if (bridge->dev == br_dev)
|
|
|
|
return bridge;
|
|
|
|
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
static struct prestera_bridge_port *
|
|
|
|
__prestera_bridge_port_find(const struct prestera_bridge *bridge,
|
|
|
|
const struct net_device *brport_dev)
|
|
|
|
{
|
|
|
|
struct prestera_bridge_port *br_port;
|
|
|
|
|
|
|
|
list_for_each_entry(br_port, &bridge->port_list, head)
|
|
|
|
if (br_port->dev == brport_dev)
|
|
|
|
return br_port;
|
|
|
|
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
static struct prestera_bridge_port *
|
|
|
|
prestera_bridge_port_find(struct prestera_switch *sw,
|
|
|
|
struct net_device *brport_dev)
|
|
|
|
{
|
|
|
|
struct net_device *br_dev = netdev_master_upper_dev_get(brport_dev);
|
|
|
|
struct prestera_bridge *bridge;
|
|
|
|
|
|
|
|
if (!br_dev)
|
|
|
|
return NULL;
|
|
|
|
|
|
|
|
bridge = prestera_bridge_find(sw, br_dev);
|
|
|
|
if (!bridge)
|
|
|
|
return NULL;
|
|
|
|
|
|
|
|
return __prestera_bridge_port_find(bridge, brport_dev);
|
|
|
|
}
|
|
|
|
|
2022-07-11 14:28:19 +03:00
|
|
|
/* Disable every offloaded brport feature on @port. Used as the rollback
 * path of prestera_br_port_flags_set(); errors from the individual HW
 * calls are deliberately ignored (best effort).
 */
static void
prestera_br_port_flags_reset(struct prestera_bridge_port *br_port,
			     struct prestera_port *port)
{
	prestera_port_uc_flood_set(port, false);
	prestera_port_mc_flood_set(port, false);
	prestera_port_learning_set(port, false);
	prestera_port_br_locked_set(port, false);
}
|
|
|
|
|
|
|
|
static int prestera_br_port_flags_set(struct prestera_bridge_port *br_port,
|
|
|
|
struct prestera_port *port)
|
|
|
|
{
|
|
|
|
int err;
|
|
|
|
|
|
|
|
err = prestera_port_uc_flood_set(port, br_port->flags & BR_FLOOD);
|
|
|
|
if (err)
|
|
|
|
goto err_out;
|
|
|
|
|
|
|
|
err = prestera_port_mc_flood_set(port, br_port->flags & BR_MCAST_FLOOD);
|
|
|
|
if (err)
|
|
|
|
goto err_out;
|
|
|
|
|
|
|
|
err = prestera_port_learning_set(port, br_port->flags & BR_LEARNING);
|
|
|
|
if (err)
|
|
|
|
goto err_out;
|
|
|
|
|
2022-08-22 21:03:15 +03:00
|
|
|
err = prestera_port_br_locked_set(port,
|
|
|
|
br_port->flags & BR_PORT_LOCKED);
|
|
|
|
if (err)
|
|
|
|
goto err_out;
|
|
|
|
|
2022-07-11 14:28:19 +03:00
|
|
|
return 0;
|
|
|
|
|
|
|
|
err_out:
|
|
|
|
prestera_br_port_flags_reset(br_port, port);
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
2020-09-16 19:31:01 +03:00
|
|
|
static struct prestera_bridge_vlan *
|
|
|
|
prestera_bridge_vlan_create(struct prestera_bridge_port *br_port, u16 vid)
|
|
|
|
{
|
|
|
|
struct prestera_bridge_vlan *br_vlan;
|
|
|
|
|
|
|
|
br_vlan = kzalloc(sizeof(*br_vlan), GFP_KERNEL);
|
|
|
|
if (!br_vlan)
|
|
|
|
return NULL;
|
|
|
|
|
|
|
|
INIT_LIST_HEAD(&br_vlan->port_vlan_list);
|
|
|
|
br_vlan->vid = vid;
|
|
|
|
list_add(&br_vlan->head, &br_port->vlan_list);
|
|
|
|
|
|
|
|
return br_vlan;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Unlink @br_vlan from its bridge port and free it. All port VLANs
 * must already have left (port_vlan_list empty - warned otherwise).
 */
static void prestera_bridge_vlan_destroy(struct prestera_bridge_vlan *br_vlan)
{
	list_del(&br_vlan->head);
	WARN_ON(!list_empty(&br_vlan->port_vlan_list));
	kfree(br_vlan);
}
|
|
|
|
|
|
|
|
static struct prestera_bridge_vlan *
|
|
|
|
prestera_bridge_vlan_by_vid(struct prestera_bridge_port *br_port, u16 vid)
|
|
|
|
{
|
|
|
|
struct prestera_bridge_vlan *br_vlan;
|
|
|
|
|
|
|
|
list_for_each_entry(br_vlan, &br_port->vlan_list, head) {
|
|
|
|
if (br_vlan->vid == vid)
|
|
|
|
return br_vlan;
|
|
|
|
}
|
|
|
|
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int prestera_bridge_vlan_port_count(struct prestera_bridge *bridge,
|
|
|
|
u16 vid)
|
|
|
|
{
|
|
|
|
struct prestera_bridge_port *br_port;
|
|
|
|
struct prestera_bridge_vlan *br_vlan;
|
|
|
|
int count = 0;
|
|
|
|
|
|
|
|
list_for_each_entry(br_port, &bridge->port_list, head) {
|
|
|
|
list_for_each_entry(br_vlan, &br_port->vlan_list, head) {
|
|
|
|
if (br_vlan->vid == vid) {
|
|
|
|
count += 1;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return count;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void prestera_bridge_vlan_put(struct prestera_bridge_vlan *br_vlan)
|
|
|
|
{
|
|
|
|
if (list_empty(&br_vlan->port_vlan_list))
|
|
|
|
prestera_bridge_vlan_destroy(br_vlan);
|
|
|
|
}
|
|
|
|
|
|
|
|
static struct prestera_port_vlan *
|
|
|
|
prestera_port_vlan_by_vid(struct prestera_port *port, u16 vid)
|
|
|
|
{
|
|
|
|
struct prestera_port_vlan *port_vlan;
|
|
|
|
|
|
|
|
list_for_each_entry(port_vlan, &port->vlans_list, port_head) {
|
|
|
|
if (port_vlan->vid == vid)
|
|
|
|
return port_vlan;
|
|
|
|
}
|
|
|
|
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
static struct prestera_port_vlan *
|
|
|
|
prestera_port_vlan_create(struct prestera_port *port, u16 vid, bool untagged)
|
|
|
|
{
|
|
|
|
struct prestera_port_vlan *port_vlan;
|
|
|
|
int err;
|
|
|
|
|
|
|
|
port_vlan = prestera_port_vlan_by_vid(port, vid);
|
|
|
|
if (port_vlan)
|
|
|
|
return ERR_PTR(-EEXIST);
|
|
|
|
|
|
|
|
err = prestera_hw_vlan_port_set(port, vid, true, untagged);
|
|
|
|
if (err)
|
|
|
|
return ERR_PTR(err);
|
|
|
|
|
|
|
|
port_vlan = kzalloc(sizeof(*port_vlan), GFP_KERNEL);
|
|
|
|
if (!port_vlan) {
|
|
|
|
err = -ENOMEM;
|
|
|
|
goto err_port_vlan_alloc;
|
|
|
|
}
|
|
|
|
|
|
|
|
port_vlan->port = port;
|
|
|
|
port_vlan->vid = vid;
|
|
|
|
|
|
|
|
list_add(&port_vlan->port_head, &port->vlans_list);
|
|
|
|
|
|
|
|
return port_vlan;
|
|
|
|
|
|
|
|
err_port_vlan_alloc:
|
|
|
|
prestera_hw_vlan_port_set(port, vid, false, false);
|
|
|
|
return ERR_PTR(err);
|
|
|
|
}
|
|
|
|
|
2021-06-10 18:43:11 +03:00
|
|
|
/* Install an FDB entry, addressing the LAG instead of the member port
 * when @port belongs to one.
 */
static int prestera_fdb_add(struct prestera_port *port,
			    const unsigned char *mac, u16 vid, bool dynamic)
{
	if (!prestera_port_is_lag_member(port))
		return prestera_hw_fdb_add(port, mac, vid, dynamic);

	return prestera_hw_lag_fdb_add(port->sw, prestera_port_lag_id(port),
				       mac, vid, dynamic);
}
|
|
|
|
|
|
|
|
/* Remove an FDB entry, addressing the LAG instead of the member port
 * when @port belongs to one.
 */
static int prestera_fdb_del(struct prestera_port *port,
			    const unsigned char *mac, u16 vid)
{
	if (!prestera_port_is_lag_member(port))
		return prestera_hw_fdb_del(port, mac, vid);

	return prestera_hw_lag_fdb_del(port->sw, prestera_port_lag_id(port),
				       mac, vid);
}
|
|
|
|
|
|
|
|
static int prestera_fdb_flush_port_vlan(struct prestera_port *port, u16 vid,
|
|
|
|
u32 mode)
|
|
|
|
{
|
|
|
|
if (prestera_port_is_lag_member(port))
|
|
|
|
return prestera_hw_fdb_flush_lag_vlan(port->sw, prestera_port_lag_id(port),
|
|
|
|
vid, mode);
|
|
|
|
else
|
|
|
|
return prestera_hw_fdb_flush_port_vlan(port, vid, mode);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Flush all FDB entries of @port, addressing the LAG when it is a
 * member.
 */
static int prestera_fdb_flush_port(struct prestera_port *port, u32 mode)
{
	if (!prestera_port_is_lag_member(port))
		return prestera_hw_fdb_flush_port(port, mode);

	return prestera_hw_fdb_flush_lag(port->sw, prestera_port_lag_id(port),
					 mode);
}
|
|
|
|
|
2022-07-11 14:28:22 +03:00
|
|
|
static void
|
|
|
|
prestera_mdb_port_del(struct prestera_mdb_entry *mdb,
|
|
|
|
struct net_device *orig_dev)
|
|
|
|
{
|
|
|
|
struct prestera_flood_domain *fl_domain = mdb->flood_domain;
|
|
|
|
struct prestera_flood_domain_port *flood_domain_port;
|
|
|
|
|
|
|
|
flood_domain_port = prestera_flood_domain_port_find(fl_domain,
|
|
|
|
orig_dev,
|
|
|
|
mdb->vid);
|
|
|
|
if (flood_domain_port)
|
|
|
|
prestera_flood_domain_port_destroy(flood_domain_port);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
prestera_br_mdb_entry_put(struct prestera_br_mdb_entry *br_mdb)
|
|
|
|
{
|
|
|
|
struct prestera_bridge_port *br_port;
|
|
|
|
|
|
|
|
if (list_empty(&br_mdb->br_mdb_port_list)) {
|
|
|
|
list_for_each_entry(br_port, &br_mdb->bridge->port_list, head)
|
|
|
|
prestera_mdb_port_del(br_mdb->mdb, br_port->dev);
|
|
|
|
|
|
|
|
prestera_mdb_entry_destroy(br_mdb->mdb);
|
|
|
|
list_del(&br_mdb->br_mdb_entry_node);
|
|
|
|
kfree(br_mdb);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
prestera_br_mdb_port_del(struct prestera_br_mdb_entry *br_mdb,
|
|
|
|
struct prestera_bridge_port *br_port)
|
|
|
|
{
|
|
|
|
struct prestera_br_mdb_port *br_mdb_port, *tmp;
|
|
|
|
|
|
|
|
list_for_each_entry_safe(br_mdb_port, tmp, &br_mdb->br_mdb_port_list,
|
|
|
|
br_mdb_port_node) {
|
|
|
|
if (br_mdb_port->br_port == br_port) {
|
|
|
|
list_del(&br_mdb_port->br_mdb_port_node);
|
|
|
|
kfree(br_mdb_port);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Flush all MDB state of the bridge @br_port belongs to: for every MDB
 * entry, each port binding is removed (HW flood-domain port first, then
 * the SW node), after which the entry itself is put and thus freed,
 * since its port list is now empty.
 *
 * NOTE: this clears the bindings of ALL bridge ports, not only
 * @br_port; the argument is only used to reach the bridge.
 */
static void
prestera_mdb_flush_bridge_port(struct prestera_bridge_port *br_port)
{
	struct prestera_br_mdb_port *br_mdb_port, *tmp_port;
	struct prestera_br_mdb_entry *br_mdb, *br_mdb_tmp;
	struct prestera_bridge *br_dev = br_port->bridge;

	list_for_each_entry_safe(br_mdb, br_mdb_tmp, &br_dev->br_mdb_entry_list,
				 br_mdb_entry_node) {
		list_for_each_entry_safe(br_mdb_port, tmp_port,
					 &br_mdb->br_mdb_port_list,
					 br_mdb_port_node) {
			prestera_mdb_port_del(br_mdb->mdb,
					      br_mdb_port->br_port->dev);
			prestera_br_mdb_port_del(br_mdb, br_mdb_port->br_port);
		}
		prestera_br_mdb_entry_put(br_mdb);
	}
}
|
|
|
|
|
2020-09-16 19:31:01 +03:00
|
|
|
/* Detach @port_vlan from its bridge port, flushing the dynamic FDB
 * entries that become unreachable and releasing the bridge-vlan and
 * bridge-port references.
 */
static void
prestera_port_vlan_bridge_leave(struct prestera_port_vlan *port_vlan)
{
	u32 fdb_flush_mode = PRESTERA_FDB_FLUSH_MODE_DYNAMIC;
	struct prestera_port *port = port_vlan->port;
	struct prestera_bridge_vlan *br_vlan;
	struct prestera_bridge_port *br_port;
	bool last_port, last_vlan;
	u16 vid = port_vlan->vid;
	int port_count;

	br_port = port_vlan->br_port;
	port_count = prestera_bridge_vlan_port_count(br_port->bridge, vid);
	br_vlan = prestera_bridge_vlan_by_vid(br_port, vid);

	/* Pick the narrowest flush that still covers everything this
	 * membership made reachable.
	 */
	last_vlan = list_is_singular(&br_port->vlan_list);
	last_port = port_count == 1;

	if (last_vlan)
		/* Last VLAN on this port: flush the whole port. */
		prestera_fdb_flush_port(port, fdb_flush_mode);
	else if (last_port)
		/* Port was the VLAN's only member: flush the VLAN. */
		prestera_hw_fdb_flush_vlan(port->sw, vid, fdb_flush_mode);
	else
		prestera_fdb_flush_port_vlan(port, vid, fdb_flush_mode);

	prestera_mdb_flush_bridge_port(br_port);

	/* Drop references only after all flushes are done. */
	list_del(&port_vlan->br_vlan_head);
	prestera_bridge_vlan_put(br_vlan);
	prestera_bridge_port_put(br_port);
	port_vlan->br_port = NULL;
}
|
|
|
|
|
|
|
|
/* Tear down @port_vlan: leave the bridge if joined, clear the HW VLAN
 * membership, unlink from the port and free.
 */
static void prestera_port_vlan_destroy(struct prestera_port_vlan *port_vlan)
{
	struct prestera_port *port = port_vlan->port;
	u16 vid = port_vlan->vid;

	if (port_vlan->br_port)
		prestera_port_vlan_bridge_leave(port_vlan);

	prestera_hw_vlan_port_set(port, vid, false, false);
	list_del(&port_vlan->port_head);
	kfree(port_vlan);
}
|
|
|
|
|
|
|
|
static struct prestera_bridge *
|
|
|
|
prestera_bridge_create(struct prestera_switchdev *swdev, struct net_device *dev)
|
|
|
|
{
|
|
|
|
bool vlan_enabled = br_vlan_enabled(dev);
|
|
|
|
struct prestera_bridge *bridge;
|
|
|
|
u16 bridge_id;
|
|
|
|
int err;
|
|
|
|
|
|
|
|
if (vlan_enabled && swdev->bridge_8021q_exists) {
|
|
|
|
netdev_err(dev, "Only one VLAN-aware bridge is supported\n");
|
|
|
|
return ERR_PTR(-EINVAL);
|
|
|
|
}
|
|
|
|
|
|
|
|
bridge = kzalloc(sizeof(*bridge), GFP_KERNEL);
|
|
|
|
if (!bridge)
|
|
|
|
return ERR_PTR(-ENOMEM);
|
|
|
|
|
|
|
|
if (vlan_enabled) {
|
|
|
|
swdev->bridge_8021q_exists = true;
|
|
|
|
} else {
|
|
|
|
err = prestera_hw_bridge_create(swdev->sw, &bridge_id);
|
|
|
|
if (err) {
|
|
|
|
kfree(bridge);
|
|
|
|
return ERR_PTR(err);
|
|
|
|
}
|
|
|
|
|
|
|
|
bridge->bridge_id = bridge_id;
|
|
|
|
}
|
|
|
|
|
|
|
|
bridge->vlan_enabled = vlan_enabled;
|
|
|
|
bridge->swdev = swdev;
|
|
|
|
bridge->dev = dev;
|
2022-07-11 14:28:22 +03:00
|
|
|
bridge->multicast_enabled = br_multicast_enabled(dev);
|
2020-09-16 19:31:01 +03:00
|
|
|
|
|
|
|
INIT_LIST_HEAD(&bridge->port_list);
|
2022-07-11 14:28:22 +03:00
|
|
|
INIT_LIST_HEAD(&bridge->br_mdb_entry_list);
|
2020-09-16 19:31:01 +03:00
|
|
|
|
|
|
|
list_add(&bridge->head, &swdev->bridge_list);
|
|
|
|
|
|
|
|
return bridge;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Unlink and free @bridge. For a VLAN-aware bridge only the singleton
 * flag is cleared; for a 802.1D bridge the HW object is deleted. The
 * MDB entry list and port list must already be empty (warned
 * otherwise).
 */
static void prestera_bridge_destroy(struct prestera_bridge *bridge)
{
	struct prestera_switchdev *swdev = bridge->swdev;

	list_del(&bridge->head);

	if (bridge->vlan_enabled)
		swdev->bridge_8021q_exists = false;
	else
		prestera_hw_bridge_delete(swdev->sw, bridge->bridge_id);

	WARN_ON(!list_empty(&bridge->br_mdb_entry_list));
	WARN_ON(!list_empty(&bridge->port_list));
	kfree(bridge);
}
|
|
|
|
|
|
|
|
static void prestera_bridge_put(struct prestera_bridge *bridge)
|
|
|
|
{
|
|
|
|
if (list_empty(&bridge->port_list))
|
|
|
|
prestera_bridge_destroy(bridge);
|
|
|
|
}
|
|
|
|
|
|
|
|
static
|
|
|
|
struct prestera_bridge *prestera_bridge_by_dev(struct prestera_switchdev *swdev,
|
|
|
|
const struct net_device *dev)
|
|
|
|
{
|
|
|
|
struct prestera_bridge *bridge;
|
|
|
|
|
|
|
|
list_for_each_entry(bridge, &swdev->bridge_list, head)
|
|
|
|
if (bridge->dev == dev)
|
|
|
|
return bridge;
|
|
|
|
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
static struct prestera_bridge_port *
|
|
|
|
__prestera_bridge_port_by_dev(struct prestera_bridge *bridge,
|
|
|
|
struct net_device *dev)
|
|
|
|
{
|
|
|
|
struct prestera_bridge_port *br_port;
|
|
|
|
|
|
|
|
list_for_each_entry(br_port, &bridge->port_list, head) {
|
|
|
|
if (br_port->dev == dev)
|
|
|
|
return br_port;
|
|
|
|
}
|
|
|
|
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
2021-06-10 18:43:11 +03:00
|
|
|
static int prestera_match_upper_bridge_dev(struct net_device *dev,
|
|
|
|
struct netdev_nested_priv *priv)
|
|
|
|
{
|
|
|
|
if (netif_is_bridge_master(dev))
|
|
|
|
priv->data = dev;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static struct net_device *prestera_get_upper_bridge_dev(struct net_device *dev)
|
|
|
|
{
|
|
|
|
struct netdev_nested_priv priv = { };
|
|
|
|
|
|
|
|
netdev_walk_all_upper_dev_rcu(dev, prestera_match_upper_bridge_dev,
|
|
|
|
&priv);
|
|
|
|
return priv.data;
|
|
|
|
}
|
|
|
|
|
2020-09-16 19:31:01 +03:00
|
|
|
static struct prestera_bridge_port *
|
|
|
|
prestera_bridge_port_by_dev(struct prestera_switchdev *swdev,
|
|
|
|
struct net_device *dev)
|
|
|
|
{
|
2021-06-10 18:43:11 +03:00
|
|
|
struct net_device *br_dev = prestera_get_upper_bridge_dev(dev);
|
2020-09-16 19:31:01 +03:00
|
|
|
struct prestera_bridge *bridge;
|
|
|
|
|
|
|
|
if (!br_dev)
|
|
|
|
return NULL;
|
|
|
|
|
|
|
|
bridge = prestera_bridge_by_dev(swdev, br_dev);
|
|
|
|
if (!bridge)
|
|
|
|
return NULL;
|
|
|
|
|
|
|
|
return __prestera_bridge_port_by_dev(bridge, dev);
|
|
|
|
}
|
|
|
|
|
|
|
|
static struct prestera_bridge_port *
|
|
|
|
prestera_bridge_port_create(struct prestera_bridge *bridge,
|
|
|
|
struct net_device *dev)
|
|
|
|
{
|
|
|
|
struct prestera_bridge_port *br_port;
|
|
|
|
|
|
|
|
br_port = kzalloc(sizeof(*br_port), GFP_KERNEL);
|
|
|
|
if (!br_port)
|
|
|
|
return NULL;
|
|
|
|
|
|
|
|
br_port->flags = BR_LEARNING | BR_FLOOD | BR_LEARNING_SYNC |
|
|
|
|
BR_MCAST_FLOOD;
|
|
|
|
br_port->stp_state = BR_STATE_DISABLED;
|
|
|
|
refcount_set(&br_port->ref_count, 1);
|
|
|
|
br_port->bridge = bridge;
|
|
|
|
br_port->dev = dev;
|
|
|
|
|
|
|
|
INIT_LIST_HEAD(&br_port->vlan_list);
|
|
|
|
list_add(&br_port->head, &bridge->port_list);
|
2022-07-11 14:28:22 +03:00
|
|
|
INIT_LIST_HEAD(&br_port->br_mdb_port_list);
|
2020-09-16 19:31:01 +03:00
|
|
|
|
|
|
|
return br_port;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Unlink and free @br_port. Its VLAN and MDB-port lists must already
 * be empty (warned otherwise).
 */
static void
prestera_bridge_port_destroy(struct prestera_bridge_port *br_port)
{
	list_del(&br_port->head);
	WARN_ON(!list_empty(&br_port->vlan_list));
	WARN_ON(!list_empty(&br_port->br_mdb_port_list));
	kfree(br_port);
}
|
|
|
|
|
|
|
|
/* Take an extra reference on @br_port; paired with
 * prestera_bridge_port_put().
 */
static void prestera_bridge_port_get(struct prestera_bridge_port *br_port)
{
	refcount_inc(&br_port->ref_count);
}
|
|
|
|
|
|
|
|
/* Drop a reference on @br_port; on the last put the port is destroyed
 * and the owning bridge is put as well. The bridge pointer is read
 * up-front because br_port is freed before the bridge put.
 */
static void prestera_bridge_port_put(struct prestera_bridge_port *br_port)
{
	struct prestera_bridge *bridge = br_port->bridge;

	if (refcount_dec_and_test(&br_port->ref_count)) {
		prestera_bridge_port_destroy(br_port);
		prestera_bridge_put(bridge);
	}
}
|
|
|
|
|
|
|
|
static struct prestera_bridge_port *
|
|
|
|
prestera_bridge_port_add(struct prestera_bridge *bridge, struct net_device *dev)
|
|
|
|
{
|
|
|
|
struct prestera_bridge_port *br_port;
|
|
|
|
|
|
|
|
br_port = __prestera_bridge_port_by_dev(bridge, dev);
|
|
|
|
if (br_port) {
|
|
|
|
prestera_bridge_port_get(br_port);
|
|
|
|
return br_port;
|
|
|
|
}
|
|
|
|
|
|
|
|
br_port = prestera_bridge_port_create(bridge, dev);
|
|
|
|
if (!br_port)
|
|
|
|
return ERR_PTR(-ENOMEM);
|
|
|
|
|
|
|
|
return br_port;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
prestera_bridge_1d_port_join(struct prestera_bridge_port *br_port)
|
|
|
|
{
|
|
|
|
struct prestera_port *port = netdev_priv(br_port->dev);
|
|
|
|
struct prestera_bridge *bridge = br_port->bridge;
|
|
|
|
int err;
|
|
|
|
|
|
|
|
err = prestera_hw_bridge_port_add(port, bridge->bridge_id);
|
|
|
|
if (err)
|
|
|
|
return err;
|
|
|
|
|
2022-07-11 14:28:19 +03:00
|
|
|
err = prestera_br_port_flags_set(br_port, port);
|
2020-09-16 19:31:01 +03:00
|
|
|
if (err)
|
2022-07-11 14:28:19 +03:00
|
|
|
goto err_flags2port_set;
|
2020-09-16 19:31:01 +03:00
|
|
|
|
|
|
|
return 0;
|
|
|
|
|
2022-07-11 14:28:19 +03:00
|
|
|
err_flags2port_set:
|
2020-09-16 19:31:01 +03:00
|
|
|
prestera_hw_bridge_port_delete(port, bridge->bridge_id);
|
|
|
|
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
2021-06-10 18:43:10 +03:00
|
|
|
int prestera_bridge_port_join(struct net_device *br_dev,
|
net: bridge: switchdev: let drivers inform which bridge ports are offloaded
On reception of an skb, the bridge checks if it was marked as 'already
forwarded in hardware' (checks if skb->offload_fwd_mark == 1), and if it
is, it assigns the source hardware domain of that skb based on the
hardware domain of the ingress port. Then during forwarding, it enforces
that the egress port must have a different hardware domain than the
ingress one (this is done in nbp_switchdev_allowed_egress).
Non-switchdev drivers don't report any physical switch id (neither
through devlink nor .ndo_get_port_parent_id), therefore the bridge
assigns them a hardware domain of 0, and packets coming from them will
always have skb->offload_fwd_mark = 0. So there aren't any restrictions.
Problems appear due to the fact that DSA would like to perform software
fallback for bonding and team interfaces that the physical switch cannot
offload.
+-- br0 ---+
/ / | \
/ / | \
/ | | bond0
/ | | / \
swp0 swp1 swp2 swp3 swp4
There, it is desirable that the presence of swp3 and swp4 under a
non-offloaded LAG does not preclude us from doing hardware bridging
beteen swp0, swp1 and swp2. The bandwidth of the CPU is often times high
enough that software bridging between {swp0,swp1,swp2} and bond0 is not
impractical.
But this creates an impossible paradox given the current way in which
port hardware domains are assigned. When the driver receives a packet
from swp0 (say, due to flooding), it must set skb->offload_fwd_mark to
something.
- If we set it to 0, then the bridge will forward it towards swp1, swp2
and bond0. But the switch has already forwarded it towards swp1 and
swp2 (not to bond0, remember, that isn't offloaded, so as far as the
switch is concerned, ports swp3 and swp4 are not looking up the FDB,
and the entire bond0 is a destination that is strictly behind the
CPU). But we don't want duplicated traffic towards swp1 and swp2, so
it's not ok to set skb->offload_fwd_mark = 0.
- If we set it to 1, then the bridge will not forward the skb towards
the ports with the same switchdev mark, i.e. not to swp1, swp2 and
bond0. Towards swp1 and swp2 that's ok, but towards bond0? It should
have forwarded the skb there.
So the real issue is that bond0 will be assigned the same hardware
domain as {swp0,swp1,swp2}, because the function that assigns hardware
domains to bridge ports, nbp_switchdev_add(), recurses through bond0's
lower interfaces until it finds something that implements devlink (calls
dev_get_port_parent_id with bool recurse = true). This is a problem
because the fact that bond0 can be offloaded by swp3 and swp4 in our
example is merely an assumption.
A solution is to give the bridge explicit hints as to what hardware
domain it should use for each port.
Currently, the bridging offload is very 'silent': a driver registers a
netdevice notifier, which is put on the netns's notifier chain, and
which sniffs around for NETDEV_CHANGEUPPER events where the upper is a
bridge, and the lower is an interface it knows about (one registered by
this driver, normally). Then, from within that notifier, it does a bunch
of stuff behind the bridge's back, without the bridge necessarily
knowing that there's somebody offloading that port. It looks like this:
ip link set swp0 master br0
|
v
br_add_if() calls netdev_master_upper_dev_link()
|
v
call_netdevice_notifiers
|
v
dsa_slave_netdevice_event
|
v
oh, hey! it's for me!
|
v
.port_bridge_join
What we do to solve the conundrum is to be less silent, and change the
switchdev drivers to present themselves to the bridge. Something like this:
ip link set swp0 master br0
|
v
br_add_if() calls netdev_master_upper_dev_link()
|
v bridge: Aye! I'll use this
call_netdevice_notifiers ^ ppid as the
| | hardware domain for
v | this port, and zero
dsa_slave_netdevice_event | if I got nothing.
| |
v |
oh, hey! it's for me! |
| |
v |
.port_bridge_join |
| |
+------------------------+
switchdev_bridge_port_offload(swp0, swp0)
Then stacked interfaces (like bond0 on top of swp3/swp4) would be
treated differently in DSA, depending on whether we can or cannot
offload them.
The offload case:
ip link set bond0 master br0
|
v
br_add_if() calls netdev_master_upper_dev_link()
|
v bridge: Aye! I'll use this
call_netdevice_notifiers ^ ppid as the
| | switchdev mark for
v | bond0.
dsa_slave_netdevice_event | Coincidentally (or not),
| | bond0 and swp0, swp1, swp2
v | all have the same switchdev
hmm, it's not quite for me, | mark now, since the ASIC
but my driver has already | is able to forward towards
called .port_lag_join | all these ports in hw.
for it, because I have |
a port with dp->lag_dev == bond0. |
| |
v |
.port_bridge_join |
for swp3 and swp4 |
| |
+------------------------+
switchdev_bridge_port_offload(bond0, swp3)
switchdev_bridge_port_offload(bond0, swp4)
And the non-offload case:
ip link set bond0 master br0
|
v
br_add_if() calls netdev_master_upper_dev_link()
|
v bridge waiting:
call_netdevice_notifiers ^ huh, switchdev_bridge_port_offload
| | wasn't called, okay, I'll use a
v | hwdom of zero for this one.
dsa_slave_netdevice_event : Then packets received on swp0 will
| : not be software-forwarded towards
v : swp1, but they will towards bond0.
it's not for me, but
bond0 is an upper of swp3
and swp4, but their dp->lag_dev
is NULL because they couldn't
offload it.
Basically we can draw the conclusion that the lowers of a bridge port
can come and go, so depending on the configuration of lowers for a
bridge port, it can dynamically toggle between offloaded and unoffloaded.
Therefore, we need an equivalent switchdev_bridge_port_unoffload too.
This patch changes the way any switchdev driver interacts with the
bridge. From now on, everybody needs to call switchdev_bridge_port_offload
and switchdev_bridge_port_unoffload, otherwise the bridge will treat the
port as non-offloaded and allow software flooding to other ports from
the same ASIC.
Note that these functions lay the ground for a more complex handshake
between switchdev drivers and the bridge in the future.
For drivers that will request a replay of the switchdev objects when
they offload and unoffload a bridge port (DSA, dpaa2-switch, ocelot), we
place the call to switchdev_bridge_port_unoffload() strategically inside
the NETDEV_PRECHANGEUPPER notifier's code path, and not inside
NETDEV_CHANGEUPPER. This is because the switchdev object replay helpers
need the netdev adjacency lists to be valid, and that is only true in
NETDEV_PRECHANGEUPPER.
Cc: Vadym Kochan <vkochan@marvell.com>
Cc: Taras Chornyi <tchornyi@marvell.com>
Cc: Ioana Ciornei <ioana.ciornei@nxp.com>
Cc: Lars Povlsen <lars.povlsen@microchip.com>
Cc: Steen Hegelund <Steen.Hegelund@microchip.com>
Cc: UNGLinuxDriver@microchip.com
Cc: Claudiu Manoil <claudiu.manoil@nxp.com>
Cc: Alexandre Belloni <alexandre.belloni@bootlin.com>
Cc: Grygorii Strashko <grygorii.strashko@ti.com>
Signed-off-by: Vladimir Oltean <vladimir.oltean@nxp.com>
Tested-by: Ioana Ciornei <ioana.ciornei@nxp.com> # dpaa2-switch: regression
Acked-by: Ioana Ciornei <ioana.ciornei@nxp.com> # dpaa2-switch
Tested-by: Horatiu Vultur <horatiu.vultur@microchip.com> # ocelot-switch
Signed-off-by: David S. Miller <davem@davemloft.net>
2021-07-21 19:24:01 +03:00
|
|
|
struct prestera_port *port,
|
|
|
|
struct netlink_ext_ack *extack)
|
2020-09-16 19:31:01 +03:00
|
|
|
{
|
|
|
|
struct prestera_switchdev *swdev = port->sw->swdev;
|
|
|
|
struct prestera_bridge_port *br_port;
|
|
|
|
struct prestera_bridge *bridge;
|
|
|
|
int err;
|
|
|
|
|
2021-06-10 18:43:10 +03:00
|
|
|
bridge = prestera_bridge_by_dev(swdev, br_dev);
|
2020-09-16 19:31:01 +03:00
|
|
|
if (!bridge) {
|
2021-06-10 18:43:10 +03:00
|
|
|
bridge = prestera_bridge_create(swdev, br_dev);
|
2020-09-16 19:31:01 +03:00
|
|
|
if (IS_ERR(bridge))
|
|
|
|
return PTR_ERR(bridge);
|
|
|
|
}
|
|
|
|
|
|
|
|
br_port = prestera_bridge_port_add(bridge, port->dev);
|
|
|
|
if (IS_ERR(br_port)) {
|
2021-11-18 21:51:40 +02:00
|
|
|
prestera_bridge_put(bridge);
|
|
|
|
return PTR_ERR(br_port);
|
2020-09-16 19:31:01 +03:00
|
|
|
}
|
|
|
|
|
net: bridge: move the switchdev object replay helpers to "push" mode
Starting with commit 4f2673b3a2b6 ("net: bridge: add helper to replay
port and host-joined mdb entries"), DSA has introduced some bridge
helpers that replay switchdev events (FDB/MDB/VLAN additions and
deletions) that can be lost by the switchdev drivers in a variety of
circumstances:
- an IP multicast group was host-joined on the bridge itself before any
switchdev port joined the bridge, leading to the host MDB entries
missing in the hardware database.
- during the bridge creation process, the MAC address of the bridge was
added to the FDB as an entry pointing towards the bridge device
itself, but with no switchdev ports being part of the bridge yet, this
local FDB entry would remain unknown to the switchdev hardware
database.
- a VLAN/FDB/MDB was added to a bridge port that is a LAG interface,
before any switchdev port joined that LAG, leading to the hardware
database missing those entries.
- a switchdev port left a LAG that is a bridge port, while the LAG
remained part of the bridge, and all FDB/MDB/VLAN entries remained
installed in the hardware database of the switchdev port.
Also, since commit 0d2cfbd41c4a ("net: bridge: ignore switchdev events
for LAG ports which didn't request replay"), DSA introduced a method,
based on a const void *ctx, to ensure that two switchdev ports under the
same LAG that is a bridge port do not see the same MDB/VLAN entry being
replayed twice by the bridge, once for every bridge port that joins the
LAG.
With so many ordering corner cases being possible, it seems unreasonable
to expect a switchdev driver writer to get it right from the first try.
Therefore, now that DSA has experimented with the bridge replay helpers
for a little bit, we can move the code to the bridge driver where it is
more readily available to all switchdev drivers.
To convert the switchdev object replay helpers from "pull mode" (where
the driver asks for them) to a "push mode" (where the bridge offers them
automatically), the biggest problem is that the bridge needs to be aware
when a switchdev port joins and leaves, even when the switchdev is only
indirectly a bridge port (for example when the bridge port is a LAG
upper of the switchdev).
Luckily, we already have a hook for that, in the form of the newly
introduced switchdev_bridge_port_offload() and
switchdev_bridge_port_unoffload() calls. These offer a natural place for
hooking the object addition and deletion replays.
Extend the above 2 functions with:
- pointers to the switchdev atomic notifier (for FDB replays) and the
blocking notifier (for MDB and VLAN replays).
- the "const void *ctx" argument required for drivers to be able to
disambiguate between which port is targeted, when multiple ports are
lowers of the same LAG that is a bridge port. Most of the drivers pass
NULL to this argument, except the ones that support LAG offload and have
the proper context check already in place in the switchdev blocking
notifier handler.
Also unexport the replay helpers, since nobody except the bridge calls
them directly now.
Note that:
(a) we abuse the terminology slightly, because FDB entries are not
"switchdev objects", but we count them as objects nonetheless.
With no direct way to prove it, I think they are not modeled as
switchdev objects because those can only be installed by the bridge
to the hardware (as opposed to FDB entries which can be propagated
in the other direction too). This is merely an abuse of terms, FDB
entries are replayed too, despite not being objects.
(b) the bridge does not attempt to sync port attributes to newly joined
ports, just the countable stuff (the objects). The reason for this
is simple: no universal and symmetric way to sync and unsync them is
known. For example, VLAN filtering: what to do on unsync, disable or
leave it enabled? Similarly, STP state, ageing timer, etc etc. What
a switchdev port does when it becomes standalone again is not really
up to the bridge's competence, and the driver should deal with it.
On the other hand, replaying deletions of switchdev objects can be
seen as a matter of cleanup and therefore be treated by the bridge,
hence this patch.
We make the replay helpers opt-in for drivers, because they might not
bring immediate benefits for them:
- nbp_vlan_init() is called _after_ netdev_master_upper_dev_link(),
so br_vlan_replay() should not do anything for the new drivers on
which we call it. The existing drivers where there was even a slight
possibility for there to exist a VLAN on a bridge port before they
join it are already guarded against this: mlxsw and prestera deny
joining LAG interfaces that are members of a bridge.
- br_fdb_replay() should now notify of local FDB entries, but I patched
all drivers except DSA to ignore these new entries in commit
2c4eca3ef716 ("net: bridge: switchdev: include local flag in FDB
notifications"). Driver authors can lift this restriction as they
wish, and when they do, they can also opt into the FDB replay
functionality.
- br_mdb_replay() should fix a real issue which is described in commit
4f2673b3a2b6 ("net: bridge: add helper to replay port and host-joined
mdb entries"). However most drivers do not offload the
SWITCHDEV_OBJ_ID_HOST_MDB to see this issue: only cpsw and am65_cpsw
offload this switchdev object, and I don't completely understand the
way in which they offload this switchdev object anyway. So I'll leave
it up to these drivers' respective maintainers to opt into
br_mdb_replay().
So most of the drivers pass NULL notifier blocks for the replay helpers,
except:
- dpaa2-switch which was already acked/regression-tested with the
helpers enabled (and there isn't much of a downside in having them)
- ocelot which already had replay logic in "pull" mode
- DSA which already had replay logic in "pull" mode
An important observation is that the drivers which don't currently
request bridge event replays don't even have the
switchdev_bridge_port_{offload,unoffload} calls placed in proper places
right now. This was done to avoid unnecessary rework for drivers which
might never even add support for this. For driver writers who wish to
add replay support, this can be used as a tentative placement guide:
https://patchwork.kernel.org/project/netdevbpf/patch/20210720134655.892334-11-vladimir.oltean@nxp.com/
Cc: Vadym Kochan <vkochan@marvell.com>
Cc: Taras Chornyi <tchornyi@marvell.com>
Cc: Ioana Ciornei <ioana.ciornei@nxp.com>
Cc: Lars Povlsen <lars.povlsen@microchip.com>
Cc: Steen Hegelund <Steen.Hegelund@microchip.com>
Cc: UNGLinuxDriver@microchip.com
Cc: Claudiu Manoil <claudiu.manoil@nxp.com>
Cc: Alexandre Belloni <alexandre.belloni@bootlin.com>
Cc: Grygorii Strashko <grygorii.strashko@ti.com>
Signed-off-by: Vladimir Oltean <vladimir.oltean@nxp.com>
Acked-by: Ioana Ciornei <ioana.ciornei@nxp.com> # dpaa2-switch
Signed-off-by: David S. Miller <davem@davemloft.net>
2021-07-21 19:24:03 +03:00
|
|
|
err = switchdev_bridge_port_offload(br_port->dev, port->dev, NULL,
|
2021-07-22 18:55:38 +03:00
|
|
|
NULL, NULL, false, extack);
|
net: bridge: switchdev: let drivers inform which bridge ports are offloaded
On reception of an skb, the bridge checks if it was marked as 'already
forwarded in hardware' (checks if skb->offload_fwd_mark == 1), and if it
is, it assigns the source hardware domain of that skb based on the
hardware domain of the ingress port. Then during forwarding, it enforces
that the egress port must have a different hardware domain than the
ingress one (this is done in nbp_switchdev_allowed_egress).
Non-switchdev drivers don't report any physical switch id (neither
through devlink nor .ndo_get_port_parent_id), therefore the bridge
assigns them a hardware domain of 0, and packets coming from them will
always have skb->offload_fwd_mark = 0. So there aren't any restrictions.
Problems appear due to the fact that DSA would like to perform software
fallback for bonding and team interfaces that the physical switch cannot
offload.
+-- br0 ---+
/ / | \
/ / | \
/ | | bond0
/ | | / \
swp0 swp1 swp2 swp3 swp4
There, it is desirable that the presence of swp3 and swp4 under a
non-offloaded LAG does not preclude us from doing hardware bridging
between swp0, swp1 and swp2. The bandwidth of the CPU is oftentimes high
enough that software bridging between {swp0,swp1,swp2} and bond0 is not
impractical.
But this creates an impossible paradox given the current way in which
port hardware domains are assigned. When the driver receives a packet
from swp0 (say, due to flooding), it must set skb->offload_fwd_mark to
something.
- If we set it to 0, then the bridge will forward it towards swp1, swp2
and bond0. But the switch has already forwarded it towards swp1 and
swp2 (not to bond0, remember, that isn't offloaded, so as far as the
switch is concerned, ports swp3 and swp4 are not looking up the FDB,
and the entire bond0 is a destination that is strictly behind the
CPU). But we don't want duplicated traffic towards swp1 and swp2, so
it's not ok to set skb->offload_fwd_mark = 0.
- If we set it to 1, then the bridge will not forward the skb towards
the ports with the same switchdev mark, i.e. not to swp1, swp2 and
bond0. Towards swp1 and swp2 that's ok, but towards bond0? It should
have forwarded the skb there.
So the real issue is that bond0 will be assigned the same hardware
domain as {swp0,swp1,swp2}, because the function that assigns hardware
domains to bridge ports, nbp_switchdev_add(), recurses through bond0's
lower interfaces until it finds something that implements devlink (calls
dev_get_port_parent_id with bool recurse = true). This is a problem
because the fact that bond0 can be offloaded by swp3 and swp4 in our
example is merely an assumption.
A solution is to give the bridge explicit hints as to what hardware
domain it should use for each port.
Currently, the bridging offload is very 'silent': a driver registers a
netdevice notifier, which is put on the netns's notifier chain, and
which sniffs around for NETDEV_CHANGEUPPER events where the upper is a
bridge, and the lower is an interface it knows about (one registered by
this driver, normally). Then, from within that notifier, it does a bunch
of stuff behind the bridge's back, without the bridge necessarily
knowing that there's somebody offloading that port. It looks like this:
ip link set swp0 master br0
|
v
br_add_if() calls netdev_master_upper_dev_link()
|
v
call_netdevice_notifiers
|
v
dsa_slave_netdevice_event
|
v
oh, hey! it's for me!
|
v
.port_bridge_join
What we do to solve the conundrum is to be less silent, and change the
switchdev drivers to present themselves to the bridge. Something like this:
ip link set swp0 master br0
|
v
br_add_if() calls netdev_master_upper_dev_link()
|
v bridge: Aye! I'll use this
call_netdevice_notifiers ^ ppid as the
| | hardware domain for
v | this port, and zero
dsa_slave_netdevice_event | if I got nothing.
| |
v |
oh, hey! it's for me! |
| |
v |
.port_bridge_join |
| |
+------------------------+
switchdev_bridge_port_offload(swp0, swp0)
Then stacked interfaces (like bond0 on top of swp3/swp4) would be
treated differently in DSA, depending on whether we can or cannot
offload them.
The offload case:
ip link set bond0 master br0
|
v
br_add_if() calls netdev_master_upper_dev_link()
|
v bridge: Aye! I'll use this
call_netdevice_notifiers ^ ppid as the
| | switchdev mark for
v | bond0.
dsa_slave_netdevice_event | Coincidentally (or not),
| | bond0 and swp0, swp1, swp2
v | all have the same switchdev
hmm, it's not quite for me, | mark now, since the ASIC
but my driver has already | is able to forward towards
called .port_lag_join | all these ports in hw.
for it, because I have |
a port with dp->lag_dev == bond0. |
| |
v |
.port_bridge_join |
for swp3 and swp4 |
| |
+------------------------+
switchdev_bridge_port_offload(bond0, swp3)
switchdev_bridge_port_offload(bond0, swp4)
And the non-offload case:
ip link set bond0 master br0
|
v
br_add_if() calls netdev_master_upper_dev_link()
|
v bridge waiting:
call_netdevice_notifiers ^ huh, switchdev_bridge_port_offload
| | wasn't called, okay, I'll use a
v | hwdom of zero for this one.
dsa_slave_netdevice_event : Then packets received on swp0 will
| : not be software-forwarded towards
v : swp1, but they will towards bond0.
it's not for me, but
bond0 is an upper of swp3
and swp4, but their dp->lag_dev
is NULL because they couldn't
offload it.
Basically we can draw the conclusion that the lowers of a bridge port
can come and go, so depending on the configuration of lowers for a
bridge port, it can dynamically toggle between offloaded and unoffloaded.
Therefore, we need an equivalent switchdev_bridge_port_unoffload too.
This patch changes the way any switchdev driver interacts with the
bridge. From now on, everybody needs to call switchdev_bridge_port_offload
and switchdev_bridge_port_unoffload, otherwise the bridge will treat the
port as non-offloaded and allow software flooding to other ports from
the same ASIC.
Note that these functions lay the ground for a more complex handshake
between switchdev drivers and the bridge in the future.
For drivers that will request a replay of the switchdev objects when
they offload and unoffload a bridge port (DSA, dpaa2-switch, ocelot), we
place the call to switchdev_bridge_port_unoffload() strategically inside
the NETDEV_PRECHANGEUPPER notifier's code path, and not inside
NETDEV_CHANGEUPPER. This is because the switchdev object replay helpers
need the netdev adjacency lists to be valid, and that is only true in
NETDEV_PRECHANGEUPPER.
Cc: Vadym Kochan <vkochan@marvell.com>
Cc: Taras Chornyi <tchornyi@marvell.com>
Cc: Ioana Ciornei <ioana.ciornei@nxp.com>
Cc: Lars Povlsen <lars.povlsen@microchip.com>
Cc: Steen Hegelund <Steen.Hegelund@microchip.com>
Cc: UNGLinuxDriver@microchip.com
Cc: Claudiu Manoil <claudiu.manoil@nxp.com>
Cc: Alexandre Belloni <alexandre.belloni@bootlin.com>
Cc: Grygorii Strashko <grygorii.strashko@ti.com>
Signed-off-by: Vladimir Oltean <vladimir.oltean@nxp.com>
Tested-by: Ioana Ciornei <ioana.ciornei@nxp.com> # dpaa2-switch: regression
Acked-by: Ioana Ciornei <ioana.ciornei@nxp.com> # dpaa2-switch
Tested-by: Horatiu Vultur <horatiu.vultur@microchip.com> # ocelot-switch
Signed-off-by: David S. Miller <davem@davemloft.net>
2021-07-21 19:24:01 +03:00
|
|
|
if (err)
|
|
|
|
goto err_switchdev_offload;
|
|
|
|
|
2020-09-16 19:31:01 +03:00
|
|
|
if (bridge->vlan_enabled)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
err = prestera_bridge_1d_port_join(br_port);
|
|
|
|
if (err)
|
|
|
|
goto err_port_join;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
err_port_join:
|
net: bridge: move the switchdev object replay helpers to "push" mode
Starting with commit 4f2673b3a2b6 ("net: bridge: add helper to replay
port and host-joined mdb entries"), DSA has introduced some bridge
helpers that replay switchdev events (FDB/MDB/VLAN additions and
deletions) that can be lost by the switchdev drivers in a variety of
circumstances:
- an IP multicast group was host-joined on the bridge itself before any
switchdev port joined the bridge, leading to the host MDB entries
missing in the hardware database.
- during the bridge creation process, the MAC address of the bridge was
added to the FDB as an entry pointing towards the bridge device
itself, but with no switchdev ports being part of the bridge yet, this
local FDB entry would remain unknown to the switchdev hardware
database.
- a VLAN/FDB/MDB was added to a bridge port that is a LAG interface,
before any switchdev port joined that LAG, leading to the hardware
database missing those entries.
- a switchdev port left a LAG that is a bridge port, while the LAG
remained part of the bridge, and all FDB/MDB/VLAN entries remained
installed in the hardware database of the switchdev port.
Also, since commit 0d2cfbd41c4a ("net: bridge: ignore switchdev events
for LAG ports which didn't request replay"), DSA introduced a method,
based on a const void *ctx, to ensure that two switchdev ports under the
same LAG that is a bridge port do not see the same MDB/VLAN entry being
replayed twice by the bridge, once for every bridge port that joins the
LAG.
With so many ordering corner cases being possible, it seems unreasonable
to expect a switchdev driver writer to get it right from the first try.
Therefore, now that DSA has experimented with the bridge replay helpers
for a little bit, we can move the code to the bridge driver where it is
more readily available to all switchdev drivers.
To convert the switchdev object replay helpers from "pull mode" (where
the driver asks for them) to a "push mode" (where the bridge offers them
automatically), the biggest problem is that the bridge needs to be aware
when a switchdev port joins and leaves, even when the switchdev is only
indirectly a bridge port (for example when the bridge port is a LAG
upper of the switchdev).
Luckily, we already have a hook for that, in the form of the newly
introduced switchdev_bridge_port_offload() and
switchdev_bridge_port_unoffload() calls. These offer a natural place for
hooking the object addition and deletion replays.
Extend the above 2 functions with:
- pointers to the switchdev atomic notifier (for FDB replays) and the
blocking notifier (for MDB and VLAN replays).
- the "const void *ctx" argument required for drivers to be able to
disambiguate between which port is targeted, when multiple ports are
lowers of the same LAG that is a bridge port. Most of the drivers pass
NULL to this argument, except the ones that support LAG offload and have
the proper context check already in place in the switchdev blocking
notifier handler.
Also unexport the replay helpers, since nobody except the bridge calls
them directly now.
Note that:
(a) we abuse the terminology slightly, because FDB entries are not
"switchdev objects", but we count them as objects nonetheless.
With no direct way to prove it, I think they are not modeled as
switchdev objects because those can only be installed by the bridge
to the hardware (as opposed to FDB entries which can be propagated
in the other direction too). This is merely an abuse of terms, FDB
entries are replayed too, despite not being objects.
(b) the bridge does not attempt to sync port attributes to newly joined
ports, just the countable stuff (the objects). The reason for this
is simple: no universal and symmetric way to sync and unsync them is
known. For example, VLAN filtering: what to do on unsync, disable or
leave it enabled? Similarly, STP state, ageing timer, etc etc. What
a switchdev port does when it becomes standalone again is not really
up to the bridge's competence, and the driver should deal with it.
On the other hand, replaying deletions of switchdev objects can be
seen as a matter of cleanup and therefore be treated by the bridge,
hence this patch.
We make the replay helpers opt-in for drivers, because they might not
bring immediate benefits for them:
- nbp_vlan_init() is called _after_ netdev_master_upper_dev_link(),
so br_vlan_replay() should not do anything for the new drivers on
which we call it. The existing drivers where there was even a slight
possibility for there to exist a VLAN on a bridge port before they
join it are already guarded against this: mlxsw and prestera deny
joining LAG interfaces that are members of a bridge.
- br_fdb_replay() should now notify of local FDB entries, but I patched
all drivers except DSA to ignore these new entries in commit
2c4eca3ef716 ("net: bridge: switchdev: include local flag in FDB
notifications"). Driver authors can lift this restriction as they
wish, and when they do, they can also opt into the FDB replay
functionality.
- br_mdb_replay() should fix a real issue which is described in commit
4f2673b3a2b6 ("net: bridge: add helper to replay port and host-joined
mdb entries"). However most drivers do not offload the
SWITCHDEV_OBJ_ID_HOST_MDB to see this issue: only cpsw and am65_cpsw
offload this switchdev object, and I don't completely understand the
way in which they offload this switchdev object anyway. So I'll leave
it up to these drivers' respective maintainers to opt into
br_mdb_replay().
So most of the drivers pass NULL notifier blocks for the replay helpers,
except:
- dpaa2-switch which was already acked/regression-tested with the
helpers enabled (and there isn't much of a downside in having them)
- ocelot which already had replay logic in "pull" mode
- DSA which already had replay logic in "pull" mode
An important observation is that the drivers which don't currently
request bridge event replays don't even have the
switchdev_bridge_port_{offload,unoffload} calls placed in proper places
right now. This was done to avoid unnecessary rework for drivers which
might never even add support for this. For driver writers who wish to
add replay support, this can be used as a tentative placement guide:
https://patchwork.kernel.org/project/netdevbpf/patch/20210720134655.892334-11-vladimir.oltean@nxp.com/
Cc: Vadym Kochan <vkochan@marvell.com>
Cc: Taras Chornyi <tchornyi@marvell.com>
Cc: Ioana Ciornei <ioana.ciornei@nxp.com>
Cc: Lars Povlsen <lars.povlsen@microchip.com>
Cc: Steen Hegelund <Steen.Hegelund@microchip.com>
Cc: UNGLinuxDriver@microchip.com
Cc: Claudiu Manoil <claudiu.manoil@nxp.com>
Cc: Alexandre Belloni <alexandre.belloni@bootlin.com>
Cc: Grygorii Strashko <grygorii.strashko@ti.com>
Signed-off-by: Vladimir Oltean <vladimir.oltean@nxp.com>
Acked-by: Ioana Ciornei <ioana.ciornei@nxp.com> # dpaa2-switch
Signed-off-by: David S. Miller <davem@davemloft.net>
2021-07-21 19:24:03 +03:00
|
|
|
switchdev_bridge_port_unoffload(br_port->dev, NULL, NULL, NULL);
|
net: bridge: switchdev: let drivers inform which bridge ports are offloaded
On reception of an skb, the bridge checks if it was marked as 'already
forwarded in hardware' (checks if skb->offload_fwd_mark == 1), and if it
is, it assigns the source hardware domain of that skb based on the
hardware domain of the ingress port. Then during forwarding, it enforces
that the egress port must have a different hardware domain than the
ingress one (this is done in nbp_switchdev_allowed_egress).
Non-switchdev drivers don't report any physical switch id (neither
through devlink nor .ndo_get_port_parent_id), therefore the bridge
assigns them a hardware domain of 0, and packets coming from them will
always have skb->offload_fwd_mark = 0. So there aren't any restrictions.
Problems appear due to the fact that DSA would like to perform software
fallback for bonding and team interfaces that the physical switch cannot
offload.
+-- br0 ---+
/ / | \
/ / | \
/ | | bond0
/ | | / \
swp0 swp1 swp2 swp3 swp4
There, it is desirable that the presence of swp3 and swp4 under a
non-offloaded LAG does not preclude us from doing hardware bridging
between swp0, swp1 and swp2. The bandwidth of the CPU is oftentimes high
enough that software bridging between {swp0,swp1,swp2} and bond0 is not
impractical.
But this creates an impossible paradox given the current way in which
port hardware domains are assigned. When the driver receives a packet
from swp0 (say, due to flooding), it must set skb->offload_fwd_mark to
something.
- If we set it to 0, then the bridge will forward it towards swp1, swp2
and bond0. But the switch has already forwarded it towards swp1 and
swp2 (not to bond0, remember, that isn't offloaded, so as far as the
switch is concerned, ports swp3 and swp4 are not looking up the FDB,
and the entire bond0 is a destination that is strictly behind the
CPU). But we don't want duplicated traffic towards swp1 and swp2, so
it's not ok to set skb->offload_fwd_mark = 0.
- If we set it to 1, then the bridge will not forward the skb towards
the ports with the same switchdev mark, i.e. not to swp1, swp2 and
bond0. Towards swp1 and swp2 that's ok, but towards bond0? It should
have forwarded the skb there.
So the real issue is that bond0 will be assigned the same hardware
domain as {swp0,swp1,swp2}, because the function that assigns hardware
domains to bridge ports, nbp_switchdev_add(), recurses through bond0's
lower interfaces until it finds something that implements devlink (calls
dev_get_port_parent_id with bool recurse = true). This is a problem
because the fact that bond0 can be offloaded by swp3 and swp4 in our
example is merely an assumption.
A solution is to give the bridge explicit hints as to what hardware
domain it should use for each port.
Currently, the bridging offload is very 'silent': a driver registers a
netdevice notifier, which is put on the netns's notifier chain, and
which sniffs around for NETDEV_CHANGEUPPER events where the upper is a
bridge, and the lower is an interface it knows about (one registered by
this driver, normally). Then, from within that notifier, it does a bunch
of stuff behind the bridge's back, without the bridge necessarily
knowing that there's somebody offloading that port. It looks like this:
ip link set swp0 master br0
|
v
br_add_if() calls netdev_master_upper_dev_link()
|
v
call_netdevice_notifiers
|
v
dsa_slave_netdevice_event
|
v
oh, hey! it's for me!
|
v
.port_bridge_join
What we do to solve the conundrum is to be less silent, and change the
switchdev drivers to present themselves to the bridge. Something like this:
ip link set swp0 master br0
|
v
br_add_if() calls netdev_master_upper_dev_link()
|
v bridge: Aye! I'll use this
call_netdevice_notifiers ^ ppid as the
| | hardware domain for
v | this port, and zero
dsa_slave_netdevice_event | if I got nothing.
| |
v |
oh, hey! it's for me! |
| |
v |
.port_bridge_join |
| |
+------------------------+
switchdev_bridge_port_offload(swp0, swp0)
Then stacked interfaces (like bond0 on top of swp3/swp4) would be
treated differently in DSA, depending on whether we can or cannot
offload them.
The offload case:
ip link set bond0 master br0
|
v
br_add_if() calls netdev_master_upper_dev_link()
|
v bridge: Aye! I'll use this
call_netdevice_notifiers ^ ppid as the
| | switchdev mark for
v | bond0.
dsa_slave_netdevice_event | Coincidentally (or not),
| | bond0 and swp0, swp1, swp2
v | all have the same switchdev
hmm, it's not quite for me, | mark now, since the ASIC
but my driver has already | is able to forward towards
called .port_lag_join | all these ports in hw.
for it, because I have |
a port with dp->lag_dev == bond0. |
| |
v |
.port_bridge_join |
for swp3 and swp4 |
| |
+------------------------+
switchdev_bridge_port_offload(bond0, swp3)
switchdev_bridge_port_offload(bond0, swp4)
And the non-offload case:
ip link set bond0 master br0
|
v
br_add_if() calls netdev_master_upper_dev_link()
|
v bridge waiting:
call_netdevice_notifiers ^ huh, switchdev_bridge_port_offload
| | wasn't called, okay, I'll use a
v | hwdom of zero for this one.
dsa_slave_netdevice_event : Then packets received on swp0 will
| : not be software-forwarded towards
v : swp1, but they will towards bond0.
it's not for me, but
bond0 is an upper of swp3
and swp4, but their dp->lag_dev
is NULL because they couldn't
offload it.
Basically we can draw the conclusion that the lowers of a bridge port
can come and go, so depending on the configuration of lowers for a
bridge port, it can dynamically toggle between offloaded and unoffloaded.
Therefore, we need an equivalent switchdev_bridge_port_unoffload too.
This patch changes the way any switchdev driver interacts with the
bridge. From now on, everybody needs to call switchdev_bridge_port_offload
and switchdev_bridge_port_unoffload, otherwise the bridge will treat the
port as non-offloaded and allow software flooding to other ports from
the same ASIC.
Note that these functions lay the ground for a more complex handshake
between switchdev drivers and the bridge in the future.
For drivers that will request a replay of the switchdev objects when
they offload and unoffload a bridge port (DSA, dpaa2-switch, ocelot), we
place the call to switchdev_bridge_port_unoffload() strategically inside
the NETDEV_PRECHANGEUPPER notifier's code path, and not inside
NETDEV_CHANGEUPPER. This is because the switchdev object replay helpers
need the netdev adjacency lists to be valid, and that is only true in
NETDEV_PRECHANGEUPPER.
Cc: Vadym Kochan <vkochan@marvell.com>
Cc: Taras Chornyi <tchornyi@marvell.com>
Cc: Ioana Ciornei <ioana.ciornei@nxp.com>
Cc: Lars Povlsen <lars.povlsen@microchip.com>
Cc: Steen Hegelund <Steen.Hegelund@microchip.com>
Cc: UNGLinuxDriver@microchip.com
Cc: Claudiu Manoil <claudiu.manoil@nxp.com>
Cc: Alexandre Belloni <alexandre.belloni@bootlin.com>
Cc: Grygorii Strashko <grygorii.strashko@ti.com>
Signed-off-by: Vladimir Oltean <vladimir.oltean@nxp.com>
Tested-by: Ioana Ciornei <ioana.ciornei@nxp.com> # dpaa2-switch: regression
Acked-by: Ioana Ciornei <ioana.ciornei@nxp.com> # dpaa2-switch
Tested-by: Horatiu Vultur <horatiu.vultur@microchip.com> # ocelot-switch
Signed-off-by: David S. Miller <davem@davemloft.net>
2021-07-21 19:24:01 +03:00
|
|
|
err_switchdev_offload:
|
2020-09-16 19:31:01 +03:00
|
|
|
prestera_bridge_port_put(br_port);
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Detach a port from a VLAN-aware (802.1Q) bridge.
 *
 * Flushes all FDB entries learned on the port from the hardware and
 * restores the port's PVID to the switch default VLAN (presumably the
 * standalone-port VLAN — confirm against PRESTERA_DEFAULT_VID's definition).
 */
static void prestera_bridge_1q_port_leave(struct prestera_bridge_port *br_port)
{
	struct prestera_port *port = netdev_priv(br_port->dev);

	/* Drop every address learned on this port while it was bridged. */
	prestera_hw_fdb_flush_port(port, PRESTERA_FDB_FLUSH_MODE_ALL);
	prestera_port_pvid_set(port, PRESTERA_DEFAULT_VID);
}
|
|
|
|
|
|
|
|
/* Detach a port from a VLAN-unaware (802.1D) bridge.
 *
 * Flushes all FDB entries learned on the port from the hardware, then
 * removes the port's membership in the hardware bridge instance
 * identified by br_port->bridge->bridge_id.
 */
static void prestera_bridge_1d_port_leave(struct prestera_bridge_port *br_port)
{
	struct prestera_port *port = netdev_priv(br_port->dev);

	/* Drop every address learned on this port while it was bridged. */
	prestera_hw_fdb_flush_port(port, PRESTERA_FDB_FLUSH_MODE_ALL);
	prestera_hw_bridge_port_delete(port, br_port->bridge->bridge_id);
}
|
|
|
|
|
|
|
|
static int prestera_port_vid_stp_set(struct prestera_port *port, u16 vid,
|
|
|
|
u8 state)
|
|
|
|
{
|
|
|
|
u8 hw_state = state;
|
|
|
|
|
|
|
|
switch (state) {
|
|
|
|
case BR_STATE_DISABLED:
|
|
|
|
hw_state = PRESTERA_STP_DISABLED;
|
|
|
|
break;
|
|
|
|
|
|
|
|
case BR_STATE_BLOCKING:
|
|
|
|
case BR_STATE_LISTENING:
|
|
|
|
hw_state = PRESTERA_STP_BLOCK_LISTEN;
|
|
|
|
break;
|
|
|
|
|
|
|
|
case BR_STATE_LEARNING:
|
|
|
|
hw_state = PRESTERA_STP_LEARN;
|
|
|
|
break;
|
|
|
|
|
|
|
|
case BR_STATE_FORWARDING:
|
|
|
|
hw_state = PRESTERA_STP_FORWARD;
|
|
|
|
break;
|
|
|
|
|
|
|
|
default:
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
|
|
|
return prestera_hw_vlan_port_stp_set(port, vid, hw_state);
|
|
|
|
}
|
|
|
|
|
2021-06-10 18:43:10 +03:00
|
|
|
/* Tear down the offload state when a port leaves a bridge.
 *
 * Called from the netdevice notifier path when @port stops being a
 * (possibly indirect) lower of bridge @br_dev. Performs mode-specific
 * hardware cleanup (1Q vs 1D), tells the bridge layer the port is no
 * longer offloaded, flushes MDB state, resets port flags, re-opens STP
 * forwarding on all VIDs and drops the bridge-port reference.
 *
 * Silently returns if the bridge or the bridge port is unknown to us.
 */
void prestera_bridge_port_leave(struct net_device *br_dev,
				struct prestera_port *port)
{
	struct prestera_switchdev *swdev = port->sw->swdev;
	struct prestera_bridge_port *br_port;
	struct prestera_bridge *bridge;

	bridge = prestera_bridge_by_dev(swdev, br_dev);
	if (!bridge)
		return;

	br_port = __prestera_bridge_port_by_dev(bridge, port->dev);
	if (!br_port)
		return;

	bridge = br_port->bridge;

	/* Mode-specific cleanup: FDB flush + PVID restore (1Q) or
	 * FDB flush + HW bridge-port removal (1D).
	 */
	if (bridge->vlan_enabled)
		prestera_bridge_1q_port_leave(br_port);
	else
		prestera_bridge_1d_port_leave(br_port);

	/* Tell the bridge the port is no longer hardware-offloaded so it
	 * falls back to software forwarding for it. NULL ctx/notifiers:
	 * this driver does not request switchdev object replays.
	 */
	switchdev_bridge_port_unoffload(br_port->dev, NULL, NULL, NULL);

	prestera_mdb_flush_bridge_port(br_port);

	prestera_br_port_flags_reset(br_port, port);
	/* Standalone port must forward on every VLAN again. */
	prestera_port_vid_stp_set(port, PRESTERA_VID_ALL, BR_STATE_FORWARDING);
	prestera_bridge_port_put(br_port);
}
|
|
|
|
|
|
|
|
static int prestera_port_attr_br_flags_set(struct prestera_port *port,
|
|
|
|
struct net_device *dev,
|
net: switchdev: pass flags and mask to both {PRE_,}BRIDGE_FLAGS attributes
This switchdev attribute offers a counterproductive API for a driver
writer, because although br_switchdev_set_port_flag gets passed a
"flags" and a "mask", those are passed piecemeal to the driver, so while
the PRE_BRIDGE_FLAGS listener knows what changed because it has the
"mask", the BRIDGE_FLAGS listener doesn't, because it only has the final
value. But certain drivers can offload only certain combinations of
settings, like for example they cannot change unicast flooding
independently of multicast flooding - they must be both on or both off.
The way the information is passed to switchdev makes drivers not
expressive enough, and unable to reject this request ahead of time, in
the PRE_BRIDGE_FLAGS notifier, so they are forced to reject it during
the deferred BRIDGE_FLAGS attribute, where the rejection is currently
ignored.
This patch also changes drivers to make use of the "mask" field for edge
detection when possible.
Signed-off-by: Vladimir Oltean <vladimir.oltean@nxp.com>
Reviewed-by: Grygorii Strashko <grygorii.strashko@ti.com>
Reviewed-by: Florian Fainelli <f.fainelli@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2021-02-12 17:15:55 +02:00
|
|
|
struct switchdev_brport_flags flags)
|
2020-09-16 19:31:01 +03:00
|
|
|
{
|
|
|
|
struct prestera_bridge_port *br_port;
|
|
|
|
|
|
|
|
br_port = prestera_bridge_port_by_dev(port->sw->swdev, dev);
|
|
|
|
if (!br_port)
|
|
|
|
return 0;
|
|
|
|
|
2022-07-11 14:28:19 +03:00
|
|
|
br_port->flags &= ~flags.mask;
|
|
|
|
br_port->flags |= flags.val & flags.mask;
|
|
|
|
return prestera_br_port_flags_set(br_port, port);
|
2020-09-16 19:31:01 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
static int prestera_port_attr_br_ageing_set(struct prestera_port *port,
|
|
|
|
unsigned long ageing_clock_t)
|
|
|
|
{
|
|
|
|
unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock_t);
|
|
|
|
u32 ageing_time_ms = jiffies_to_msecs(ageing_jiffies);
|
|
|
|
struct prestera_switch *sw = port->sw;
|
|
|
|
|
net: switchdev: remove the transaction structure from port attributes
Since the introduction of the switchdev API, port attributes were
transmitted to drivers for offloading using a two-step transactional
model, with a prepare phase that was supposed to catch all errors, and a
commit phase that was supposed to never fail.
Some classes of failures can never be avoided, like hardware access, or
memory allocation. In the latter case, merely attempting to move the
memory allocation to the preparation phase makes it impossible to avoid
memory leaks, since commit 91cf8eceffc1 ("switchdev: Remove unused
transaction item queue") which has removed the unused mechanism of
passing on the allocated memory between one phase and another.
It is time we admit that separating the preparation from the commit
phase is something that is best left for the driver to decide, and not
something that should be baked into the API, especially since there are
no switchdev callers that depend on this.
This patch removes the struct switchdev_trans member from switchdev port
attribute notifier structures, and converts drivers to not look at this
member.
In part, this patch contains a revert of my previous commit 2e554a7a5d8a
("net: dsa: propagate switchdev vlan_filtering prepare phase to
drivers").
For the most part, the conversion was trivial except for:
- Rocker's world implementation based on Broadcom OF-DPA had an odd
implementation of ofdpa_port_attr_bridge_flags_set. The conversion was
done mechanically, by pasting the implementation twice, then only
keeping the code that would get executed during prepare phase on top,
then only keeping the code that gets executed during the commit phase
on bottom, then simplifying the resulting code until this was obtained.
- DSA's offloading of STP state, bridge flags, VLAN filtering and
multicast router could be converted right away. But the ageing time
could not, so a shim was introduced and this was left for a further
commit.
Signed-off-by: Vladimir Oltean <vladimir.oltean@nxp.com>
Acked-by: Linus Walleij <linus.walleij@linaro.org>
Acked-by: Jiri Pirko <jiri@nvidia.com>
Reviewed-by: Kurt Kanzenbach <kurt@linutronix.de> # hellcreek
Reviewed-by: Linus Walleij <linus.walleij@linaro.org> # RTL8366RB
Reviewed-by: Ido Schimmel <idosch@nvidia.com>
Reviewed-by: Florian Fainelli <f.fainelli@gmail.com>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
2021-01-09 02:01:50 +02:00
|
|
|
if (ageing_time_ms < PRESTERA_MIN_AGEING_TIME_MS ||
|
|
|
|
ageing_time_ms > PRESTERA_MAX_AGEING_TIME_MS)
|
|
|
|
return -ERANGE;
|
2020-09-16 19:31:01 +03:00
|
|
|
|
|
|
|
return prestera_hw_switch_ageing_set(sw, ageing_time_ms);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Handle a VLAN-filtering toggle request for a bridge.
 *
 * The hardware bridge mode (1Q vs 1D) is fixed at bridge-join time, so
 * only a no-op request (new value equals current) succeeds; any real
 * change is rejected with -EINVAL and a message.
 */
static int prestera_port_attr_br_vlan_set(struct prestera_port *port,
					  struct net_device *dev,
					  bool vlan_enabled)
{
	struct prestera_switch *sw = port->sw;
	struct prestera_bridge *bridge;

	/* The attribute targets a bridge we already offloaded; not finding
	 * it here indicates a driver-state inconsistency.
	 */
	bridge = prestera_bridge_by_dev(sw->swdev, dev);
	if (WARN_ON(!bridge))
		return -EINVAL;

	if (bridge->vlan_enabled == vlan_enabled)
		return 0;

	/* Intentionally unconditional: reaching this point means the user
	 * asked to flip VLAN filtering on a live bridge, which the device
	 * cannot do.
	 */
	netdev_err(bridge->dev, "VLAN filtering can't be changed for existing bridge\n");

	return -EINVAL;
}
|
|
|
|
|
|
|
|
static int prestera_port_bridge_vlan_stp_set(struct prestera_port *port,
|
|
|
|
struct prestera_bridge_vlan *br_vlan,
|
|
|
|
u8 state)
|
|
|
|
{
|
|
|
|
struct prestera_port_vlan *port_vlan;
|
|
|
|
|
|
|
|
list_for_each_entry(port_vlan, &br_vlan->port_vlan_list, br_vlan_head) {
|
|
|
|
if (port_vlan->port != port)
|
|
|
|
continue;
|
|
|
|
|
|
|
|
return prestera_port_vid_stp_set(port, br_vlan->vid, state);
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2021-01-08 21:06:22 -08:00
|
|
|
/* Apply a SWITCHDEV_ATTR_ID_PORT_STP_STATE change to hardware.
 *
 * For a VLAN-unaware bridge the state is programmed on the internal VID
 * that backs the bridge (bridge_id); for a VLAN-aware bridge it is
 * programmed on every VLAN the port belongs to.
 *
 * On a partial failure in the VLAN-aware path, every VLAN updated so
 * far is rolled back to the previously cached stp_state. Only when all
 * hardware updates succeed is the cached state advanced.
 */
static int prestera_port_attr_stp_state_set(struct prestera_port *port,
					    struct net_device *dev,
					    u8 state)
{
	struct prestera_bridge_port *br_port;
	struct prestera_bridge_vlan *br_vlan;
	int err;
	u16 vid;

	br_port = prestera_bridge_port_by_dev(port->sw->swdev, dev);
	if (!br_port)
		return 0;

	if (!br_port->bridge->vlan_enabled) {
		/* 1D bridge: the bridge_id doubles as the internal VID. */
		vid = br_port->bridge->bridge_id;
		err = prestera_port_vid_stp_set(port, vid, state);
		if (err)
			goto err_port_stp_set;
	} else {
		list_for_each_entry(br_vlan, &br_port->vlan_list, head) {
			err = prestera_port_bridge_vlan_stp_set(port, br_vlan,
								state);
			if (err)
				goto err_port_vlan_stp_set;
		}
	}

	/* Commit the new state only after all HW updates succeeded. */
	br_port->stp_state = state;

	return 0;

err_port_vlan_stp_set:
	/* Walk back over the VLANs already updated and restore the old state.
	 * Rollback errors are intentionally ignored (best effort).
	 */
	list_for_each_entry_continue_reverse(br_vlan, &br_port->vlan_list, head)
		prestera_port_bridge_vlan_stp_set(port, br_vlan, br_port->stp_state);
	return err;

err_port_stp_set:
	/* Best-effort restore of the single-VID state. */
	prestera_port_vid_stp_set(port, vid, br_port->stp_state);

	return err;
}
|
|
|
|
|
2022-07-11 14:28:22 +03:00
|
|
|
/* Propagate the multicast-flood setting of a LAG bridge port to every
 * physical switch port that is a member of that LAG.
 *
 * Returns 0 if the LAG has no switch-port lowers (nothing to do), or
 * the first error from resolving the LAG id / programming a member.
 */
static int
prestera_br_port_lag_mdb_mc_enable_sync(struct prestera_bridge_port *br_port,
					bool enabled)
{
	struct prestera_port *pr_port;
	struct prestera_switch *sw;
	u16 lag_id;
	int err;

	pr_port = prestera_port_dev_lower_find(br_port->dev);
	if (!pr_port)
		return 0;

	sw = pr_port->sw;
	err = prestera_lag_id(sw, br_port->dev, &lag_id);
	if (err)
		return err;

	list_for_each_entry(pr_port, &sw->port_list, list) {
		/* NOTE(review): pr_port->lag is dereferenced for every port
		 * on the switch, including ports that are not in any LAG —
		 * presumably ->lag is always a valid pointer here; confirm
		 * against prestera.h / LAG setup code.
		 */
		if (pr_port->lag->lag_id == lag_id) {
			err = prestera_port_mc_flood_set(pr_port, enabled);
			if (err)
				return err;
		}
	}

	return 0;
}
|
|
|
|
|
|
|
|
/* Re-program unregistered-multicast flooding for every port of a bridge
 * according to its multicast snooping / mrouter state.
 */
static int prestera_br_mdb_mc_enable_sync(struct prestera_bridge *br_dev)
{
	struct prestera_bridge_port *br_port;
	struct prestera_port *port;
	bool enabled;
	int err;

	/* if mrouter exists:
	 *  - make sure every mrouter receives unreg mcast traffic;
	 * if mrouter doesn't exist:
	 *  - make sure every port receives unreg mcast traffic;
	 */
	list_for_each_entry(br_port, &br_dev->port_list, head) {
		if (br_dev->multicast_enabled && br_dev->mrouter_exist)
			enabled = br_port->mrouter;
		else
			enabled = br_port->flags & BR_MCAST_FLOOD;

		/* LAG bridge ports fan the setting out to every member. */
		if (netif_is_lag_master(br_port->dev)) {
			err = prestera_br_port_lag_mdb_mc_enable_sync(br_port,
								      enabled);
			if (err)
				return err;
			continue;
		}

		port = prestera_port_dev_lower_find(br_port->dev);
		if (!port)
			continue;

		err = prestera_port_mc_flood_set(port, enabled);
		if (err)
			return err;
	}

	return 0;
}
|
|
|
|
|
|
|
|
static bool
|
|
|
|
prestera_br_mdb_port_is_member(struct prestera_br_mdb_entry *br_mdb,
|
|
|
|
struct net_device *orig_dev)
|
|
|
|
{
|
|
|
|
struct prestera_br_mdb_port *tmp_port;
|
|
|
|
|
|
|
|
list_for_each_entry(tmp_port, &br_mdb->br_mdb_port_list,
|
|
|
|
br_mdb_port_node)
|
|
|
|
if (tmp_port->br_port->dev == orig_dev)
|
|
|
|
return true;
|
|
|
|
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Add @orig_dev (for @vid) to the flood domain backing an MDB entry,
 * if it is not a member already. Idempotent: an existing member is a
 * successful no-op.
 *
 * NOTE(review): the @addr parameter is not used by this function — the
 * group address is presumably carried by @mdb itself; confirm whether
 * the parameter can be dropped at the callers.
 */
static int
prestera_mdb_port_add(struct prestera_mdb_entry *mdb,
		      struct net_device *orig_dev,
		      const unsigned char addr[ETH_ALEN], u16 vid)
{
	struct prestera_flood_domain *flood_domain = mdb->flood_domain;
	int err;

	if (!prestera_flood_domain_port_find(flood_domain,
					     orig_dev, vid)) {
		err = prestera_flood_domain_port_create(flood_domain, orig_dev,
							vid);
		if (err)
			return err;
	}

	return 0;
}
|
|
|
|
|
|
|
|
/* Sync bridge mdb (software table) with HW table (if MC is enabled).
 *
 * Two passes per MDB entry:
 *  1. every port explicitly added to the entry joins (or leaves) the
 *     group in hardware, depending on mrouter presence;
 *  2. every mrouter port joins every group in its broadcast domain,
 *     and non-mrouter, non-member ports are removed.
 */
static int prestera_br_mdb_sync(struct prestera_bridge *br_dev)
{
	struct prestera_br_mdb_port *br_mdb_port;
	struct prestera_bridge_port *br_port;
	struct prestera_br_mdb_entry *br_mdb;
	struct prestera_mdb_entry *mdb;
	struct prestera_port *pr_port;
	int err = 0;

	/* Nothing to program while snooping is disabled. */
	if (!br_dev->multicast_enabled)
		return 0;

	list_for_each_entry(br_mdb, &br_dev->br_mdb_entry_list,
			    br_mdb_entry_node) {
		mdb = br_mdb->mdb;
		/* Make sure every port that explicitly been added to the mdb
		 * joins the specified group.
		 */
		list_for_each_entry(br_mdb_port, &br_mdb->br_mdb_port_list,
				    br_mdb_port_node) {
			br_port = br_mdb_port->br_port;
			pr_port = prestera_port_dev_lower_find(br_port->dev);

			/* Match only mdb and br_mdb ports that belong to the
			 * same broadcast domain.
			 */
			if (br_dev->vlan_enabled &&
			    !prestera_port_vlan_by_vid(pr_port,
						       mdb->vid))
				continue;

			/* If port is not in MDB or there's no Mrouter
			 * clear HW mdb.
			 */
			if (prestera_br_mdb_port_is_member(br_mdb,
							   br_mdb_port->br_port->dev) &&
			    br_dev->mrouter_exist)
				err = prestera_mdb_port_add(mdb, br_port->dev,
							    mdb->addr,
							    mdb->vid);
			else
				prestera_mdb_port_del(mdb, br_port->dev);

			if (err)
				return err;
		}

		/* Make sure that every mrouter port joins every MC group in
		 * the broadcast domain. If it's not an mrouter - it should
		 * leave.
		 */
		list_for_each_entry(br_port, &br_dev->port_list, head) {
			pr_port = prestera_port_dev_lower_find(br_port->dev);

			/* Make sure an mrouter wouldn't receive traffic from
			 * another broadcast domain (e.g. from a vlan, which
			 * mrouter port is not a member of).
			 */
			if (br_dev->vlan_enabled &&
			    !prestera_port_vlan_by_vid(pr_port,
						       mdb->vid))
				continue;

			if (br_port->mrouter) {
				err = prestera_mdb_port_add(mdb, br_port->dev,
							    mdb->addr,
							    mdb->vid);
				if (err)
					return err;
			} else if (!br_port->mrouter &&
				   !prestera_br_mdb_port_is_member
				   (br_mdb, br_port->dev)) {
				prestera_mdb_port_del(mdb, br_port->dev);
			}
		}
	}

	return 0;
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
prestera_mdb_enable_set(struct prestera_br_mdb_entry *br_mdb, bool enable)
|
|
|
|
{
|
|
|
|
int err;
|
|
|
|
|
|
|
|
if (enable != br_mdb->enabled) {
|
|
|
|
if (enable)
|
|
|
|
err = prestera_hw_mdb_create(br_mdb->mdb);
|
|
|
|
else
|
|
|
|
err = prestera_hw_mdb_destroy(br_mdb->mdb);
|
|
|
|
|
|
|
|
if (err)
|
|
|
|
return err;
|
|
|
|
|
|
|
|
br_mdb->enabled = enable;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
prestera_br_mdb_enable_set(struct prestera_bridge *br_dev, bool enable)
|
|
|
|
{
|
|
|
|
struct prestera_br_mdb_entry *br_mdb;
|
|
|
|
int err;
|
|
|
|
|
|
|
|
list_for_each_entry(br_mdb, &br_dev->br_mdb_entry_list,
|
|
|
|
br_mdb_entry_node) {
|
|
|
|
err = prestera_mdb_enable_set(br_mdb, enable);
|
|
|
|
if (err)
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Handle a SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED toggle.
 *
 * Updates the cached snooping state and resynchronizes the hardware:
 * MDB entries are only active when snooping is enabled AND an mrouter
 * exists; MDB membership and unregistered-multicast flooding are then
 * re-derived. Sync failures are reported via WARN_ON but do not fail
 * the attribute — the deferred switchdev path cannot propagate them
 * usefully.
 */
static int prestera_port_attr_br_mc_disabled_set(struct prestera_port *port,
						 struct net_device *orig_dev,
						 bool mc_disabled)
{
	struct prestera_switch *sw = port->sw;
	struct prestera_bridge *br_dev;

	br_dev = prestera_bridge_find(sw, orig_dev);
	if (!br_dev)
		return 0;

	br_dev->multicast_enabled = !mc_disabled;

	/* There's no point in enabling mdb back if router is missing. */
	WARN_ON(prestera_br_mdb_enable_set(br_dev, br_dev->multicast_enabled &&
					   br_dev->mrouter_exist));

	WARN_ON(prestera_br_mdb_sync(br_dev));

	WARN_ON(prestera_br_mdb_mc_enable_sync(br_dev));

	return 0;
}
|
|
|
|
|
|
|
|
static bool
|
|
|
|
prestera_bridge_mdb_mc_mrouter_exists(struct prestera_bridge *br_dev)
|
|
|
|
{
|
|
|
|
struct prestera_bridge_port *br_port;
|
|
|
|
|
|
|
|
list_for_each_entry(br_port, &br_dev->port_list, head)
|
|
|
|
if (br_port->mrouter)
|
|
|
|
return true;
|
|
|
|
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Handle a SWITCHDEV_ATTR_ID_PORT_MROUTER toggle for a bridge port.
 *
 * Caches the new per-port mrouter flag, recomputes whether the bridge
 * has any mrouter at all, and resynchronizes MDB activation, MDB
 * membership and unregistered-multicast flooding accordingly. Sync
 * failures are surfaced via WARN_ON only; the attribute itself always
 * succeeds once the port is known.
 */
static int
prestera_port_attr_mrouter_set(struct prestera_port *port,
			       struct net_device *orig_dev,
			       bool is_port_mrouter)
{
	struct prestera_bridge_port *br_port;
	struct prestera_bridge *br_dev;

	br_port = prestera_bridge_port_find(port->sw, orig_dev);
	if (!br_port)
		return 0;

	br_dev = br_port->bridge;
	br_port->mrouter = is_port_mrouter;

	br_dev->mrouter_exist = prestera_bridge_mdb_mc_mrouter_exists(br_dev);

	/* Enable MDB processing if both mrouter exists and mc is enabled.
	 * In case if MC enabled, but there is no mrouter, device would flood
	 * all multicast traffic (even if MDB table is not empty) with the use
	 * of bridge's flood capabilities (without the use of flood_domain).
	 */
	WARN_ON(prestera_br_mdb_enable_set(br_dev, br_dev->multicast_enabled &&
					   br_dev->mrouter_exist));

	WARN_ON(prestera_br_mdb_sync(br_dev));

	WARN_ON(prestera_br_mdb_mc_enable_sync(br_dev));

	return 0;
}
|
|
|
|
|
2021-06-27 14:54:24 +03:00
|
|
|
static int prestera_port_obj_attr_set(struct net_device *dev, const void *ctx,
|
2021-02-12 17:15:51 +02:00
|
|
|
const struct switchdev_attr *attr,
|
|
|
|
struct netlink_ext_ack *extack)
|
2020-09-16 19:31:01 +03:00
|
|
|
{
|
|
|
|
struct prestera_port *port = netdev_priv(dev);
|
|
|
|
int err = 0;
|
|
|
|
|
|
|
|
switch (attr->id) {
|
|
|
|
case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
|
2021-01-08 21:06:22 -08:00
|
|
|
err = prestera_port_attr_stp_state_set(port, attr->orig_dev,
|
|
|
|
attr->u.stp_state);
|
2020-09-16 19:31:01 +03:00
|
|
|
break;
|
|
|
|
case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS:
|
net: switchdev: pass flags and mask to both {PRE_,}BRIDGE_FLAGS attributes
This switchdev attribute offers a counterproductive API for a driver
writer, because although br_switchdev_set_port_flag gets passed a
"flags" and a "mask", those are passed piecemeal to the driver, so while
the PRE_BRIDGE_FLAGS listener knows what changed because it has the
"mask", the BRIDGE_FLAGS listener doesn't, because it only has the final
value. But certain drivers can offload only certain combinations of
settings, like for example they cannot change unicast flooding
independently of multicast flooding - they must be both on or both off.
The way the information is passed to switchdev makes drivers not
expressive enough, and unable to reject this request ahead of time, in
the PRE_BRIDGE_FLAGS notifier, so they are forced to reject it during
the deferred BRIDGE_FLAGS attribute, where the rejection is currently
ignored.
This patch also changes drivers to make use of the "mask" field for edge
detection when possible.
Signed-off-by: Vladimir Oltean <vladimir.oltean@nxp.com>
Reviewed-by: Grygorii Strashko <grygorii.strashko@ti.com>
Reviewed-by: Florian Fainelli <f.fainelli@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2021-02-12 17:15:55 +02:00
|
|
|
if (attr->u.brport_flags.mask &
|
2022-08-22 21:03:15 +03:00
|
|
|
~(BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD | BR_PORT_LOCKED))
|
2020-09-16 19:31:01 +03:00
|
|
|
err = -EINVAL;
|
|
|
|
break;
|
|
|
|
case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
|
net: switchdev: remove the transaction structure from port attributes
Since the introduction of the switchdev API, port attributes were
transmitted to drivers for offloading using a two-step transactional
model, with a prepare phase that was supposed to catch all errors, and a
commit phase that was supposed to never fail.
Some classes of failures can never be avoided, like hardware access, or
memory allocation. In the latter case, merely attempting to move the
memory allocation to the preparation phase makes it impossible to avoid
memory leaks, since commit 91cf8eceffc1 ("switchdev: Remove unused
transaction item queue") which has removed the unused mechanism of
passing on the allocated memory between one phase and another.
It is time we admit that separating the preparation from the commit
phase is something that is best left for the driver to decide, and not
something that should be baked into the API, especially since there are
no switchdev callers that depend on this.
This patch removes the struct switchdev_trans member from switchdev port
attribute notifier structures, and converts drivers to not look at this
member.
In part, this patch contains a revert of my previous commit 2e554a7a5d8a
("net: dsa: propagate switchdev vlan_filtering prepare phase to
drivers").
For the most part, the conversion was trivial except for:
- Rocker's world implementation based on Broadcom OF-DPA had an odd
implementation of ofdpa_port_attr_bridge_flags_set. The conversion was
done mechanically, by pasting the implementation twice, then only
keeping the code that would get executed during prepare phase on top,
then only keeping the code that gets executed during the commit phase
on bottom, then simplifying the resulting code until this was obtained.
- DSA's offloading of STP state, bridge flags, VLAN filtering and
multicast router could be converted right away. But the ageing time
could not, so a shim was introduced and this was left for a further
commit.
Signed-off-by: Vladimir Oltean <vladimir.oltean@nxp.com>
Acked-by: Linus Walleij <linus.walleij@linaro.org>
Acked-by: Jiri Pirko <jiri@nvidia.com>
Reviewed-by: Kurt Kanzenbach <kurt@linutronix.de> # hellcreek
Reviewed-by: Linus Walleij <linus.walleij@linaro.org> # RTL8366RB
Reviewed-by: Ido Schimmel <idosch@nvidia.com>
Reviewed-by: Florian Fainelli <f.fainelli@gmail.com>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
2021-01-09 02:01:50 +02:00
|
|
|
err = prestera_port_attr_br_flags_set(port, attr->orig_dev,
|
2020-09-16 19:31:01 +03:00
|
|
|
attr->u.brport_flags);
|
|
|
|
break;
|
|
|
|
case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
|
net: switchdev: remove the transaction structure from port attributes
Since the introduction of the switchdev API, port attributes were
transmitted to drivers for offloading using a two-step transactional
model, with a prepare phase that was supposed to catch all errors, and a
commit phase that was supposed to never fail.
Some classes of failures can never be avoided, like hardware access, or
memory allocation. In the latter case, merely attempting to move the
memory allocation to the preparation phase makes it impossible to avoid
memory leaks, since commit 91cf8eceffc1 ("switchdev: Remove unused
transaction item queue") which has removed the unused mechanism of
passing on the allocated memory between one phase and another.
It is time we admit that separating the preparation from the commit
phase is something that is best left for the driver to decide, and not
something that should be baked into the API, especially since there are
no switchdev callers that depend on this.
This patch removes the struct switchdev_trans member from switchdev port
attribute notifier structures, and converts drivers to not look at this
member.
In part, this patch contains a revert of my previous commit 2e554a7a5d8a
("net: dsa: propagate switchdev vlan_filtering prepare phase to
drivers").
For the most part, the conversion was trivial except for:
- Rocker's world implementation based on Broadcom OF-DPA had an odd
implementation of ofdpa_port_attr_bridge_flags_set. The conversion was
done mechanically, by pasting the implementation twice, then only
keeping the code that would get executed during prepare phase on top,
then only keeping the code that gets executed during the commit phase
on bottom, then simplifying the resulting code until this was obtained.
- DSA's offloading of STP state, bridge flags, VLAN filtering and
multicast router could be converted right away. But the ageing time
could not, so a shim was introduced and this was left for a further
commit.
Signed-off-by: Vladimir Oltean <vladimir.oltean@nxp.com>
Acked-by: Linus Walleij <linus.walleij@linaro.org>
Acked-by: Jiri Pirko <jiri@nvidia.com>
Reviewed-by: Kurt Kanzenbach <kurt@linutronix.de> # hellcreek
Reviewed-by: Linus Walleij <linus.walleij@linaro.org> # RTL8366RB
Reviewed-by: Ido Schimmel <idosch@nvidia.com>
Reviewed-by: Florian Fainelli <f.fainelli@gmail.com>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
2021-01-09 02:01:50 +02:00
|
|
|
err = prestera_port_attr_br_ageing_set(port,
|
2020-09-16 19:31:01 +03:00
|
|
|
attr->u.ageing_time);
|
|
|
|
break;
|
|
|
|
case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
|
net: switchdev: remove the transaction structure from port attributes
Since the introduction of the switchdev API, port attributes were
transmitted to drivers for offloading using a two-step transactional
model, with a prepare phase that was supposed to catch all errors, and a
commit phase that was supposed to never fail.
Some classes of failures can never be avoided, like hardware access, or
memory allocation. In the latter case, merely attempting to move the
memory allocation to the preparation phase makes it impossible to avoid
memory leaks, since commit 91cf8eceffc1 ("switchdev: Remove unused
transaction item queue") which has removed the unused mechanism of
passing on the allocated memory between one phase and another.
It is time we admit that separating the preparation from the commit
phase is something that is best left for the driver to decide, and not
something that should be baked into the API, especially since there are
no switchdev callers that depend on this.
This patch removes the struct switchdev_trans member from switchdev port
attribute notifier structures, and converts drivers to not look at this
member.
In part, this patch contains a revert of my previous commit 2e554a7a5d8a
("net: dsa: propagate switchdev vlan_filtering prepare phase to
drivers").
For the most part, the conversion was trivial except for:
- Rocker's world implementation based on Broadcom OF-DPA had an odd
implementation of ofdpa_port_attr_bridge_flags_set. The conversion was
done mechanically, by pasting the implementation twice, then only
keeping the code that would get executed during prepare phase on top,
then only keeping the code that gets executed during the commit phase
on bottom, then simplifying the resulting code until this was obtained.
- DSA's offloading of STP state, bridge flags, VLAN filtering and
multicast router could be converted right away. But the ageing time
could not, so a shim was introduced and this was left for a further
commit.
Signed-off-by: Vladimir Oltean <vladimir.oltean@nxp.com>
Acked-by: Linus Walleij <linus.walleij@linaro.org>
Acked-by: Jiri Pirko <jiri@nvidia.com>
Reviewed-by: Kurt Kanzenbach <kurt@linutronix.de> # hellcreek
Reviewed-by: Linus Walleij <linus.walleij@linaro.org> # RTL8366RB
Reviewed-by: Ido Schimmel <idosch@nvidia.com>
Reviewed-by: Florian Fainelli <f.fainelli@gmail.com>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
2021-01-09 02:01:50 +02:00
|
|
|
err = prestera_port_attr_br_vlan_set(port, attr->orig_dev,
|
2020-09-16 19:31:01 +03:00
|
|
|
attr->u.vlan_filtering);
|
|
|
|
break;
|
2022-07-11 14:28:22 +03:00
|
|
|
case SWITCHDEV_ATTR_ID_PORT_MROUTER:
|
|
|
|
err = prestera_port_attr_mrouter_set(port, attr->orig_dev,
|
|
|
|
attr->u.mrouter);
|
|
|
|
break;
|
|
|
|
case SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED:
|
|
|
|
err = prestera_port_attr_br_mc_disabled_set(port, attr->orig_dev,
|
|
|
|
attr->u.mc_disabled);
|
|
|
|
break;
|
2020-09-16 19:31:01 +03:00
|
|
|
default:
|
|
|
|
err = -EOPNOTSUPP;
|
|
|
|
}
|
|
|
|
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
prestera_fdb_offload_notify(struct prestera_port *port,
|
|
|
|
struct switchdev_notifier_fdb_info *info)
|
|
|
|
{
|
2021-08-10 14:50:24 +03:00
|
|
|
struct switchdev_notifier_fdb_info send_info = {};
|
2020-09-16 19:31:01 +03:00
|
|
|
|
|
|
|
send_info.addr = info->addr;
|
|
|
|
send_info.vid = info->vid;
|
|
|
|
send_info.offloaded = true;
|
|
|
|
|
|
|
|
call_switchdev_notifiers(SWITCHDEV_FDB_OFFLOADED, port->dev,
|
|
|
|
&send_info.info, NULL);
|
|
|
|
}
|
|
|
|
|
|
|
|
static int prestera_port_fdb_set(struct prestera_port *port,
|
|
|
|
struct switchdev_notifier_fdb_info *fdb_info,
|
|
|
|
bool adding)
|
|
|
|
{
|
|
|
|
struct prestera_switch *sw = port->sw;
|
|
|
|
struct prestera_bridge_port *br_port;
|
|
|
|
struct prestera_bridge *bridge;
|
|
|
|
int err;
|
|
|
|
u16 vid;
|
|
|
|
|
|
|
|
br_port = prestera_bridge_port_by_dev(sw->swdev, port->dev);
|
|
|
|
if (!br_port)
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
bridge = br_port->bridge;
|
|
|
|
|
|
|
|
if (bridge->vlan_enabled)
|
|
|
|
vid = fdb_info->vid;
|
|
|
|
else
|
|
|
|
vid = bridge->bridge_id;
|
|
|
|
|
|
|
|
if (adding)
|
2021-06-10 18:43:11 +03:00
|
|
|
err = prestera_fdb_add(port, fdb_info->addr, vid, false);
|
2020-09-16 19:31:01 +03:00
|
|
|
else
|
2021-06-10 18:43:11 +03:00
|
|
|
err = prestera_fdb_del(port, fdb_info->addr, vid);
|
2020-09-16 19:31:01 +03:00
|
|
|
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void prestera_fdb_event_work(struct work_struct *work)
|
|
|
|
{
|
|
|
|
struct switchdev_notifier_fdb_info *fdb_info;
|
|
|
|
struct prestera_fdb_event_work *swdev_work;
|
|
|
|
struct prestera_port *port;
|
|
|
|
struct net_device *dev;
|
|
|
|
int err;
|
|
|
|
|
|
|
|
swdev_work = container_of(work, struct prestera_fdb_event_work, work);
|
|
|
|
dev = swdev_work->dev;
|
|
|
|
|
|
|
|
rtnl_lock();
|
|
|
|
|
|
|
|
port = prestera_port_dev_lower_find(dev);
|
|
|
|
if (!port)
|
|
|
|
goto out_unlock;
|
|
|
|
|
|
|
|
switch (swdev_work->event) {
|
|
|
|
case SWITCHDEV_FDB_ADD_TO_DEVICE:
|
|
|
|
fdb_info = &swdev_work->fdb_info;
|
2021-04-14 19:52:56 +03:00
|
|
|
if (!fdb_info->added_by_user || fdb_info->is_local)
|
2020-09-16 19:31:01 +03:00
|
|
|
break;
|
|
|
|
|
|
|
|
err = prestera_port_fdb_set(port, fdb_info, true);
|
|
|
|
if (err)
|
|
|
|
break;
|
|
|
|
|
|
|
|
prestera_fdb_offload_notify(port, fdb_info);
|
|
|
|
break;
|
|
|
|
|
|
|
|
case SWITCHDEV_FDB_DEL_TO_DEVICE:
|
|
|
|
fdb_info = &swdev_work->fdb_info;
|
|
|
|
prestera_port_fdb_set(port, fdb_info, false);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
out_unlock:
|
|
|
|
rtnl_unlock();
|
|
|
|
|
|
|
|
kfree(swdev_work->fdb_info.addr);
|
|
|
|
kfree(swdev_work);
|
|
|
|
dev_put(dev);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Atomic switchdev notifier: PORT_ATTR_SET is handled synchronously;
 * FDB add/del events are copied and deferred to process context via
 * swdev_wq, since FDB programming needs rtnl and may sleep.
 */
static int prestera_switchdev_event(struct notifier_block *unused,
				    unsigned long event, void *ptr)
{
	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
	struct switchdev_notifier_fdb_info *fdb_info;
	struct switchdev_notifier_info *info = ptr;
	struct prestera_fdb_event_work *swdev_work;
	struct net_device *upper;
	int err;

	if (event == SWITCHDEV_PORT_ATTR_SET) {
		err = switchdev_handle_port_attr_set(dev, ptr,
						     prestera_netdev_check,
						     prestera_port_obj_attr_set);
		return notifier_from_errno(err);
	}

	if (!prestera_netdev_check(dev))
		return NOTIFY_DONE;

	/* Only care about ports enslaved to a bridge. */
	upper = netdev_master_upper_dev_get_rcu(dev);
	if (!upper)
		return NOTIFY_DONE;

	if (!netif_is_bridge_master(upper))
		return NOTIFY_DONE;

	/* Atomic context: GFP_ATOMIC for all allocations here. */
	swdev_work = kzalloc(sizeof(*swdev_work), GFP_ATOMIC);
	if (!swdev_work)
		return NOTIFY_BAD;

	swdev_work->event = event;
	swdev_work->dev = dev;

	switch (event) {
	case SWITCHDEV_FDB_ADD_TO_DEVICE:
	case SWITCHDEV_FDB_DEL_TO_DEVICE:
		fdb_info = container_of(info,
					struct switchdev_notifier_fdb_info,
					info);

		INIT_WORK(&swdev_work->work, prestera_fdb_event_work);
		memcpy(&swdev_work->fdb_info, ptr,
		       sizeof(swdev_work->fdb_info));

		/* The notifier's addr pointer is not valid after return;
		 * take a private copy for the deferred worker.
		 */
		swdev_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC);
		if (!swdev_work->fdb_info.addr)
			goto out_bad;

		ether_addr_copy((u8 *)swdev_work->fdb_info.addr,
				fdb_info->addr);
		/* Hold the netdev until the worker runs; released there. */
		dev_hold(dev);
		break;

	default:
		kfree(swdev_work);
		return NOTIFY_DONE;
	}

	queue_work(swdev_wq, &swdev_work->work);
	return NOTIFY_DONE;

out_bad:
	kfree(swdev_work);
	return NOTIFY_BAD;
}
|
|
|
|
|
|
|
|
/* Attach a port VLAN to a bridge port: program port flags and per-VID STP
 * state in HW, then link the port VLAN into the bridge VLAN. On failure
 * each HW step is rolled back in reverse order.
 */
static int
prestera_port_vlan_bridge_join(struct prestera_port_vlan *port_vlan,
			       struct prestera_bridge_port *br_port)
{
	struct prestera_port *port = port_vlan->port;
	struct prestera_bridge_vlan *br_vlan;
	u16 vid = port_vlan->vid;
	int err;

	/* Already joined to a bridge port — nothing to do. */
	if (port_vlan->br_port)
		return 0;

	err = prestera_br_port_flags_set(br_port, port);
	if (err)
		goto err_flags2port_set;

	err = prestera_port_vid_stp_set(port, vid, br_port->stp_state);
	if (err)
		goto err_port_vid_stp_set;

	/* Reuse the bridge VLAN if present, otherwise create it. */
	br_vlan = prestera_bridge_vlan_by_vid(br_port, vid);
	if (!br_vlan) {
		br_vlan = prestera_bridge_vlan_create(br_port, vid);
		if (!br_vlan) {
			err = -ENOMEM;
			goto err_bridge_vlan_get;
		}
	}

	list_add(&port_vlan->br_vlan_head, &br_vlan->port_vlan_list);

	/* Take a bridge-port reference for the lifetime of this join. */
	prestera_bridge_port_get(br_port);
	port_vlan->br_port = br_port;

	return 0;

err_bridge_vlan_get:
	/* Restore the default STP state set before this join attempt. */
	prestera_port_vid_stp_set(port, vid, BR_STATE_FORWARDING);
err_port_vid_stp_set:
	prestera_br_port_flags_reset(br_port, port);
err_flags2port_set:
	return err;
}
|
|
|
|
|
|
|
|
/* Add (or refresh) a VLAN on a bridge port: ensure the port VLAN exists,
 * update the port's PVID and join the VLAN to the bridge port, unwinding
 * each step on failure.
 */
static int
prestera_bridge_port_vlan_add(struct prestera_port *port,
			      struct prestera_bridge_port *br_port,
			      u16 vid, bool is_untagged, bool is_pvid,
			      struct netlink_ext_ack *extack)
{
	struct prestera_port_vlan *port_vlan;
	u16 old_pvid = port->pvid;
	u16 pvid;
	int err;

	/* New PVID: the added vid if requested; otherwise keep the current
	 * one, or clear it if this vid was the PVID and loses the flag.
	 */
	if (is_pvid)
		pvid = vid;
	else
		pvid = port->pvid == vid ? 0 : port->pvid;

	port_vlan = prestera_port_vlan_by_vid(port, vid);
	if (port_vlan && port_vlan->br_port != br_port)
		return -EEXIST;

	if (!port_vlan) {
		port_vlan = prestera_port_vlan_create(port, vid, is_untagged);
		if (IS_ERR(port_vlan))
			return PTR_ERR(port_vlan);
	} else {
		/* Existing VLAN: only refresh the untagged setting in HW. */
		err = prestera_hw_vlan_port_set(port, vid, true, is_untagged);
		if (err)
			goto err_port_vlan_set;
	}

	err = prestera_port_pvid_set(port, pvid);
	if (err)
		goto err_port_pvid_set;

	err = prestera_port_vlan_bridge_join(port_vlan, br_port);
	if (err)
		goto err_port_vlan_bridge_join;

	return 0;

err_port_vlan_bridge_join:
	prestera_port_pvid_set(port, old_pvid);
err_port_pvid_set:
	prestera_hw_vlan_port_set(port, vid, false, false);
err_port_vlan_set:
	/* NOTE(review): this unwind also destroys a port_vlan that existed
	 * before this call (the "refresh" path above) — confirm that is the
	 * intended semantics rather than restoring its previous state.
	 */
	prestera_port_vlan_destroy(port_vlan);

	return err;
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
prestera_bridge_port_vlan_del(struct prestera_port *port,
|
|
|
|
struct prestera_bridge_port *br_port, u16 vid)
|
|
|
|
{
|
|
|
|
u16 pvid = port->pvid == vid ? 0 : port->pvid;
|
|
|
|
struct prestera_port_vlan *port_vlan;
|
|
|
|
|
|
|
|
port_vlan = prestera_port_vlan_by_vid(port, vid);
|
|
|
|
if (WARN_ON(!port_vlan))
|
|
|
|
return;
|
|
|
|
|
|
|
|
prestera_port_vlan_bridge_leave(port_vlan);
|
|
|
|
prestera_port_pvid_set(port, pvid);
|
|
|
|
prestera_port_vlan_destroy(port_vlan);
|
|
|
|
}
|
|
|
|
|
|
|
|
static int prestera_port_vlans_add(struct prestera_port *port,
|
|
|
|
const struct switchdev_obj_port_vlan *vlan,
|
|
|
|
struct netlink_ext_ack *extack)
|
|
|
|
{
|
|
|
|
bool flag_untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
|
|
|
|
bool flag_pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
|
2021-06-10 18:43:11 +03:00
|
|
|
struct net_device *orig_dev = vlan->obj.orig_dev;
|
2020-09-16 19:31:01 +03:00
|
|
|
struct prestera_bridge_port *br_port;
|
|
|
|
struct prestera_switch *sw = port->sw;
|
|
|
|
struct prestera_bridge *bridge;
|
|
|
|
|
2021-06-10 18:43:11 +03:00
|
|
|
if (netif_is_bridge_master(orig_dev))
|
2020-09-16 19:31:01 +03:00
|
|
|
return 0;
|
|
|
|
|
2021-06-10 18:43:11 +03:00
|
|
|
br_port = prestera_bridge_port_by_dev(sw->swdev, port->dev);
|
2020-09-16 19:31:01 +03:00
|
|
|
if (WARN_ON(!br_port))
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
bridge = br_port->bridge;
|
|
|
|
if (!bridge->vlan_enabled)
|
|
|
|
return 0;
|
|
|
|
|
net: switchdev: remove vid_begin -> vid_end range from VLAN objects
The call path of a switchdev VLAN addition to the bridge looks something
like this today:
nbp_vlan_init
| __br_vlan_set_default_pvid
| | |
| | br_afspec |
| | | |
| | v |
| | br_process_vlan_info |
| | | |
| | v |
| | br_vlan_info |
| | / \ /
| | / \ /
| | / \ /
| | / \ /
v v v v v
nbp_vlan_add br_vlan_add ------+
| ^ ^ | |
| / | | |
| / / / |
\ br_vlan_get_master/ / v
\ ^ / / br_vlan_add_existing
\ | / / |
\ | / / /
\ | / / /
\ | / / /
\ | / / /
v | | v /
__vlan_add /
/ | /
/ | /
v | /
__vlan_vid_add | /
\ | /
v v v
br_switchdev_port_vlan_add
The ranges UAPI was introduced to the bridge in commit bdced7ef7838
("bridge: support for multiple vlans and vlan ranges in setlink and
dellink requests") (Jan 10 2015). But the VLAN ranges (parsed in br_afspec)
have always been passed one by one, through struct bridge_vlan_info
tmp_vinfo, to br_vlan_info. So the range never went too far in depth.
Then Scott Feldman introduced the switchdev_port_bridge_setlink function
in commit 47f8328bb1a4 ("switchdev: add new switchdev bridge setlink").
That marked the introduction of the SWITCHDEV_OBJ_PORT_VLAN, which made
full use of the range. But switchdev_port_bridge_setlink was called like
this:
br_setlink
-> br_afspec
-> switchdev_port_bridge_setlink
Basically, the switchdev and the bridge code were not tightly integrated.
Then commit 41c498b9359e ("bridge: restore br_setlink back to original")
came, and switchdev drivers were required to implement
.ndo_bridge_setlink = switchdev_port_bridge_setlink for a while.
In the meantime, commits such as 0944d6b5a2fa ("bridge: try switchdev op
first in __vlan_vid_add/del") finally made switchdev penetrate the
br_vlan_info() barrier and start to develop the call path we have today.
But remember, br_vlan_info() still receives VLANs one by one.
Then Arkadi Sharshevsky refactored the switchdev API in 2017 in commit
29ab586c3d83 ("net: switchdev: Remove bridge bypass support from
switchdev") so that drivers would not implement .ndo_bridge_setlink any
longer. The switchdev_port_bridge_setlink also got deleted.
This refactoring removed the parallel bridge_setlink implementation from
switchdev, and left the only switchdev VLAN objects to be the ones
offloaded from __vlan_vid_add (basically RX filtering) and __vlan_add
(the latter coming from commit 9c86ce2c1ae3 ("net: bridge: Notify about
bridge VLANs")).
That is to say, today the switchdev VLAN object ranges are not used in
the kernel. Refactoring the above call path is a bit complicated, when
the bridge VLAN call path is already a bit complicated.
Let's go off and finish the job of commit 29ab586c3d83 by deleting the
bogus iteration through the VLAN ranges from the drivers. Some aspects
of this feature never made too much sense in the first place. For
example, what is a range of VLANs all having the BRIDGE_VLAN_INFO_PVID
flag supposed to mean, when a port can obviously have a single pvid?
This particular configuration _is_ denied as of commit 6623c60dc28e
("bridge: vlan: enforce no pvid flag in vlan ranges"), but from an API
perspective, the driver still has to play pretend, and only offload the
vlan->vid_end as pvid. And the addition of a switchdev VLAN object can
modify the flags of another, completely unrelated, switchdev VLAN
object! (a VLAN that is PVID will invalidate the PVID flag from whatever
other VLAN had previously been offloaded with switchdev and had that
flag. Yet switchdev never notifies about that change, drivers are
supposed to guess).
Nonetheless, having a VLAN range in the API makes error handling look
scarier than it really is - unwinding on errors and all of that.
When in reality, no one really calls this API with more than one VLAN.
It is all unnecessary complexity.
And despite appearing pretentious (two-phase transactional model and
all), the switchdev API is really sloppy because the VLAN addition and
removal operations are not paired with one another (you can add a VLAN
100 times and delete it just once). The bridge notifies through
switchdev of a VLAN addition not only when the flags of an existing VLAN
change, but also when nothing changes. There are switchdev drivers out
there who don't like adding a VLAN that has already been added, and
those checks don't really belong at driver level. But the fact that the
API contains ranges is yet another factor that prevents this from being
addressed in the future.
Of the existing switchdev pieces of hardware, it appears that only
Mellanox Spectrum supports offloading more than one VLAN at a time,
through mlxsw_sp_port_vlan_set. I have kept that code internal to the
driver, because there is some more bookkeeping that makes use of it, but
I deleted it from the switchdev API. But since the switchdev support for
ranges has already been de facto deleted by a Mellanox employee and
nobody noticed for 4 years, I'm going to assume it's not a biggie.
Signed-off-by: Vladimir Oltean <vladimir.oltean@nxp.com>
Reviewed-by: Ido Schimmel <idosch@nvidia.com> # switchdev and mlxsw
Reviewed-by: Florian Fainelli <f.fainelli@gmail.com>
Reviewed-by: Kurt Kanzenbach <kurt@linutronix.de> # hellcreek
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
2021-01-09 02:01:46 +02:00
|
|
|
return prestera_bridge_port_vlan_add(port, br_port,
|
2021-01-14 10:35:56 +02:00
|
|
|
vlan->vid, flag_untagged,
|
net: switchdev: remove vid_begin -> vid_end range from VLAN objects
The call path of a switchdev VLAN addition to the bridge looks something
like this today:
nbp_vlan_init
| __br_vlan_set_default_pvid
| | |
| | br_afspec |
| | | |
| | v |
| | br_process_vlan_info |
| | | |
| | v |
| | br_vlan_info |
| | / \ /
| | / \ /
| | / \ /
| | / \ /
v v v v v
nbp_vlan_add br_vlan_add ------+
| ^ ^ | |
| / | | |
| / / / |
\ br_vlan_get_master/ / v
\ ^ / / br_vlan_add_existing
\ | / / |
\ | / / /
\ | / / /
\ | / / /
\ | / / /
v | | v /
__vlan_add /
/ | /
/ | /
v | /
__vlan_vid_add | /
\ | /
v v v
br_switchdev_port_vlan_add
The ranges UAPI was introduced to the bridge in commit bdced7ef7838
("bridge: support for multiple vlans and vlan ranges in setlink and
dellink requests") (Jan 10 2015). But the VLAN ranges (parsed in br_afspec)
have always been passed one by one, through struct bridge_vlan_info
tmp_vinfo, to br_vlan_info. So the range never went too far in depth.
Then Scott Feldman introduced the switchdev_port_bridge_setlink function
in commit 47f8328bb1a4 ("switchdev: add new switchdev bridge setlink").
That marked the introduction of the SWITCHDEV_OBJ_PORT_VLAN, which made
full use of the range. But switchdev_port_bridge_setlink was called like
this:
br_setlink
-> br_afspec
-> switchdev_port_bridge_setlink
Basically, the switchdev and the bridge code were not tightly integrated.
Then commit 41c498b9359e ("bridge: restore br_setlink back to original")
came, and switchdev drivers were required to implement
.ndo_bridge_setlink = switchdev_port_bridge_setlink for a while.
In the meantime, commits such as 0944d6b5a2fa ("bridge: try switchdev op
first in __vlan_vid_add/del") finally made switchdev penetrate the
br_vlan_info() barrier and start to develop the call path we have today.
But remember, br_vlan_info() still receives VLANs one by one.
Then Arkadi Sharshevsky refactored the switchdev API in 2017 in commit
29ab586c3d83 ("net: switchdev: Remove bridge bypass support from
switchdev") so that drivers would not implement .ndo_bridge_setlink any
longer. The switchdev_port_bridge_setlink also got deleted.
This refactoring removed the parallel bridge_setlink implementation from
switchdev, and left the only switchdev VLAN objects to be the ones
offloaded from __vlan_vid_add (basically RX filtering) and __vlan_add
(the latter coming from commit 9c86ce2c1ae3 ("net: bridge: Notify about
bridge VLANs")).
That is to say, today the switchdev VLAN object ranges are not used in
the kernel. Refactoring the above call path is a bit complicated, when
the bridge VLAN call path is already a bit complicated.
Let's go off and finish the job of commit 29ab586c3d83 by deleting the
bogus iteration through the VLAN ranges from the drivers. Some aspects
of this feature never made too much sense in the first place. For
example, what is a range of VLANs all having the BRIDGE_VLAN_INFO_PVID
flag supposed to mean, when a port can obviously have a single pvid?
This particular configuration _is_ denied as of commit 6623c60dc28e
("bridge: vlan: enforce no pvid flag in vlan ranges"), but from an API
perspective, the driver still has to play pretend, and only offload the
vlan->vid_end as pvid. And the addition of a switchdev VLAN object can
modify the flags of another, completely unrelated, switchdev VLAN
object! (a VLAN that is PVID will invalidate the PVID flag from whatever
other VLAN had previously been offloaded with switchdev and had that
flag. Yet switchdev never notifies about that change, drivers are
supposed to guess).
Nonetheless, having a VLAN range in the API makes error handling look
scarier than it really is - unwinding on errors and all of that.
When in reality, no one really calls this API with more than one VLAN.
It is all unnecessary complexity.
And despite appearing pretentious (two-phase transactional model and
all), the switchdev API is really sloppy because the VLAN addition and
removal operations are not paired with one another (you can add a VLAN
100 times and delete it just once). The bridge notifies through
switchdev of a VLAN addition not only when the flags of an existing VLAN
change, but also when nothing changes. There are switchdev drivers out
there who don't like adding a VLAN that has already been added, and
those checks don't really belong at driver level. But the fact that the
API contains ranges is yet another factor that prevents this from being
addressed in the future.
Of the existing switchdev pieces of hardware, it appears that only
Mellanox Spectrum supports offloading more than one VLAN at a time,
through mlxsw_sp_port_vlan_set. I have kept that code internal to the
driver, because there is some more bookkeeping that makes use of it, but
I deleted it from the switchdev API. But since the switchdev support for
ranges has already been de facto deleted by a Mellanox employee and
nobody noticed for 4 years, I'm going to assume it's not a biggie.
Signed-off-by: Vladimir Oltean <vladimir.oltean@nxp.com>
Reviewed-by: Ido Schimmel <idosch@nvidia.com> # switchdev and mlxsw
Reviewed-by: Florian Fainelli <f.fainelli@gmail.com>
Reviewed-by: Kurt Kanzenbach <kurt@linutronix.de> # hellcreek
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
2021-01-09 02:01:46 +02:00
|
|
|
flag_pvid, extack);
|
2020-09-16 19:31:01 +03:00
|
|
|
}
|
|
|
|
|
2022-07-11 14:28:22 +03:00
|
|
|
static struct prestera_br_mdb_entry *
|
|
|
|
prestera_br_mdb_entry_create(struct prestera_switch *sw,
|
|
|
|
struct prestera_bridge *br_dev,
|
|
|
|
const unsigned char *addr, u16 vid)
|
|
|
|
{
|
|
|
|
struct prestera_br_mdb_entry *br_mdb_entry;
|
|
|
|
struct prestera_mdb_entry *mdb_entry;
|
|
|
|
|
|
|
|
br_mdb_entry = kzalloc(sizeof(*br_mdb_entry), GFP_KERNEL);
|
|
|
|
if (!br_mdb_entry)
|
|
|
|
return NULL;
|
|
|
|
|
|
|
|
mdb_entry = prestera_mdb_entry_create(sw, addr, vid);
|
|
|
|
if (!mdb_entry)
|
|
|
|
goto err_mdb_alloc;
|
|
|
|
|
|
|
|
br_mdb_entry->mdb = mdb_entry;
|
|
|
|
br_mdb_entry->bridge = br_dev;
|
|
|
|
br_mdb_entry->enabled = true;
|
|
|
|
INIT_LIST_HEAD(&br_mdb_entry->br_mdb_port_list);
|
|
|
|
|
|
|
|
list_add(&br_mdb_entry->br_mdb_entry_node, &br_dev->br_mdb_entry_list);
|
|
|
|
|
|
|
|
return br_mdb_entry;
|
|
|
|
|
|
|
|
err_mdb_alloc:
|
|
|
|
kfree(br_mdb_entry);
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int prestera_br_mdb_port_add(struct prestera_br_mdb_entry *br_mdb,
|
|
|
|
struct prestera_bridge_port *br_port)
|
|
|
|
{
|
|
|
|
struct prestera_br_mdb_port *br_mdb_port;
|
|
|
|
|
|
|
|
list_for_each_entry(br_mdb_port, &br_mdb->br_mdb_port_list,
|
|
|
|
br_mdb_port_node)
|
|
|
|
if (br_mdb_port->br_port == br_port)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
br_mdb_port = kzalloc(sizeof(*br_mdb_port), GFP_KERNEL);
|
|
|
|
if (!br_mdb_port)
|
|
|
|
return -ENOMEM;
|
|
|
|
|
|
|
|
br_mdb_port->br_port = br_port;
|
|
|
|
list_add(&br_mdb_port->br_mdb_port_node,
|
|
|
|
&br_mdb->br_mdb_port_list);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static struct prestera_br_mdb_entry *
|
|
|
|
prestera_br_mdb_entry_find(struct prestera_bridge *br_dev,
|
|
|
|
const unsigned char *addr, u16 vid)
|
|
|
|
{
|
|
|
|
struct prestera_br_mdb_entry *br_mdb;
|
|
|
|
|
|
|
|
list_for_each_entry(br_mdb, &br_dev->br_mdb_entry_list,
|
|
|
|
br_mdb_entry_node)
|
|
|
|
if (ether_addr_equal(&br_mdb->mdb->addr[0], addr) &&
|
|
|
|
vid == br_mdb->mdb->vid)
|
|
|
|
return br_mdb;
|
|
|
|
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
static struct prestera_br_mdb_entry *
|
|
|
|
prestera_br_mdb_entry_get(struct prestera_switch *sw,
|
|
|
|
struct prestera_bridge *br_dev,
|
|
|
|
const unsigned char *addr, u16 vid)
|
|
|
|
{
|
|
|
|
struct prestera_br_mdb_entry *br_mdb;
|
|
|
|
|
|
|
|
br_mdb = prestera_br_mdb_entry_find(br_dev, addr, vid);
|
|
|
|
if (br_mdb)
|
|
|
|
return br_mdb;
|
|
|
|
|
|
|
|
return prestera_br_mdb_entry_create(sw, br_dev, addr, vid);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Handle SWITCHDEV_OBJ_ID_PORT_MDB addition: get (find-or-create) the
 * software MDB entry for the group address, attach the originating bridge
 * port to it, and synchronize the result to hardware.
 *
 * Returns 0 on success or when the request is silently ignored (unknown
 * bridge port, VLAN the port is not a member of), -ENOMEM on allocation
 * failure, or a negative error from port attach / HW sync.
 */
static int
prestera_mdb_port_addr_obj_add(const struct switchdev_obj_port_mdb *mdb)
{
	struct prestera_br_mdb_entry *br_mdb;
	struct prestera_bridge_port *br_port;
	struct prestera_bridge *br_dev;
	struct prestera_switch *sw;
	struct prestera_port *port;
	int err;

	sw = prestera_switch_get(mdb->obj.orig_dev);
	/* NOTE(review): port is dereferenced below (via
	 * prestera_port_vlan_by_vid()) without a NULL check - assumes the
	 * switchdev core only delivers objects for devs with a prestera
	 * lower; confirm for LAG/upper topologies.
	 */
	port = prestera_port_dev_lower_find(mdb->obj.orig_dev);

	/* Device is not a known bridge port - nothing to offload. */
	br_port = prestera_bridge_port_find(sw, mdb->obj.orig_dev);
	if (!br_port)
		return 0;

	br_dev = br_port->bridge;

	/* Ignore MDB entries for VLANs the port is not a member of. */
	if (mdb->vid && !prestera_port_vlan_by_vid(port, mdb->vid))
		return 0;

	/* Entries are keyed by VID when one is given; otherwise by the
	 * bridge ID (mirrors the lookup in the del path).
	 */
	if (mdb->vid)
		br_mdb = prestera_br_mdb_entry_get(sw, br_dev, &mdb->addr[0],
						   mdb->vid);
	else
		br_mdb = prestera_br_mdb_entry_get(sw, br_dev, &mdb->addr[0],
						   br_dev->bridge_id);

	if (!br_mdb)
		return -ENOMEM;

	/* Make sure newly allocated MDB entry gets disabled if either MC is
	 * disabled, or the mrouter does not exist.
	 */
	WARN_ON(prestera_mdb_enable_set(br_mdb, br_dev->multicast_enabled &&
					br_dev->mrouter_exist));

	err = prestera_br_mdb_port_add(br_mdb, br_port);
	if (err) {
		/* Drop the reference taken by _get() above. */
		prestera_br_mdb_entry_put(br_mdb);
		return err;
	}

	/* Push the updated software MDB state to hardware. */
	err = prestera_br_mdb_sync(br_dev);
	if (err)
		return err;

	return 0;
}
|
|
|
|
|
2021-06-27 14:54:24 +03:00
|
|
|
static int prestera_port_obj_add(struct net_device *dev, const void *ctx,
|
2020-09-16 19:31:01 +03:00
|
|
|
const struct switchdev_obj *obj,
|
|
|
|
struct netlink_ext_ack *extack)
|
|
|
|
{
|
|
|
|
struct prestera_port *port = netdev_priv(dev);
|
|
|
|
const struct switchdev_obj_port_vlan *vlan;
|
2022-07-11 14:28:22 +03:00
|
|
|
const struct switchdev_obj_port_mdb *mdb;
|
|
|
|
int err = 0;
|
2020-09-16 19:31:01 +03:00
|
|
|
|
|
|
|
switch (obj->id) {
|
|
|
|
case SWITCHDEV_OBJ_ID_PORT_VLAN:
|
|
|
|
vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);
|
net: switchdev: remove the transaction structure from port object notifiers
Since the introduction of the switchdev API, port objects were
transmitted to drivers for offloading using a two-step transactional
model, with a prepare phase that was supposed to catch all errors, and a
commit phase that was supposed to never fail.
Some classes of failures can never be avoided, like hardware access, or
memory allocation. In the latter case, merely attempting to move the
memory allocation to the preparation phase makes it impossible to avoid
memory leaks, since commit 91cf8eceffc1 ("switchdev: Remove unused
transaction item queue") which has removed the unused mechanism of
passing on the allocated memory between one phase and another.
It is time we admit that separating the preparation from the commit
phase is something that is best left for the driver to decide, and not
something that should be baked into the API, especially since there are
no switchdev callers that depend on this.
This patch removes the struct switchdev_trans member from switchdev port
object notifier structures, and converts drivers to not look at this
member.
Where driver conversion is trivial (like in the case of the Marvell
Prestera driver, NXP DPAA2 switch, TI CPSW, and Rocker drivers), it is
done in this patch.
Where driver conversion needs more attention (DSA, Mellanox Spectrum),
the conversion is left for subsequent patches and here we only fake the
prepare/commit phases at a lower level, just not in the switchdev
notifier itself.
Where the code has a natural structure that is best left alone as a
preparation and a commit phase (as in the case of the Ocelot switch),
that structure is left in place, just made to not depend upon the
switchdev transactional model.
Signed-off-by: Vladimir Oltean <vladimir.oltean@nxp.com>
Reviewed-by: Florian Fainelli <f.fainelli@gmail.com>
Acked-by: Linus Walleij <linus.walleij@linaro.org>
Acked-by: Jiri Pirko <jiri@nvidia.com>
Reviewed-by: Ido Schimmel <idosch@nvidia.com>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
2021-01-09 02:01:48 +02:00
|
|
|
return prestera_port_vlans_add(port, vlan, extack);
|
2022-07-11 14:28:22 +03:00
|
|
|
case SWITCHDEV_OBJ_ID_PORT_MDB:
|
|
|
|
mdb = SWITCHDEV_OBJ_PORT_MDB(obj);
|
|
|
|
err = prestera_mdb_port_addr_obj_add(mdb);
|
|
|
|
break;
|
|
|
|
case SWITCHDEV_OBJ_ID_HOST_MDB:
|
|
|
|
fallthrough;
|
2020-09-16 19:31:01 +03:00
|
|
|
default:
|
2022-07-11 14:28:22 +03:00
|
|
|
err = -EOPNOTSUPP;
|
|
|
|
break;
|
2020-09-16 19:31:01 +03:00
|
|
|
}
|
2022-07-11 14:28:22 +03:00
|
|
|
|
|
|
|
return err;
|
2020-09-16 19:31:01 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
static int prestera_port_vlans_del(struct prestera_port *port,
|
|
|
|
const struct switchdev_obj_port_vlan *vlan)
|
|
|
|
{
|
2021-06-10 18:43:11 +03:00
|
|
|
struct net_device *orig_dev = vlan->obj.orig_dev;
|
2020-09-16 19:31:01 +03:00
|
|
|
struct prestera_bridge_port *br_port;
|
|
|
|
struct prestera_switch *sw = port->sw;
|
|
|
|
|
2021-06-10 18:43:11 +03:00
|
|
|
if (netif_is_bridge_master(orig_dev))
|
2020-09-16 19:31:01 +03:00
|
|
|
return -EOPNOTSUPP;
|
|
|
|
|
2021-06-10 18:43:11 +03:00
|
|
|
br_port = prestera_bridge_port_by_dev(sw->swdev, port->dev);
|
2020-09-16 19:31:01 +03:00
|
|
|
if (WARN_ON(!br_port))
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
if (!br_port->bridge->vlan_enabled)
|
|
|
|
return 0;
|
|
|
|
|
net: switchdev: remove vid_begin -> vid_end range from VLAN objects
The call path of a switchdev VLAN addition to the bridge looks something
like this today:
nbp_vlan_init
| __br_vlan_set_default_pvid
| | |
| | br_afspec |
| | | |
| | v |
| | br_process_vlan_info |
| | | |
| | v |
| | br_vlan_info |
| | / \ /
| | / \ /
| | / \ /
| | / \ /
v v v v v
nbp_vlan_add br_vlan_add ------+
| ^ ^ | |
| / | | |
| / / / |
\ br_vlan_get_master/ / v
\ ^ / / br_vlan_add_existing
\ | / / |
\ | / / /
\ | / / /
\ | / / /
\ | / / /
v | | v /
__vlan_add /
/ | /
/ | /
v | /
__vlan_vid_add | /
\ | /
v v v
br_switchdev_port_vlan_add
The ranges UAPI was introduced to the bridge in commit bdced7ef7838
("bridge: support for multiple vlans and vlan ranges in setlink and
dellink requests") (Jan 10 2015). But the VLAN ranges (parsed in br_afspec)
have always been passed one by one, through struct bridge_vlan_info
tmp_vinfo, to br_vlan_info. So the range never went too far in depth.
Then Scott Feldman introduced the switchdev_port_bridge_setlink function
in commit 47f8328bb1a4 ("switchdev: add new switchdev bridge setlink").
That marked the introduction of the SWITCHDEV_OBJ_PORT_VLAN, which made
full use of the range. But switchdev_port_bridge_setlink was called like
this:
br_setlink
-> br_afspec
-> switchdev_port_bridge_setlink
Basically, the switchdev and the bridge code were not tightly integrated.
Then commit 41c498b9359e ("bridge: restore br_setlink back to original")
came, and switchdev drivers were required to implement
.ndo_bridge_setlink = switchdev_port_bridge_setlink for a while.
In the meantime, commits such as 0944d6b5a2fa ("bridge: try switchdev op
first in __vlan_vid_add/del") finally made switchdev penetrate the
br_vlan_info() barrier and start to develop the call path we have today.
But remember, br_vlan_info() still receives VLANs one by one.
Then Arkadi Sharshevsky refactored the switchdev API in 2017 in commit
29ab586c3d83 ("net: switchdev: Remove bridge bypass support from
switchdev") so that drivers would not implement .ndo_bridge_setlink any
longer. The switchdev_port_bridge_setlink also got deleted.
This refactoring removed the parallel bridge_setlink implementation from
switchdev, and left the only switchdev VLAN objects to be the ones
offloaded from __vlan_vid_add (basically RX filtering) and __vlan_add
(the latter coming from commit 9c86ce2c1ae3 ("net: bridge: Notify about
bridge VLANs")).
That is to say, today the switchdev VLAN object ranges are not used in
the kernel. Refactoring the above call path is a bit complicated, when
the bridge VLAN call path is already a bit complicated.
Let's go off and finish the job of commit 29ab586c3d83 by deleting the
bogus iteration through the VLAN ranges from the drivers. Some aspects
of this feature never made too much sense in the first place. For
example, what is a range of VLANs all having the BRIDGE_VLAN_INFO_PVID
flag supposed to mean, when a port can obviously have a single pvid?
This particular configuration _is_ denied as of commit 6623c60dc28e
("bridge: vlan: enforce no pvid flag in vlan ranges"), but from an API
perspective, the driver still has to play pretend, and only offload the
vlan->vid_end as pvid. And the addition of a switchdev VLAN object can
modify the flags of another, completely unrelated, switchdev VLAN
object! (a VLAN that is PVID will invalidate the PVID flag from whatever
other VLAN had previously been offloaded with switchdev and had that
flag. Yet switchdev never notifies about that change, drivers are
supposed to guess).
Nonetheless, having a VLAN range in the API makes error handling look
scarier than it really is - unwinding on errors and all of that.
When in reality, no one really calls this API with more than one VLAN.
It is all unnecessary complexity.
And despite appearing pretentious (two-phase transactional model and
all), the switchdev API is really sloppy because the VLAN addition and
removal operations are not paired with one another (you can add a VLAN
100 times and delete it just once). The bridge notifies through
switchdev of a VLAN addition not only when the flags of an existing VLAN
change, but also when nothing changes. There are switchdev drivers out
there who don't like adding a VLAN that has already been added, and
those checks don't really belong at driver level. But the fact that the
API contains ranges is yet another factor that prevents this from being
addressed in the future.
Of the existing switchdev pieces of hardware, it appears that only
Mellanox Spectrum supports offloading more than one VLAN at a time,
through mlxsw_sp_port_vlan_set. I have kept that code internal to the
driver, because there is some more bookkeeping that makes use of it, but
I deleted it from the switchdev API. But since the switchdev support for
ranges has already been de facto deleted by a Mellanox employee and
nobody noticed for 4 years, I'm going to assume it's not a biggie.
Signed-off-by: Vladimir Oltean <vladimir.oltean@nxp.com>
Reviewed-by: Ido Schimmel <idosch@nvidia.com> # switchdev and mlxsw
Reviewed-by: Florian Fainelli <f.fainelli@gmail.com>
Reviewed-by: Kurt Kanzenbach <kurt@linutronix.de> # hellcreek
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
2021-01-09 02:01:46 +02:00
|
|
|
prestera_bridge_port_vlan_del(port, br_port, vlan->vid);
|
2020-09-16 19:31:01 +03:00
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2022-07-11 14:28:22 +03:00
|
|
|
/* Handle SWITCHDEV_OBJ_ID_PORT_MDB removal: detach the bridge port from the
 * software MDB entry, drop the entry reference, and re-sync hardware.
 *
 * Requests that cannot be matched (unknown bridge port, VLAN the port is
 * not a member of, no such entry) are silently ignored and return 0.
 */
static int
prestera_mdb_port_addr_obj_del(struct prestera_port *port,
			       const struct switchdev_obj_port_mdb *mdb)
{
	struct prestera_br_mdb_entry *br_mdb;
	struct prestera_bridge_port *br_port;
	struct prestera_bridge *br_dev;
	int err;

	/* Bridge port no longer exists - and so does this MDB entry */
	br_port = prestera_bridge_port_find(port->sw, mdb->obj.orig_dev);
	if (!br_port)
		return 0;

	/* Removing MDB with non-existing VLAN - not supported; */
	if (mdb->vid && !prestera_port_vlan_by_vid(port, mdb->vid))
		return 0;

	br_dev = br_port->bridge;

	/* VLAN-aware bridges key entries by VID; VLAN-unaware ones by the
	 * bridge ID (same keying as the add path).
	 */
	if (br_port->bridge->vlan_enabled)
		br_mdb = prestera_br_mdb_entry_find(br_dev, &mdb->addr[0],
						    mdb->vid);
	else
		br_mdb = prestera_br_mdb_entry_find(br_dev, &mdb->addr[0],
						    br_port->bridge->bridge_id);

	if (!br_mdb)
		return 0;

	/* Since there might be a situation that this port was the last in the
	 * MDB group, we have to both remove this port from software and HW MDB,
	 * sync MDB table, and then destroy software MDB (if needed).
	 */
	prestera_br_mdb_port_del(br_mdb, br_port);

	prestera_br_mdb_entry_put(br_mdb);

	err = prestera_br_mdb_sync(br_dev);
	if (err)
		return err;

	return 0;
}
|
|
|
|
|
2021-06-27 14:54:24 +03:00
|
|
|
static int prestera_port_obj_del(struct net_device *dev, const void *ctx,
|
2020-09-16 19:31:01 +03:00
|
|
|
const struct switchdev_obj *obj)
|
|
|
|
{
|
|
|
|
struct prestera_port *port = netdev_priv(dev);
|
2022-07-11 14:28:22 +03:00
|
|
|
const struct switchdev_obj_port_mdb *mdb;
|
|
|
|
int err = 0;
|
2020-09-16 19:31:01 +03:00
|
|
|
|
|
|
|
switch (obj->id) {
|
|
|
|
case SWITCHDEV_OBJ_ID_PORT_VLAN:
|
|
|
|
return prestera_port_vlans_del(port, SWITCHDEV_OBJ_PORT_VLAN(obj));
|
2022-07-11 14:28:22 +03:00
|
|
|
case SWITCHDEV_OBJ_ID_PORT_MDB:
|
|
|
|
mdb = SWITCHDEV_OBJ_PORT_MDB(obj);
|
|
|
|
err = prestera_mdb_port_addr_obj_del(port, mdb);
|
|
|
|
break;
|
2020-09-16 19:31:01 +03:00
|
|
|
default:
|
2022-07-11 14:28:22 +03:00
|
|
|
err = -EOPNOTSUPP;
|
|
|
|
break;
|
2020-09-16 19:31:01 +03:00
|
|
|
}
|
2022-07-11 14:28:22 +03:00
|
|
|
|
|
|
|
return err;
|
2020-09-16 19:31:01 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Blocking switchdev notifier: dispatch object add/del and attribute-set
 * events to the prestera handlers, restricted to netdevs accepted by
 * prestera_netdev_check().
 */
static int prestera_switchdev_blk_event(struct notifier_block *unused,
					unsigned long event, void *ptr)
{
	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
	int err;

	switch (event) {
	case SWITCHDEV_PORT_OBJ_ADD:
		err = switchdev_handle_port_obj_add(dev, ptr,
						    prestera_netdev_check,
						    prestera_port_obj_add);
		break;
	case SWITCHDEV_PORT_OBJ_DEL:
		err = switchdev_handle_port_obj_del(dev, ptr,
						    prestera_netdev_check,
						    prestera_port_obj_del);
		break;
	case SWITCHDEV_PORT_ATTR_SET:
		err = switchdev_handle_port_attr_set(dev, ptr,
						     prestera_netdev_check,
						     prestera_port_obj_attr_set);
		break;
	default:
		/* Unhandled events must not block other chain consumers. */
		return NOTIFY_DONE;
	}

	return notifier_from_errno(err);
}
|
|
|
|
|
|
|
|
/* FW FDB event handler: translate a hardware-learned/aged FDB entry into a
 * SWITCHDEV_FDB_{ADD,DEL}_TO_BRIDGE notification so the bridge layer stays
 * in sync with the hardware FDB.
 */
static void prestera_fdb_event(struct prestera_switch *sw,
			       struct prestera_event *evt, void *arg)
{
	struct switchdev_notifier_fdb_info info = {};
	struct net_device *dev = NULL;
	struct prestera_port *port;
	struct prestera_lag *lag;

	/* Resolve the netdev the entry belongs to: plain port or LAG. */
	switch (evt->fdb_evt.type) {
	case PRESTERA_FDB_ENTRY_TYPE_REG_PORT:
		port = prestera_find_port(sw, evt->fdb_evt.dest.port_id);
		if (port)
			dev = port->dev;
		break;
	case PRESTERA_FDB_ENTRY_TYPE_LAG:
		lag = prestera_lag_by_id(sw, evt->fdb_evt.dest.lag_id);
		if (lag)
			dev = lag->dev;
		break;
	default:
		return;
	}

	/* Destination no longer known - drop the event. */
	if (!dev)
		return;

	info.addr = evt->fdb_evt.data.mac;
	info.vid = evt->fdb_evt.vid;
	info.offloaded = true;

	/* Notifiers are invoked under RTNL. */
	rtnl_lock();

	switch (evt->id) {
	case PRESTERA_FDB_EVENT_LEARNED:
		call_switchdev_notifiers(SWITCHDEV_FDB_ADD_TO_BRIDGE,
					 dev, &info.info, NULL);
		break;
	case PRESTERA_FDB_EVENT_AGED:
		call_switchdev_notifiers(SWITCHDEV_FDB_DEL_TO_BRIDGE,
					 dev, &info.info, NULL);
		break;
	}

	rtnl_unlock();
}
|
|
|
|
|
|
|
|
static int prestera_fdb_init(struct prestera_switch *sw)
|
|
|
|
{
|
|
|
|
int err;
|
|
|
|
|
|
|
|
err = prestera_hw_event_handler_register(sw, PRESTERA_EVENT_TYPE_FDB,
|
|
|
|
prestera_fdb_event, NULL);
|
|
|
|
if (err)
|
|
|
|
return err;
|
|
|
|
|
|
|
|
err = prestera_hw_switch_ageing_set(sw, PRESTERA_DEFAULT_AGEING_TIME_MS);
|
|
|
|
if (err)
|
|
|
|
goto err_ageing_set;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
err_ageing_set:
|
|
|
|
prestera_hw_event_handler_unregister(sw, PRESTERA_EVENT_TYPE_FDB,
|
|
|
|
prestera_fdb_event);
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Tear down FDB event handling: detach the FW event handler registered by
 * prestera_fdb_init().
 */
static void prestera_fdb_fini(struct prestera_switch *sw)
{
	prestera_hw_event_handler_unregister(sw, PRESTERA_EVENT_TYPE_FDB,
					     prestera_fdb_event);
}
|
|
|
|
|
|
|
|
static int prestera_switchdev_handler_init(struct prestera_switchdev *swdev)
|
|
|
|
{
|
|
|
|
int err;
|
|
|
|
|
|
|
|
swdev->swdev_nb.notifier_call = prestera_switchdev_event;
|
|
|
|
err = register_switchdev_notifier(&swdev->swdev_nb);
|
|
|
|
if (err)
|
|
|
|
goto err_register_swdev_notifier;
|
|
|
|
|
|
|
|
swdev->swdev_nb_blk.notifier_call = prestera_switchdev_blk_event;
|
|
|
|
err = register_switchdev_blocking_notifier(&swdev->swdev_nb_blk);
|
|
|
|
if (err)
|
|
|
|
goto err_register_blk_swdev_notifier;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
err_register_blk_swdev_notifier:
|
|
|
|
unregister_switchdev_notifier(&swdev->swdev_nb);
|
|
|
|
err_register_swdev_notifier:
|
|
|
|
destroy_workqueue(swdev_wq);
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Unregister the switchdev notifiers in reverse registration order. */
static void prestera_switchdev_handler_fini(struct prestera_switchdev *swdev)
{
	unregister_switchdev_blocking_notifier(&swdev->swdev_nb_blk);
	unregister_switchdev_notifier(&swdev->swdev_nb);
}
|
|
|
|
|
|
|
|
int prestera_switchdev_init(struct prestera_switch *sw)
|
|
|
|
{
|
|
|
|
struct prestera_switchdev *swdev;
|
|
|
|
int err;
|
|
|
|
|
|
|
|
swdev = kzalloc(sizeof(*swdev), GFP_KERNEL);
|
|
|
|
if (!swdev)
|
|
|
|
return -ENOMEM;
|
|
|
|
|
|
|
|
sw->swdev = swdev;
|
|
|
|
swdev->sw = sw;
|
|
|
|
|
|
|
|
INIT_LIST_HEAD(&swdev->bridge_list);
|
|
|
|
|
|
|
|
swdev_wq = alloc_ordered_workqueue("%s_ordered", 0, "prestera_br");
|
|
|
|
if (!swdev_wq) {
|
|
|
|
err = -ENOMEM;
|
|
|
|
goto err_alloc_wq;
|
|
|
|
}
|
|
|
|
|
|
|
|
err = prestera_switchdev_handler_init(swdev);
|
|
|
|
if (err)
|
|
|
|
goto err_swdev_init;
|
|
|
|
|
|
|
|
err = prestera_fdb_init(sw);
|
|
|
|
if (err)
|
|
|
|
goto err_fdb_init;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
err_fdb_init:
|
|
|
|
err_swdev_init:
|
|
|
|
destroy_workqueue(swdev_wq);
|
|
|
|
err_alloc_wq:
|
|
|
|
kfree(swdev);
|
|
|
|
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Tear down switchdev support in reverse order of prestera_switchdev_init():
 * FDB events, notifiers, workqueue, then the context itself.
 */
void prestera_switchdev_fini(struct prestera_switch *sw)
{
	struct prestera_switchdev *swdev = sw->swdev;

	prestera_fdb_fini(sw);
	prestera_switchdev_handler_fini(swdev);
	destroy_workqueue(swdev_wq);
	kfree(swdev);
}
|