// SPDX-License-Identifier: GPL-2.0
/*
 * Microchip KSZ9477 switch driver main logic
 *
 * Copyright (C) 2017-2019 Microchip Technology Inc.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/iopoll.h>
#include <linux/platform_data/microchip-ksz.h>
#include <linux/phy.h>
#include <linux/if_bridge.h>
#include <linux/if_vlan.h>
#include <net/dsa.h>
#include <net/switchdev.h>

#include "ksz9477_reg.h"
#include "ksz_common.h"
#include "ksz9477.h"

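/* Thin wrappers around regmap_update_bits(): ksz_cfg() and ksz_port_cfg()
 * set or clear bits in 8-bit global and per-port registers, while the
 * ksz9477_*cfg32() variants below do the same for 32-bit registers.
 */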
static void ksz_cfg(struct ksz_device *dev, u32 addr, u8 bits, bool set)
{
	regmap_update_bits(ksz_regmap_8(dev), addr, bits, set ? bits : 0);
}

static void ksz_port_cfg(struct ksz_device *dev, int port, int offset, u8 bits,
			 bool set)
{
	regmap_update_bits(ksz_regmap_8(dev), PORT_CTRL_ADDR(port, offset),
			   bits, set ? bits : 0);
}

static void ksz9477_cfg32(struct ksz_device *dev, u32 addr, u32 bits, bool set)
{
	regmap_update_bits(ksz_regmap_32(dev), addr, bits, set ? bits : 0);
}

static void ksz9477_port_cfg32(struct ksz_device *dev, int port, int offset,
			       u32 bits, bool set)
{
	regmap_update_bits(ksz_regmap_32(dev), PORT_CTRL_ADDR(port, offset),
			   bits, set ? bits : 0);
}

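/* The switch MTU register holds the maximum frame size rather than the L2
 * payload size, so the requested MTU is converted below by adding the VLAN
 * Ethernet header (VLAN_ETH_HLEN, 18 bytes) and the frame checksum
 * (ETH_FCS_LEN, 4 bytes); e.g. a 1500 byte MTU programs 1522 bytes.
 */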
int ksz9477_change_mtu(struct ksz_device *dev, int port, int mtu)
{
	u16 frame_size;

	if (!dsa_is_cpu_port(dev->ds, port))
		return 0;

	frame_size = mtu + VLAN_ETH_HLEN + ETH_FCS_LEN;

	return regmap_update_bits(ksz_regmap_16(dev), REG_SW_MTU__2,
				  REG_SW_MTU_MASK, frame_size);
}

/**
 * ksz9477_handle_wake_reason - Handle wake reason on a specified port.
 * @dev: The device structure.
 * @port: The port number.
 *
 * This function reads the PME (Power Management Event) status register of a
 * specified port to determine the wake reason. If there is no wake event, it
 * returns early. Otherwise, it logs the wake reason, which can be a
 * "Magic Packet", "Link Up", or "Energy Detect" event. The PME status
 * register is then cleared to acknowledge the handling of the wake event.
 *
 * Return: 0 on success, or an error code on failure.
 */
static int ksz9477_handle_wake_reason(struct ksz_device *dev, int port)
{
	u8 pme_status;
	int ret;

	ret = ksz_pread8(dev, port, REG_PORT_PME_STATUS, &pme_status);
	if (ret)
		return ret;

	if (!pme_status)
		return 0;

	dev_dbg(dev->dev, "Wake event on port %d due to:%s%s%s\n", port,
		pme_status & PME_WOL_MAGICPKT ? " \"Magic Packet\"" : "",
		pme_status & PME_WOL_LINKUP ? " \"Link Up\"" : "",
		pme_status & PME_WOL_ENERGY ? " \"Energy detect\"" : "");

	return ksz_pwrite8(dev, port, REG_PORT_PME_STATUS, pme_status);
}

/**
 * ksz9477_get_wol - Get Wake-on-LAN settings for a specified port.
 * @dev: The device structure.
 * @port: The port number.
 * @wol: Pointer to ethtool Wake-on-LAN settings structure.
 *
 * This function checks the PME Pin Control Register to see if PME Pin Output
 * Enable is set, indicating PME is enabled. If enabled, it sets the supported
 * and active WoL flags.
 */
void ksz9477_get_wol(struct ksz_device *dev, int port,
		     struct ethtool_wolinfo *wol)
{
	u8 pme_ctrl;
	int ret;

	if (!dev->wakeup_source)
		return;

	wol->supported = WAKE_PHY;

	/* Check if the current MAC address on this port can be set
	 * as global for WAKE_MAGIC support. The result may vary
	 * dynamically based on the other ports' configuration.
	 */
	if (ksz_is_port_mac_global_usable(dev->ds, port))
		wol->supported |= WAKE_MAGIC;

	ret = ksz_pread8(dev, port, REG_PORT_PME_CTRL, &pme_ctrl);
	if (ret)
		return;

	if (pme_ctrl & PME_WOL_MAGICPKT)
		wol->wolopts |= WAKE_MAGIC;
	if (pme_ctrl & (PME_WOL_LINKUP | PME_WOL_ENERGY))
		wol->wolopts |= WAKE_PHY;
}

/**
 * ksz9477_set_wol - Set Wake-on-LAN settings for a specified port.
 * @dev: The device structure.
 * @port: The port number.
 * @wol: Pointer to ethtool Wake-on-LAN settings structure.
 *
 * This function configures Wake-on-LAN (WoL) settings for a specified port.
 * It validates the provided WoL options, checks if PME is enabled via the
 * switch's PME Pin Control Register, clears any previous wake reasons,
 * and sets the Magic Packet flag in the port's PME control register if
 * specified.
 *
 * Return: 0 on success, or an error code on failure.
 */
int ksz9477_set_wol(struct ksz_device *dev, int port,
		    struct ethtool_wolinfo *wol)
{
	u8 pme_ctrl = 0, pme_ctrl_old = 0;
	bool magic_switched_off;
	bool magic_switched_on;
	int ret;

	if (wol->wolopts & ~(WAKE_PHY | WAKE_MAGIC))
		return -EINVAL;

	if (!dev->wakeup_source)
		return -EOPNOTSUPP;

	ret = ksz9477_handle_wake_reason(dev, port);
	if (ret)
		return ret;

	if (wol->wolopts & WAKE_MAGIC)
		pme_ctrl |= PME_WOL_MAGICPKT;
	if (wol->wolopts & WAKE_PHY)
		pme_ctrl |= PME_WOL_LINKUP | PME_WOL_ENERGY;

	ret = ksz_pread8(dev, port, REG_PORT_PME_CTRL, &pme_ctrl_old);
	if (ret)
		return ret;

	if (pme_ctrl_old == pme_ctrl)
		return 0;

	magic_switched_off = (pme_ctrl_old & PME_WOL_MAGICPKT) &&
			     !(pme_ctrl & PME_WOL_MAGICPKT);
	magic_switched_on = !(pme_ctrl_old & PME_WOL_MAGICPKT) &&
			    (pme_ctrl & PME_WOL_MAGICPKT);

	/* To keep the reference count of the MAC address correct, perform
	 * this operation only when the WoL settings actually change.
	 */
	if (magic_switched_on) {
		ret = ksz_switch_macaddr_get(dev->ds, port, NULL);
		if (ret)
			return ret;
	} else if (magic_switched_off) {
		ksz_switch_macaddr_put(dev->ds);
	}

	ret = ksz_pwrite8(dev, port, REG_PORT_PME_CTRL, pme_ctrl);
	if (ret) {
		if (magic_switched_on)
			ksz_switch_macaddr_put(dev->ds);
		return ret;
	}

	return 0;
}

/**
 * ksz9477_wol_pre_shutdown - Prepares the switch device for shutdown while
 * considering Wake-on-LAN (WoL) settings.
 * @dev: The switch device structure.
 * @wol_enabled: Pointer to a boolean which will be set to true if WoL is
 * enabled on any port.
 *
 * This function prepares the switch device for a safe shutdown while taking
 * into account the Wake-on-LAN (WoL) settings on the user ports. It updates
 * the wol_enabled flag accordingly to reflect whether WoL is active on any
 * port.
 */
void ksz9477_wol_pre_shutdown(struct ksz_device *dev, bool *wol_enabled)
{
	struct dsa_port *dp;
	int ret;

	*wol_enabled = false;

	if (!dev->wakeup_source)
		return;

	dsa_switch_for_each_user_port(dp, dev->ds) {
		u8 pme_ctrl = 0;

		ret = ksz_pread8(dev, dp->index, REG_PORT_PME_CTRL, &pme_ctrl);
		if (!ret && pme_ctrl)
			*wol_enabled = true;

		/* Make sure there are no pending wake events which would
		 * prevent the device from going to sleep/shutdown.
		 */
		ksz9477_handle_wake_reason(dev, dp->index);
	}

	/* Now it is safe to enable the PME pin. */
	if (*wol_enabled)
		ksz_write8(dev, REG_SW_PME_CTRL, PME_ENABLE);
}

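/* VLAN table entries are accessed indirectly: the entry index is programmed
 * first, then a read or write is kicked off by setting VLAN_START, and
 * software polls (every 10 us, for up to 1 ms) until the hardware clears
 * that bit again.
 */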
static int ksz9477_wait_vlan_ctrl_ready(struct ksz_device *dev)
{
	unsigned int val;

	return regmap_read_poll_timeout(ksz_regmap_8(dev), REG_SW_VLAN_CTRL,
					val, !(val & VLAN_START), 10, 1000);
}

static int ksz9477_get_vlan_table(struct ksz_device *dev, u16 vid,
				  u32 *vlan_table)
{
	int ret;

	mutex_lock(&dev->vlan_mutex);

	ksz_write16(dev, REG_SW_VLAN_ENTRY_INDEX__2, vid & VLAN_INDEX_M);
	ksz_write8(dev, REG_SW_VLAN_CTRL, VLAN_READ | VLAN_START);

	/* wait to be cleared */
	ret = ksz9477_wait_vlan_ctrl_ready(dev);
	if (ret) {
		dev_dbg(dev->dev, "Failed to read vlan table\n");
		goto exit;
	}

	ksz_read32(dev, REG_SW_VLAN_ENTRY__4, &vlan_table[0]);
	ksz_read32(dev, REG_SW_VLAN_ENTRY_UNTAG__4, &vlan_table[1]);
	ksz_read32(dev, REG_SW_VLAN_ENTRY_PORTS__4, &vlan_table[2]);

	ksz_write8(dev, REG_SW_VLAN_CTRL, 0);

exit:
	mutex_unlock(&dev->vlan_mutex);

	return ret;
}

static int ksz9477_set_vlan_table(struct ksz_device *dev, u16 vid,
				  u32 *vlan_table)
{
	int ret;

	mutex_lock(&dev->vlan_mutex);

	ksz_write32(dev, REG_SW_VLAN_ENTRY__4, vlan_table[0]);
	ksz_write32(dev, REG_SW_VLAN_ENTRY_UNTAG__4, vlan_table[1]);
	ksz_write32(dev, REG_SW_VLAN_ENTRY_PORTS__4, vlan_table[2]);

	ksz_write16(dev, REG_SW_VLAN_ENTRY_INDEX__2, vid & VLAN_INDEX_M);
	ksz_write8(dev, REG_SW_VLAN_CTRL, VLAN_START | VLAN_WRITE);

	/* wait to be cleared */
	ret = ksz9477_wait_vlan_ctrl_ready(dev);
	if (ret) {
		dev_dbg(dev->dev, "Failed to write vlan table\n");
		goto exit;
	}

	ksz_write8(dev, REG_SW_VLAN_CTRL, 0);

	/* update vlan cache table */
	dev->vlan_cache[vid].table[0] = vlan_table[0];
	dev->vlan_cache[vid].table[1] = vlan_table[1];
	dev->vlan_cache[vid].table[2] = vlan_table[2];

exit:
	mutex_unlock(&dev->vlan_mutex);

	return ret;
}

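/* ALU (address lookup) table entries are staged through the four 32-bit
 * value registers REG_SW_ALU_VAL_A..D, which the helpers below read and
 * write as a group.
 */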
static void ksz9477_read_table(struct ksz_device *dev, u32 *table)
{
	ksz_read32(dev, REG_SW_ALU_VAL_A, &table[0]);
	ksz_read32(dev, REG_SW_ALU_VAL_B, &table[1]);
	ksz_read32(dev, REG_SW_ALU_VAL_C, &table[2]);
	ksz_read32(dev, REG_SW_ALU_VAL_D, &table[3]);
}

static void ksz9477_write_table(struct ksz_device *dev, u32 *table)
{
	ksz_write32(dev, REG_SW_ALU_VAL_A, table[0]);
	ksz_write32(dev, REG_SW_ALU_VAL_B, table[1]);
	ksz_write32(dev, REG_SW_ALU_VAL_C, table[2]);
	ksz_write32(dev, REG_SW_ALU_VAL_D, table[3]);
}

static int ksz9477_wait_alu_ready(struct ksz_device *dev)
{
	unsigned int val;

	return regmap_read_poll_timeout(ksz_regmap_32(dev), REG_SW_ALU_CTRL__4,
					val, !(val & ALU_START), 10, 1000);
}

static int ksz9477_wait_alu_sta_ready(struct ksz_device *dev)
{
	unsigned int val;

	return regmap_read_poll_timeout(ksz_regmap_32(dev),
					REG_SW_ALU_STAT_CTRL__4,
					val, !(val & ALU_STAT_START),
					10, 1000);
}

int ksz9477_reset_switch(struct ksz_device *dev)
{
	u8 data8;
	u32 data32;

	/* reset switch */
	ksz_cfg(dev, REG_SW_OPERATION, SW_RESET, true);

	/* turn off SPI DO Edge select */
	regmap_update_bits(ksz_regmap_8(dev), REG_SW_GLOBAL_SERIAL_CTRL_0,
			   SPI_AUTO_EDGE_DETECTION, 0);

	/* default configuration */
	ksz_read8(dev, REG_SW_LUE_CTRL_1, &data8);
	data8 = SW_AGING_ENABLE | SW_LINK_AUTO_AGING |
		SW_SRC_ADDR_FILTER | SW_FLUSH_STP_TABLE | SW_FLUSH_MSTP_TABLE;
	ksz_write8(dev, REG_SW_LUE_CTRL_1, data8);

	/* disable interrupts */
	ksz_write32(dev, REG_SW_INT_MASK__4, SWITCH_INT_MASK);
	ksz_write32(dev, REG_SW_PORT_INT_MASK__4, 0x7F);
	ksz_read32(dev, REG_SW_PORT_INT_STATUS__4, &data32);

	/* KSZ9893 compatible chips do not support refclk configuration */
	if (dev->chip_id == KSZ9893_CHIP_ID ||
	    dev->chip_id == KSZ8563_CHIP_ID ||
	    dev->chip_id == KSZ9563_CHIP_ID)
		return 0;

	data8 = SW_ENABLE_REFCLKO;
	if (dev->synclko_disable)
		data8 = 0;
	else if (dev->synclko_125)
		data8 = SW_ENABLE_REFCLKO | SW_REFCLKO_IS_125MHZ;
	ksz_write8(dev, REG_SW_GLOBAL_OUTPUT_CTRL__1, data8);

	return 0;
}

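/* MIB counters are also read indirectly: the counter index plus
 * MIB_COUNTER_READ is written to the per-port MIB control register, and the
 * bit self-clears once the hardware has latched the counter value into
 * REG_PORT_MIB_DATA. Hardware counters clear on read, so the value is
 * accumulated into the software counter.
 */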
void ksz9477_r_mib_cnt(struct ksz_device *dev, int port, u16 addr, u64 *cnt)
{
	struct ksz_port *p = &dev->ports[port];
	unsigned int val;
	u32 data;
	int ret;

	/* retain the flush/freeze bit */
	data = p->freeze ? MIB_COUNTER_FLUSH_FREEZE : 0;
	data |= MIB_COUNTER_READ;
	data |= (addr << MIB_COUNTER_INDEX_S);
	ksz_pwrite32(dev, port, REG_PORT_MIB_CTRL_STAT__4, data);

	ret = regmap_read_poll_timeout(ksz_regmap_32(dev),
			PORT_CTRL_ADDR(port, REG_PORT_MIB_CTRL_STAT__4),
			val, !(val & MIB_COUNTER_READ), 10, 1000);
	/* failed to read MIB; bail out */
	if (ret) {
		dev_dbg(dev->dev, "Failed to get MIB\n");
		return;
	}

	/* count resets upon read */
	ksz_pread32(dev, port, REG_PORT_MIB_DATA, &data);
	*cnt += data;
}

void ksz9477_r_mib_pkt(struct ksz_device *dev, int port, u16 addr,
		       u64 *dropped, u64 *cnt)
{
	addr = dev->info->mib_names[addr].index;
	ksz9477_r_mib_cnt(dev, port, addr, cnt);
}

void ksz9477_freeze_mib(struct ksz_device *dev, int port, bool freeze)
{
	u32 val = freeze ? MIB_COUNTER_FLUSH_FREEZE : 0;
	struct ksz_port *p = &dev->ports[port];

	/* enable/disable the port for flush/freeze function */
	mutex_lock(&p->mib.cnt_mutex);
	ksz_pwrite32(dev, port, REG_PORT_MIB_CTRL_STAT__4, val);

	/* used by MIB counter reading code to know freeze is enabled */
	p->freeze = freeze;
	mutex_unlock(&p->mib.cnt_mutex);
}

void ksz9477_port_init_cnt(struct ksz_device *dev, int port)
{
	struct ksz_port_mib *mib = &dev->ports[port].mib;

	/* flush all enabled port MIB counters */
	mutex_lock(&mib->cnt_mutex);
	ksz_pwrite32(dev, port, REG_PORT_MIB_CTRL_STAT__4,
		     MIB_COUNTER_FLUSH_FREEZE);
	ksz_write8(dev, REG_SW_MAC_CTRL_6, SW_MIB_COUNTER_FLUSH);
	ksz_pwrite32(dev, port, REG_PORT_MIB_CTRL_STAT__4, 0);
	mutex_unlock(&mib->cnt_mutex);
}

static void ksz9477_r_phy_quirks(struct ksz_device *dev, u16 addr, u16 reg,
				 u16 *data)
{
	/* The KSZ8563R does not have extended registers, but the BMSR_ESTATEN
	 * and BMSR_ERCAP bits are set.
	 */
	if (dev->chip_id == KSZ8563_CHIP_ID && reg == MII_BMSR)
		*data &= ~(BMSR_ESTATEN | BMSR_ERCAP);
}

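/* For ports without an internal PHY, the fixed values below make the port
 * appear as a Microchip gigabit PHY that has auto-negotiated a 1000 Mb/s
 * full-duplex link (e.g. MII_BMCR 0x1140 reports autoneg enabled, full
 * duplex and 1000 Mb/s speed).
 */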
int ksz9477_r_phy(struct ksz_device *dev, u16 addr, u16 reg, u16 *data)
{
	u16 val = 0xffff;
	int ret;

	/* No real PHY after this. Simulate the PHY.
	 * A fixed PHY can be set up in the device tree, but this function is
	 * still called for that port during initialization.
	 * For an RGMII PHY there is no way to access it, so the fixed PHY
	 * should be used. For an SGMII PHY the supporting code will be added
	 * later.
	 */
	if (!dev->info->internal_phy[addr]) {
		struct ksz_port *p = &dev->ports[addr];

		switch (reg) {
		case MII_BMCR:
			val = 0x1140;
			break;
		case MII_BMSR:
			val = 0x796d;
			break;
		case MII_PHYSID1:
			val = 0x0022;
			break;
		case MII_PHYSID2:
			val = 0x1631;
			break;
		case MII_ADVERTISE:
			val = 0x05e1;
			break;
		case MII_LPA:
			val = 0xc5e1;
			break;
		case MII_CTRL1000:
			val = 0x0700;
			break;
		case MII_STAT1000:
			if (p->phydev.speed == SPEED_1000)
				val = 0x3800;
			else
				val = 0;
			break;
		}
	} else {
		ret = ksz_pread16(dev, addr, 0x100 + (reg << 1), &val);
		if (ret)
			return ret;

		ksz9477_r_phy_quirks(dev, addr, reg, &val);
	}

	*data = val;

	return 0;
}

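/* For the errata path in ksz9477_w_phy(), 16-bit PHY registers are paired
 * inside 32-bit switch registers: the even-numbered register of each pair
 * occupies the upper 16 bits and the odd-numbered one the lower 16 bits,
 * so a masked 32-bit read-modify-write updates exactly one of them.
 */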
int ksz9477_w_phy(struct ksz_device *dev, u16 addr, u16 reg, u16 val)
{
	u32 mask, val32;

	/* No real PHY after this. */
	if (!dev->info->internal_phy[addr])
		return 0;

	if (reg < 0x10)
		return ksz_pwrite16(dev, addr, 0x100 + (reg << 1), val);

	/* Errata: When using SPI, I2C, or in-band register access,
	 * writes to certain PHY registers should be performed as
	 * 32-bit writes instead of 16-bit writes.
	 */
	val32 = val;
	mask = 0xffff;
	if ((reg & 1) == 0) {
		val32 <<= 16;
		mask <<= 16;
	}
	reg &= ~1;
	return ksz_prmw32(dev, addr, 0x100 + (reg << 1), mask, val32);
}

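/* The port VLAN membership register is a bitmap of egress ports: frames
 * received on this port may only be forwarded to ports whose bits are set
 * in @member.
 */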
void ksz9477_cfg_port_member(struct ksz_device *dev, int port, u8 member)
{
	ksz_pwrite32(dev, port, REG_PORT_VLAN_MEMBERSHIP__4, member);
}

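/* When flushing a single port, address learning on that port is temporarily
 * disabled below so that entries are not re-learned while the flush is in
 * progress.
 */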
void ksz9477_flush_dyn_mac_table(struct ksz_device *dev, int port)
{
	const u16 *regs = dev->info->regs;
	u8 data;

	regmap_update_bits(ksz_regmap_8(dev), REG_SW_LUE_CTRL_2,
			   SW_FLUSH_OPTION_M << SW_FLUSH_OPTION_S,
			   SW_FLUSH_OPTION_DYN_MAC << SW_FLUSH_OPTION_S);

	if (port < dev->info->port_cnt) {
		/* flush individual port */
		ksz_pread8(dev, port, regs[P_STP_CTRL], &data);
		if (!(data & PORT_LEARN_DISABLE))
			ksz_pwrite8(dev, port, regs[P_STP_CTRL],
				    data | PORT_LEARN_DISABLE);
		ksz_cfg(dev, S_FLUSH_TABLE_CTRL, SW_FLUSH_DYN_MAC_TABLE, true);
		ksz_pwrite8(dev, port, regs[P_STP_CTRL], data);
	} else {
		/* flush all */
		ksz_cfg(dev, S_FLUSH_TABLE_CTRL, SW_FLUSH_STP_TABLE, true);
	}
}

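/* VLAN filtering combines a global switch setting with a per-port lookup
 * option; note the ordering below: the port lookup is enabled before the
 * global VLAN enable, and disabled after it.
 */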
int ksz9477_port_vlan_filtering(struct ksz_device *dev, int port,
				bool flag, struct netlink_ext_ack *extack)
{
	if (flag) {
		ksz_port_cfg(dev, port, REG_PORT_LUE_CTRL,
			     PORT_VLAN_LOOKUP_VID_0, true);
		ksz_cfg(dev, REG_SW_LUE_CTRL_0, SW_VLAN_ENABLE, true);
	} else {
		ksz_cfg(dev, REG_SW_LUE_CTRL_0, SW_VLAN_ENABLE, false);
		ksz_port_cfg(dev, port, REG_PORT_LUE_CTRL,
			     PORT_VLAN_LOOKUP_VID_0, false);
	}

	return 0;
}

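/* Layout of the three vlan_table words used below: word 0 holds the valid
 * bit and FID, word 1 is the untagged-egress port bitmap, and word 2 is the
 * port membership bitmap.
 */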
int ksz9477_port_vlan_add(struct ksz_device *dev, int port,
			  const struct switchdev_obj_port_vlan *vlan,
			  struct netlink_ext_ack *extack)
{
	u32 vlan_table[3];
	bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
	int err;

	err = ksz9477_get_vlan_table(dev, vlan->vid, vlan_table);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "Failed to get vlan table");
		return err;
	}

	vlan_table[0] = VLAN_VALID | (vlan->vid & VLAN_FID_M);
	if (untagged)
		vlan_table[1] |= BIT(port);
	else
		vlan_table[1] &= ~BIT(port);
	vlan_table[1] &= ~(BIT(dev->cpu_port));

	vlan_table[2] |= BIT(port) | BIT(dev->cpu_port);

	err = ksz9477_set_vlan_table(dev, vlan->vid, vlan_table);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "Failed to set vlan table");
		return err;
	}

	/* change PVID */
	if (vlan->flags & BRIDGE_VLAN_INFO_PVID)
		ksz_pwrite16(dev, port, REG_PORT_DEFAULT_VID, vlan->vid);

	return 0;
}

2022-06-22 14:34:23 +05:30
|
|
|
int ksz9477_port_vlan_del(struct ksz_device *dev, int port,
|
|
|
|
const struct switchdev_obj_port_vlan *vlan)
|
2018-11-20 15:55:09 -08:00
|
|
|
{
|
|
|
|
bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
|
|
|
|
u32 vlan_table[3];
|
|
|
|
u16 pvid;
|
|
|
|
|
|
|
|
ksz_pread16(dev, port, REG_PORT_DEFAULT_VID, &pvid);
|
|
|
|
pvid = pvid & 0xFFF;
|
|
|
|
|
net: switchdev: remove vid_begin -> vid_end range from VLAN objects
The call path of a switchdev VLAN addition to the bridge looks something
like this today:
nbp_vlan_init
| __br_vlan_set_default_pvid
| | |
| | br_afspec |
| | | |
| | v |
| | br_process_vlan_info |
| | | |
| | v |
| | br_vlan_info |
| | / \ /
| | / \ /
| | / \ /
| | / \ /
v v v v v
nbp_vlan_add br_vlan_add ------+
| ^ ^ | |
| / | | |
| / / / |
\ br_vlan_get_master/ / v
\ ^ / / br_vlan_add_existing
\ | / / |
\ | / / /
\ | / / /
\ | / / /
\ | / / /
v | | v /
__vlan_add /
/ | /
/ | /
v | /
__vlan_vid_add | /
\ | /
v v v
br_switchdev_port_vlan_add
The ranges UAPI was introduced to the bridge in commit bdced7ef7838
("bridge: support for multiple vlans and vlan ranges in setlink and
dellink requests") (Jan 10 2015). But the VLAN ranges (parsed in br_afspec)
have always been passed one by one, through struct bridge_vlan_info
tmp_vinfo, to br_vlan_info. So the range never went too far in depth.
Then Scott Feldman introduced the switchdev_port_bridge_setlink function
in commit 47f8328bb1a4 ("switchdev: add new switchdev bridge setlink").
That marked the introduction of the SWITCHDEV_OBJ_PORT_VLAN, which made
full use of the range. But switchdev_port_bridge_setlink was called like
this:
br_setlink
-> br_afspec
-> switchdev_port_bridge_setlink
Basically, the switchdev and the bridge code were not tightly integrated.
Then commit 41c498b9359e ("bridge: restore br_setlink back to original")
came, and switchdev drivers were required to implement
.ndo_bridge_setlink = switchdev_port_bridge_setlink for a while.
In the meantime, commits such as 0944d6b5a2fa ("bridge: try switchdev op
first in __vlan_vid_add/del") finally made switchdev penetrate the
br_vlan_info() barrier and start to develop the call path we have today.
But remember, br_vlan_info() still receives VLANs one by one.
Then Arkadi Sharshevsky refactored the switchdev API in 2017 in commit
29ab586c3d83 ("net: switchdev: Remove bridge bypass support from
switchdev") so that drivers would not implement .ndo_bridge_setlink any
longer. The switchdev_port_bridge_setlink also got deleted.
This refactoring removed the parallel bridge_setlink implementation from
switchdev, and left the only switchdev VLAN objects to be the ones
offloaded from __vlan_vid_add (basically RX filtering) and __vlan_add
(the latter coming from commit 9c86ce2c1ae3 ("net: bridge: Notify about
bridge VLANs")).
That is to say, today the switchdev VLAN object ranges are not used in
the kernel. Refactoring the above call path is a bit complicated, when
the bridge VLAN call path is already a bit complicated.
Let's go off and finish the job of commit 29ab586c3d83 by deleting the
bogus iteration through the VLAN ranges from the drivers. Some aspects
of this feature never made too much sense in the first place. For
example, what is a range of VLANs all having the BRIDGE_VLAN_INFO_PVID
flag supposed to mean, when a port can obviously have a single pvid?
This particular configuration _is_ denied as of commit 6623c60dc28e
("bridge: vlan: enforce no pvid flag in vlan ranges"), but from an API
perspective, the driver still has to play pretend, and only offload the
vlan->vid_end as pvid. And the addition of a switchdev VLAN object can
modify the flags of another, completely unrelated, switchdev VLAN
object! (a VLAN that is PVID will invalidate the PVID flag from whatever
other VLAN had previously been offloaded with switchdev and had that
flag. Yet switchdev never notifies about that change, drivers are
supposed to guess).
Nonetheless, having a VLAN range in the API makes error handling look
scarier than it really is - unwinding on errors and all of that.
When in reality, no one really calls this API with more than one VLAN.
It is all unnecessary complexity.
And despite appearing pretentious (two-phase transactional model and
all), the switchdev API is really sloppy because the VLAN addition and
removal operations are not paired with one another (you can add a VLAN
100 times and delete it just once). The bridge notifies through
switchdev of a VLAN addition not only when the flags of an existing VLAN
change, but also when nothing changes. There are switchdev drivers out
there who don't like adding a VLAN that has already been added, and
those checks don't really belong at driver level. But the fact that the
API contains ranges is yet another factor that prevents this from being
addressed in the future.
Of the existing switchdev pieces of hardware, it appears that only
Mellanox Spectrum supports offloading more than one VLAN at a time,
through mlxsw_sp_port_vlan_set. I have kept that code internal to the
driver, because there is some more bookkeeping that makes use of it, but
I deleted it from the switchdev API. But since the switchdev support for
ranges has already been de facto deleted by a Mellanox employee and
nobody noticed for 4 years, I'm going to assume it's not a biggie.
Signed-off-by: Vladimir Oltean <vladimir.oltean@nxp.com>
Reviewed-by: Ido Schimmel <idosch@nvidia.com> # switchdev and mlxsw
Reviewed-by: Florian Fainelli <f.fainelli@gmail.com>
Reviewed-by: Kurt Kanzenbach <kurt@linutronix.de> # hellcreek
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
2021-01-09 02:01:46 +02:00
|
|
|
if (ksz9477_get_vlan_table(dev, vlan->vid, vlan_table)) {
|
|
|
|
dev_dbg(dev->dev, "Failed to get vlan table\n");
|
|
|
|
return -ETIMEDOUT;
|
|
|
|
}
|
2018-11-20 15:55:09 -08:00
|
|
|
|
net: switchdev: remove vid_begin -> vid_end range from VLAN objects
The call path of a switchdev VLAN addition to the bridge looks something
like this today:
nbp_vlan_init
| __br_vlan_set_default_pvid
| | |
| | br_afspec |
| | | |
| | v |
| | br_process_vlan_info |
| | | |
| | v |
| | br_vlan_info |
| | / \ /
| | / \ /
| | / \ /
| | / \ /
v v v v v
nbp_vlan_add br_vlan_add ------+
| ^ ^ | |
| / | | |
| / / / |
\ br_vlan_get_master/ / v
\ ^ / / br_vlan_add_existing
\ | / / |
\ | / / /
\ | / / /
\ | / / /
\ | / / /
v | | v /
__vlan_add /
/ | /
/ | /
v | /
__vlan_vid_add | /
\ | /
v v v
br_switchdev_port_vlan_add
The ranges UAPI was introduced to the bridge in commit bdced7ef7838
("bridge: support for multiple vlans and vlan ranges in setlink and
dellink requests") (Jan 10 2015). But the VLAN ranges (parsed in br_afspec)
have always been passed one by one, through struct bridge_vlan_info
tmp_vinfo, to br_vlan_info. So the range never went too far in depth.
Then Scott Feldman introduced the switchdev_port_bridge_setlink function
in commit 47f8328bb1a4 ("switchdev: add new switchdev bridge setlink").
That marked the introduction of the SWITCHDEV_OBJ_PORT_VLAN, which made
full use of the range. But switchdev_port_bridge_setlink was called like
this:
br_setlink
-> br_afspec
-> switchdev_port_bridge_setlink
Basically, the switchdev and the bridge code were not tightly integrated.
Then commit 41c498b9359e ("bridge: restore br_setlink back to original")
came, and switchdev drivers were required to implement
.ndo_bridge_setlink = switchdev_port_bridge_setlink for a while.

	vlan_table[2] &= ~BIT(port);

	if (pvid == vlan->vid)
		pvid = 1;

	if (untagged)
		vlan_table[1] &= ~BIT(port);

	if (ksz9477_set_vlan_table(dev, vlan->vid, vlan_table)) {
		dev_dbg(dev->dev, "Failed to set vlan table\n");
		return -ETIMEDOUT;
	}

	ksz_pwrite16(dev, port, REG_PORT_DEFAULT_VID, pvid);

	return 0;
}
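
/**
 * ksz9477_fdb_add - Add a static FDB entry to the ALU table.
 * @dev: The device structure.
 * @port: The port number.
 * @addr: MAC address of the entry.
 * @vid: VLAN ID of the entry; used as the FID when non-zero.
 * @db: DSA forwarding database the entry belongs to.
 *
 * The MAC address and FID are loaded into the two ALU index registers and
 * the existing entry, if any, is read back. The requesting port is then
 * OR-ed into the forwarding port map and the entry is written back as a
 * static one.
 *
 * Return: 0 on success, or an error code on failure.
 */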
int ksz9477_fdb_add(struct ksz_device *dev, int port,
		    const unsigned char *addr, u16 vid, struct dsa_db db)
{
	u32 alu_table[4];
	u32 data;
	int ret = 0;

	mutex_lock(&dev->alu_mutex);

	/* find any entry with mac & vid */
	data = vid << ALU_FID_INDEX_S;
	data |= ((addr[0] << 8) | addr[1]);
	ksz_write32(dev, REG_SW_ALU_INDEX_0, data);

	data = ((addr[2] << 24) | (addr[3] << 16));
	data |= ((addr[4] << 8) | addr[5]);
	ksz_write32(dev, REG_SW_ALU_INDEX_1, data);

	/* start read operation */
	ksz_write32(dev, REG_SW_ALU_CTRL__4, ALU_READ | ALU_START);

	/* wait to be finished */
	ret = ksz9477_wait_alu_ready(dev);
	if (ret) {
		dev_dbg(dev->dev, "Failed to read ALU\n");
		goto exit;
	}

	/* read ALU entry */
	ksz9477_read_table(dev, alu_table);

	/* update ALU entry */
	alu_table[0] = ALU_V_STATIC_VALID;
	alu_table[1] |= BIT(port);
	if (vid)
		alu_table[1] |= ALU_V_USE_FID;
	alu_table[2] = (vid << ALU_V_FID_S);
	alu_table[2] |= ((addr[0] << 8) | addr[1]);
	alu_table[3] = ((addr[2] << 24) | (addr[3] << 16));
	alu_table[3] |= ((addr[4] << 8) | addr[5]);

	ksz9477_write_table(dev, alu_table);

	ksz_write32(dev, REG_SW_ALU_CTRL__4, ALU_WRITE | ALU_START);

	/* wait to be finished */
	ret = ksz9477_wait_alu_ready(dev);
	if (ret)
		dev_dbg(dev->dev, "Failed to write ALU\n");

exit:
	mutex_unlock(&dev->alu_mutex);

	return ret;
}
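
/* A worked example of the ALU index packing used by ksz9477_fdb_add() and
 * ksz9477_fdb_del() above and below - a sketch, assuming ALU_FID_INDEX_S
 * is 16, as its pairing with the two high MAC bytes in the low half of the
 * register suggests: for addr = 00:11:22:33:44:55 and vid = 10,
 *
 *	REG_SW_ALU_INDEX_0 = (10 << 16) | 0x0011 = 0x000a0011
 *	REG_SW_ALU_INDEX_1 = 0x22334455
 *
 * i.e. INDEX_0 carries the FID and the two high MAC bytes, INDEX_1 the
 * remaining four bytes - the same layout the entry itself uses in
 * alu_table[2] and alu_table[3].
 */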

int ksz9477_fdb_del(struct ksz_device *dev, int port,
		    const unsigned char *addr, u16 vid, struct dsa_db db)
{
	u32 alu_table[4];
	u32 data;
	int ret = 0;

	mutex_lock(&dev->alu_mutex);

	/* read any entry with mac & vid */
	data = vid << ALU_FID_INDEX_S;
	data |= ((addr[0] << 8) | addr[1]);
	ksz_write32(dev, REG_SW_ALU_INDEX_0, data);

	data = ((addr[2] << 24) | (addr[3] << 16));
	data |= ((addr[4] << 8) | addr[5]);
	ksz_write32(dev, REG_SW_ALU_INDEX_1, data);

	/* start read operation */
	ksz_write32(dev, REG_SW_ALU_CTRL__4, ALU_READ | ALU_START);

	/* wait to be finished */
	ret = ksz9477_wait_alu_ready(dev);
	if (ret) {
		dev_dbg(dev->dev, "Failed to read ALU\n");
		goto exit;
	}

	ksz_read32(dev, REG_SW_ALU_VAL_A, &alu_table[0]);
	if (alu_table[0] & ALU_V_STATIC_VALID) {
		ksz_read32(dev, REG_SW_ALU_VAL_B, &alu_table[1]);
		ksz_read32(dev, REG_SW_ALU_VAL_C, &alu_table[2]);
		ksz_read32(dev, REG_SW_ALU_VAL_D, &alu_table[3]);

		/* clear forwarding port */
		alu_table[1] &= ~BIT(port);

		/* if there is no port to forward, clear table */
		if ((alu_table[1] & ALU_V_PORT_MAP) == 0) {
			alu_table[0] = 0;
			alu_table[1] = 0;
			alu_table[2] = 0;
			alu_table[3] = 0;
		}
	} else {
		alu_table[0] = 0;
		alu_table[1] = 0;
		alu_table[2] = 0;
		alu_table[3] = 0;
	}

	ksz9477_write_table(dev, alu_table);

	ksz_write32(dev, REG_SW_ALU_CTRL__4, ALU_WRITE | ALU_START);

	/* wait to be finished */
	ret = ksz9477_wait_alu_ready(dev);
	if (ret)
		dev_dbg(dev->dev, "Failed to write ALU\n");

exit:
	mutex_unlock(&dev->alu_mutex);

	return ret;
}

static void ksz9477_convert_alu(struct alu_struct *alu, u32 *alu_table)
{
	alu->is_static = !!(alu_table[0] & ALU_V_STATIC_VALID);
	alu->is_src_filter = !!(alu_table[0] & ALU_V_SRC_FILTER);
	alu->is_dst_filter = !!(alu_table[0] & ALU_V_DST_FILTER);
	alu->prio_age = (alu_table[0] >> ALU_V_PRIO_AGE_CNT_S) &
			ALU_V_PRIO_AGE_CNT_M;
	alu->mstp = alu_table[0] & ALU_V_MSTP_M;

	alu->is_override = !!(alu_table[1] & ALU_V_OVERRIDE);
	alu->is_use_fid = !!(alu_table[1] & ALU_V_USE_FID);
	alu->port_forward = alu_table[1] & ALU_V_PORT_MAP;

	alu->fid = (alu_table[2] >> ALU_V_FID_S) & ALU_V_FID_M;

	alu->mac[0] = (alu_table[2] >> 8) & 0xFF;
	alu->mac[1] = alu_table[2] & 0xFF;
	alu->mac[2] = (alu_table[3] >> 24) & 0xFF;
	alu->mac[3] = (alu_table[3] >> 16) & 0xFF;
	alu->mac[4] = (alu_table[3] >> 8) & 0xFF;
	alu->mac[5] = alu_table[3] & 0xFF;
}
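
/* ksz9477_fdb_dump() below walks the ALU in hardware search mode and feeds
 * every entry forwarding to @port into @cb. The callback has the
 * dsa_fdb_dump_cb_t shape from <net/dsa.h>; a minimal consumer, purely
 * illustrative:
 *
 *	static int count_static_cb(const unsigned char *addr, u16 vid,
 *				   bool is_static, void *data)
 *	{
 *		int *count = data;
 *
 *		if (is_static)
 *			(*count)++;
 *		return 0;
 *	}
 *
 * A non-zero return from the callback aborts the walk; that is how the
 * function propagates callback errors.
 */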

int ksz9477_fdb_dump(struct ksz_device *dev, int port,
		     dsa_fdb_dump_cb_t *cb, void *data)
{
	int ret = 0;
	u32 ksz_data;
	u32 alu_table[4];
	struct alu_struct alu;
	int timeout;

	mutex_lock(&dev->alu_mutex);

	/* start ALU search */
	ksz_write32(dev, REG_SW_ALU_CTRL__4, ALU_START | ALU_SEARCH);

	do {
		timeout = 1000;
		do {
			ksz_read32(dev, REG_SW_ALU_CTRL__4, &ksz_data);
			if ((ksz_data & ALU_VALID) || !(ksz_data & ALU_START))
				break;
			usleep_range(1, 10);
		} while (timeout-- > 0);

		if (!timeout) {
			dev_dbg(dev->dev, "Failed to search ALU\n");
			ret = -ETIMEDOUT;
			goto exit;
		}

		if (!(ksz_data & ALU_VALID))
			continue;

		/* read ALU table */
		ksz9477_read_table(dev, alu_table);

		ksz9477_convert_alu(&alu, alu_table);

		if (alu.port_forward & BIT(port)) {
			ret = cb(alu.mac, alu.fid, alu.is_static, data);
			if (ret)
				goto exit;
		}
	} while (ksz_data & ALU_START);

exit:
	/* stop ALU search */
	ksz_write32(dev, REG_SW_ALU_CTRL__4, 0);

	mutex_unlock(&dev->alu_mutex);

	return ret;
}
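
/**
 * ksz9477_mdb_add - Add a multicast group entry to the static ALU table.
 * @dev: The device structure.
 * @port: The port number.
 * @mdb: Multicast database entry with the group address and VLAN ID.
 * @db: DSA forwarding database the entry belongs to.
 *
 * The static ALU table is scanned entry by entry until either an entry
 * matching the group address and VLAN ID or a free slot is found. The port
 * is then OR-ed into the forwarding port map and the entry written back at
 * the same index.
 *
 * Return: 0 on success, -ENOSPC when the static table is full, or another
 * error code on failure.
 */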
int ksz9477_mdb_add(struct ksz_device *dev, int port,
		    const struct switchdev_obj_port_mdb *mdb, struct dsa_db db)
{
	u32 static_table[4];
	const u8 *shifts;
	const u32 *masks;
	u32 data;
	int index;
	u32 mac_hi, mac_lo;
	int err = 0;

	shifts = dev->info->shifts;
	masks = dev->info->masks;

	mac_hi = ((mdb->addr[0] << 8) | mdb->addr[1]);
	mac_lo = ((mdb->addr[2] << 24) | (mdb->addr[3] << 16));
	mac_lo |= ((mdb->addr[4] << 8) | mdb->addr[5]);

	mutex_lock(&dev->alu_mutex);

	for (index = 0; index < dev->info->num_statics; index++) {
		/* find empty slot first */
		data = (index << shifts[ALU_STAT_INDEX]) |
			masks[ALU_STAT_READ] | ALU_STAT_START;
		ksz_write32(dev, REG_SW_ALU_STAT_CTRL__4, data);

		/* wait to be finished */
		err = ksz9477_wait_alu_sta_ready(dev);
		if (err) {
			dev_dbg(dev->dev, "Failed to read ALU STATIC\n");
			goto exit;
		}

		/* read ALU static table */
		ksz9477_read_table(dev, static_table);

		if (static_table[0] & ALU_V_STATIC_VALID) {
			/* check this has same vid & mac address */
			if (((static_table[2] >> ALU_V_FID_S) == mdb->vid) &&
			    ((static_table[2] & ALU_V_MAC_ADDR_HI) == mac_hi) &&
			    static_table[3] == mac_lo) {
				/* found matching one */
				break;
			}
		} else {
			/* found empty one */
			break;
		}
	}

	/* no available entry */
	if (index == dev->info->num_statics) {
		err = -ENOSPC;
		goto exit;
	}

	/* add entry */
	static_table[0] = ALU_V_STATIC_VALID;
	static_table[1] |= BIT(port);
	if (mdb->vid)
		static_table[1] |= ALU_V_USE_FID;
	static_table[2] = (mdb->vid << ALU_V_FID_S);
	static_table[2] |= mac_hi;
	static_table[3] = mac_lo;

	ksz9477_write_table(dev, static_table);

	data = (index << shifts[ALU_STAT_INDEX]) | ALU_STAT_START;
	ksz_write32(dev, REG_SW_ALU_STAT_CTRL__4, data);

	/* wait to be finished */
	if (ksz9477_wait_alu_sta_ready(dev))
		dev_dbg(dev->dev, "Failed to write ALU STATIC\n");

exit:
	mutex_unlock(&dev->alu_mutex);

	return err;
}
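
/**
 * ksz9477_mdb_del - Remove a port from a static ALU multicast entry.
 * @dev: The device structure.
 * @port: The port number.
 * @mdb: Multicast database entry with the group address and VLAN ID.
 * @db: DSA forwarding database the entry belongs to.
 *
 * The static ALU table is scanned for an entry matching the group address
 * and VLAN ID. If one is found, the port is cleared from its forwarding
 * port map, and the whole entry is zeroed once no ports are left. A lookup
 * miss is not treated as an error.
 *
 * Return: 0 on success, or an error code on failure.
 */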
int ksz9477_mdb_del(struct ksz_device *dev, int port,
		    const struct switchdev_obj_port_mdb *mdb, struct dsa_db db)
{
	u32 static_table[4];
	const u8 *shifts;
	const u32 *masks;
	u32 data;
	int index;
	int ret = 0;
	u32 mac_hi, mac_lo;

	shifts = dev->info->shifts;
	masks = dev->info->masks;

	mac_hi = ((mdb->addr[0] << 8) | mdb->addr[1]);
	mac_lo = ((mdb->addr[2] << 24) | (mdb->addr[3] << 16));
	mac_lo |= ((mdb->addr[4] << 8) | mdb->addr[5]);

	mutex_lock(&dev->alu_mutex);

	for (index = 0; index < dev->info->num_statics; index++) {
		/* find the matching entry */
		data = (index << shifts[ALU_STAT_INDEX]) |
			masks[ALU_STAT_READ] | ALU_STAT_START;
		ksz_write32(dev, REG_SW_ALU_STAT_CTRL__4, data);

		/* wait to be finished */
		ret = ksz9477_wait_alu_sta_ready(dev);
		if (ret) {
			dev_dbg(dev->dev, "Failed to read ALU STATIC\n");
			goto exit;
		}

		/* read ALU static table */
		ksz9477_read_table(dev, static_table);

		if (static_table[0] & ALU_V_STATIC_VALID) {
			/* check this has same vid & mac address */

			if (((static_table[2] >> ALU_V_FID_S) == mdb->vid) &&
			    ((static_table[2] & ALU_V_MAC_ADDR_HI) == mac_hi) &&
			    static_table[3] == mac_lo) {
				/* found matching one */
				break;
			}
		}
	}

	/* no matching entry */
	if (index == dev->info->num_statics)
		goto exit;

	/* clear port */
	static_table[1] &= ~BIT(port);

	if ((static_table[1] & ALU_V_PORT_MAP) == 0) {
		/* delete entry */
		static_table[0] = 0;
		static_table[1] = 0;
		static_table[2] = 0;
		static_table[3] = 0;
	}

	ksz9477_write_table(dev, static_table);

	data = (index << shifts[ALU_STAT_INDEX]) | ALU_STAT_START;
	ksz_write32(dev, REG_SW_ALU_STAT_CTRL__4, data);

	/* wait to be finished */
	ret = ksz9477_wait_alu_sta_ready(dev);
	if (ret)
		dev_dbg(dev->dev, "Failed to write ALU STATIC\n");

exit:
	mutex_unlock(&dev->alu_mutex);

	return ret;
}

int ksz9477_port_mirror_add(struct ksz_device *dev, int port,
			    struct dsa_mall_mirror_tc_entry *mirror,
			    bool ingress, struct netlink_ext_ack *extack)
{
	u8 data;
	int p;

	/* Limit to one sniffer port.
	 * Check if any port is already set for sniffing.
	 * If yes, instruct the user to remove the previous entry & exit.
	 */
	for (p = 0; p < dev->info->port_cnt; p++) {
		/* Skip the current sniffing port */
		if (p == mirror->to_local_port)
			continue;

		ksz_pread8(dev, p, P_MIRROR_CTRL, &data);

		if (data & PORT_MIRROR_SNIFFER) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Sniffer port is already configured, delete existing rules & retry");
			return -EBUSY;
		}
	}

	if (ingress)
		ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_RX, true);
	else
		ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_TX, true);

	/* configure mirror port */
	ksz_port_cfg(dev, mirror->to_local_port, P_MIRROR_CTRL,
		     PORT_MIRROR_SNIFFER, true);

	ksz_cfg(dev, S_MIRROR_CTRL, SW_MIRROR_RX_TX, false);

	return 0;
}
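
/* One way the mirroring callbacks here are typically exercised from user
 * space is through the tc matchall classifier - an illustrative sketch,
 * assuming lan1 and lan3 are user ports of this switch:
 *
 *	tc qdisc add dev lan1 clsact
 *	tc filter add dev lan1 ingress matchall \
 *		action mirred egress mirror dev lan3
 *
 * DSA then calls ksz9477_port_mirror_add() with lan3 as the sniffer port
 * (mirror->to_local_port); the driver enforces a single active sniffer
 * port across the whole switch.
 */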
void ksz9477_port_mirror_del(struct ksz_device *dev, int port,
			     struct dsa_mall_mirror_tc_entry *mirror)
{
	bool in_use = false;
	u8 data;
	int p;

	if (mirror->ingress)
		ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_RX, false);
	else
		ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_TX, false);

	/* Check if any port is still referring to the sniffer port */
	for (p = 0; p < dev->info->port_cnt; p++) {
		ksz_pread8(dev, p, P_MIRROR_CTRL, &data);

		if ((data & (PORT_MIRROR_RX | PORT_MIRROR_TX))) {
			in_use = true;
			break;
		}
	}

	/* delete sniffing if there are no other mirroring rules */
	if (!in_use)
		ksz_port_cfg(dev, mirror->to_local_port, P_MIRROR_CTRL,
			     PORT_MIRROR_SNIFFER, false);
}

static phy_interface_t ksz9477_get_interface(struct ksz_device *dev, int port)
{
	phy_interface_t interface;
	bool gbit;

	if (dev->info->internal_phy[port])
		return PHY_INTERFACE_MODE_NA;

	gbit = ksz_get_gbit(dev, port);

	interface = ksz_get_xmii(dev, port, gbit);

	return interface;
}

void ksz9477_get_caps(struct ksz_device *dev, int port,
		      struct phylink_config *config)
{
	config->mac_capabilities = MAC_10 | MAC_100 | MAC_ASYM_PAUSE |
				   MAC_SYM_PAUSE;

	if (dev->info->gbit_capable[port])
		config->mac_capabilities |= MAC_1000FD;
}

int ksz9477_set_ageing_time(struct ksz_device *dev, unsigned int msecs)
{
	u32 secs = msecs / 1000;
	u8 value;
	u8 data;
	int ret;

	value = FIELD_GET(SW_AGE_PERIOD_7_0_M, secs);

	ret = ksz_write8(dev, REG_SW_LUE_CTRL_3, value);
	if (ret < 0)
		return ret;

	data = FIELD_GET(SW_AGE_PERIOD_10_8_M, secs);

	ret = ksz_read8(dev, REG_SW_LUE_CTRL_0, &value);
	if (ret < 0)
		return ret;

	value &= ~SW_AGE_CNT_M;
	value |= FIELD_PREP(SW_AGE_CNT_M, data);

	return ksz_write8(dev, REG_SW_LUE_CTRL_0, value);
}
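
/* A worked example for ksz9477_set_ageing_time() above, assuming the two
 * masks cover bits 7:0 and 10:8 of the ageing period as their names
 * suggest: for msecs = 300000, secs = 300 = 0x12c, so 0x2c is written to
 * REG_SW_LUE_CTRL_3 and 1 lands in the SW_AGE_CNT_M field of
 * REG_SW_LUE_CTRL_0, with the other bits of that register preserved by the
 * read-modify-write.
 */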
void ksz9477_port_queue_split(struct ksz_device *dev, int port)
{
	u8 data;

	if (dev->info->num_tx_queues == 8)
		data = PORT_EIGHT_QUEUE;
	else if (dev->info->num_tx_queues == 4)
		data = PORT_FOUR_QUEUE;
	else if (dev->info->num_tx_queues == 2)
		data = PORT_TWO_QUEUE;
	else
		data = PORT_SINGLE_QUEUE;

	ksz_prmw8(dev, port, REG_PORT_CTRL_0, PORT_QUEUE_SPLIT_MASK, data);
}
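
/**
 * ksz9477_port_setup - Apply the per-port base configuration.
 * @dev: The device structure.
 * @port: The port number.
 * @cpu_port: Whether this port connects to the host CPU.
 *
 * Enables the tail tag on the CPU port, splits the egress queues, disables
 * MAC loopback, enables back pressure and broadcast storm protection,
 * forces flow control on ports without an internal PHY, programs the port
 * membership, and clears any pending PHY interrupt and wake event state.
 */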
void ksz9477_port_setup(struct ksz_device *dev, int port, bool cpu_port)
{
	struct dsa_switch *ds = dev->ds;
	u16 data16;
	u8 member;

	/* enable tail tag for host port */
	if (cpu_port)
		ksz_port_cfg(dev, port, REG_PORT_CTRL_0, PORT_TAIL_TAG_ENABLE,
			     true);

	ksz9477_port_queue_split(dev, port);

	ksz_port_cfg(dev, port, REG_PORT_CTRL_0, PORT_MAC_LOOPBACK, false);

	/* set back pressure */
	ksz_port_cfg(dev, port, REG_PORT_MAC_CTRL_1, PORT_BACK_PRESSURE, true);

	/* enable broadcast storm limit */
	ksz_port_cfg(dev, port, P_BCAST_STORM_CTRL, PORT_BROADCAST_STORM, true);

	/* replace priority */
	ksz_port_cfg(dev, port, REG_PORT_MRI_MAC_CTRL, PORT_USER_PRIO_CEILING,
		     false);
	ksz9477_port_cfg32(dev, port, REG_PORT_MTI_QUEUE_CTRL_0__4,
			   MTI_PVID_REPLACE, false);

	/* force flow control for non-PHY ports only */
	ksz_port_cfg(dev, port, REG_PORT_CTRL_0,
		     PORT_FORCE_TX_FLOW_CTRL | PORT_FORCE_RX_FLOW_CTRL,
		     !dev->info->internal_phy[port]);

	if (cpu_port)
		member = dsa_user_ports(ds);
	else
		member = BIT(dsa_upstream_port(ds, port));

	ksz9477_cfg_port_member(dev, port, member);

	/* clear pending interrupts */
	if (dev->info->internal_phy[port])
		ksz_pread16(dev, port, REG_PORT_PHY_INT_ENABLE, &data16);

	ksz9477_port_acl_init(dev, port);

	/* clear pending wake flags */
	ksz9477_handle_wake_reason(dev, port);

	/* Disable all WoL options by default. Otherwise
	 * ksz_switch_macaddr_get/put logic will not work properly.
	 */
	ksz_pwrite8(dev, port, REG_PORT_PME_CTRL, 0);
}
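
/**
 * ksz9477_config_cpu_port - Set up the CPU port and isolate user ports.
 * @ds: The DSA switch structure.
 *
 * Finds the CPU port among the ports the chip allows for that role,
 * resolves its PHY interface mode (from the port node, the legacy switch
 * "phy-mode" property, or the strap value read back from the XMII
 * registers), sets the port up, and then puts every other port into the
 * BR_STATE_DISABLED STP state.
 */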
void ksz9477_config_cpu_port(struct dsa_switch *ds)
{
	struct ksz_device *dev = ds->priv;
	struct ksz_port *p;
	int i;

	for (i = 0; i < dev->info->port_cnt; i++) {
		if (dsa_is_cpu_port(ds, i) &&
		    (dev->info->cpu_ports & (1 << i))) {
			phy_interface_t interface;
			const char *prev_msg;
			const char *prev_mode;

			dev->cpu_port = i;
			p = &dev->ports[i];

			/* Read from XMII register to determine host port
			 * interface. If set specifically in device tree
			 * note the difference to help debugging.
			 */
			interface = ksz9477_get_interface(dev, i);
			if (!p->interface) {
				if (dev->compat_interface) {
					dev_warn(dev->dev,
						 "Using legacy switch \"phy-mode\" property, because it is missing on port %d node. "
						 "Please update your device tree.\n",
						 i);
					p->interface = dev->compat_interface;
				} else {
					p->interface = interface;
				}
			}
			if (interface && interface != p->interface) {
				prev_msg = " instead of ";
				prev_mode = phy_modes(interface);
			} else {
				prev_msg = "";
				prev_mode = "";
			}
			dev_info(dev->dev,
				 "Port%d: using phy mode %s%s%s\n",
				 i,
				 phy_modes(p->interface),
				 prev_msg,
				 prev_mode);

			/* enable cpu port */
			ksz9477_port_setup(dev, i, true);
		}
	}

	for (i = 0; i < dev->info->port_cnt; i++) {
		if (i == dev->cpu_port)
			continue;
		ksz_port_stp_state_set(ds, i, BR_STATE_DISABLED);
	}
}

int ksz9477_enable_stp_addr(struct ksz_device *dev)
{
	const u32 *masks;
	u32 data;
	int ret;

	masks = dev->info->masks;

	/* Enable Reserved multicast table */
	ksz_cfg(dev, REG_SW_LUE_CTRL_0, SW_RESV_MCAST_ENABLE, true);

	/* Set the Override bit for forwarding BPDU packets to the CPU */
	ret = ksz_write32(dev, REG_SW_ALU_VAL_B,
			  ALU_V_OVERRIDE | BIT(dev->cpu_port));
	if (ret < 0)
		return ret;

	data = ALU_STAT_START | ALU_RESV_MCAST_ADDR | masks[ALU_STAT_WRITE];

	ret = ksz_write32(dev, REG_SW_ALU_STAT_CTRL__4, data);
	if (ret < 0)
		return ret;

	/* wait to be finished */
	ret = ksz9477_wait_alu_sta_ready(dev);
	if (ret < 0) {
		dev_err(dev->dev, "Failed to update Reserved Multicast table\n");
		return ret;
	}

	return 0;
}
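
/**
 * ksz9477_setup - One-time switch-level configuration.
 * @ds: The DSA switch structure.
 *
 * Besides the global knobs below, this programs the default maximum frame
 * size: VLAN_ETH_FRAME_LEN + ETH_FCS_LEN = 1518 + 4 = 1522 bytes, which is
 * exactly what ksz9477_change_mtu() computes for the standard 1500-byte
 * MTU (1500 + VLAN_ETH_HLEN + ETH_FCS_LEN).
 *
 * Return: 0 on success, or an error code on failure.
 */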
int ksz9477_setup(struct dsa_switch *ds)
{
	struct ksz_device *dev = ds->priv;
	int ret = 0;

	ds->mtu_enforcement_ingress = true;

	/* Required for port partitioning. */
	ksz9477_cfg32(dev, REG_SW_QM_CTRL__4, UNICAST_VLAN_BOUNDARY,
		      true);

	/* Does not work correctly with tail tagging. */
	ksz_cfg(dev, REG_SW_MAC_CTRL_0, SW_CHECK_LENGTH, false);

	/* Enable REG_SW_MTU__2 reg by setting SW_JUMBO_PACKET */
	ksz_cfg(dev, REG_SW_MAC_CTRL_1, SW_JUMBO_PACKET, true);

	/* Now we can configure the default MTU value */
	ret = regmap_update_bits(ksz_regmap_16(dev), REG_SW_MTU__2, REG_SW_MTU_MASK,
				 VLAN_ETH_FRAME_LEN + ETH_FCS_LEN);
	if (ret)
		return ret;

	/* queue based egress rate limit */
	ksz_cfg(dev, REG_SW_MAC_CTRL_5, SW_OUT_RATE_LIMIT_QUEUE_BASED, true);

	/* enable global MIB counter freeze function */
	ksz_cfg(dev, REG_SW_MAC_CTRL_6, SW_MIB_COUNTER_FREEZE, true);

	/* Make sure PME (WoL) is not enabled. If requested, it will be
	 * enabled by ksz9477_wol_pre_shutdown(). Otherwise, some PMICs do not
	 * like PME event changes before shutdown.
	 */
	ksz_write8(dev, REG_SW_PME_CTRL, 0);

	return 0;
}

u32 ksz9477_get_port_addr(int port, int offset)
{
	return PORT_CTRL_ADDR(port, offset);
}
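
/**
 * ksz9477_tc_cbs_set_cinc - Program the credit-based shaper increment.
 * @dev: The device structure.
 * @port: The port number.
 * @val: Credit increment value computed by the common layer.
 *
 * Only the upper bits of @val reach the hardware: the low byte is shifted
 * out and the result goes through a 16-bit register write, so the register
 * appears to consume the credit increment as the top 16 bits of a 24-bit
 * fixed-point value (e.g. val = 0x123456 is written as 0x1234).
 *
 * Return: 0 on success, or an error code on failure.
 */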
int ksz9477_tc_cbs_set_cinc(struct ksz_device *dev, int port, u32 val)
{
	val = val >> 8;

	return ksz_pwrite16(dev, port, REG_PORT_MTI_CREDIT_INCREMENT, val);
}

/* The KSZ9477 provides the following HW features to accelerate
 * HSR frame handling:
 *
 * 1. TX PACKET DUPLICATION FROM HOST TO SWITCH
 * 2. RX PACKET DUPLICATION DISCARDING
 * 3. PREVENTING PACKET LOOP IN THE RING BY SELF-ADDRESS FILTERING
 *
 * Only the feature from point 1 has a NETIF_F* flag available.
 *
 * The features from points 2 and 3 are "best effort" - they work
 * correctly most of the time, but some frames may slip through: there is
 * a race condition in hardware such that, when duplicate packets are
 * received on member ports very close in time to each other, the hardware
 * fails to detect that they are duplicates.
 *
 * Hence, the SW needs to handle those special cases. However, the speed
 * up gain is considerable when the above features are used.
 *
 * Moreover, the NETIF_F_HW_HSR_FWD feature is also enabled, as HSR frames
 * can be forwarded in the switch fabric between HSR ports.
 */
#define KSZ9477_SUPPORTED_HSR_FEATURES (NETIF_F_HW_HSR_DUP | NETIF_F_HW_HSR_FWD)
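
/**
 * ksz9477_hsr_join - Configure a port as an HSR ring member.
 * @ds: The DSA switch structure.
 * @port: The port number.
 * @hsr: The HSR net device the port joins.
 *
 * Adds the port to the HSR port map, bridges the HSR member ports together
 * once a second port joins, enables duplicate-discard handling on the
 * first join, turns on per-port self-address filtering, and advertises the
 * hardware HSR offload features on the user netdevice.
 */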
void ksz9477_hsr_join(struct dsa_switch *ds, int port, struct net_device *hsr)
{
	struct ksz_device *dev = ds->priv;
	struct net_device *user;
	struct dsa_port *hsr_dp;
	u8 data, hsr_ports = 0;

	/* Program which port(s) shall support HSR */
	ksz_rmw32(dev, REG_HSR_PORT_MAP__4, BIT(port), BIT(port));

	/* Forward frames between HSR ports (i.e. bridge together HSR ports) */
	if (dev->hsr_ports) {
		dsa_hsr_foreach_port(hsr_dp, ds, hsr)
			hsr_ports |= BIT(hsr_dp->index);

		hsr_ports |= BIT(dsa_upstream_port(ds, port));
		dsa_hsr_foreach_port(hsr_dp, ds, hsr)
			ksz9477_cfg_port_member(dev, hsr_dp->index, hsr_ports);
	}

	if (!dev->hsr_ports) {
		/* Enable discarding of received HSR frames */
		ksz_read8(dev, REG_HSR_ALU_CTRL_0__1, &data);
		data |= HSR_DUPLICATE_DISCARD;
		data &= ~HSR_NODE_UNICAST;
		ksz_write8(dev, REG_HSR_ALU_CTRL_0__1, data);
	}

	/* Enable per port self-address filtering.
	 * The global self-address filtering has already been enabled in the
	 * ksz9477_reset_switch() function.
	 */
	ksz_port_cfg(dev, port, REG_PORT_LUE_CTRL, PORT_SRC_ADDR_FILTER, true);

	/* Setup HW supported features for lan HSR ports */
	user = dsa_to_port(ds, port)->user;
	user->features |= KSZ9477_SUPPORTED_HSR_FEATURES;
}

void ksz9477_hsr_leave(struct dsa_switch *ds, int port, struct net_device *hsr)
{
	struct ksz_device *dev = ds->priv;

	/* Clear port HSR support */
	ksz_rmw32(dev, REG_HSR_PORT_MAP__4, BIT(port), 0);

	/* Disable forwarding frames between HSR ports */
	ksz9477_cfg_port_member(dev, port, BIT(dsa_upstream_port(ds, port)));

	/* Disable per port self-address filtering */
	ksz_port_cfg(dev, port, REG_PORT_LUE_CTRL, PORT_SRC_ADDR_FILTER, false);
}

int ksz9477_switch_init(struct ksz_device *dev)
{
	u8 data8;
	int ret;

	dev->port_mask = (1 << dev->info->port_cnt) - 1;

	/* turn off SPI DO Edge select */
	ret = ksz_read8(dev, REG_SW_GLOBAL_SERIAL_CTRL_0, &data8);
	if (ret)
		return ret;

	data8 &= ~SPI_AUTO_EDGE_DETECTION;
	ret = ksz_write8(dev, REG_SW_GLOBAL_SERIAL_CTRL_0, data8);
	if (ret)
		return ret;

	return 0;
}

void ksz9477_switch_exit(struct ksz_device *dev)
{
	ksz9477_reset_switch(dev);
}

MODULE_AUTHOR("Woojung Huh <Woojung.Huh@microchip.com>");
MODULE_DESCRIPTION("Microchip KSZ9477 Series Switch DSA Driver");
MODULE_LICENSE("GPL");