// SPDX-License-Identifier: (GPL-2.0 OR MIT)
/*
 * Copyright (c) 2018 Synopsys, Inc. and/or its affiliates.
 * stmmac TC Handling (HW only)
 */

#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include "common.h"
#include "dwmac4.h"
#include "dwmac5.h"
#include "stmmac.h"

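/* Build the catch-all terminator of the Flexible RX Parser table: it
 * matches nothing (match_en = 0), accepts every frame (af = 1) and is
 * flagged is_last so the parser stops here.
 */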
static void tc_fill_all_pass_entry(struct stmmac_tc_entry *entry)
{
	memset(entry, 0, sizeof(*entry));
	entry->in_use = true;
	entry->is_last = true;
	entry->is_frag = false;
	entry->prio = ~0x0;
	entry->handle = 0;
	entry->val.match_data = 0x0;
	entry->val.match_en = 0x0;
	entry->val.af = 1;
	entry->val.dma_ch_no = 0x0;
}

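/* Look up a parser entry by cls_u32 handle. With @free set, no existing
 * duplicate is reused; instead the first unused slot is claimed and its
 * HW values reset. Without @free, only exact non-fragment matches count.
 */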
static struct stmmac_tc_entry *tc_find_entry(struct stmmac_priv *priv,
					     struct tc_cls_u32_offload *cls,
					     bool free)
{
	struct stmmac_tc_entry *entry, *first = NULL, *dup = NULL;
	u32 loc = cls->knode.handle;
	int i;

	for (i = 0; i < priv->tc_entries_max; i++) {
		entry = &priv->tc_entries[i];
		if (!entry->in_use && !first && free)
			first = entry;
		if ((entry->handle == loc) && !free && !entry->is_frag)
			dup = entry;
	}

	if (dup)
		return dup;
	if (first) {
		first->handle = loc;
		first->in_use = true;

		/* Reset HW values */
		memset(&first->val, 0, sizeof(first->val));
	}

	return first;
}

static int tc_fill_actions(struct stmmac_tc_entry *entry,
			   struct stmmac_tc_entry *frag,
			   struct tc_cls_u32_offload *cls)
{
	struct stmmac_tc_entry *action_entry = entry;
	const struct tc_action *act;
	struct tcf_exts *exts;
	int i;

	exts = cls->knode.exts;
	if (!tcf_exts_has_actions(exts))
		return -EINVAL;
	if (frag)
		action_entry = frag;

	tcf_exts_for_each_action(i, act, exts) {
		/* Accept */
		if (is_tcf_gact_ok(act)) {
			action_entry->val.af = 1;
			break;
		}
		/* Drop */
		if (is_tcf_gact_shot(act)) {
			action_entry->val.rf = 1;
			break;
		}

		/* Unsupported */
		return -EINVAL;
	}

	return 0;
}

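/* The parser compares one 32-bit frame word per entry, so a key at a
 * non-word-aligned byte offset is split over two chained entries. For
 * example, off = 6 gives real_off = 1 and rem = 2: the low key bytes are
 * shifted into the upper half of word 1 and the remainder spills into a
 * second "fragment" entry matching word 2.
 */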
static int tc_fill_entry(struct stmmac_priv *priv,
			 struct tc_cls_u32_offload *cls)
{
	struct stmmac_tc_entry *entry, *frag = NULL;
	struct tc_u32_sel *sel = cls->knode.sel;
	u32 off, data, mask, real_off, rem;
	u32 prio = cls->common.prio << 16;
	int ret;

	/* Only 1 match per entry */
	if (sel->nkeys <= 0 || sel->nkeys > 1)
		return -EINVAL;

	off = sel->keys[0].off << sel->offshift;
	data = sel->keys[0].val;
	mask = sel->keys[0].mask;

	switch (ntohs(cls->common.protocol)) {
	case ETH_P_ALL:
		break;
	case ETH_P_IP:
		off += ETH_HLEN;
		break;
	default:
		return -EINVAL;
	}

	if (off > priv->tc_off_max)
		return -EINVAL;

	real_off = off / 4;
	rem = off % 4;

	entry = tc_find_entry(priv, cls, true);
	if (!entry)
		return -EINVAL;

	if (rem) {
		frag = tc_find_entry(priv, cls, true);
		if (!frag) {
			ret = -EINVAL;
			goto err_unuse;
		}

		entry->frag_ptr = frag;
		entry->val.match_en = (mask << (rem * 8)) &
			GENMASK(31, rem * 8);
		entry->val.match_data = (data << (rem * 8)) &
			GENMASK(31, rem * 8);
		entry->val.frame_offset = real_off;
		entry->prio = prio;

		frag->val.match_en = (mask >> (rem * 8)) &
			GENMASK(rem * 8 - 1, 0);
		frag->val.match_data = (data >> (rem * 8)) &
			GENMASK(rem * 8 - 1, 0);
		frag->val.frame_offset = real_off + 1;
		frag->prio = prio;
		frag->is_frag = true;
	} else {
		entry->frag_ptr = NULL;
		entry->val.match_en = mask;
		entry->val.match_data = data;
		entry->val.frame_offset = real_off;
		entry->prio = prio;
	}

	ret = tc_fill_actions(entry, frag, cls);
	if (ret)
		goto err_unuse;

	return 0;

err_unuse:
	if (frag)
		frag->in_use = false;
	entry->in_use = false;
	return ret;
}

static void tc_unfill_entry(struct stmmac_priv *priv,
			    struct tc_cls_u32_offload *cls)
{
	struct stmmac_tc_entry *entry;

	entry = tc_find_entry(priv, cls, false);
	if (!entry)
		return;

	entry->in_use = false;
	if (entry->frag_ptr) {
		entry = entry->frag_ptr;
		entry->is_frag = false;
		entry->in_use = false;
	}
}

static int tc_config_knode(struct stmmac_priv *priv,
			   struct tc_cls_u32_offload *cls)
{
	int ret;

	ret = tc_fill_entry(priv, cls);
	if (ret)
		return ret;

	ret = stmmac_rxp_config(priv, priv->hw->pcsr, priv->tc_entries,
				priv->tc_entries_max);
	if (ret)
		goto err_unfill;

	return 0;

err_unfill:
	tc_unfill_entry(priv, cls);
	return ret;
}

static int tc_delete_knode(struct stmmac_priv *priv,
			   struct tc_cls_u32_offload *cls)
{
	/* Set entry and fragments as not used */
	tc_unfill_entry(priv, cls);

	return stmmac_rxp_config(priv, priv->hw->pcsr, priv->tc_entries,
				 priv->tc_entries_max);
}

static int tc_setup_cls_u32(struct stmmac_priv *priv,
			    struct tc_cls_u32_offload *cls)
{
	switch (cls->command) {
	case TC_CLSU32_REPLACE_KNODE:
		tc_unfill_entry(priv, cls);
		fallthrough;
	case TC_CLSU32_NEW_KNODE:
		return tc_config_knode(priv, cls);
	case TC_CLSU32_DELETE_KNODE:
		return tc_delete_knode(priv, cls);
	default:
		return -EOPNOTSUPP;
	}
}

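/* Driver-chosen RFS budget: eight VLAN-priority entries plus one LLDP and
 * one 1588 routing entry. These limits are bookkeeping constants, not
 * values read back from hardware capability registers.
 */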
static int tc_rfs_init(struct stmmac_priv *priv)
{
	int i;

	priv->rfs_entries_max[STMMAC_RFS_T_VLAN] = 8;
	priv->rfs_entries_max[STMMAC_RFS_T_LLDP] = 1;
	priv->rfs_entries_max[STMMAC_RFS_T_1588] = 1;

	for (i = 0; i < STMMAC_RFS_T_MAX; i++)
		priv->rfs_entries_total += priv->rfs_entries_max[i];

	priv->rfs_entries = devm_kcalloc(priv->device,
					 priv->rfs_entries_total,
					 sizeof(*priv->rfs_entries),
					 GFP_KERNEL);
	if (!priv->rfs_entries)
		return -ENOMEM;

	dev_info(priv->device, "Enabled RFS Flow TC (entries=%d)\n",
		 priv->rfs_entries_total);

	return 0;
}

static int tc_init(struct stmmac_priv *priv)
{
	struct dma_features *dma_cap = &priv->dma_cap;
	unsigned int count;
	int ret, i;

	if (dma_cap->l3l4fnum) {
		priv->flow_entries_max = dma_cap->l3l4fnum;
		priv->flow_entries = devm_kcalloc(priv->device,
						  dma_cap->l3l4fnum,
						  sizeof(*priv->flow_entries),
						  GFP_KERNEL);
		if (!priv->flow_entries)
			return -ENOMEM;

		for (i = 0; i < priv->flow_entries_max; i++)
			priv->flow_entries[i].idx = i;

		dev_info(priv->device, "Enabled L3L4 Flow TC (entries=%d)\n",
			 priv->flow_entries_max);
	}

	ret = tc_rfs_init(priv);
	if (ret)
		return -ENOMEM;

	/* Fail silently as we can still use remaining features, e.g. CBS */
	if (!dma_cap->frpsel)
		return 0;

	switch (dma_cap->frpbs) {
	case 0x0:
		priv->tc_off_max = 64;
		break;
	case 0x1:
		priv->tc_off_max = 128;
		break;
	case 0x2:
		priv->tc_off_max = 256;
		break;
	default:
		return -EINVAL;
	}

	switch (dma_cap->frpes) {
	case 0x0:
		count = 64;
		break;
	case 0x1:
		count = 128;
		break;
	case 0x2:
		count = 256;
		break;
	default:
		return -EINVAL;
	}

	/* Reserve one last filter which lets all pass */
	priv->tc_entries_max = count;
	priv->tc_entries = devm_kcalloc(priv->device,
					count, sizeof(*priv->tc_entries), GFP_KERNEL);
	if (!priv->tc_entries)
		return -ENOMEM;

	tc_fill_all_pass_entry(&priv->tc_entries[count - 1]);

	dev_info(priv->device, "Enabling HW TC (entries=%d, max_off=%d)\n",
		 priv->tc_entries_max, priv->tc_off_max);

	return 0;
}

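/* CBS slopes are programmed in units of (rate_kbps * 1024 * ptr) /
 * port_rate_kbps, where ptr is the speed divider picked below. As an
 * illustrative example (numbers are not from the databook): on a
 * 100 Mb/s port (ptr = 4) with idleslope = 1600 kb/s and
 * sendslope = -98400 kb/s, the port rate is 100000 kb/s and idle_slope
 * becomes 1600 * 1024 * 4 / 100000 ~= 65.
 */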
static int tc_setup_cbs(struct stmmac_priv *priv,
			struct tc_cbs_qopt_offload *qopt)
{
	u32 tx_queues_count = priv->plat->tx_queues_to_use;
	s64 port_transmit_rate_kbps;
	u32 queue = qopt->queue;
	u32 mode_to_use;
	u64 value;
	u32 ptr;
	int ret;

	/* Queue 0 is not AVB capable */
	if (queue <= 0 || queue >= tx_queues_count)
		return -EINVAL;
	if (!priv->dma_cap.av)
		return -EOPNOTSUPP;

	port_transmit_rate_kbps = qopt->idleslope - qopt->sendslope;

	if (qopt->enable) {
		/* Port Transmit Rate and Speed Divider */
		switch (div_s64(port_transmit_rate_kbps, 1000)) {
		case SPEED_10000:
		case SPEED_5000:
			ptr = 32;
			break;
		case SPEED_2500:
		case SPEED_1000:
			ptr = 8;
			break;
		case SPEED_100:
			ptr = 4;
			break;
		default:
			netdev_err(priv->dev,
				   "Invalid portTransmitRate %lld (idleSlope - sendSlope)\n",
				   port_transmit_rate_kbps);
			return -EINVAL;
		}
	} else {
		ptr = 0;
	}

	mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
	if (mode_to_use == MTL_QUEUE_DCB && qopt->enable) {
		ret = stmmac_dma_qmode(priv, priv->ioaddr, queue, MTL_QUEUE_AVB);
		if (ret)
			return ret;

		priv->plat->tx_queues_cfg[queue].mode_to_use = MTL_QUEUE_AVB;
	} else if (!qopt->enable) {
		ret = stmmac_dma_qmode(priv, priv->ioaddr, queue,
				       MTL_QUEUE_DCB);
		if (ret)
			return ret;

		priv->plat->tx_queues_cfg[queue].mode_to_use = MTL_QUEUE_DCB;
		return 0;
	}

	/* Final adjustments for HW */
	value = div_s64(qopt->idleslope * 1024ll * ptr, port_transmit_rate_kbps);
	priv->plat->tx_queues_cfg[queue].idle_slope = value & GENMASK(31, 0);

	value = div_s64(-qopt->sendslope * 1024ll * ptr, port_transmit_rate_kbps);
	priv->plat->tx_queues_cfg[queue].send_slope = value & GENMASK(31, 0);

	value = qopt->hicredit * 1024ll * 8;
	priv->plat->tx_queues_cfg[queue].high_credit = value & GENMASK(31, 0);

	value = qopt->locredit * 1024ll * 8;
	priv->plat->tx_queues_cfg[queue].low_credit = value & GENMASK(31, 0);

	ret = stmmac_config_cbs(priv, priv->hw,
				priv->plat->tx_queues_cfg[queue].send_slope,
				priv->plat->tx_queues_cfg[queue].idle_slope,
				priv->plat->tx_queues_cfg[queue].high_credit,
				priv->plat->tx_queues_cfg[queue].low_credit,
				queue);
	if (ret)
		return ret;

	dev_info(priv->device, "CBS queue %d: send %d, idle %d, hi %d, lo %d\n",
		 queue, qopt->sendslope, qopt->idleslope,
		 qopt->hicredit, qopt->locredit);
	return 0;
}

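/* Only FLOW_ACTION_DROP maps to a hardware action here; any other action
 * is skipped and the filter degenerates to a plain match.
 */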
static int tc_parse_flow_actions(struct stmmac_priv *priv,
				 struct flow_action *action,
				 struct stmmac_flow_entry *entry,
				 struct netlink_ext_ack *extack)
{
	struct flow_action_entry *act;
	int i;

	if (!flow_action_has_entries(action))
		return -EINVAL;

	if (!flow_action_basic_hw_stats_check(action, extack))
		return -EOPNOTSUPP;

	flow_action_for_each(i, act, action) {
		switch (act->id) {
		case FLOW_ACTION_DROP:
			entry->action |= STMMAC_FLOW_ACTION_DROP;
			return 0;
		default:
			break;
		}
	}

	/* Nothing to do, maybe inverse filter ? */
	return 0;
}

#define ETHER_TYPE_FULL_MASK cpu_to_be16(~0)

static int tc_add_basic_flow(struct stmmac_priv *priv,
			     struct flow_cls_offload *cls,
			     struct stmmac_flow_entry *entry)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
	struct flow_dissector *dissector = rule->match.dissector;
	struct flow_match_basic match;

	/* Nothing to do here */
	if (!dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_BASIC))
		return -EINVAL;

	flow_rule_match_basic(rule, &match);

	entry->ip_proto = match.key->ip_proto;
	return 0;
}

static int tc_add_ip4_flow(struct stmmac_priv *priv,
			   struct flow_cls_offload *cls,
			   struct stmmac_flow_entry *entry)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
	struct flow_dissector *dissector = rule->match.dissector;
	bool inv = entry->action & STMMAC_FLOW_ACTION_DROP;
	struct flow_match_ipv4_addrs match;
	u32 hw_match;
	int ret;

	/* Nothing to do here */
	if (!dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_IPV4_ADDRS))
		return -EINVAL;

	flow_rule_match_ipv4_addrs(rule, &match);
	hw_match = ntohl(match.key->src) & ntohl(match.mask->src);
	if (hw_match) {
		ret = stmmac_config_l3_filter(priv, priv->hw, entry->idx, true,
					      false, true, inv, hw_match);
		if (ret)
			return ret;
	}

	hw_match = ntohl(match.key->dst) & ntohl(match.mask->dst);
	if (hw_match) {
		ret = stmmac_config_l3_filter(priv, priv->hw, entry->idx, true,
					      false, false, inv, hw_match);
		if (ret)
			return ret;
	}

	return 0;
}

static int tc_add_ports_flow(struct stmmac_priv *priv,
			     struct flow_cls_offload *cls,
			     struct stmmac_flow_entry *entry)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
	struct flow_dissector *dissector = rule->match.dissector;
	bool inv = entry->action & STMMAC_FLOW_ACTION_DROP;
	struct flow_match_ports match;
	u32 hw_match;
	bool is_udp;
	int ret;

	/* Nothing to do here */
	if (!dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_PORTS))
		return -EINVAL;

	switch (entry->ip_proto) {
	case IPPROTO_TCP:
		is_udp = false;
		break;
	case IPPROTO_UDP:
		is_udp = true;
		break;
	default:
		return -EINVAL;
	}

	flow_rule_match_ports(rule, &match);

	hw_match = ntohs(match.key->src) & ntohs(match.mask->src);
	if (hw_match) {
		ret = stmmac_config_l4_filter(priv, priv->hw, entry->idx, true,
					      is_udp, true, inv, hw_match);
		if (ret)
			return ret;
	}

	hw_match = ntohs(match.key->dst) & ntohs(match.mask->dst);
	if (hw_match) {
		ret = stmmac_config_l4_filter(priv, priv->hw, entry->idx, true,
					      is_udp, false, inv, hw_match);
		if (ret)
			return ret;
	}

	entry->is_l4 = true;
	return 0;
}

static struct stmmac_flow_entry *tc_find_flow(struct stmmac_priv *priv,
					      struct flow_cls_offload *cls,
					      bool get_free)
{
	int i;

	for (i = 0; i < priv->flow_entries_max; i++) {
		struct stmmac_flow_entry *entry = &priv->flow_entries[i];

		if (entry->cookie == cls->cookie)
			return entry;
		if (get_free && (entry->in_use == false))
			return entry;
	}

	return NULL;
}

static struct {
	int (*fn)(struct stmmac_priv *priv, struct flow_cls_offload *cls,
		  struct stmmac_flow_entry *entry);
} tc_flow_parsers[] = {
	{ .fn = tc_add_basic_flow },
	{ .fn = tc_add_ip4_flow },
	{ .fn = tc_add_ports_flow },
};

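/* tc_add_flow() runs every parser above on the same rule; a parser that
 * returns 0 marks the entry in use, so the add only fails if none of
 * them managed to program anything.
 */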
static int tc_add_flow(struct stmmac_priv *priv,
		       struct flow_cls_offload *cls)
{
	struct stmmac_flow_entry *entry = tc_find_flow(priv, cls, false);
	struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
	int i, ret;

	if (!entry) {
		entry = tc_find_flow(priv, cls, true);
		if (!entry)
			return -ENOENT;
	}

	ret = tc_parse_flow_actions(priv, &rule->action, entry,
				    cls->common.extack);
	if (ret)
		return ret;

	for (i = 0; i < ARRAY_SIZE(tc_flow_parsers); i++) {
		ret = tc_flow_parsers[i].fn(priv, cls, entry);
		if (!ret)
			entry->in_use = true;
	}

	if (!entry->in_use)
		return -EINVAL;

	entry->cookie = cls->cookie;
	return 0;
}

static int tc_del_flow(struct stmmac_priv *priv,
		       struct flow_cls_offload *cls)
{
	struct stmmac_flow_entry *entry = tc_find_flow(priv, cls, false);
	int ret;

	if (!entry || !entry->in_use)
		return -ENOENT;

	if (entry->is_l4) {
		ret = stmmac_config_l4_filter(priv, priv->hw, entry->idx, false,
					      false, false, false, 0);
	} else {
		ret = stmmac_config_l3_filter(priv, priv->hw, entry->idx, false,
					      false, false, false, 0);
	}

	entry->in_use = false;
	entry->cookie = 0;
	entry->is_l4 = false;
	return ret;
}

static struct stmmac_rfs_entry *tc_find_rfs(struct stmmac_priv *priv,
					    struct flow_cls_offload *cls,
					    bool get_free)
{
	int i;

	for (i = 0; i < priv->rfs_entries_total; i++) {
		struct stmmac_rfs_entry *entry = &priv->rfs_entries[i];

		if (entry->cookie == cls->cookie)
			return entry;
		if (get_free && entry->in_use == false)
			return entry;
	}

	return NULL;
}

#define VLAN_PRIO_FULL_MASK (0x07)

static int tc_add_vlan_flow(struct stmmac_priv *priv,
			    struct flow_cls_offload *cls)
{
	struct stmmac_rfs_entry *entry = tc_find_rfs(priv, cls, false);
	struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
	struct flow_dissector *dissector = rule->match.dissector;
	int tc = tc_classid_to_hwtc(priv->dev, cls->classid);
	struct flow_match_vlan match;

	if (!entry) {
		entry = tc_find_rfs(priv, cls, true);
		if (!entry)
			return -ENOENT;
	}

	if (priv->rfs_entries_cnt[STMMAC_RFS_T_VLAN] >=
	    priv->rfs_entries_max[STMMAC_RFS_T_VLAN])
		return -ENOENT;

	/* Nothing to do here */
	if (!dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_VLAN))
		return -EINVAL;

	if (tc < 0) {
		netdev_err(priv->dev, "Invalid traffic class\n");
		return -EINVAL;
	}

	flow_rule_match_vlan(rule, &match);

	if (match.mask->vlan_priority) {
		u32 prio;

		if (match.mask->vlan_priority != VLAN_PRIO_FULL_MASK) {
			netdev_err(priv->dev, "Only full mask is supported for VLAN priority");
			return -EINVAL;
		}

		prio = BIT(match.key->vlan_priority);
		stmmac_rx_queue_prio(priv, priv->hw, prio, tc);

		entry->in_use = true;
		entry->cookie = cls->cookie;
		entry->tc = tc;
		entry->type = STMMAC_RFS_T_VLAN;
		priv->rfs_entries_cnt[STMMAC_RFS_T_VLAN]++;
	}

	return 0;
}

static int tc_del_vlan_flow(struct stmmac_priv *priv,
			    struct flow_cls_offload *cls)
{
	struct stmmac_rfs_entry *entry = tc_find_rfs(priv, cls, false);

	if (!entry || !entry->in_use || entry->type != STMMAC_RFS_T_VLAN)
		return -ENOENT;

	stmmac_rx_queue_prio(priv, priv->hw, 0, entry->tc);

	entry->in_use = false;
	entry->cookie = 0;
	entry->tc = 0;
	entry->type = 0;

	priv->rfs_entries_cnt[STMMAC_RFS_T_VLAN]--;

	return 0;
}

static int tc_add_ethtype_flow(struct stmmac_priv *priv,
			       struct flow_cls_offload *cls)
{
	struct stmmac_rfs_entry *entry = tc_find_rfs(priv, cls, false);
	struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
	struct flow_dissector *dissector = rule->match.dissector;
	int tc = tc_classid_to_hwtc(priv->dev, cls->classid);
	struct flow_match_basic match;

	if (!entry) {
		entry = tc_find_rfs(priv, cls, true);
		if (!entry)
			return -ENOENT;
	}

	/* Nothing to do here */
	if (!dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_BASIC))
		return -EINVAL;

	if (tc < 0) {
		netdev_err(priv->dev, "Invalid traffic class\n");
		return -EINVAL;
	}

	flow_rule_match_basic(rule, &match);

	if (match.mask->n_proto) {
		u16 etype = ntohs(match.key->n_proto);

		if (match.mask->n_proto != ETHER_TYPE_FULL_MASK) {
			netdev_err(priv->dev, "Only full mask is supported for EthType filter");
			return -EINVAL;
		}
		switch (etype) {
		case ETH_P_LLDP:
			if (priv->rfs_entries_cnt[STMMAC_RFS_T_LLDP] >=
			    priv->rfs_entries_max[STMMAC_RFS_T_LLDP])
				return -ENOENT;

			entry->type = STMMAC_RFS_T_LLDP;
			priv->rfs_entries_cnt[STMMAC_RFS_T_LLDP]++;

			stmmac_rx_queue_routing(priv, priv->hw,
						PACKET_DCBCPQ, tc);
			break;
		case ETH_P_1588:
			if (priv->rfs_entries_cnt[STMMAC_RFS_T_1588] >=
			    priv->rfs_entries_max[STMMAC_RFS_T_1588])
				return -ENOENT;

			entry->type = STMMAC_RFS_T_1588;
			priv->rfs_entries_cnt[STMMAC_RFS_T_1588]++;

			stmmac_rx_queue_routing(priv, priv->hw,
						PACKET_PTPQ, tc);
			break;
		default:
			netdev_err(priv->dev, "EthType(0x%x) is not supported", etype);
			return -EINVAL;
		}

		entry->in_use = true;
		entry->cookie = cls->cookie;
		entry->tc = tc;
		entry->etype = etype;

		return 0;
	}

	return -EINVAL;
}

static int tc_del_ethtype_flow(struct stmmac_priv *priv,
			       struct flow_cls_offload *cls)
{
	struct stmmac_rfs_entry *entry = tc_find_rfs(priv, cls, false);

	if (!entry || !entry->in_use ||
	    entry->type < STMMAC_RFS_T_LLDP ||
	    entry->type > STMMAC_RFS_T_1588)
		return -ENOENT;

	switch (entry->etype) {
	case ETH_P_LLDP:
		stmmac_rx_queue_routing(priv, priv->hw,
					PACKET_DCBCPQ, 0);
		priv->rfs_entries_cnt[STMMAC_RFS_T_LLDP]--;
		break;
	case ETH_P_1588:
		stmmac_rx_queue_routing(priv, priv->hw,
					PACKET_PTPQ, 0);
		priv->rfs_entries_cnt[STMMAC_RFS_T_1588]--;
		break;
	default:
		netdev_err(priv->dev, "EthType(0x%x) is not supported",
			   entry->etype);
		return -EINVAL;
	}

	entry->in_use = false;
	entry->cookie = 0;
	entry->tc = 0;
	entry->etype = 0;
	entry->type = 0;

	return 0;
}

static int tc_add_flow_cls(struct stmmac_priv *priv,
			   struct flow_cls_offload *cls)
{
	int ret;

	ret = tc_add_flow(priv, cls);
	if (!ret)
		return ret;

	ret = tc_add_ethtype_flow(priv, cls);
	if (!ret)
		return ret;

	return tc_add_vlan_flow(priv, cls);
}

static int tc_del_flow_cls(struct stmmac_priv *priv,
			   struct flow_cls_offload *cls)
{
	int ret;

	ret = tc_del_flow(priv, cls);
	if (!ret)
		return ret;

	ret = tc_del_ethtype_flow(priv, cls);
	if (!ret)
		return ret;

	return tc_del_vlan_flow(priv, cls);
}

static int tc_setup_cls(struct stmmac_priv *priv,
			struct flow_cls_offload *cls)
{
	int ret = 0;

	/* When RSS is enabled, the filtering will be bypassed */
	if (priv->rss.enable)
		return -EBUSY;

	switch (cls->command) {
	case FLOW_CLS_REPLACE:
		ret = tc_add_flow_cls(priv, cls);
		break;
	case FLOW_CLS_DESTROY:
		ret = tc_del_flow_cls(priv, cls);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return ret;
}

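/* If the requested base time already lies in the past, push it forward by
 * whole cycles so EST starts on a future cycle boundary. For example,
 * old_base_time = 1s, current_time = 10s, cycle_time = 2s gives n = 4 and
 * an effective base time of 1s + 5 * 2s = 11s.
 */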
struct timespec64 stmmac_calc_tas_basetime(ktime_t old_base_time,
					   ktime_t current_time,
					   u64 cycle_time)
{
	struct timespec64 time;

	if (ktime_after(old_base_time, current_time)) {
		time = ktime_to_timespec64(old_base_time);
	} else {
		s64 n;
		ktime_t base_time;

		n = div64_s64(ktime_sub_ns(current_time, old_base_time),
			      cycle_time);
		base_time = ktime_add_ns(old_base_time,
					 (n + 1) * cycle_time);

		time = ktime_to_timespec64(base_time);
	}

	return time;
}

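/* Note the ETH_HLEN - ETH_TLEN adjustment below: it adds the 12 L2
 * address bytes on top of the MSDU handed down by taprio, presumably
 * because the queue limit is checked against the frame including MAC
 * addresses but excluding the EtherType.
 */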
static void tc_taprio_map_maxsdu_txq(struct stmmac_priv *priv,
				     struct tc_taprio_qopt_offload *qopt)
{
	u32 num_tc = qopt->mqprio.qopt.num_tc;
	u32 offset, count, i, j;

	/* QueueMaxSDU received from the driver corresponds to the Linux traffic
	 * class. Map queueMaxSDU per Linux traffic class to DWMAC Tx queues.
	 */
	for (i = 0; i < num_tc; i++) {
		if (!qopt->max_sdu[i])
			continue;

		offset = qopt->mqprio.qopt.offset[i];
		count = qopt->mqprio.qopt.count[i];

		for (j = offset; j < offset + count; j++)
			priv->est->max_sdu[j] = qopt->max_sdu[i] + ETH_HLEN - ETH_TLEN;
	}
}

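/* Each gate control list word packs the interval into the low 'wid' bits
 * with the gate bitmap above it: gcl[i] = interval_ns | (gates << wid),
 * which is why both fields are range-checked against wid first.
 */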
static int tc_taprio_configure(struct stmmac_priv *priv,
			       struct tc_taprio_qopt_offload *qopt)
{
	u32 size, wid = priv->dma_cap.estwid, dep = priv->dma_cap.estdep;
	struct netlink_ext_ack *extack = qopt->mqprio.extack;
	struct timespec64 time, current_time, qopt_time;
	ktime_t current_time_ns;
	int i, ret = 0;
	u64 ctr;

	if (qopt->base_time < 0)
		return -ERANGE;

	if (!priv->dma_cap.estsel)
		return -EOPNOTSUPP;

	switch (wid) {
	case 0x1:
		wid = 16;
		break;
	case 0x2:
		wid = 20;
		break;
	case 0x3:
		wid = 24;
		break;
	default:
		return -EOPNOTSUPP;
	}

	switch (dep) {
	case 0x1:
		dep = 64;
		break;
	case 0x2:
		dep = 128;
		break;
	case 0x3:
		dep = 256;
		break;
	case 0x4:
		dep = 512;
		break;
	case 0x5:
		dep = 1024;
		break;
	default:
		return -EOPNOTSUPP;
	}

	if (qopt->cmd == TAPRIO_CMD_DESTROY)
		goto disable;

	if (qopt->num_entries >= dep)
		return -EINVAL;
	if (!qopt->cycle_time)
		return -ERANGE;
	if (qopt->cycle_time_extension >= BIT(wid + 7))
		return -ERANGE;

	if (!priv->est) {
		priv->est = devm_kzalloc(priv->device, sizeof(*priv->est),
					 GFP_KERNEL);
		if (!priv->est)
			return -ENOMEM;

		mutex_init(&priv->est_lock);
	} else {
		mutex_lock(&priv->est_lock);
		memset(priv->est, 0, sizeof(*priv->est));
		mutex_unlock(&priv->est_lock);
	}

	size = qopt->num_entries;

	mutex_lock(&priv->est_lock);
	priv->est->gcl_size = size;
	priv->est->enable = qopt->cmd == TAPRIO_CMD_REPLACE;
	mutex_unlock(&priv->est_lock);

	for (i = 0; i < size; i++) {
		s64 delta_ns = qopt->entries[i].interval;
		u32 gates = qopt->entries[i].gate_mask;

		if (delta_ns > GENMASK(wid, 0))
			return -ERANGE;
		if (gates > GENMASK(31 - wid, 0))
			return -ERANGE;

		switch (qopt->entries[i].command) {
		case TC_TAPRIO_CMD_SET_GATES:
			break;
		case TC_TAPRIO_CMD_SET_AND_HOLD:
			gates |= BIT(0);
			break;
		case TC_TAPRIO_CMD_SET_AND_RELEASE:
			gates &= ~BIT(0);
			break;
		default:
			return -EOPNOTSUPP;
		}

		priv->est->gcl[i] = delta_ns | (gates << wid);
	}

	mutex_lock(&priv->est_lock);
	/* Adjust for real system time */
	priv->ptp_clock_ops.gettime64(&priv->ptp_clock_ops, &current_time);
	current_time_ns = timespec64_to_ktime(current_time);
	time = stmmac_calc_tas_basetime(qopt->base_time, current_time_ns,
					qopt->cycle_time);

	priv->est->btr[0] = (u32)time.tv_nsec;
	priv->est->btr[1] = (u32)time.tv_sec;

	qopt_time = ktime_to_timespec64(qopt->base_time);
	priv->est->btr_reserve[0] = (u32)qopt_time.tv_nsec;
	priv->est->btr_reserve[1] = (u32)qopt_time.tv_sec;

	ctr = qopt->cycle_time;
	priv->est->ctr[0] = do_div(ctr, NSEC_PER_SEC);
	priv->est->ctr[1] = (u32)ctr;

	priv->est->ter = qopt->cycle_time_extension;

	tc_taprio_map_maxsdu_txq(priv, qopt);

	ret = stmmac_est_configure(priv, priv, priv->est,
				   priv->plat->clk_ptp_rate);
	mutex_unlock(&priv->est_lock);
	if (ret) {
		netdev_err(priv->dev, "failed to configure EST\n");
		goto disable;
	}

	ret = stmmac_fpe_map_preemption_class(priv, priv->dev, extack,
					      qopt->mqprio.preemptible_tcs);
	if (ret)
		goto disable;

	return 0;

disable:
	if (priv->est) {
		mutex_lock(&priv->est_lock);
		priv->est->enable = false;
		stmmac_est_configure(priv, priv, priv->est,
				     priv->plat->clk_ptp_rate);
		/* Reset taprio status */
		for (i = 0; i < priv->plat->tx_queues_to_use; i++) {
			priv->xstats.max_sdu_txq_drop[i] = 0;
			priv->xstats.mtl_est_txq_hlbf[i] = 0;
		}
		mutex_unlock(&priv->est_lock);
	}

	stmmac_fpe_map_preemption_class(priv, priv->dev, extack, 0);

	return ret;
}

static void tc_taprio_stats(struct stmmac_priv *priv,
			    struct tc_taprio_qopt_offload *qopt)
{
	u64 window_drops = 0;
	int i = 0;

	for (i = 0; i < priv->plat->tx_queues_to_use; i++)
		window_drops += priv->xstats.max_sdu_txq_drop[i] +
				priv->xstats.mtl_est_txq_hlbf[i];
	qopt->stats.window_drops = window_drops;

	/* Transmission overrun doesn't happen for stmmac, hence always 0 */
	qopt->stats.tx_overruns = 0;
}

static void tc_taprio_queue_stats(struct stmmac_priv *priv,
				  struct tc_taprio_qopt_offload *qopt)
{
	struct tc_taprio_qopt_queue_stats *q_stats = &qopt->queue_stats;
	int queue = qopt->queue_stats.queue;

	q_stats->stats.window_drops = priv->xstats.max_sdu_txq_drop[queue] +
				      priv->xstats.mtl_est_txq_hlbf[queue];

	/* Transmission overrun doesn't happen for stmmac, hence always 0 */
	q_stats->stats.tx_overruns = 0;
}

static int tc_setup_taprio(struct stmmac_priv *priv,
			   struct tc_taprio_qopt_offload *qopt)
{
	int err = 0;

	switch (qopt->cmd) {
	case TAPRIO_CMD_REPLACE:
	case TAPRIO_CMD_DESTROY:
		err = tc_taprio_configure(priv, qopt);
		break;
	case TAPRIO_CMD_STATS:
		tc_taprio_stats(priv, qopt);
		break;
	case TAPRIO_CMD_QUEUE_STATS:
		tc_taprio_queue_stats(priv, qopt);
		break;
	default:
		err = -EOPNOTSUPP;
	}

	return err;
}

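/* Fallback for MACs without a frame-preemption implementation: plain
 * taprio is forwarded unchanged, but any preemptible_tcs request has to
 * be rejected.
 */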
static int tc_setup_taprio_without_fpe(struct stmmac_priv *priv,
				       struct tc_taprio_qopt_offload *qopt)
{
	if (!qopt->mqprio.preemptible_tcs)
		return tc_setup_taprio(priv, qopt);

	NL_SET_ERR_MSG_MOD(qopt->mqprio.extack,
			   "taprio with FPE is not implemented for this MAC");

	return -EOPNOTSUPP;
}

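/* ETF offload rides on the hardware TBS launch-time engine; it can only
 * be toggled on queues that were set up with STMMAC_TBS_AVAIL at init.
 */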
static int tc_setup_etf(struct stmmac_priv *priv,
			struct tc_etf_qopt_offload *qopt)
{
	if (!priv->dma_cap.tbssel)
		return -EOPNOTSUPP;
	if (qopt->queue >= priv->plat->tx_queues_to_use)
		return -EINVAL;
	if (!(priv->dma_conf.tx_queue[qopt->queue].tbs & STMMAC_TBS_AVAIL))
		return -EINVAL;

	if (qopt->enable)
		priv->dma_conf.tx_queue[qopt->queue].tbs |= STMMAC_TBS_EN;
	else
		priv->dma_conf.tx_queue[qopt->queue].tbs &= ~STMMAC_TBS_EN;

	netdev_info(priv->dev, "%s ETF for Queue %d\n",
		    qopt->enable ? "enabled" : "disabled", qopt->queue);
	return 0;
}

static int tc_query_caps(struct stmmac_priv *priv,
			 struct tc_query_caps_base *base)
{
	switch (base->type) {
net: stmmac: support fp parameter of tc-mqprio
tc-mqprio can select whether traffic classes are express or preemptible.
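For example, a setup along these lines exercises the parameter (the
interface name, map and queue layout are illustrative; the fp option
needs a recent enough iproute2):

tc qdisc add dev eth1 parent root handle 100 mqprio \
	num_tc 4 \
	map 0 1 2 3 2 2 2 2 2 2 2 2 2 2 2 3 \
	queues 1@0 1@1 1@2 1@3 \
	fp P E E E \
	hw 1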
After some traffic tests, MAC merge layer statistics are all good.
Local device:
ethtool --include-statistics --json --show-mm eth1
[ {
"ifname": "eth1",
"pmac-enabled": true,
"tx-enabled": true,
"tx-active": true,
"tx-min-frag-size": 60,
"rx-min-frag-size": 60,
"verify-enabled": true,
"verify-time": 100,
"max-verify-time": 128,
"verify-status": "SUCCEEDED",
"statistics": {
"MACMergeFrameAssErrorCount": 0,
"MACMergeFrameSmdErrorCount": 0,
"MACMergeFrameAssOkCount": 0,
"MACMergeFragCountRx": 0,
"MACMergeFragCountTx": 35105,
"MACMergeHoldCount": 0
}
} ]
Remote device:
ethtool --include-statistics --json --show-mm end1
[ {
"ifname": "end1",
"pmac-enabled": true,
"tx-enabled": true,
"tx-active": true,
"tx-min-frag-size": 60,
"rx-min-frag-size": 60,
"verify-enabled": true,
"verify-time": 100,
"max-verify-time": 128,
"verify-status": "SUCCEEDED",
"statistics": {
"MACMergeFrameAssErrorCount": 0,
"MACMergeFrameSmdErrorCount": 0,
"MACMergeFrameAssOkCount": 35105,
"MACMergeFragCountRx": 35105,
"MACMergeFragCountTx": 0,
"MACMergeHoldCount": 0
}
} ]
Tested on DWMAC CORE 5.10a
Signed-off-by: Furong Xu <0x1207@gmail.com>
Reviewed-by: Vladimir Oltean <olteanv@gmail.com>
Link: https://patch.msgid.link/592965ea93ed8240f0a1b8f6f8ebb8914f69419b.1725631883.git.0x1207@gmail.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
2024-09-06 22:30:10 +08:00
|
|
|
case TC_SETUP_QDISC_MQPRIO: {
|
|
|
|
struct tc_mqprio_caps *caps = base->caps;
|
|
|
|
|
|
|
|
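/* Ask the mqprio core to validate TXQ counts/offsets for us */
|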
caps->validate_queue_counts = true;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
net/sched: taprio: only pass gate mask per TXQ for igc, stmmac, tsnep, am65_cpsw
2023-02-04 15:53:04 +02:00
|
|
|
case TC_SETUP_QDISC_TAPRIO: {
|
|
|
|
struct tc_taprio_caps *caps = base->caps;
|
|
|
|
|
|
|
|
if (!priv->dma_cap.estsel)
|
|
|
|
return -EOPNOTSUPP;
|
|
|
|
|
|
|
|
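/* EST gate events in this hardware apply per TXQ, not per TC
 * (which is why gate_mask_per_txq is opted into here).
 */
|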
caps->gate_mask_per_txq = true;
|
2024-01-27 12:04:41 +08:00
|
|
|
caps->supports_queue_max_sdu = true;
|
net/sched: taprio: only pass gate mask per TXQ for igc, stmmac, tsnep, am65_cpsw
2023-02-04 15:53:04 +02:00
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
default:
|
|
|
|
return -EOPNOTSUPP;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
net: stmmac: support fp parameter of tc-mqprio
2024-09-06 22:30:10 +08:00
|
|
|
static void stmmac_reset_tc_mqprio(struct net_device *ndev,
|
|
|
|
struct netlink_ext_ack *extack)
|
|
|
|
{
|
|
|
|
struct stmmac_priv *priv = netdev_priv(ndev);
|
|
|
|
|
|
|
|
netdev_reset_tc(ndev);
|
|
|
|
netif_set_real_num_tx_queues(ndev, priv->plat->tx_queues_to_use);
|
|
|
|
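/* Zero preemptible TCs: revert every traffic class to express */
|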
stmmac_fpe_map_preemption_class(priv, ndev, extack, 0);
|
|
|
|
}
|
|
|
|
|
|
|
|
static int tc_setup_dwmac510_mqprio(struct stmmac_priv *priv,
|
|
|
|
struct tc_mqprio_qopt_offload *mqprio)
|
|
|
|
{
|
|
|
|
struct netlink_ext_ack *extack = mqprio->extack;
|
|
|
|
struct tc_mqprio_qopt *qopt = &mqprio->qopt;
|
|
|
|
u32 offset, count, num_stack_tx_queues = 0;
|
|
|
|
struct net_device *ndev = priv->dev;
|
|
|
|
u32 num_tc = qopt->num_tc;
|
|
|
|
int err;
|
|
|
|
|
|
|
|
if (!num_tc) {
|
|
|
|
stmmac_reset_tc_mqprio(ndev, extack);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
err = netdev_set_num_tc(ndev, num_tc);
|
|
|
|
if (err)
|
|
|
|
return err;
|
|
|
|
|
|
|
|
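/* Mirror the mqprio mapping into the stack: each TC owns the
 * contiguous TXQ range described by qopt->offset/qopt->count.
 */
|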
for (u32 tc = 0; tc < num_tc; tc++) {
|
|
|
|
offset = qopt->offset[tc];
|
|
|
|
count = qopt->count[tc];
|
|
|
|
num_stack_tx_queues += count;
|
|
|
|
|
|
|
|
err = netdev_set_tc_queue(ndev, tc, count, offset);
|
|
|
|
if (err)
|
|
|
|
goto err_reset_tc;
|
|
|
|
}
|
|
|
|
|
|
|
|
err = netif_set_real_num_tx_queues(ndev, num_stack_tx_queues);
|
|
|
|
if (err)
|
|
|
|
goto err_reset_tc;
|
|
|
|
|
|
|
|
err = stmmac_fpe_map_preemption_class(priv, ndev, extack,
|
|
|
|
mqprio->preemptible_tcs);
|
|
|
|
if (err)
|
|
|
|
goto err_reset_tc;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
err_reset_tc:
|
|
|
|
stmmac_reset_tc_mqprio(ndev, extack);
|
|
|
|
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int tc_setup_mqprio_unimplemented(struct stmmac_priv *priv,
|
|
|
|
struct tc_mqprio_qopt_offload *mqprio)
|
|
|
|
{
|
|
|
|
NL_SET_ERR_MSG_MOD(mqprio->extack,
|
|
|
|
"mqprio HW offload is not implemented for this MAC");
|
|
|
|
return -EOPNOTSUPP;
|
|
|
|
}
|
|
|
|
|
|
|
|
const struct stmmac_tc_ops dwmac4_tc_ops = {
|
|
|
|
.init = tc_init,
|
|
|
|
.setup_cls_u32 = tc_setup_cls_u32,
|
|
|
|
.setup_cbs = tc_setup_cbs,
|
|
|
|
.setup_cls = tc_setup_cls,
|
net: stmmac: support fp parameter of tc-taprio
tc-taprio can select whether traffic classes are express or preemptible.
0) tc qdisc add dev eth1 parent root handle 100 taprio \
num_tc 4 \
map 0 1 2 3 2 2 2 2 2 2 2 2 2 2 2 3 \
queues 1@0 1@1 1@2 1@3 \
base-time 1000000000 \
sched-entry S 03 10000000 \
sched-entry S 0e 10000000 \
flags 0x2 fp P E E E
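(In the fp list, one letter per TC: "fp P E E E" makes TC 0
preemptible and TCs 1-3 express.)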
1) After some traffic tests, MAC merge layer statistics are all good.
Local device:
[ {
"ifname": "eth1",
"pmac-enabled": true,
"tx-enabled": true,
"tx-active": true,
"tx-min-frag-size": 60,
"rx-min-frag-size": 60,
"verify-enabled": true,
"verify-time": 100,
"max-verify-time": 128,
"verify-status": "SUCCEEDED",
"statistics": {
"MACMergeFrameAssErrorCount": 0,
"MACMergeFrameSmdErrorCount": 0,
"MACMergeFrameAssOkCount": 0,
"MACMergeFragCountRx": 0,
"MACMergeFragCountTx": 17837,
"MACMergeHoldCount": 18639
}
} ]
Remote device:
[ {
"ifname": "end1",
"pmac-enabled": true,
"tx-enabled": true,
"tx-active": true,
"tx-min-frag-size": 60,
"rx-min-frag-size": 60,
"verify-enabled": true,
"verify-time": 100,
"max-verify-time": 128,
"verify-status": "SUCCEEDED",
"statistics": {
"MACMergeFrameAssErrorCount": 0,
"MACMergeFrameSmdErrorCount": 0,
"MACMergeFrameAssOkCount": 17189,
"MACMergeFragCountRx": 17837,
"MACMergeFragCountTx": 0,
"MACMergeHoldCount": 0
}
} ]
Tested on DWMAC CORE 5.10a
Signed-off-by: Furong Xu <0x1207@gmail.com>
Reviewed-by: Vladimir Oltean <olteanv@gmail.com>
Link: https://patch.msgid.link/0d21ae356fb3cab77337527e87d46748a4852055.1725631883.git.0x1207@gmail.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
2024-09-06 22:30:11 +08:00
|
|
|
.setup_taprio = tc_setup_taprio_without_fpe,
|
net: stmmac: support fp parameter of tc-mqprio
2024-09-06 22:30:10 +08:00
|
|
|
.setup_etf = tc_setup_etf,
|
|
|
|
.query_caps = tc_query_caps,
|
|
|
|
.setup_mqprio = tc_setup_mqprio_unimplemented,
|
|
|
|
};
|
|
|
|
|
2018-05-04 10:01:38 +01:00
|
|
|
const struct stmmac_tc_ops dwmac510_tc_ops = {
|
|
|
|
.init = tc_init,
|
|
|
|
.setup_cls_u32 = tc_setup_cls_u32,
|
2018-06-27 15:57:02 +01:00
|
|
|
.setup_cbs = tc_setup_cbs,
|
2019-09-04 15:16:56 +02:00
|
|
|
.setup_cls = tc_setup_cls,
|
2019-12-18 11:33:07 +01:00
|
|
|
.setup_taprio = tc_setup_taprio,
|
2020-01-13 17:24:10 +01:00
|
|
|
.setup_etf = tc_setup_etf,
|
net/sched: taprio: only pass gate mask per TXQ for igc, stmmac, tsnep, am65_cpsw
2023-02-04 15:53:04 +02:00
|
|
|
.query_caps = tc_query_caps,
|
net: stmmac: support fp parameter of tc-mqprio
2024-09-06 22:30:10 +08:00
|
|
|
.setup_mqprio = tc_setup_dwmac510_mqprio,
|
|
|
|
};
|