linux/drivers/net/can/spi/mcp251xfd/mcp251xfd-ring.c

// SPDX-License-Identifier: GPL-2.0
//
// mcp251xfd - Microchip MCP251xFD Family CAN controller driver
//
// Copyright (c) 2019, 2020, 2021, 2024 Pengutronix,
// Marc Kleine-Budde <kernel@pengutronix.de>
//
// Based on:
//
// CAN bus driver for Microchip 25XXFD CAN Controller with SPI Interface
//
// Copyright (c) 2019 Martin Sperl <kernel@martin.sperl.org>
//
#include <linux/unaligned.h>
#include "mcp251xfd.h"
#include "mcp251xfd-ram.h"
static inline u8
mcp251xfd_cmd_prepare_write_reg(const struct mcp251xfd_priv *priv,
union mcp251xfd_write_reg_buf *write_reg_buf,
const u16 reg, const u32 mask, const u32 val)
{
u8 first_byte, last_byte, len;
u8 *data;
__le32 val_le32;
first_byte = mcp251xfd_first_byte_set(mask);
last_byte = mcp251xfd_last_byte_set(mask);
len = last_byte - first_byte + 1;
data = mcp251xfd_spi_cmd_write(priv, write_reg_buf, reg + first_byte, len);
val_le32 = cpu_to_le32(val >> BITS_PER_BYTE * first_byte);
memcpy(data, &val_le32, len);
if (!(priv->devtype_data.quirks & MCP251XFD_QUIRK_CRC_REG)) {
len += sizeof(write_reg_buf->nocrc.cmd);
} else if (len == 1) {
u16 crc;
/* CRC */
len += sizeof(write_reg_buf->safe.cmd);
crc = mcp251xfd_crc16_compute(&write_reg_buf->safe, len);
put_unaligned_be16(crc, (void *)write_reg_buf + len);
/* Total length */
len += sizeof(write_reg_buf->safe.crc);
} else {
u16 crc;
mcp251xfd_spi_cmd_crc_set_len_in_reg(&write_reg_buf->crc.cmd,
len);
/* CRC */
len += sizeof(write_reg_buf->crc.cmd);
crc = mcp251xfd_crc16_compute(&write_reg_buf->crc, len);
put_unaligned_be16(crc, (void *)write_reg_buf + len);
/* Total length */
len += sizeof(write_reg_buf->crc.crc);
}
return len;
}
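
/* Initialize the TX Event FIFO (TEF) ring: reset head and tail, prepare
 * the SPI message that enables the TEF interrupts and the chain of UINC
 * transfers that increment the TEF tail pointer. If TX IRQ coalescing is
 * configured, the last UINC transfer uses a separate buffer that also
 * disables the TEF-not-empty interrupt and enables the TEF-half-full
 * interrupt instead.
 */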
static void
mcp251xfd_ring_init_tef(struct mcp251xfd_priv *priv, u16 *base)
{
struct mcp251xfd_tef_ring *tef_ring;
struct spi_transfer *xfer;
u32 val;
u16 addr;
u8 len;
int i;
/* TEF */
tef_ring = priv->tef;
tef_ring->head = 0;
tef_ring->tail = 0;
/* TEF- and TX-FIFO have same number of objects */
*base = mcp251xfd_get_tef_obj_addr(priv->tx->obj_num);
/* FIFO IRQ enable */
addr = MCP251XFD_REG_TEFCON;
val = MCP251XFD_REG_TEFCON_TEFOVIE | MCP251XFD_REG_TEFCON_TEFNEIE;
len = mcp251xfd_cmd_prepare_write_reg(priv, &tef_ring->irq_enable_buf,
addr, val, val);
tef_ring->irq_enable_xfer.tx_buf = &tef_ring->irq_enable_buf;
tef_ring->irq_enable_xfer.len = len;
spi_message_init_with_transfers(&tef_ring->irq_enable_msg,
&tef_ring->irq_enable_xfer, 1);
/* FIFO increment TEF tail pointer */
addr = MCP251XFD_REG_TEFCON;
val = MCP251XFD_REG_TEFCON_UINC;
len = mcp251xfd_cmd_prepare_write_reg(priv, &tef_ring->uinc_buf,
addr, val, val);
for (i = 0; i < ARRAY_SIZE(tef_ring->uinc_xfer); i++) {
xfer = &tef_ring->uinc_xfer[i];
xfer->tx_buf = &tef_ring->uinc_buf;
xfer->len = len;
xfer->cs_change = 1;
xfer->cs_change_delay.value = 0;
xfer->cs_change_delay.unit = SPI_DELAY_UNIT_NSECS;
}
/* "cs_change == 1" on the last transfer results in an active
* chip select after the complete SPI message. This causes the
* controller to interpret the next register access as
* data. Set "cs_change" of the last transfer to "0" to
* properly deactivate the chip select at the end of the
* message.
*/
xfer->cs_change = 0;
if (priv->tx_coalesce_usecs_irq || priv->tx_obj_num_coalesce_irq) {
val = MCP251XFD_REG_TEFCON_UINC |
MCP251XFD_REG_TEFCON_TEFOVIE |
MCP251XFD_REG_TEFCON_TEFHIE;
len = mcp251xfd_cmd_prepare_write_reg(priv,
&tef_ring->uinc_irq_disable_buf,
addr, val, val);
xfer->tx_buf = &tef_ring->uinc_irq_disable_buf;
xfer->len = len;
}
}
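
/* Set up a single TX object: the first transfer loads the TX object into
 * the FIFO (its length is assigned on the fly at xmit time), the second
 * transfer writes the prepared request-to-send command from
 * ring->rts_buf.
 */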
static void
mcp251xfd_tx_ring_init_tx_obj(const struct mcp251xfd_priv *priv,
const struct mcp251xfd_tx_ring *ring,
struct mcp251xfd_tx_obj *tx_obj,
const u8 rts_buf_len,
const u8 n)
{
struct spi_transfer *xfer;
u16 addr;
/* FIFO load */
addr = mcp251xfd_get_tx_obj_addr(ring, n);
if (priv->devtype_data.quirks & MCP251XFD_QUIRK_CRC_TX)
mcp251xfd_spi_cmd_write_crc_set_addr(&tx_obj->buf.crc.cmd,
addr);
else
mcp251xfd_spi_cmd_write_nocrc(&tx_obj->buf.nocrc.cmd,
addr);
xfer = &tx_obj->xfer[0];
xfer->tx_buf = &tx_obj->buf;
xfer->len = 0; /* actual len is assigned on the fly */
xfer->cs_change = 1;
xfer->cs_change_delay.value = 0;
xfer->cs_change_delay.unit = SPI_DELAY_UNIT_NSECS;
/* FIFO request to send */
xfer = &tx_obj->xfer[1];
xfer->tx_buf = &ring->rts_buf;
xfer->len = rts_buf_len;
/* SPI message */
spi_message_init_with_transfers(&tx_obj->msg, tx_obj->xfer,
ARRAY_SIZE(tx_obj->xfer));
}
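
/* Initialize the TX ring: assign its base address and FIFO number,
 * prepare the request-to-send register write shared by all TX objects
 * and the per-object SPI messages, and advance *base and *fifo_nr
 * accordingly.
 */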
static void
mcp251xfd_ring_init_tx(struct mcp251xfd_priv *priv, u16 *base, u8 *fifo_nr)
{
struct mcp251xfd_tx_ring *tx_ring;
struct mcp251xfd_tx_obj *tx_obj;
u32 val;
u16 addr;
u8 len;
int i;
tx_ring = priv->tx;
tx_ring->head = 0;
tx_ring->tail = 0;
tx_ring->base = *base;
tx_ring->nr = 0;
tx_ring->fifo_nr = *fifo_nr;
*base = mcp251xfd_get_tx_obj_addr(tx_ring, tx_ring->obj_num);
*fifo_nr += 1;
/* FIFO request to send */
addr = MCP251XFD_REG_FIFOCON(tx_ring->fifo_nr);
val = MCP251XFD_REG_FIFOCON_TXREQ | MCP251XFD_REG_FIFOCON_UINC;
len = mcp251xfd_cmd_prepare_write_reg(priv, &tx_ring->rts_buf,
addr, val, val);
mcp251xfd_for_each_tx_obj(tx_ring, tx_obj, i)
mcp251xfd_tx_ring_init_tx_obj(priv, tx_ring, tx_obj, len, i);
}
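
/* Initialize all RX rings: assign base addresses and FIFO numbers,
 * prepare the SPI messages that enable the RX FIFO interrupts and the
 * UINC transfer chains that increment the RX tail pointers. The comments
 * below describe the special handling of the last UINC transfer and of
 * RX IRQ coalescing on the first RX FIFO.
 */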
static void
mcp251xfd_ring_init_rx(struct mcp251xfd_priv *priv, u16 *base, u8 *fifo_nr)
{
struct mcp251xfd_rx_ring *rx_ring;
struct spi_transfer *xfer;
u32 val;
u16 addr;
u8 len;
int i, j;
mcp251xfd_for_each_rx_ring(priv, rx_ring, i) {
rx_ring->last_valid = timecounter_read(&priv->tc);
rx_ring->head = 0;
rx_ring->tail = 0;
rx_ring->base = *base;
rx_ring->nr = i;
rx_ring->fifo_nr = *fifo_nr;
*base = mcp251xfd_get_rx_obj_addr(rx_ring, rx_ring->obj_num);
*fifo_nr += 1;
/* FIFO IRQ enable */
addr = MCP251XFD_REG_FIFOCON(rx_ring->fifo_nr);
val = MCP251XFD_REG_FIFOCON_RXOVIE |
MCP251XFD_REG_FIFOCON_TFNRFNIE;
len = mcp251xfd_cmd_prepare_write_reg(priv, &rx_ring->irq_enable_buf,
addr, val, val);
rx_ring->irq_enable_xfer.tx_buf = &rx_ring->irq_enable_buf;
rx_ring->irq_enable_xfer.len = len;
spi_message_init_with_transfers(&rx_ring->irq_enable_msg,
&rx_ring->irq_enable_xfer, 1);
/* FIFO increment RX tail pointer */
val = MCP251XFD_REG_FIFOCON_UINC;
len = mcp251xfd_cmd_prepare_write_reg(priv, &rx_ring->uinc_buf,
addr, val, val);
for (j = 0; j < ARRAY_SIZE(rx_ring->uinc_xfer); j++) {
xfer = &rx_ring->uinc_xfer[j];
xfer->tx_buf = &rx_ring->uinc_buf;
xfer->len = len;
xfer->cs_change = 1;
xfer->cs_change_delay.value = 0;
xfer->cs_change_delay.unit = SPI_DELAY_UNIT_NSECS;
}
/* "cs_change == 1" on the last transfer results in an
* active chip select after the complete SPI
* message. This causes the controller to interpret
* the next register access as data. Set "cs_change"
* of the last transfer to "0" to properly deactivate
* the chip select at the end of the message.
*/
xfer->cs_change = 0;
/* Use 1st RX-FIFO for IRQ coalescing. If enabled
* (rx_coalesce_usecs_irq or rx_max_coalesce_frames_irq
* is activated), use the last transfer to disable:
*
* - TFNRFNIE (Receive FIFO Not Empty Interrupt)
*
* and enable:
*
* - TFHRFHIE (Receive FIFO Half Full Interrupt)
* - or -
* - TFERFFIE (Receive FIFO Full Interrupt)
*
* depending on rx_max_coalesce_frames_irq.
*
* The RXOVIE (Overflow Interrupt) is always enabled.
*/
if (rx_ring->nr == 0 && (priv->rx_coalesce_usecs_irq ||
priv->rx_obj_num_coalesce_irq)) {
val = MCP251XFD_REG_FIFOCON_UINC |
MCP251XFD_REG_FIFOCON_RXOVIE;
if (priv->rx_obj_num_coalesce_irq == rx_ring->obj_num)
val |= MCP251XFD_REG_FIFOCON_TFERFFIE;
else if (priv->rx_obj_num_coalesce_irq)
val |= MCP251XFD_REG_FIFOCON_TFHRFHIE;
len = mcp251xfd_cmd_prepare_write_reg(priv,
&rx_ring->uinc_irq_disable_buf,
addr, val, val);
xfer->tx_buf = &rx_ring->uinc_irq_disable_buf;
xfer->len = len;
}
}
}
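
/* Lay out the TEF, RX and TX rings in the controller RAM, log the
 * resulting FIFO setup and verify that it is consistent. Returns 0 on
 * success, -ENOMEM if the rings need more RAM than available, or
 * -EINVAL if the TEF coalescing configuration does not match the number
 * of TX objects.
 */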
int mcp251xfd_ring_init(struct mcp251xfd_priv *priv)
{
const struct mcp251xfd_rx_ring *rx_ring;
u16 base = 0, ram_used;
u8 fifo_nr = 1;
int err = 0, i;
netdev_reset_queue(priv->ndev);
mcp251xfd_ring_init_tef(priv, &base);
mcp251xfd_ring_init_rx(priv, &base, &fifo_nr);
mcp251xfd_ring_init_tx(priv, &base, &fifo_nr);
/* mcp251xfd_handle_rxif() will iterate over all RX rings.
* Rings with their corresponding bit set in
* priv->regs_status.rxif are read out.
*
* If the chip is configured for only 1 RX-FIFO, and if there
* is an RX interrupt pending (RXIF in INT register is set),
* it must be the 1st RX-FIFO.
*
* We mark the RXIF of the 1st FIFO as pending here, so that
* we can skip the read of the RXIF register in
* mcp251xfd_read_regs_status() for the 1 RX-FIFO only case.
*
* If we use more than 1 RX-FIFO, this value gets overwritten
* in mcp251xfd_read_regs_status(), so set it unconditionally
* here.
*/
priv->regs_status.rxif = BIT(priv->rx[0]->fifo_nr);
if (priv->tx_obj_num_coalesce_irq) {
netdev_dbg(priv->ndev,
"FIFO setup: TEF: 0x%03x: %2d*%zu bytes = %4zu bytes (coalesce)\n",
mcp251xfd_get_tef_obj_addr(0),
priv->tx_obj_num_coalesce_irq,
sizeof(struct mcp251xfd_hw_tef_obj),
priv->tx_obj_num_coalesce_irq *
sizeof(struct mcp251xfd_hw_tef_obj));
netdev_dbg(priv->ndev,
" 0x%03x: %2d*%zu bytes = %4zu bytes\n",
mcp251xfd_get_tef_obj_addr(priv->tx_obj_num_coalesce_irq),
priv->tx->obj_num - priv->tx_obj_num_coalesce_irq,
sizeof(struct mcp251xfd_hw_tef_obj),
(priv->tx->obj_num - priv->tx_obj_num_coalesce_irq) *
sizeof(struct mcp251xfd_hw_tef_obj));
} else {
netdev_dbg(priv->ndev,
"FIFO setup: TEF: 0x%03x: %2d*%zu bytes = %4zu bytes\n",
mcp251xfd_get_tef_obj_addr(0),
priv->tx->obj_num, sizeof(struct mcp251xfd_hw_tef_obj),
priv->tx->obj_num * sizeof(struct mcp251xfd_hw_tef_obj));
}
mcp251xfd_for_each_rx_ring(priv, rx_ring, i) {
if (rx_ring->nr == 0 && priv->rx_obj_num_coalesce_irq) {
netdev_dbg(priv->ndev,
"FIFO setup: RX-%u: FIFO %u/0x%03x: %2u*%u bytes = %4u bytes (coalesce)\n",
rx_ring->nr, rx_ring->fifo_nr,
mcp251xfd_get_rx_obj_addr(rx_ring, 0),
priv->rx_obj_num_coalesce_irq, rx_ring->obj_size,
priv->rx_obj_num_coalesce_irq * rx_ring->obj_size);
if (priv->rx_obj_num_coalesce_irq == MCP251XFD_FIFO_DEPTH)
continue;
netdev_dbg(priv->ndev,
" 0x%03x: %2u*%u bytes = %4u bytes\n",
mcp251xfd_get_rx_obj_addr(rx_ring,
priv->rx_obj_num_coalesce_irq),
rx_ring->obj_num - priv->rx_obj_num_coalesce_irq,
rx_ring->obj_size,
(rx_ring->obj_num - priv->rx_obj_num_coalesce_irq) *
rx_ring->obj_size);
} else {
netdev_dbg(priv->ndev,
"FIFO setup: RX-%u: FIFO %u/0x%03x: %2u*%u bytes = %4u bytes\n",
rx_ring->nr, rx_ring->fifo_nr,
mcp251xfd_get_rx_obj_addr(rx_ring, 0),
rx_ring->obj_num, rx_ring->obj_size,
rx_ring->obj_num * rx_ring->obj_size);
}
}
netdev_dbg(priv->ndev,
"FIFO setup: TX: FIFO %u/0x%03x: %2u*%u bytes = %4u bytes\n",
priv->tx->fifo_nr,
mcp251xfd_get_tx_obj_addr(priv->tx, 0),
priv->tx->obj_num, priv->tx->obj_size,
priv->tx->obj_num * priv->tx->obj_size);
netdev_dbg(priv->ndev,
"FIFO setup: free: %4d bytes\n",
MCP251XFD_RAM_SIZE - (base - MCP251XFD_RAM_START));
ram_used = base - MCP251XFD_RAM_START;
if (ram_used > MCP251XFD_RAM_SIZE) {
netdev_err(priv->ndev,
"Error during ring configuration, using more RAM (%u bytes) than available (%u bytes).\n",
ram_used, MCP251XFD_RAM_SIZE);
err = -ENOMEM;
}
if (priv->tx_obj_num_coalesce_irq &&
priv->tx_obj_num_coalesce_irq * 2 != priv->tx->obj_num) {
netdev_err(priv->ndev,
"Error during ring configuration, number of TEF coalescing buffers (%u) must be half of TEF buffers (%u).\n",
priv->tx_obj_num_coalesce_irq, priv->tx->obj_num);
err = -EINVAL;
}
return err;
}
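
/* Free the RX rings allocated in mcp251xfd_ring_alloc(). */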
void mcp251xfd_ring_free(struct mcp251xfd_priv *priv)
{
int i;
for (i = ARRAY_SIZE(priv->rx) - 1; i >= 0; i--) {
kfree(priv->rx[i]);
priv->rx[i] = NULL;
}
}
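
/* IRQ coalescing timers: when a timer expires, asynchronously re-enable
 * the RX respectively TEF FIFO interrupts that were switched off by the
 * last UINC transfer, unless the interface is already down.
 */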
static enum hrtimer_restart mcp251xfd_rx_irq_timer(struct hrtimer *t)
{
struct mcp251xfd_priv *priv = container_of(t, struct mcp251xfd_priv,
rx_irq_timer);
struct mcp251xfd_rx_ring *ring = priv->rx[0];
if (test_bit(MCP251XFD_FLAGS_DOWN, priv->flags))
return HRTIMER_NORESTART;
spi_async(priv->spi, &ring->irq_enable_msg);
return HRTIMER_NORESTART;
}
static enum hrtimer_restart mcp251xfd_tx_irq_timer(struct hrtimer *t)
{
struct mcp251xfd_priv *priv = container_of(t, struct mcp251xfd_priv,
tx_irq_timer);
struct mcp251xfd_tef_ring *ring = priv->tef;
if (test_bit(MCP251XFD_FLAGS_DOWN, priv->flags))
return HRTIMER_NORESTART;
spi_async(priv->spi, &ring->irq_enable_msg);
return HRTIMER_NORESTART;
}
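
/* Description of the controller RAM for the generic can_ram_*() layout
 * helpers: per-mode object sizes, allowed object counts and FIFO
 * constraints for the RX and TX side.
 */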
const struct can_ram_config mcp251xfd_ram_config = {
.rx = {
.size[CAN_RAM_MODE_CAN] = sizeof(struct mcp251xfd_hw_rx_obj_can),
.size[CAN_RAM_MODE_CANFD] = sizeof(struct mcp251xfd_hw_rx_obj_canfd),
.min = MCP251XFD_RX_OBJ_NUM_MIN,
.max = MCP251XFD_RX_OBJ_NUM_MAX,
.def[CAN_RAM_MODE_CAN] = CAN_RAM_NUM_MAX,
.def[CAN_RAM_MODE_CANFD] = CAN_RAM_NUM_MAX,
.fifo_num = MCP251XFD_FIFO_RX_NUM,
.fifo_depth_min = MCP251XFD_RX_FIFO_DEPTH_MIN,
.fifo_depth_coalesce_min = MCP251XFD_RX_FIFO_DEPTH_COALESCE_MIN,
},
.tx = {
.size[CAN_RAM_MODE_CAN] = sizeof(struct mcp251xfd_hw_tef_obj) +
sizeof(struct mcp251xfd_hw_tx_obj_can),
.size[CAN_RAM_MODE_CANFD] = sizeof(struct mcp251xfd_hw_tef_obj) +
sizeof(struct mcp251xfd_hw_tx_obj_canfd),
.min = MCP251XFD_TX_OBJ_NUM_MIN,
.max = MCP251XFD_TX_OBJ_NUM_MAX,
.def[CAN_RAM_MODE_CAN] = MCP251XFD_TX_OBJ_NUM_CAN_DEFAULT,
.def[CAN_RAM_MODE_CANFD] = MCP251XFD_TX_OBJ_NUM_CANFD_DEFAULT,
.fifo_num = MCP251XFD_FIFO_TX_NUM,
.fifo_depth_min = MCP251XFD_TX_FIFO_DEPTH_MIN,
.fifo_depth_coalesce_min = MCP251XFD_TX_FIFO_DEPTH_COALESCE_MIN,
},
.size = MCP251XFD_RAM_SIZE,
.fifo_depth = MCP251XFD_FIFO_DEPTH,
};
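
/* Size and allocate the rings for the current CAN mode. When switching
 * between CAN-CC and CAN-FD mode, the current ring and coalescing
 * configuration is translated back into ethtool semantics and passed to
 * can_ram_get_layout() to recompute the layout for the new mode.
 */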
int mcp251xfd_ring_alloc(struct mcp251xfd_priv *priv)
{
const bool fd_mode = mcp251xfd_is_fd_mode(priv);
struct mcp251xfd_tx_ring *tx_ring = priv->tx;
struct mcp251xfd_rx_ring *rx_ring;
u8 tx_obj_size, rx_obj_size;
u8 rem, i;
/* switching from CAN-2.0 to CAN-FD mode or vice versa */
if (fd_mode != test_bit(MCP251XFD_FLAGS_FD_MODE, priv->flags)) {
const struct ethtool_ringparam ring = {
.rx_pending = priv->rx_obj_num,
.tx_pending = priv->tx->obj_num,
};
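/* ethtool uses max_frames == 1 to express "coalescing disabled",
 * while the driver internally uses
 * {rx,tx}_obj_num_coalesce_irq == 0. Convert to the ethtool
 * convention here, as can_ram_get_layout() expects it.
 */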
const struct ethtool_coalesce ec = {
.rx_coalesce_usecs_irq = priv->rx_coalesce_usecs_irq,
.rx_max_coalesced_frames_irq = priv->rx_obj_num_coalesce_irq == 0 ?
1 : priv->rx_obj_num_coalesce_irq,
.tx_coalesce_usecs_irq = priv->tx_coalesce_usecs_irq,
.tx_max_coalesced_frames_irq = priv->tx_obj_num_coalesce_irq == 0 ?
1 : priv->tx_obj_num_coalesce_irq,
};
struct can_ram_layout layout;
can_ram_get_layout(&layout, &mcp251xfd_ram_config, &ring, &ec, fd_mode);
priv->rx_obj_num = layout.cur_rx;
priv->rx_obj_num_coalesce_irq = layout.rx_coalesce;
tx_ring->obj_num = layout.cur_tx;
priv->tx_obj_num_coalesce_irq = layout.tx_coalesce;
}
if (fd_mode) {
tx_obj_size = sizeof(struct mcp251xfd_hw_tx_obj_canfd);
rx_obj_size = sizeof(struct mcp251xfd_hw_rx_obj_canfd);
set_bit(MCP251XFD_FLAGS_FD_MODE, priv->flags);
} else {
tx_obj_size = sizeof(struct mcp251xfd_hw_tx_obj_can);
rx_obj_size = sizeof(struct mcp251xfd_hw_rx_obj_can);
clear_bit(MCP251XFD_FLAGS_FD_MODE, priv->flags);
}
tx_ring->obj_num_shift_to_u8 = BITS_PER_TYPE(tx_ring->obj_num) -
ilog2(tx_ring->obj_num);
tx_ring->obj_size = tx_obj_size;
rem = priv->rx_obj_num;
for (i = 0; i < ARRAY_SIZE(priv->rx) && rem; i++) {
u8 rx_obj_num;
if (i == 0 && priv->rx_obj_num_coalesce_irq)
rx_obj_num = min_t(u8, priv->rx_obj_num_coalesce_irq * 2,
MCP251XFD_FIFO_DEPTH);
else
rx_obj_num = min_t(u8, rounddown_pow_of_two(rem),
MCP251XFD_FIFO_DEPTH);
rem -= rx_obj_num;
rx_ring = kzalloc(sizeof(*rx_ring) + rx_obj_size * rx_obj_num,
GFP_KERNEL);
if (!rx_ring) {
mcp251xfd_ring_free(priv);
return -ENOMEM;
}
rx_ring->obj_num = rx_obj_num;
rx_ring->obj_num_shift_to_u8 = BITS_PER_TYPE(rx_ring->obj_num_shift_to_u8) -
ilog2(rx_obj_num);
rx_ring->obj_size = rx_obj_size;
priv->rx[i] = rx_ring;
}
priv->rx_ring_num = i;
hrtimer_setup(&priv->rx_irq_timer, mcp251xfd_rx_irq_timer, CLOCK_MONOTONIC,
HRTIMER_MODE_REL);
hrtimer_setup(&priv->tx_irq_timer, mcp251xfd_tx_irq_timer, CLOCK_MONOTONIC,
HRTIMER_MODE_REL);
return 0;
}