linux/drivers/net/ethernet/wangxun/txgbe/txgbe_irq.c
Jiawen Wu e37546ad1f net: wangxun: revert the adjustment of the IRQ vector sequence
Due to hardware limitations of NGBE, queue IRQs can only be requested
on vector 0 to 7. When the number of queues is set to the maximum 8,
the PCI IRQ vectors are allocated from 0 to 8. The vector 0 is used by
MISC interrupt, and although the vector 8 is used by queue interrupt,
it is unable to receive packets. This will cause some packets to be
dropped when RSS is enabled and they are assigned to queue 8.

So revert the adjustment of the MISC IRQ location, to make it be the
last one in IRQ vectors.

Fixes: 937d46ecc5 ("net: wangxun: add ethtool_ops for channel number")
Cc: stable@vger.kernel.org
Signed-off-by: Jiawen Wu <jiawenwu@trustnetic.com>
Reviewed-by: Larysa Zaremba <larysa.zaremba@intel.com>
Link: https://patch.msgid.link/20250701063030.59340-3-jiawenwu@trustnetic.com
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
2025-07-03 11:51:40 +02:00

274 lines
6.5 KiB
C

// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2015 - 2024 Beijing WangXun Technology Co., Ltd. */
#include <linux/irqdomain.h>
#include <linux/pci.h>
#include "../libwx/wx_type.h"
#include "../libwx/wx_lib.h"
#include "../libwx/wx_ptp.h"
#include "../libwx/wx_hw.h"
#include "../libwx/wx_sriov.h"
#include "txgbe_type.h"
#include "txgbe_phy.h"
#include "txgbe_irq.h"
#include "txgbe_aml.h"
/**
 * txgbe_irq_enable - Enable default interrupt generation settings
 * @wx: pointer to private structure
 * @queues: enable irqs for queues
 **/
void txgbe_irq_enable(struct wx *wx, bool queues)
{
	u32 misc_mask = TXGBE_PX_MISC_IEN_MASK;

	/* AML parts additionally deliver GPIO events via the misc vector */
	if (wx->mac.type == wx_mac_aml) {
		misc_mask |= TXGBE_PX_MISC_GPIO;
		txgbe_gpio_init_aml(wx);
	}

	wr32(wx, WX_PX_MISC_IEN, misc_mask);

	/* unmask the misc vector, then optionally all queue vectors */
	wx_intr_enable(wx, TXGBE_INTR_MISC(wx));
	if (queues)
		wx_intr_enable(wx, TXGBE_INTR_QALL(wx));
}
/**
 * txgbe_request_queue_irqs - Initialize MSI-X queue interrupts
 * @wx: board private structure
 *
 * Allocate MSI-X queue vectors and request interrupts from the kernel.
 **/
int txgbe_request_queue_irqs(struct wx *wx)
{
	struct net_device *netdev = wx->netdev;
	int vector, err;

	/* queue IRQs are only requested in MSI-X mode */
	if (!wx->pdev->msix_enabled)
		return 0;

	for (vector = 0; vector < wx->num_q_vectors; vector++) {
		struct wx_q_vector *q_vector = wx->q_vector[vector];
		struct msix_entry *entry = &wx->msix_q_entries[vector];

		/* skip q_vectors that drive neither Tx nor Rx rings */
		if (!q_vector->tx.ring || !q_vector->rx.ring)
			continue;

		snprintf(q_vector->name, sizeof(q_vector->name) - 1,
			 "%s-TxRx-%d", netdev->name, entry->entry);

		err = request_irq(entry->vector, wx_msix_clean_rings, 0,
				  q_vector->name, q_vector);
		if (err) {
			wx_err(wx, "request_irq failed for MSIX interrupt %s Error: %d\n",
			       q_vector->name, err);
			goto free_queue_irqs;
		}
	}

	return 0;

free_queue_irqs:
	/* unwind: release every IRQ requested before the failure */
	while (vector--)
		free_irq(wx->msix_q_entries[vector].vector,
			 wx->q_vector[vector]);

	return err;
}
/* Look up the LINK virq in the misc domain and request its threaded handler. */
static int txgbe_request_link_irq(struct txgbe *txgbe)
{
	unsigned int virq;

	virq = irq_find_mapping(txgbe->misc.domain, TXGBE_IRQ_LINK);
	txgbe->link_irq = virq;

	return request_threaded_irq(virq, NULL, txgbe_link_irq_handler,
				    IRQF_ONESHOT, "txgbe-link-irq", txgbe);
}
/* Look up the GPIO virq in the misc domain and request its threaded handler. */
static int txgbe_request_gpio_irq(struct txgbe *txgbe)
{
	unsigned int virq;

	virq = irq_find_mapping(txgbe->misc.domain, TXGBE_IRQ_GPIO);
	txgbe->gpio_irq = virq;

	return request_threaded_irq(virq, NULL, txgbe_gpio_irq_handler_aml,
				    IRQF_ONESHOT, "txgbe-gpio-irq", txgbe);
}
/* irq_chip for the nested misc sub-interrupts; only the name is populated */
static const struct irq_chip txgbe_irq_chip = {
	.name = "txgbe-misc-irq",
};
/* .map callback for the misc IRQ domain: configure each newly created
 * virq as a nested-thread interrupt backed by txgbe's irq_chip.
 */
static int txgbe_misc_irq_domain_map(struct irq_domain *d,
				     unsigned int irq,
				     irq_hw_number_t hwirq)
{
	struct txgbe *txgbe = d->host_data;

	irq_set_chip_data(irq, txgbe);
	irq_set_chip(irq, &txgbe->misc.chip);
	/* sub-irqs are dispatched via handle_nested_irq() from a thread */
	irq_set_nested_thread(irq, true);
	irq_set_noprobe(irq);

	return 0;
}
/* Domain ops for the misc IRQ domain; only .map is needed */
static const struct irq_domain_ops txgbe_misc_irq_domain_ops = {
	.map = txgbe_misc_irq_domain_map,
};
/* Hard-IRQ handler for the misc vector.
 *
 * In MSI-X mode the misc vector carries only non-queue causes: latch the
 * misc interrupt status for the thread handler and, on a VF mailbox event,
 * run the mailbox task and re-arm the vector right away.
 *
 * In MSI/INTx mode all causes share one vector: consult the vector-0
 * status block for queue work, schedule NAPI for q_vector 0, then latch
 * the misc status for the thread handler.
 */
static irqreturn_t txgbe_misc_irq_handle(int irq, void *data)
{
	struct wx_q_vector *q_vector;
	struct txgbe *txgbe = data;
	struct wx *wx = txgbe->wx;
	u32 eicr;

	if (wx->pdev->msix_enabled) {
		eicr = wx_misc_isb(wx, WX_ISB_MISC);
		/* saved causes are consumed by txgbe_misc_irq_thread_fn() */
		txgbe->eicr = eicr;
		if (eicr & TXGBE_PX_MISC_IC_VF_MBOX) {
			wx_msg_task(txgbe->wx);
			wx_intr_enable(wx, TXGBE_INTR_MISC(wx));
		}
		return IRQ_WAKE_THREAD;
	}

	eicr = wx_misc_isb(wx, WX_ISB_VEC0);
	if (!eicr) {
		/* shared interrupt alert!
		 * the interrupt that we masked before the ICR read.
		 */
		if (netif_running(wx->netdev))
			txgbe_irq_enable(wx, true);
		return IRQ_NONE; /* Not our interrupt */
	}
	wx->isb_mem[WX_ISB_VEC0] = 0;
	/* in INTx (non-MSI) mode the interrupt must be acked explicitly */
	if (!(wx->pdev->msi_enabled))
		wr32(wx, WX_PX_INTA, 1);

	/* would disable interrupts here but it is auto disabled */
	q_vector = wx->q_vector[0];
	napi_schedule_irqoff(&q_vector->napi);

	/* latch misc causes for the thread handler */
	txgbe->eicr = wx_misc_isb(wx, WX_ISB_MISC);

	return IRQ_WAKE_THREAD;
}
/* Threaded half of the misc handler: demultiplex the latched causes to
 * the nested link/GPIO sub-interrupts and PTP, then re-arm the vector.
 */
static irqreturn_t txgbe_misc_irq_thread_fn(int irq, void *data)
{
	struct txgbe *txgbe = data;
	struct wx *wx = txgbe->wx;
	unsigned int handled = 0;
	unsigned int nested;
	u32 eicr = txgbe->eicr;

	/* link up/down or autoneg event */
	if (eicr & (TXGBE_PX_MISC_ETH_LK | TXGBE_PX_MISC_ETH_LKDN |
		    TXGBE_PX_MISC_ETH_AN)) {
		nested = irq_find_mapping(txgbe->misc.domain, TXGBE_IRQ_LINK);
		handle_nested_irq(nested);
		handled++;
	}

	if (eicr & TXGBE_PX_MISC_GPIO) {
		nested = irq_find_mapping(txgbe->misc.domain, TXGBE_IRQ_GPIO);
		handle_nested_irq(nested);
		handled++;
	}

	if (unlikely(eicr & TXGBE_PX_MISC_IC_TIMESYNC)) {
		wx_ptp_check_pps_event(wx);
		handled++;
	}

	/* re-arm the misc vector now that all causes are serviced */
	wx_intr_enable(wx, TXGBE_INTR_MISC(wx));

	return handled ? IRQ_HANDLED : IRQ_NONE;
}
/* Dispose every virq mapping in the misc domain, then remove the domain. */
static void txgbe_del_irq_domain(struct txgbe *txgbe)
{
	int hwirq;

	for (hwirq = 0; hwirq < txgbe->misc.nirqs; hwirq++)
		irq_dispose_mapping(irq_find_mapping(txgbe->misc.domain,
						     hwirq));

	irq_domain_remove(txgbe->misc.domain);
}
void txgbe_free_misc_irq(struct txgbe *txgbe)
{
if (txgbe->wx->mac.type == wx_mac_aml40)
return;
if (txgbe->wx->mac.type == wx_mac_aml)
free_irq(txgbe->gpio_irq, txgbe);
free_irq(txgbe->link_irq, txgbe);
free_irq(txgbe->misc.irq, txgbe);
txgbe_del_irq_domain(txgbe);
txgbe->wx->misc_irq_domain = false;
}
/**
 * txgbe_setup_misc_irq - Set up the misc interrupt domain and handlers
 * @txgbe: pointer to the txgbe structure
 *
 * Create a nested IRQ domain that demultiplexes the single misc vector
 * into link and GPIO sub-interrupts, then request the misc IRQ, the link
 * IRQ and, on aml parts, the GPIO IRQ. aml40 parts skip all of it.
 *
 * Return: 0 on success, negative errno on failure.
 **/
int txgbe_setup_misc_irq(struct txgbe *txgbe)
{
	unsigned long flags = IRQF_ONESHOT;
	struct wx *wx = txgbe->wx;
	int hwirq, err;

	/* aml40 skips the misc IRQ domain entirely */
	if (wx->mac.type == wx_mac_aml40)
		goto skip_sp_irq;

	txgbe->misc.nirqs = TXGBE_IRQ_MAX;
	txgbe->misc.domain = irq_domain_create_simple(NULL, txgbe->misc.nirqs, 0,
						      &txgbe_misc_irq_domain_ops,
						      txgbe);
	if (!txgbe->misc.domain)
		return -ENOMEM;

	for (hwirq = 0; hwirq < txgbe->misc.nirqs; hwirq++)
		irq_create_mapping(txgbe->misc.domain, hwirq);

	txgbe->misc.chip = txgbe_irq_chip;
	if (wx->pdev->msix_enabled) {
		txgbe->misc.irq = wx->msix_entry->vector;
	} else {
		txgbe->misc.irq = wx->pdev->irq;
		/* legacy INTx may be shared with other devices */
		if (!wx->pdev->msi_enabled)
			flags |= IRQF_SHARED;
	}

	err = request_threaded_irq(txgbe->misc.irq, txgbe_misc_irq_handle,
				   txgbe_misc_irq_thread_fn,
				   flags,
				   wx->netdev->name, txgbe);
	if (err)
		goto del_misc_irq;

	err = txgbe_request_link_irq(txgbe);
	if (err)
		goto free_misc_irq;

	/* sp has no GPIO sub-interrupt */
	if (wx->mac.type == wx_mac_sp)
		goto skip_sp_irq;

	err = txgbe_request_gpio_irq(txgbe);
	if (err)
		goto free_link_irq;

skip_sp_irq:
	wx->misc_irq_domain = true;

	return 0;

free_link_irq:
	free_irq(txgbe->link_irq, txgbe);
free_misc_irq:	/* was misspelled "free_msic_irq"; renamed for consistency */
	free_irq(txgbe->misc.irq, txgbe);
del_misc_irq:
	txgbe_del_irq_domain(txgbe);

	return err;
}