// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
/* Copyright(c) 2014 - 2020 Intel Corporation */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include "adf_accel_devices.h"
#include "adf_common_drv.h"
#include "adf_cfg.h"
#include "adf_cfg_strings.h"
#include "adf_cfg_common.h"
#include "adf_transport_access_macros.h"
#include "adf_transport_internal.h"

#define ADF_MAX_NUM_VFS	32

static struct workqueue_struct *adf_misc_wq;

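/*
 * Allocate one MSI-X vector per ring bank plus one extra vector for the
 * AE cluster interrupt, i.e. num_banks + 1 in total.
 */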
static int adf_enable_msix(struct adf_accel_dev *accel_dev)
{
	struct adf_accel_pci *pci_dev_info = &accel_dev->accel_pci_dev;
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	u32 msix_num_entries = hw_data->num_banks + 1;
	int ret;

	if (hw_data->set_msix_rttable)
		hw_data->set_msix_rttable(accel_dev);

	ret = pci_alloc_irq_vectors(pci_dev_info->pci_dev, msix_num_entries,
				    msix_num_entries, PCI_IRQ_MSIX);
	if (unlikely(ret < 0)) {
		dev_err(&GET_DEV(accel_dev),
			"Failed to allocate %d MSI-X vectors\n",
			msix_num_entries);
		return ret;
	}
	return 0;
}

static void adf_disable_msix(struct adf_accel_pci *pci_dev_info)
{
	pci_free_irq_vectors(pci_dev_info->pci_dev);
}

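/*
 * Per-bank (bundle) interrupt handler: clear the bank's interrupt flag and
 * coalescing control register, then defer response ring processing to the
 * bank's tasklet.
 */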
static irqreturn_t adf_msix_isr_bundle(int irq, void *bank_ptr)
{
	struct adf_etr_bank_data *bank = bank_ptr;
	struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(bank->accel_dev);

	csr_ops->write_csr_int_flag_and_col(bank->csr_addr, bank->bank_number,
					    0);
	tasklet_hi_schedule(&bank->resp_handler);
	return IRQ_HANDLED;
}

#ifdef CONFIG_PCI_IOV
void adf_enable_vf2pf_interrupts(struct adf_accel_dev *accel_dev, u32 vf_mask)
{
	void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev);
	unsigned long flags;

	spin_lock_irqsave(&accel_dev->pf.vf2pf_ints_lock, flags);
	GET_PFVF_OPS(accel_dev)->enable_vf2pf_interrupts(pmisc_addr, vf_mask);
	spin_unlock_irqrestore(&accel_dev->pf.vf2pf_ints_lock, flags);
}

void adf_disable_all_vf2pf_interrupts(struct adf_accel_dev *accel_dev)
{
	void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev);
	unsigned long flags;

	spin_lock_irqsave(&accel_dev->pf.vf2pf_ints_lock, flags);
	GET_PFVF_OPS(accel_dev)->disable_all_vf2pf_interrupts(pmisc_addr);
	spin_unlock_irqrestore(&accel_dev->pf.vf2pf_ints_lock, flags);
}

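/*
 * Retrieve and disable the pending (and currently enabled) VF2PF interrupt
 * sources in a single operation. On current HW generations a write to the
 * mask register, regardless of the value, acknowledges any pending VF2PF
 * interrupt, so the sources must be disabled as they are read to avoid
 * losing interrupts that arrive between the read and the write.
 */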
static u32 adf_disable_pending_vf2pf_interrupts(struct adf_accel_dev *accel_dev)
{
	void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev);
	u32 pending;

	spin_lock(&accel_dev->pf.vf2pf_ints_lock);
	pending = GET_PFVF_OPS(accel_dev)->disable_pending_vf2pf_interrupts(pmisc_addr);
	spin_unlock(&accel_dev->pf.vf2pf_ints_lock);

	return pending;
}

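/*
 * Check for pending VF2PF interrupts and schedule handling for each source.
 * Returns true if at least one interrupt was queued for servicing.
 */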
static bool adf_handle_vf2pf_int(struct adf_accel_dev *accel_dev)
{
	bool irq_handled = false;
	unsigned long vf_mask;

	/* Get the interrupt sources triggered by VFs, except for those already disabled */
	vf_mask = adf_disable_pending_vf2pf_interrupts(accel_dev);
	if (vf_mask) {
		struct adf_accel_vf_info *vf_info;
		int i;

		/*
		 * Handle VF2PF interrupt unless the VF is malicious and
		 * is attempting to flood the host OS with VF2PF interrupts.
		 */
		for_each_set_bit(i, &vf_mask, ADF_MAX_NUM_VFS) {
			vf_info = accel_dev->pf.vf_info + i;

			if (!__ratelimit(&vf_info->vf2pf_ratelimit)) {
				dev_info(&GET_DEV(accel_dev),
					 "Too many ints from VF%d\n",
					 vf_info->vf_nr);
				continue;
			}

			adf_schedule_vf2pf_handler(vf_info);
			irq_handled = true;
		}
	}
	return irq_handled;
}
#endif /* CONFIG_PCI_IOV */

static bool adf_handle_pm_int(struct adf_accel_dev *accel_dev)
{
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;

	if (hw_data->handle_pm_interrupt &&
	    hw_data->handle_pm_interrupt(accel_dev))
		return true;

	return false;
}

static bool adf_handle_ras_int(struct adf_accel_dev *accel_dev)
{
	struct adf_ras_ops *ras_ops = &accel_dev->hw_device->ras_ops;
	bool reset_required;

	if (ras_ops->handle_interrupt &&
	    ras_ops->handle_interrupt(accel_dev, &reset_required)) {
		if (reset_required) {
			dev_err(&GET_DEV(accel_dev), "Fatal error, reset required\n");
			if (adf_notify_fatal_error(accel_dev))
				dev_err(&GET_DEV(accel_dev),
					"Failed to notify fatal error\n");
		}

		return true;
	}

	return false;
}

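/*
 * AE cluster interrupt handler: check the VF2PF (when SR-IOV is enabled),
 * power management and RAS sources in turn, and report the interrupt as
 * spurious if none of them claims it.
 */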
static irqreturn_t adf_msix_isr_ae(int irq, void *dev_ptr)
{
	struct adf_accel_dev *accel_dev = dev_ptr;

#ifdef CONFIG_PCI_IOV
	/* If SR-IOV is enabled (vf_info is non-NULL), check for VF->PF ints */
	if (accel_dev->pf.vf_info && adf_handle_vf2pf_int(accel_dev))
		return IRQ_HANDLED;
#endif /* CONFIG_PCI_IOV */

	if (adf_handle_pm_int(accel_dev))
		return IRQ_HANDLED;

	if (adf_handle_ras_int(accel_dev))
		return IRQ_HANDLED;

	dev_dbg(&GET_DEV(accel_dev), "qat_dev%d spurious AE interrupt\n",
		accel_dev->accel_id);

	return IRQ_NONE;
}

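/*
 * Free any requested bank IRQs (clearing their affinity hints first) and
 * the AE cluster IRQ.
 */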
static void adf_free_irqs(struct adf_accel_dev *accel_dev)
{
	struct adf_accel_pci *pci_dev_info = &accel_dev->accel_pci_dev;
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	struct adf_irq *irqs = pci_dev_info->msix_entries.irqs;
	struct adf_etr_data *etr_data = accel_dev->transport;
	int clust_irq = hw_data->num_banks;
	int irq, i = 0;

	if (pci_dev_info->msix_entries.num_entries > 1) {
		for (i = 0; i < hw_data->num_banks; i++) {
			if (irqs[i].enabled) {
				irq = pci_irq_vector(pci_dev_info->pci_dev, i);
				irq_set_affinity_hint(irq, NULL);
				free_irq(irq, &etr_data->banks[i]);
			}
		}
	}

	if (irqs[i].enabled) {
		irq = pci_irq_vector(pci_dev_info->pci_dev, clust_irq);
		free_irq(irq, accel_dev);
	}
}

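/*
 * Request the per-bank IRQs (unless SR-IOV is enabled) followed by the AE
 * cluster IRQ. Bank IRQs are spread across the online CPUs via affinity
 * hints, with the starting CPU offset by the accelerator id.
 */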
static int adf_request_irqs(struct adf_accel_dev *accel_dev)
{
	struct adf_accel_pci *pci_dev_info = &accel_dev->accel_pci_dev;
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	struct adf_irq *irqs = pci_dev_info->msix_entries.irqs;
	struct adf_etr_data *etr_data = accel_dev->transport;
	int clust_irq = hw_data->num_banks;
	int ret, irq, i = 0;
	char *name;

	/* Request msix irq for all banks unless SR-IOV enabled */
	if (!accel_dev->pf.vf_info) {
		for (i = 0; i < hw_data->num_banks; i++) {
			struct adf_etr_bank_data *bank = &etr_data->banks[i];
			unsigned int cpu, cpus = num_online_cpus();

			name = irqs[i].name;
			snprintf(name, ADF_MAX_MSIX_VECTOR_NAME,
				 "qat%d-bundle%d", accel_dev->accel_id, i);
			irq = pci_irq_vector(pci_dev_info->pci_dev, i);
			if (unlikely(irq < 0)) {
				dev_err(&GET_DEV(accel_dev),
					"Failed to get IRQ number of device vector %d - %s\n",
					i, name);
				ret = irq;
				goto err;
			}
			ret = request_irq(irq, adf_msix_isr_bundle, 0,
					  &name[0], bank);
			if (ret) {
				dev_err(&GET_DEV(accel_dev),
					"Failed to allocate IRQ %d for %s\n",
					irq, name);
				goto err;
			}

			cpu = ((accel_dev->accel_id * hw_data->num_banks) +
			       i) % cpus;
			irq_set_affinity_hint(irq, get_cpu_mask(cpu));
			irqs[i].enabled = true;
		}
	}

	/* Request msix irq for AE */
	name = irqs[i].name;
	snprintf(name, ADF_MAX_MSIX_VECTOR_NAME,
		 "qat%d-ae-cluster", accel_dev->accel_id);
	irq = pci_irq_vector(pci_dev_info->pci_dev, clust_irq);
	if (unlikely(irq < 0)) {
		dev_err(&GET_DEV(accel_dev),
			"Failed to get IRQ number of device vector %d - %s\n",
			i, name);
		ret = irq;
		goto err;
	}
	ret = request_irq(irq, adf_msix_isr_ae, 0, &name[0], accel_dev);
	if (ret) {
		dev_err(&GET_DEV(accel_dev),
			"Failed to allocate IRQ %d for %s\n", irq, name);
		goto err;
	}
	irqs[i].enabled = true;
	return ret;
err:
	adf_free_irqs(accel_dev);
	return ret;
}

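/*
 * Allocate the adf_irq bookkeeping array: one entry for the AE cluster
 * vector plus, when SR-IOV is disabled, one entry per ring bank.
 */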
static int adf_isr_alloc_msix_vectors_data(struct adf_accel_dev *accel_dev)
{
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	u32 msix_num_entries = 1;
	struct adf_irq *irqs;

	/* If SR-IOV is disabled (vf_info is NULL), add entries for each bank */
	if (!accel_dev->pf.vf_info)
		msix_num_entries += hw_data->num_banks;

	irqs = kcalloc_node(msix_num_entries, sizeof(*irqs),
			    GFP_KERNEL, dev_to_node(&GET_DEV(accel_dev)));
	if (!irqs)
		return -ENOMEM;

	accel_dev->accel_pci_dev.msix_entries.num_entries = msix_num_entries;
	accel_dev->accel_pci_dev.msix_entries.irqs = irqs;
	return 0;
}

static void adf_isr_free_msix_vectors_data(struct adf_accel_dev *accel_dev)
{
	kfree(accel_dev->accel_pci_dev.msix_entries.irqs);
	accel_dev->accel_pci_dev.msix_entries.irqs = NULL;
}

static int adf_setup_bh(struct adf_accel_dev *accel_dev)
{
	struct adf_etr_data *priv_data = accel_dev->transport;
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	int i;

	for (i = 0; i < hw_data->num_banks; i++)
		tasklet_init(&priv_data->banks[i].resp_handler,
			     adf_response_handler,
			     (unsigned long)&priv_data->banks[i]);
	return 0;
}

static void adf_cleanup_bh(struct adf_accel_dev *accel_dev)
{
	struct adf_etr_data *priv_data = accel_dev->transport;
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	int i;

	for (i = 0; i < hw_data->num_banks; i++) {
		tasklet_disable(&priv_data->banks[i].resp_handler);
		tasklet_kill(&priv_data->banks[i].resp_handler);
	}
}

/**
 * adf_isr_resource_free() - Free IRQ for acceleration device
 * @accel_dev: Pointer to acceleration device.
 *
 * Function frees interrupts for acceleration device.
 */
void adf_isr_resource_free(struct adf_accel_dev *accel_dev)
{
	adf_free_irqs(accel_dev);
	adf_cleanup_bh(accel_dev);
	adf_disable_msix(&accel_dev->accel_pci_dev);
	adf_isr_free_msix_vectors_data(accel_dev);
}
EXPORT_SYMBOL_GPL(adf_isr_resource_free);

/**
 * adf_isr_resource_alloc() - Allocate IRQ for acceleration device
 * @accel_dev: Pointer to acceleration device.
 *
 * Function allocates interrupts for acceleration device.
 *
 * Return: 0 on success, error code otherwise.
 */
int adf_isr_resource_alloc(struct adf_accel_dev *accel_dev)
{
	int ret;

	ret = adf_isr_alloc_msix_vectors_data(accel_dev);
	if (ret)
		goto err_out;

	ret = adf_enable_msix(accel_dev);
	if (ret)
		goto err_free_msix_table;

	ret = adf_setup_bh(accel_dev);
	if (ret)
		goto err_disable_msix;

	ret = adf_request_irqs(accel_dev);
	if (ret)
		goto err_cleanup_bh;

	return 0;

err_cleanup_bh:
	adf_cleanup_bh(accel_dev);

err_disable_msix:
	adf_disable_msix(&accel_dev->accel_pci_dev);

err_free_msix_table:
	adf_isr_free_msix_vectors_data(accel_dev);

err_out:
	return ret;
}
EXPORT_SYMBOL_GPL(adf_isr_resource_alloc);

/**
 * adf_init_misc_wq() - Init misc workqueue
 *
 * Return: 0 on success, error code otherwise.
 */
int __init adf_init_misc_wq(void)
{
	adf_misc_wq = alloc_workqueue("qat_misc_wq", WQ_MEM_RECLAIM, 0);

	return !adf_misc_wq ? -ENOMEM : 0;
}

void adf_exit_misc_wq(void)
{
	if (adf_misc_wq)
		destroy_workqueue(adf_misc_wq);

	adf_misc_wq = NULL;
}

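/* Thin wrappers that queue work on the driver-wide adf_misc_wq created above. */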
bool adf_misc_wq_queue_work(struct work_struct *work)
{
	return queue_work(adf_misc_wq, work);
}

bool adf_misc_wq_queue_delayed_work(struct delayed_work *work,
				    unsigned long delay)
{
	return queue_delayed_work(adf_misc_wq, work, delay);
}

void adf_misc_wq_flush(void)
{
	flush_workqueue(adf_misc_wq);
}