// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
/* Copyright(c) 2015 - 2021 Intel Corporation */
#include <linux/workqueue.h>
#include <linux/pci.h>
#include <linux/device.h>
#include <linux/iommu.h>
#include "adf_common_drv.h"
#include "adf_cfg.h"
#include "adf_pfvf_pf_msg.h"
#define ADF_VF2PF_RATELIMIT_INTERVAL 8
#define ADF_VF2PF_RATELIMIT_BURST 130

static struct workqueue_struct *pf2vf_resp_wq;
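
/* Work item wrapping the deferred handling of a single VF2PF request */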
struct adf_pf2vf_resp {
	struct work_struct pf2vf_resp_work;
	struct adf_accel_vf_info *vf_info;
};
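
/*
 * Receive and handle one VF2PF message in process context and, if it was
 * handled successfully, re-enable the VF2PF interrupt for that VF.
 */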
static void adf_iov_send_resp(struct work_struct *work)
{
	struct adf_pf2vf_resp *pf2vf_resp =
		container_of(work, struct adf_pf2vf_resp, pf2vf_resp_work);
	struct adf_accel_vf_info *vf_info = pf2vf_resp->vf_info;
	struct adf_accel_dev *accel_dev = vf_info->accel_dev;
	u32 vf_nr = vf_info->vf_nr;
	bool ret;

	ret = adf_recv_and_handle_vf2pf_msg(accel_dev, vf_nr);
	if (ret)
		/* re-enable interrupt on PF from this VF */
		adf_enable_vf2pf_interrupts(accel_dev, 1 << vf_nr);

	kfree(pf2vf_resp);
}
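
/*
 * Queue the handling of a VF2PF request onto the PF2VF response
 * workqueue. May be invoked from atomic context, hence the GFP_ATOMIC
 * allocation.
 */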
void adf_schedule_vf2pf_handler(struct adf_accel_vf_info *vf_info)
{
	struct adf_pf2vf_resp *pf2vf_resp;

	pf2vf_resp = kzalloc(sizeof(*pf2vf_resp), GFP_ATOMIC);
	if (!pf2vf_resp)
		return;

	pf2vf_resp->vf_info = vf_info;
	INIT_WORK(&pf2vf_resp->pf2vf_resp_work, adf_iov_send_resp);
	queue_work(pf2vf_resp_wq, &pf2vf_resp->pf2vf_resp_work);
}
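
/*
 * Initialize the per-VF PFVF state (lock, compat version, VF2PF rate
 * limiting), configure the hardware for IOV operation and enable all
 * the VFs supported by the device.
 */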
static int adf_enable_sriov(struct adf_accel_dev *accel_dev)
{
	struct pci_dev *pdev = accel_to_pci_dev(accel_dev);
	int totalvfs = pci_sriov_get_totalvfs(pdev);
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	struct adf_accel_vf_info *vf_info;
	int i;

	for (i = 0, vf_info = accel_dev->pf.vf_info; i < totalvfs;
	     i++, vf_info++) {
		/* This ptr will be populated when VFs are created */
		vf_info->accel_dev = accel_dev;
		vf_info->vf_nr = i;
		vf_info->vf_compat_ver = 0;

		mutex_init(&vf_info->pf2vf_lock);
		ratelimit_state_init(&vf_info->vf2pf_ratelimit,
				     ADF_VF2PF_RATELIMIT_INTERVAL,
				     ADF_VF2PF_RATELIMIT_BURST);
	}

	/* Set Valid bits in AE Thread to PCIe Function Mapping */
	if (hw_data->configure_iov_threads)
		hw_data->configure_iov_threads(accel_dev, true);

	/* Enable VF to PF interrupts for all VFs */
	if (hw_data->pfvf_ops.get_pf2vf_offset)
		adf_enable_vf2pf_interrupts(accel_dev, BIT_ULL(totalvfs) - 1);

	/*
	 * Due to the hardware design, when SR-IOV and the ring arbiter
	 * are enabled all the VFs supported in hardware must be enabled in
	 * order for all the hardware resources (i.e. bundles) to be usable.
	 * When SR-IOV is enabled, each of the VFs will own one bundle.
	 */
	return pci_enable_sriov(pdev, totalvfs);
}

/**
 * adf_disable_sriov() - Disable SRIOV for the device
 * @accel_dev: Pointer to accel device.
 *
 * Function disables SRIOV for the accel device.
 *
 * Return: void
 */
void adf_disable_sriov(struct adf_accel_dev *accel_dev)
{
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	int totalvfs = pci_sriov_get_totalvfs(accel_to_pci_dev(accel_dev));
	struct adf_accel_vf_info *vf;
	int i;

	if (!accel_dev->pf.vf_info)
		return;

	if (hw_data->pfvf_ops.get_pf2vf_offset)
		adf_pf2vf_notify_restarting(accel_dev);

	pci_disable_sriov(accel_to_pci_dev(accel_dev));

	/* Disable VF to PF interrupts */
	if (hw_data->pfvf_ops.get_pf2vf_offset)
		adf_disable_vf2pf_interrupts(accel_dev, GENMASK(31, 0));

	/* Clear Valid bits in AE Thread to PCIe Function Mapping */
	if (hw_data->configure_iov_threads)
		hw_data->configure_iov_threads(accel_dev, false);

	for (i = 0, vf = accel_dev->pf.vf_info; i < totalvfs; i++, vf++)
		mutex_destroy(&vf->pf2vf_lock);

	kfree(accel_dev->pf.vf_info);
	accel_dev->pf.vf_info = NULL;
}
EXPORT_SYMBOL_GPL(adf_disable_sriov);
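
/*
 * Preserve the "services enabled" configuration across the stop and
 * shutdown performed before re-initializing a started device for SR-IOV.
 */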
static int adf_sriov_prepare_restart(struct adf_accel_dev *accel_dev)
{
	char services[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = {0};
	int ret;

	ret = adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC,
				      ADF_SERVICES_ENABLED, services);

	adf_dev_stop(accel_dev);
	adf_dev_shutdown(accel_dev);

	if (!ret) {
		ret = adf_cfg_section_add(accel_dev, ADF_GENERAL_SEC);
		if (ret)
			return ret;

		ret = adf_cfg_add_key_value_param(accel_dev, ADF_GENERAL_SEC,
						  ADF_SERVICES_ENABLED,
						  services, ADF_STR);
		if (ret)
			return ret;
	}

	return 0;
}

/**
 * adf_sriov_configure() - Enable SRIOV for the device
 * @pdev: Pointer to PCI device.
 * @numvfs: Number of virtual functions (VFs) to enable.
 *
 * Note that the @numvfs parameter is ignored and all VFs supported by the
 * device are enabled due to the design of the hardware.
 *
 * Function enables SRIOV for the PCI device.
 *
 * Return: number of VFs enabled on success, error code otherwise.
 */
int adf_sriov_configure(struct pci_dev *pdev, int numvfs)
{
	struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
	int totalvfs = pci_sriov_get_totalvfs(pdev);
	unsigned long val;
	int ret;

	if (!accel_dev) {
		dev_err(&pdev->dev, "Failed to find accel_dev\n");
		return -EFAULT;
	}

	if (!iommu_present(&pci_bus_type))
		dev_warn(&pdev->dev, "IOMMU should be enabled for SR-IOV to work correctly\n");

	if (accel_dev->pf.vf_info) {
		dev_info(&pdev->dev, "Already enabled for this device\n");
		return -EINVAL;
	}

	if (adf_dev_started(accel_dev)) {
		if (adf_devmgr_in_reset(accel_dev) ||
		    adf_dev_in_use(accel_dev)) {
			dev_err(&GET_DEV(accel_dev), "Device busy\n");
			return -EBUSY;
		}

		ret = adf_sriov_prepare_restart(accel_dev);
		if (ret)
			return ret;
	}

	if (adf_cfg_section_add(accel_dev, ADF_KERNEL_SEC))
		return -EFAULT;

	val = 0;
	if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
					ADF_NUM_CY, (void *)&val, ADF_DEC))
		return -EFAULT;

	set_bit(ADF_STATUS_CONFIGURED, &accel_dev->status);

	/* Allocate memory for VF info structs */
	accel_dev->pf.vf_info = kcalloc(totalvfs,
					sizeof(struct adf_accel_vf_info),
					GFP_KERNEL);
	if (!accel_dev->pf.vf_info)
		return -ENOMEM;

	if (adf_dev_init(accel_dev)) {
		dev_err(&GET_DEV(accel_dev), "Failed to init qat_dev%d\n",
			accel_dev->accel_id);
		return -EFAULT;
	}

	if (adf_dev_start(accel_dev)) {
		dev_err(&GET_DEV(accel_dev), "Failed to start qat_dev%d\n",
			accel_dev->accel_id);
		return -EFAULT;
	}

	ret = adf_enable_sriov(accel_dev);
	if (ret)
		return ret;

	return numvfs;
}
EXPORT_SYMBOL_GPL(adf_sriov_configure);
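
/* The PF2VF response workqueue is allocated once at module load time */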
int __init adf_init_pf_wq(void)
{
	/* Workqueue for PF2VF responses */
	pf2vf_resp_wq = alloc_workqueue("qat_pf2vf_resp_wq", WQ_MEM_RECLAIM, 0);

	return !pf2vf_resp_wq ? -ENOMEM : 0;
}

void adf_exit_pf_wq(void)
{
	if (pf2vf_resp_wq) {
		destroy_workqueue(pf2vf_resp_wq);
		pf2vf_resp_wq = NULL;
	}
}