// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2022 Intel Corporation */
#include <linux/module.h>
#include <linux/slab.h>
#include "adf_accel_devices.h"
#include "adf_common_drv.h"
#include "adf_transport.h"
#include "adf_transport_access_macros.h"
#include "adf_cfg.h"
#include "adf_cfg_strings.h"
#include "qat_compression.h"
#include "icp_qat_fw.h"

#define SEC ADF_KERNEL_SEC

static struct service_hndl qat_compression;
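
/*
 * Drop the instance and device references taken by
 * qat_compression_get_instance_node().
 */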
void qat_compression_put_instance(struct qat_compression_instance *inst)
{
	atomic_dec(&inst->refctr);
	adf_dev_put(inst->accel_dev);
}
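
/*
 * Release all compression instances of a device: drop any outstanding
 * references, remove the transport rings and free the instances.
 */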
static int qat_compression_free_instances(struct adf_accel_dev *accel_dev)
{
	struct qat_compression_instance *inst;
	struct list_head *list_ptr, *tmp;
	int i;

	list_for_each_safe(list_ptr, tmp, &accel_dev->compression_list) {
		inst = list_entry(list_ptr,
				  struct qat_compression_instance, list);

		for (i = 0; i < atomic_read(&inst->refctr); i++)
			qat_compression_put_instance(inst);

		if (inst->dc_tx)
			adf_remove_ring(inst->dc_tx);

		if (inst->dc_rx)
			adf_remove_ring(inst->dc_rx);

		list_del(list_ptr);
		kfree(inst);
	}
	return 0;
}
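
/*
 * Return the least referenced compression instance on a started device
 * matching the requested NUMA node (or without node affinity), falling
 * back to any started device that has compression instances. The instance
 * and device reference counts are incremented; callers must balance this
 * with qat_compression_put_instance(). A minimal usage sketch:
 *
 *	inst = qat_compression_get_instance_node(node);
 *	if (!inst)
 *		return -ENODEV;
 *	...
 *	qat_compression_put_instance(inst);
 */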
struct qat_compression_instance *qat_compression_get_instance_node(int node)
{
	struct qat_compression_instance *inst = NULL;
	struct adf_accel_dev *accel_dev = NULL;
	unsigned long best = ~0;
	struct list_head *itr;

	list_for_each(itr, adf_devmgr_get_head()) {
		struct adf_accel_dev *tmp_dev;
		unsigned long ctr;
		int tmp_dev_node;

		tmp_dev = list_entry(itr, struct adf_accel_dev, list);
		tmp_dev_node = dev_to_node(&GET_DEV(tmp_dev));

		if ((node == tmp_dev_node || tmp_dev_node < 0) &&
		    adf_dev_started(tmp_dev) && !list_empty(&tmp_dev->compression_list)) {
			ctr = atomic_read(&tmp_dev->ref_count);
			if (best > ctr) {
				accel_dev = tmp_dev;
				best = ctr;
			}
		}
	}

	if (!accel_dev) {
		pr_debug_ratelimited("QAT: Could not find a device on node %d\n", node);
		/* Get any started device */
		list_for_each(itr, adf_devmgr_get_head()) {
			struct adf_accel_dev *tmp_dev;

			tmp_dev = list_entry(itr, struct adf_accel_dev, list);
			if (adf_dev_started(tmp_dev) &&
			    !list_empty(&tmp_dev->compression_list)) {
				accel_dev = tmp_dev;
				break;
			}
		}
	}

	if (!accel_dev)
		return NULL;

	best = ~0;
	list_for_each(itr, &accel_dev->compression_list) {
		struct qat_compression_instance *tmp_inst;
		unsigned long ctr;

		tmp_inst = list_entry(itr, struct qat_compression_instance, list);
		ctr = atomic_read(&tmp_inst->refctr);
		if (best > ctr) {
			inst = tmp_inst;
			best = ctr;
		}
	}
	if (inst) {
		if (adf_dev_get(accel_dev)) {
			dev_err(&GET_DEV(accel_dev), "Could not increment dev refctr\n");
			return NULL;
		}
		atomic_inc(&inst->refctr);
	}
	return inst;
}
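
/*
 * Read the number of compression instances and their ring parameters
 * from the device configuration and create a request/response ring pair
 * for each instance.
 */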
static int qat_compression_create_instances(struct adf_accel_dev *accel_dev)
{
	struct qat_compression_instance *inst;
	char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
	char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
	unsigned long num_inst, num_msg_dc;
	unsigned long bank;
	int msg_size;
	int ret;
	int i;

	INIT_LIST_HEAD(&accel_dev->compression_list);
	strscpy(key, ADF_NUM_DC, sizeof(key));
	ret = adf_cfg_get_param_value(accel_dev, SEC, key, val);
	if (ret)
		return ret;

	ret = kstrtoul(val, 10, &num_inst);
	if (ret)
		return ret;

	for (i = 0; i < num_inst; i++) {
		inst = kzalloc_node(sizeof(*inst), GFP_KERNEL,
				    dev_to_node(&GET_DEV(accel_dev)));
		if (!inst) {
			ret = -ENOMEM;
			goto err;
		}

		list_add_tail(&inst->list, &accel_dev->compression_list);
		inst->id = i;
		atomic_set(&inst->refctr, 0);
		inst->accel_dev = accel_dev;

		snprintf(key, sizeof(key), ADF_DC "%d" ADF_RING_DC_BANK_NUM, i);
		ret = adf_cfg_get_param_value(accel_dev, SEC, key, val);
		if (ret)
			return ret;

		ret = kstrtoul(val, 10, &bank);
		if (ret)
			return ret;

		snprintf(key, sizeof(key), ADF_DC "%d" ADF_RING_DC_SIZE, i);
		ret = adf_cfg_get_param_value(accel_dev, SEC, key, val);
		if (ret)
			return ret;

		ret = kstrtoul(val, 10, &num_msg_dc);
		if (ret)
			return ret;

		msg_size = ICP_QAT_FW_REQ_DEFAULT_SZ;
		snprintf(key, sizeof(key), ADF_DC "%d" ADF_RING_DC_TX, i);
		ret = adf_create_ring(accel_dev, SEC, bank, num_msg_dc,
				      msg_size, key, NULL, 0, &inst->dc_tx);
		if (ret)
			return ret;

		msg_size = ICP_QAT_FW_RESP_DEFAULT_SZ;
		snprintf(key, sizeof(key), ADF_DC "%d" ADF_RING_DC_RX, i);
		ret = adf_create_ring(accel_dev, SEC, bank, num_msg_dc,
				      msg_size, key, qat_comp_alg_callback, 0,
				      &inst->dc_rx);
		if (ret)
			return ret;

		inst->dc_data = accel_dev->dc_data;
		INIT_LIST_HEAD(&inst->backlog.list);
		spin_lock_init(&inst->backlog.lock);
	}
	return 0;
err:
	qat_compression_free_instances(accel_dev);
	return ret;
}
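
/*
 * Allocate the per-device compression data, including the DMA-mapped
 * overflow (skid) buffer used for compression operations.
 */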
static int qat_compression_alloc_dc_data(struct adf_accel_dev *accel_dev)
{
	struct device *dev = &GET_DEV(accel_dev);
	dma_addr_t obuff_p = DMA_MAPPING_ERROR;
	size_t ovf_buff_sz = QAT_COMP_MAX_SKID;
	struct adf_dc_data *dc_data = NULL;
	u8 *obuff = NULL;

	/*
	 * Use unmanaged allocations here: dc_data is freed and re-allocated
	 * when the device is restarted via adf_dev_down()/adf_dev_up(), and
	 * devres-managed memory would also be released on device detach,
	 * which can lead to a double free when adf_dev_down() runs as part
	 * of a devm_add_action_or_reset() cleanup.
	 */
	dc_data = kzalloc_node(sizeof(*dc_data), GFP_KERNEL, dev_to_node(dev));
	if (!dc_data)
		goto err;

	obuff = kzalloc_node(ovf_buff_sz, GFP_KERNEL, dev_to_node(dev));
	if (!obuff)
		goto err;

	/*
	 * Map bidirectionally: GEN2 devices read the compressed output back
	 * from the destination buffer and decompress it as an integrity
	 * check, so the buffer cannot be mapped write-only.
	 */
	obuff_p = dma_map_single(dev, obuff, ovf_buff_sz, DMA_BIDIRECTIONAL);
	if (unlikely(dma_mapping_error(dev, obuff_p)))
		goto err;

	dc_data->ovf_buff = obuff;
	dc_data->ovf_buff_p = obuff_p;
	dc_data->ovf_buff_sz = ovf_buff_sz;

	accel_dev->dc_data = dc_data;

	return 0;

err:
	accel_dev->dc_data = NULL;
	kfree(obuff);
	kfree(dc_data);
	return -ENOMEM;
}
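
/* Unmap, scrub and free the per-device compression data. */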
static void qat_free_dc_data(struct adf_accel_dev *accel_dev)
{
	struct adf_dc_data *dc_data = accel_dev->dc_data;
	struct device *dev = &GET_DEV(accel_dev);

	if (!dc_data)
		return;

	dma_unmap_single(dev, dc_data->ovf_buff_p, dc_data->ovf_buff_sz,
			 DMA_BIDIRECTIONAL);
	kfree_sensitive(dc_data->ovf_buff);
	kfree(dc_data);
	accel_dev->dc_data = NULL;
}
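
/*
 * Called on ADF_EVENT_INIT: allocate the dc data, then create the
 * compression instances.
 */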
static int qat_compression_init(struct adf_accel_dev *accel_dev)
{
	int ret;

	ret = qat_compression_alloc_dc_data(accel_dev);
	if (ret)
		return ret;

	ret = qat_compression_create_instances(accel_dev);
	if (ret)
		qat_free_dc_data(accel_dev);

	return ret;
}

static int qat_compression_shutdown(struct adf_accel_dev *accel_dev)
{
	qat_free_dc_data(accel_dev);
	return qat_compression_free_instances(accel_dev);
}
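
/*
 * Service event handler registered with the ADF framework; only the
 * init and shutdown events require any work here.
 */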
static int qat_compression_event_handler(struct adf_accel_dev *accel_dev,
					 enum adf_event event)
{
	int ret;

	switch (event) {
	case ADF_EVENT_INIT:
		ret = qat_compression_init(accel_dev);
		break;
	case ADF_EVENT_SHUTDOWN:
		ret = qat_compression_shutdown(accel_dev);
		break;
	case ADF_EVENT_RESTARTING:
	case ADF_EVENT_RESTARTED:
	case ADF_EVENT_START:
	case ADF_EVENT_STOP:
	default:
		ret = 0;
	}
	return ret;
}
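
/* Register the compression service with the ADF framework. */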
int qat_compression_register(void)
{
	memset(&qat_compression, 0, sizeof(qat_compression));
	qat_compression.event_hld = qat_compression_event_handler;
	qat_compression.name = "qat_compression";
	return adf_service_register(&qat_compression);
}

int qat_compression_unregister(void)
{
	return adf_service_unregister(&qat_compression);
}