// SPDX-License-Identifier: GPL-2.0-only
/*
 * Host side test driver to test endpoint functionality
 *
 * Copyright (C) 2017 Texas Instruments
 * Author: Kishon Vijay Abraham I <kishon@ti.com>
 */

#include <linux/crc32.h>
#include <linux/cleanup.h>
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>

#include <linux/pci_regs.h>

#include <uapi/linux/pcitest.h>

#define DRV_MODULE_NAME "pci-endpoint-test"

#define PCI_ENDPOINT_TEST_MAGIC 0x0

#define PCI_ENDPOINT_TEST_COMMAND 0x4
#define COMMAND_RAISE_INTX_IRQ BIT(0)
#define COMMAND_RAISE_MSI_IRQ BIT(1)
#define COMMAND_RAISE_MSIX_IRQ BIT(2)
#define COMMAND_READ BIT(3)
#define COMMAND_WRITE BIT(4)
#define COMMAND_COPY BIT(5)
#define COMMAND_ENABLE_DOORBELL BIT(6)
#define COMMAND_DISABLE_DOORBELL BIT(7)

#define PCI_ENDPOINT_TEST_STATUS 0x8
#define STATUS_READ_SUCCESS BIT(0)
#define STATUS_READ_FAIL BIT(1)
#define STATUS_WRITE_SUCCESS BIT(2)
#define STATUS_WRITE_FAIL BIT(3)
#define STATUS_COPY_SUCCESS BIT(4)
#define STATUS_COPY_FAIL BIT(5)
#define STATUS_IRQ_RAISED BIT(6)
#define STATUS_SRC_ADDR_INVALID BIT(7)
#define STATUS_DST_ADDR_INVALID BIT(8)
#define STATUS_DOORBELL_SUCCESS BIT(9)
#define STATUS_DOORBELL_ENABLE_SUCCESS BIT(10)
#define STATUS_DOORBELL_ENABLE_FAIL BIT(11)
#define STATUS_DOORBELL_DISABLE_SUCCESS BIT(12)
#define STATUS_DOORBELL_DISABLE_FAIL BIT(13)

#define PCI_ENDPOINT_TEST_LOWER_SRC_ADDR 0x0c
#define PCI_ENDPOINT_TEST_UPPER_SRC_ADDR 0x10

#define PCI_ENDPOINT_TEST_LOWER_DST_ADDR 0x14
#define PCI_ENDPOINT_TEST_UPPER_DST_ADDR 0x18

#define PCI_ENDPOINT_TEST_SIZE 0x1c
#define PCI_ENDPOINT_TEST_CHECKSUM 0x20

#define PCI_ENDPOINT_TEST_IRQ_TYPE 0x24
#define PCI_ENDPOINT_TEST_IRQ_NUMBER 0x28

#define PCI_ENDPOINT_TEST_FLAGS 0x2c
#define FLAG_USE_DMA BIT(0)

#define PCI_ENDPOINT_TEST_CAPS 0x30
#define CAP_UNALIGNED_ACCESS BIT(0)
#define CAP_MSI BIT(1)
#define CAP_MSIX BIT(2)
#define CAP_INTX BIT(3)

#define PCI_ENDPOINT_TEST_DB_BAR 0x34
#define PCI_ENDPOINT_TEST_DB_OFFSET 0x38
#define PCI_ENDPOINT_TEST_DB_DATA 0x3c

#define PCI_DEVICE_ID_TI_AM654 0xb00c
#define PCI_DEVICE_ID_TI_J7200 0xb00f
#define PCI_DEVICE_ID_TI_AM64 0xb010
#define PCI_DEVICE_ID_TI_J721S2 0xb013
#define PCI_DEVICE_ID_LS1088A 0x80c0
#define PCI_DEVICE_ID_IMX8 0x0808

#define is_am654_pci_dev(pdev) \
		((pdev)->device == PCI_DEVICE_ID_TI_AM654)

#define PCI_DEVICE_ID_RENESAS_R8A774A1 0x0028
#define PCI_DEVICE_ID_RENESAS_R8A774B1 0x002b
#define PCI_DEVICE_ID_RENESAS_R8A774C0 0x002d
#define PCI_DEVICE_ID_RENESAS_R8A774E1 0x0025
#define PCI_DEVICE_ID_RENESAS_R8A779F0 0x0031

#define PCI_DEVICE_ID_ROCKCHIP_RK3588 0x3588

static DEFINE_IDA(pci_endpoint_test_ida);

#define to_endpoint_test(priv) container_of((priv), struct pci_endpoint_test, \
					    miscdev)

enum pci_barno {
	BAR_0,
	BAR_1,
	BAR_2,
	BAR_3,
	BAR_4,
	BAR_5,
	NO_BAR = -1,
};

struct pci_endpoint_test {
	struct pci_dev *pdev;
	void __iomem *base;
	void __iomem *bar[PCI_STD_NUM_BARS];
	struct completion irq_raised;
	int last_irq;
	int num_irqs;
	int irq_type;
	/* mutex to protect the ioctls */
	struct mutex mutex;
	struct miscdevice miscdev;
	enum pci_barno test_reg_bar;
	size_t alignment;
	u32 ep_caps;
	const char *name;
};

struct pci_endpoint_test_data {
	enum pci_barno test_reg_bar;
	size_t alignment;
};

static inline u32 pci_endpoint_test_readl(struct pci_endpoint_test *test,
					  u32 offset)
{
	return readl(test->base + offset);
}

static inline void pci_endpoint_test_writel(struct pci_endpoint_test *test,
					    u32 offset, u32 value)
{
	writel(value, test->base + offset);
}

static irqreturn_t pci_endpoint_test_irqhandler(int irq, void *dev_id)
{
	struct pci_endpoint_test *test = dev_id;
	u32 reg;

	reg = pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_STATUS);
	if (reg & STATUS_IRQ_RAISED) {
		test->last_irq = irq;
		complete(&test->irq_raised);
	}

	return IRQ_HANDLED;
}

static void pci_endpoint_test_free_irq_vectors(struct pci_endpoint_test *test)
{
	struct pci_dev *pdev = test->pdev;

	pci_free_irq_vectors(pdev);
	test->irq_type = PCITEST_IRQ_TYPE_UNDEFINED;
}

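/*
 * Allocate between 1 and the maximum number of vectors for the requested IRQ
 * type (INTX, MSI or MSI-X) and record the resulting type and vector count in
 * the test context. Returns 0 on success or a negative errno.
 */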
static int pci_endpoint_test_alloc_irq_vectors(struct pci_endpoint_test *test,
					       int type)
{
	int irq;
	struct pci_dev *pdev = test->pdev;
	struct device *dev = &pdev->dev;

	switch (type) {
	case PCITEST_IRQ_TYPE_INTX:
		irq = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_INTX);
		if (irq < 0) {
			dev_err(dev, "Failed to get Legacy interrupt\n");
			return irq;
		}

		break;
	case PCITEST_IRQ_TYPE_MSI:
		irq = pci_alloc_irq_vectors(pdev, 1, 32, PCI_IRQ_MSI);
		if (irq < 0) {
			dev_err(dev, "Failed to get MSI interrupts\n");
			return irq;
		}

		break;
	case PCITEST_IRQ_TYPE_MSIX:
		irq = pci_alloc_irq_vectors(pdev, 1, 2048, PCI_IRQ_MSIX);
		if (irq < 0) {
			dev_err(dev, "Failed to get MSI-X interrupts\n");
			return irq;
		}

		break;
	default:
		dev_err(dev, "Invalid IRQ type selected\n");
		return -EINVAL;
	}

	test->irq_type = type;
	test->num_irqs = irq;

	return 0;
}

static void pci_endpoint_test_release_irq(struct pci_endpoint_test *test)
{
	int i;
	struct pci_dev *pdev = test->pdev;

	for (i = 0; i < test->num_irqs; i++)
		free_irq(pci_irq_vector(pdev, i), test);

	test->num_irqs = 0;
}

static int pci_endpoint_test_request_irq(struct pci_endpoint_test *test)
{
	int i;
	int ret;
	struct pci_dev *pdev = test->pdev;
	struct device *dev = &pdev->dev;

	for (i = 0; i < test->num_irqs; i++) {
		ret = request_irq(pci_irq_vector(pdev, i),
				  pci_endpoint_test_irqhandler, IRQF_SHARED,
				  test->name, test);
		if (ret)
			goto fail;
	}

	return 0;

fail:
	switch (test->irq_type) {
	case PCITEST_IRQ_TYPE_INTX:
		dev_err(dev, "Failed to request IRQ %d for Legacy\n",
			pci_irq_vector(pdev, i));
		break;
	case PCITEST_IRQ_TYPE_MSI:
		dev_err(dev, "Failed to request IRQ %d for MSI %d\n",
			pci_irq_vector(pdev, i),
			i + 1);
		break;
	case PCITEST_IRQ_TYPE_MSIX:
		dev_err(dev, "Failed to request IRQ %d for MSI-X %d\n",
			pci_irq_vector(pdev, i),
			i + 1);
		break;
	}

	test->num_irqs = i;
	pci_endpoint_test_release_irq(test);

	return ret;
}

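/* Per-BAR fill patterns used by the PCITEST_BAR and PCITEST_BARS tests. */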
static const u32 bar_test_pattern[] = {
	0xA0A0A0A0,
	0xA1A1A1A1,
	0xA2A2A2A2,
	0xA3A3A3A3,
	0xA4A4A4A4,
	0xA5A5A5A5,
};

static int pci_endpoint_test_bar_memcmp(struct pci_endpoint_test *test,
					enum pci_barno barno,
					resource_size_t offset, void *write_buf,
					void *read_buf, int size)
{
	memset(write_buf, bar_test_pattern[barno], size);
	memcpy_toio(test->bar[barno] + offset, write_buf, size);

	memcpy_fromio(read_buf, test->bar[barno] + offset, size);

	return memcmp(write_buf, read_buf, size);
}

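/*
 * PCITEST_BAR: fill the whole BAR with its test pattern in chunks of at most
 * 1 MB and read every chunk back, returning -EIO on the first mismatch. Only
 * the first dword is exercised on the test register BAR.
 */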
static int pci_endpoint_test_bar(struct pci_endpoint_test *test,
				 enum pci_barno barno)
{
	resource_size_t bar_size, offset = 0;
	void *write_buf __free(kfree) = NULL;
	void *read_buf __free(kfree) = NULL;
	struct pci_dev *pdev = test->pdev;
	int buf_size;

	bar_size = pci_resource_len(pdev, barno);
	if (!bar_size)
		return -ENODATA;

	if (!test->bar[barno])
		return -ENOMEM;

	if (barno == test->test_reg_bar)
		bar_size = 0x4;

	/*
	 * Allocate a buffer of max size 1MB, and reuse that buffer while
	 * iterating over the whole BAR size (which might be much larger).
	 */
	buf_size = min(SZ_1M, bar_size);

	write_buf = kmalloc(buf_size, GFP_KERNEL);
	if (!write_buf)
		return -ENOMEM;

	read_buf = kmalloc(buf_size, GFP_KERNEL);
	if (!read_buf)
		return -ENOMEM;

	while (offset < bar_size) {
		if (pci_endpoint_test_bar_memcmp(test, barno, offset, write_buf,
						 read_buf, buf_size))
			return -EIO;
		offset += buf_size;
	}

	return 0;
}

static u32 bar_test_pattern_with_offset(enum pci_barno barno, int offset)
{
	u32 val;

	/* Keep the BAR pattern in the top byte. */
	val = bar_test_pattern[barno] & 0xff000000;
	/* Store the (partial) offset in the remaining bytes. */
	val |= offset & 0x00ffffff;

	return val;
}

static void pci_endpoint_test_bars_write_bar(struct pci_endpoint_test *test,
					     enum pci_barno barno)
{
	struct pci_dev *pdev = test->pdev;
	int j, size;

	size = pci_resource_len(pdev, barno);

	if (barno == test->test_reg_bar)
		size = 0x4;

	for (j = 0; j < size; j += 4)
		writel_relaxed(bar_test_pattern_with_offset(barno, j),
			       test->bar[barno] + j);
}

static int pci_endpoint_test_bars_read_bar(struct pci_endpoint_test *test,
					   enum pci_barno barno)
{
	struct pci_dev *pdev = test->pdev;
	struct device *dev = &pdev->dev;
	int j, size;
	u32 val;

	size = pci_resource_len(pdev, barno);

	if (barno == test->test_reg_bar)
		size = 0x4;

	for (j = 0; j < size; j += 4) {
		u32 expected = bar_test_pattern_with_offset(barno, j);

		val = readl_relaxed(test->bar[barno] + j);
		if (val != expected) {
			dev_err(dev,
				"BAR%d incorrect data at offset: %#x, got: %#x expected: %#x\n",
				barno, j, val, expected);
			return -EIO;
		}
	}

	return 0;
}

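/* PCITEST_BARS: write, then read back, the test pattern in every mapped BAR. */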
static int pci_endpoint_test_bars(struct pci_endpoint_test *test)
{
	enum pci_barno bar;
	int ret;

	/* Write all BARs in order (without reading). */
	for (bar = 0; bar < PCI_STD_NUM_BARS; bar++)
		if (test->bar[bar])
			pci_endpoint_test_bars_write_bar(test, bar);

	/*
	 * Read all BARs in order (without writing).
	 * If there is an address translation issue on the EP, writing one BAR
	 * might have overwritten another BAR. Ensure that this is not the case.
	 * (Reading back the BAR directly after writing can not detect this.)
	 */
	for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
		if (test->bar[bar]) {
			ret = pci_endpoint_test_bars_read_bar(test, bar);
			if (ret)
				return ret;
		}
	}

	return 0;
}

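/* Ask the endpoint to raise a legacy INTx interrupt and wait up to 1 s for it. */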
static int pci_endpoint_test_intx_irq(struct pci_endpoint_test *test)
{
	u32 val;

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE,
				 PCITEST_IRQ_TYPE_INTX);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 0);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
				 COMMAND_RAISE_INTX_IRQ);
	val = wait_for_completion_timeout(&test->irq_raised,
					  msecs_to_jiffies(1000));
	if (!val)
		return -ETIMEDOUT;

	return 0;
}

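/*
 * Ask the endpoint to raise MSI or MSI-X vector 'msi_num' and check that the
 * interrupt which completed irq_raised is the one mapped to that vector.
 */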
static int pci_endpoint_test_msi_irq(struct pci_endpoint_test *test,
				     u16 msi_num, bool msix)
{
	struct pci_dev *pdev = test->pdev;
	u32 val;
	int ret;

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE,
				 msix ? PCITEST_IRQ_TYPE_MSIX :
				 PCITEST_IRQ_TYPE_MSI);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, msi_num);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
				 msix ? COMMAND_RAISE_MSIX_IRQ :
				 COMMAND_RAISE_MSI_IRQ);
	val = wait_for_completion_timeout(&test->irq_raised,
					  msecs_to_jiffies(1000));
	if (!val)
		return -ETIMEDOUT;

	ret = pci_irq_vector(pdev, msi_num - 1);
	if (ret < 0)
		return ret;

	if (ret != test->last_irq)
		return -EIO;

	return 0;
}

static int pci_endpoint_test_validate_xfer_params(struct device *dev,
		struct pci_endpoint_test_xfer_param *param, size_t alignment)
{
	if (!param->size) {
		dev_dbg(dev, "Data size is zero\n");
		return -EINVAL;
	}

	if (param->size > SIZE_MAX - alignment) {
		dev_dbg(dev, "Maximum transfer data size exceeded\n");
		return -EINVAL;
	}

	return 0;
}

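/*
 * PCITEST_COPY: hand the endpoint a randomly filled source buffer and an
 * empty destination buffer, ask it to copy 'size' bytes, and compare the
 * CRC32 of both buffers once the completion interrupt arrives.
 */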
static int pci_endpoint_test_copy(struct pci_endpoint_test *test,
				  unsigned long arg)
{
	struct pci_endpoint_test_xfer_param param;
	void *src_addr;
	void *dst_addr;
	u32 flags = 0;
	bool use_dma;
	size_t size;
	dma_addr_t src_phys_addr;
	dma_addr_t dst_phys_addr;
	struct pci_dev *pdev = test->pdev;
	struct device *dev = &pdev->dev;
	void *orig_src_addr;
	dma_addr_t orig_src_phys_addr;
	void *orig_dst_addr;
	dma_addr_t orig_dst_phys_addr;
	size_t offset;
	size_t alignment = test->alignment;
	int irq_type = test->irq_type;
	u32 src_crc32;
	u32 dst_crc32;
	int ret;

	ret = copy_from_user(&param, (void __user *)arg, sizeof(param));
	if (ret) {
		dev_err(dev, "Failed to get transfer param\n");
		return -EFAULT;
	}

	ret = pci_endpoint_test_validate_xfer_params(dev, &param, alignment);
	if (ret)
		return ret;

	size = param.size;

	use_dma = !!(param.flags & PCITEST_FLAGS_USE_DMA);
	if (use_dma)
		flags |= FLAG_USE_DMA;

	if (irq_type < PCITEST_IRQ_TYPE_INTX ||
	    irq_type > PCITEST_IRQ_TYPE_MSIX) {
		dev_err(dev, "Invalid IRQ type option\n");
		return -EINVAL;
	}

	orig_src_addr = kzalloc(size + alignment, GFP_KERNEL);
	if (!orig_src_addr) {
		dev_err(dev, "Failed to allocate source buffer\n");
		return -ENOMEM;
	}

	get_random_bytes(orig_src_addr, size + alignment);
	orig_src_phys_addr = dma_map_single(dev, orig_src_addr,
					    size + alignment, DMA_TO_DEVICE);
	ret = dma_mapping_error(dev, orig_src_phys_addr);
	if (ret) {
		dev_err(dev, "failed to map source buffer address\n");
		goto err_src_phys_addr;
	}

	if (alignment && !IS_ALIGNED(orig_src_phys_addr, alignment)) {
		src_phys_addr = PTR_ALIGN(orig_src_phys_addr, alignment);
		offset = src_phys_addr - orig_src_phys_addr;
		src_addr = orig_src_addr + offset;
	} else {
		src_phys_addr = orig_src_phys_addr;
		src_addr = orig_src_addr;
	}

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_LOWER_SRC_ADDR,
				 lower_32_bits(src_phys_addr));

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_UPPER_SRC_ADDR,
				 upper_32_bits(src_phys_addr));

	src_crc32 = crc32_le(~0, src_addr, size);

	orig_dst_addr = kzalloc(size + alignment, GFP_KERNEL);
	if (!orig_dst_addr) {
		dev_err(dev, "Failed to allocate destination address\n");
		ret = -ENOMEM;
		goto err_dst_addr;
	}

	orig_dst_phys_addr = dma_map_single(dev, orig_dst_addr,
					    size + alignment, DMA_FROM_DEVICE);
	ret = dma_mapping_error(dev, orig_dst_phys_addr);
	if (ret) {
		dev_err(dev, "failed to map destination buffer address\n");
		goto err_dst_phys_addr;
	}

	if (alignment && !IS_ALIGNED(orig_dst_phys_addr, alignment)) {
		dst_phys_addr = PTR_ALIGN(orig_dst_phys_addr, alignment);
		offset = dst_phys_addr - orig_dst_phys_addr;
		dst_addr = orig_dst_addr + offset;
	} else {
		dst_phys_addr = orig_dst_phys_addr;
		dst_addr = orig_dst_addr;
	}

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_LOWER_DST_ADDR,
				 lower_32_bits(dst_phys_addr));
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_UPPER_DST_ADDR,
				 upper_32_bits(dst_phys_addr));

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_SIZE,
				 size);

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_FLAGS, flags);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE, irq_type);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 1);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
				 COMMAND_COPY);

	wait_for_completion(&test->irq_raised);

	dma_unmap_single(dev, orig_dst_phys_addr, size + alignment,
			 DMA_FROM_DEVICE);

	dst_crc32 = crc32_le(~0, dst_addr, size);
	if (dst_crc32 != src_crc32)
		ret = -EIO;

err_dst_phys_addr:
	kfree(orig_dst_addr);

err_dst_addr:
	dma_unmap_single(dev, orig_src_phys_addr, size + alignment,
			 DMA_TO_DEVICE);

err_src_phys_addr:
	kfree(orig_src_addr);
	return ret;
}

static int pci_endpoint_test_write(struct pci_endpoint_test *test,
				   unsigned long arg)
{
	struct pci_endpoint_test_xfer_param param;
	u32 flags = 0;
	bool use_dma;
	u32 reg;
	void *addr;
	dma_addr_t phys_addr;
	struct pci_dev *pdev = test->pdev;
	struct device *dev = &pdev->dev;
	void *orig_addr;
	dma_addr_t orig_phys_addr;
	size_t offset;
	size_t alignment = test->alignment;
	int irq_type = test->irq_type;
	size_t size;
	u32 crc32;
	int ret;

	ret = copy_from_user(&param, (void __user *)arg, sizeof(param));
	if (ret) {
		dev_err(dev, "Failed to get transfer param\n");
		return -EFAULT;
	}

	ret = pci_endpoint_test_validate_xfer_params(dev, &param, alignment);
	if (ret)
		return ret;

	size = param.size;

	use_dma = !!(param.flags & PCITEST_FLAGS_USE_DMA);
	if (use_dma)
		flags |= FLAG_USE_DMA;

	if (irq_type < PCITEST_IRQ_TYPE_INTX ||
	    irq_type > PCITEST_IRQ_TYPE_MSIX) {
		dev_err(dev, "Invalid IRQ type option\n");
		return -EINVAL;
	}

	orig_addr = kzalloc(size + alignment, GFP_KERNEL);
	if (!orig_addr) {
		dev_err(dev, "Failed to allocate address\n");
		return -ENOMEM;
	}

	get_random_bytes(orig_addr, size + alignment);

	orig_phys_addr = dma_map_single(dev, orig_addr, size + alignment,
					DMA_TO_DEVICE);
	ret = dma_mapping_error(dev, orig_phys_addr);
	if (ret) {
		dev_err(dev, "failed to map source buffer address\n");
		goto err_phys_addr;
	}

	if (alignment && !IS_ALIGNED(orig_phys_addr, alignment)) {
		phys_addr = PTR_ALIGN(orig_phys_addr, alignment);
		offset = phys_addr - orig_phys_addr;
		addr = orig_addr + offset;
	} else {
		phys_addr = orig_phys_addr;
		addr = orig_addr;
	}

	crc32 = crc32_le(~0, addr, size);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_CHECKSUM,
				 crc32);

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_LOWER_SRC_ADDR,
				 lower_32_bits(phys_addr));
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_UPPER_SRC_ADDR,
				 upper_32_bits(phys_addr));

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_SIZE, size);

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_FLAGS, flags);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE, irq_type);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 1);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
				 COMMAND_READ);

	wait_for_completion(&test->irq_raised);

	reg = pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_STATUS);
	if (!(reg & STATUS_READ_SUCCESS))
		ret = -EIO;

	dma_unmap_single(dev, orig_phys_addr, size + alignment,
			 DMA_TO_DEVICE);

err_phys_addr:
	kfree(orig_addr);
	return ret;
}

static int pci_endpoint_test_read(struct pci_endpoint_test *test,
				  unsigned long arg)
{
	struct pci_endpoint_test_xfer_param param;
	u32 flags = 0;
	bool use_dma;
	size_t size;
	void *addr;
	dma_addr_t phys_addr;
	struct pci_dev *pdev = test->pdev;
	struct device *dev = &pdev->dev;
	void *orig_addr;
	dma_addr_t orig_phys_addr;
	size_t offset;
	size_t alignment = test->alignment;
	int irq_type = test->irq_type;
	u32 crc32;
	int ret;

	ret = copy_from_user(&param, (void __user *)arg, sizeof(param));
	if (ret) {
		dev_err(dev, "Failed to get transfer param\n");
		return -EFAULT;
	}

	ret = pci_endpoint_test_validate_xfer_params(dev, &param, alignment);
	if (ret)
		return ret;

	size = param.size;

	use_dma = !!(param.flags & PCITEST_FLAGS_USE_DMA);
	if (use_dma)
		flags |= FLAG_USE_DMA;

	if (irq_type < PCITEST_IRQ_TYPE_INTX ||
	    irq_type > PCITEST_IRQ_TYPE_MSIX) {
		dev_err(dev, "Invalid IRQ type option\n");
		return -EINVAL;
	}

	orig_addr = kzalloc(size + alignment, GFP_KERNEL);
	if (!orig_addr) {
		dev_err(dev, "Failed to allocate destination address\n");
		return -ENOMEM;
	}

	orig_phys_addr = dma_map_single(dev, orig_addr, size + alignment,
					DMA_FROM_DEVICE);
	ret = dma_mapping_error(dev, orig_phys_addr);
	if (ret) {
		dev_err(dev, "failed to map source buffer address\n");
		goto err_phys_addr;
	}

	if (alignment && !IS_ALIGNED(orig_phys_addr, alignment)) {
		phys_addr = PTR_ALIGN(orig_phys_addr, alignment);
		offset = phys_addr - orig_phys_addr;
		addr = orig_addr + offset;
	} else {
		phys_addr = orig_phys_addr;
		addr = orig_addr;
	}

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_LOWER_DST_ADDR,
				 lower_32_bits(phys_addr));
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_UPPER_DST_ADDR,
				 upper_32_bits(phys_addr));

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_SIZE, size);

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_FLAGS, flags);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE, irq_type);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 1);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
				 COMMAND_WRITE);

	wait_for_completion(&test->irq_raised);

	dma_unmap_single(dev, orig_phys_addr, size + alignment,
			 DMA_FROM_DEVICE);

	crc32 = crc32_le(~0, addr, size);
	if (crc32 != pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_CHECKSUM))
		ret = -EIO;

err_phys_addr:
	kfree(orig_addr);
	return ret;
}

static int pci_endpoint_test_clear_irq(struct pci_endpoint_test *test)
{
	pci_endpoint_test_release_irq(test);
	pci_endpoint_test_free_irq_vectors(test);

	return 0;
}

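/*
 * PCITEST_SET_IRQTYPE: switch the driver to the requested IRQ type. For
 * PCITEST_IRQ_TYPE_AUTO the type is picked from the endpoint's CAPS register,
 * falling back to MSI when no capabilities are advertised.
 */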
static int pci_endpoint_test_set_irq(struct pci_endpoint_test *test,
				     int req_irq_type)
{
	struct pci_dev *pdev = test->pdev;
	struct device *dev = &pdev->dev;
	int ret;

	if (req_irq_type < PCITEST_IRQ_TYPE_INTX ||
	    req_irq_type > PCITEST_IRQ_TYPE_AUTO) {
		dev_err(dev, "Invalid IRQ type option\n");
		return -EINVAL;
	}

	if (req_irq_type == PCITEST_IRQ_TYPE_AUTO) {
		if (test->ep_caps & CAP_MSI)
			req_irq_type = PCITEST_IRQ_TYPE_MSI;
		else if (test->ep_caps & CAP_MSIX)
			req_irq_type = PCITEST_IRQ_TYPE_MSIX;
		else if (test->ep_caps & CAP_INTX)
			req_irq_type = PCITEST_IRQ_TYPE_INTX;
		else
			/* fallback to MSI if no caps defined */
			req_irq_type = PCITEST_IRQ_TYPE_MSI;
	}

	if (test->irq_type == req_irq_type)
		return 0;

	pci_endpoint_test_release_irq(test);
	pci_endpoint_test_free_irq_vectors(test);

	ret = pci_endpoint_test_alloc_irq_vectors(test, req_irq_type);
	if (ret)
		return ret;

	ret = pci_endpoint_test_request_irq(test);
	if (ret) {
		pci_endpoint_test_free_irq_vectors(test);
		return ret;
	}

	return 0;
}

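/*
 * PCITEST_DOORBELL: ask the endpoint to enable its doorbell, ring it by
 * writing the advertised data to the advertised BAR and offset, and then
 * disable it again, checking the status bits after each step.
 */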
static int pci_endpoint_test_doorbell(struct pci_endpoint_test *test)
{
	struct pci_dev *pdev = test->pdev;
	struct device *dev = &pdev->dev;
	int irq_type = test->irq_type;
	enum pci_barno bar;
	u32 data, status;
	u32 addr;
	int left;

	if (irq_type < PCITEST_IRQ_TYPE_INTX ||
	    irq_type > PCITEST_IRQ_TYPE_MSIX) {
		dev_err(dev, "Invalid IRQ type\n");
		return -EINVAL;
	}

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE, irq_type);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 1);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
				 COMMAND_ENABLE_DOORBELL);

	left = wait_for_completion_timeout(&test->irq_raised, msecs_to_jiffies(1000));

	status = pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_STATUS);
	if (!left || (status & STATUS_DOORBELL_ENABLE_FAIL)) {
		dev_err(dev, "Failed to enable doorbell\n");
		return -EINVAL;
	}

	data = pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_DB_DATA);
	addr = pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_DB_OFFSET);
	bar = pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_DB_BAR);

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE, irq_type);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 1);

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_STATUS, 0);

	bar = pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_DB_BAR);

	writel(data, test->bar[bar] + addr);

	left = wait_for_completion_timeout(&test->irq_raised, msecs_to_jiffies(1000));

	status = pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_STATUS);

	if (!left || !(status & STATUS_DOORBELL_SUCCESS))
		dev_err(dev, "Failed to trigger doorbell in endpoint\n");

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
				 COMMAND_DISABLE_DOORBELL);

	wait_for_completion_timeout(&test->irq_raised, msecs_to_jiffies(1000));

	status |= pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_STATUS);

	if (status & STATUS_DOORBELL_DISABLE_FAIL) {
		dev_err(dev, "Failed to disable doorbell\n");
		return -EINVAL;
	}

	if (!(status & STATUS_DOORBELL_SUCCESS))
		return -EINVAL;

	return 0;
}

static long pci_endpoint_test_ioctl(struct file *file, unsigned int cmd,
				    unsigned long arg)
{
	int ret = -EINVAL;
	enum pci_barno bar;
	struct pci_endpoint_test *test = to_endpoint_test(file->private_data);
	struct pci_dev *pdev = test->pdev;

	mutex_lock(&test->mutex);

	reinit_completion(&test->irq_raised);
	test->last_irq = -ENODATA;

	switch (cmd) {
	case PCITEST_BAR:
		bar = arg;
		if (bar > BAR_5)
			goto ret;
		if (is_am654_pci_dev(pdev) && bar == BAR_0)
			goto ret;
		ret = pci_endpoint_test_bar(test, bar);
		break;
	case PCITEST_BARS:
		ret = pci_endpoint_test_bars(test);
		break;
	case PCITEST_INTX_IRQ:
		ret = pci_endpoint_test_intx_irq(test);
		break;
	case PCITEST_MSI:
	case PCITEST_MSIX:
		ret = pci_endpoint_test_msi_irq(test, arg, cmd == PCITEST_MSIX);
		break;
	case PCITEST_WRITE:
		ret = pci_endpoint_test_write(test, arg);
		break;
	case PCITEST_READ:
		ret = pci_endpoint_test_read(test, arg);
		break;
	case PCITEST_COPY:
		ret = pci_endpoint_test_copy(test, arg);
		break;
	case PCITEST_SET_IRQTYPE:
		ret = pci_endpoint_test_set_irq(test, arg);
		break;
	case PCITEST_GET_IRQTYPE:
		ret = test->irq_type;
		break;
	case PCITEST_CLEAR_IRQ:
		ret = pci_endpoint_test_clear_irq(test);
		break;
	case PCITEST_DOORBELL:
		ret = pci_endpoint_test_doorbell(test);
		break;
	}

ret:
	mutex_unlock(&test->mutex);
	return ret;
}

static const struct file_operations pci_endpoint_test_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = pci_endpoint_test_ioctl,
};

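/*
 * Read the endpoint's CAPS register. An endpoint that advertises
 * CAP_UNALIGNED_ACCESS removes the host-side buffer alignment requirement.
 */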
static void pci_endpoint_test_get_capabilities(struct pci_endpoint_test *test)
{
	struct pci_dev *pdev = test->pdev;
	struct device *dev = &pdev->dev;

	test->ep_caps = pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_CAPS);
	dev_dbg(dev, "PCI_ENDPOINT_TEST_CAPS: %#x\n", test->ep_caps);

	/* CAP_UNALIGNED_ACCESS is set if the EP can do unaligned access */
	if (test->ep_caps & CAP_UNALIGNED_ACCESS)
		test->alignment = 0;
}

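/*
 * Probe: map all memory BARs, locate the test register BAR, allocate a unique
 * pci-endpoint-test.<id> name and register the misc device exposing the ioctl
 * interface. IRQ vectors are only allocated later, via
 * ioctl(PCITEST_SET_IRQTYPE).
 */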
static int pci_endpoint_test_probe(struct pci_dev *pdev,
|
|
|
|
const struct pci_device_id *ent)
|
|
|
|
{
|
2025-01-16 22:46:48 +05:30
|
|
|
int ret;
|
2017-03-27 15:15:14 +05:30
|
|
|
int id;
|
2025-01-23 11:31:28 +01:00
|
|
|
char name[29];
|
2017-03-27 15:15:14 +05:30
|
|
|
enum pci_barno bar;
|
|
|
|
void __iomem *base;
|
|
|
|
struct device *dev = &pdev->dev;
|
|
|
|
struct pci_endpoint_test *test;
|
2017-08-18 20:28:05 +05:30
|
|
|
struct pci_endpoint_test_data *data;
|
|
|
|
enum pci_barno test_reg_bar = BAR_0;
|
2017-03-27 15:15:14 +05:30
|
|
|
struct miscdevice *misc_device;
|
|
|
|
|
|
|
|
if (pci_is_bridge(pdev))
|
|
|
|
return -ENODEV;
|
|
|
|
|
|
|
|
test = devm_kzalloc(dev, sizeof(*test), GFP_KERNEL);
|
|
|
|
if (!test)
|
|
|
|
return -ENOMEM;
|
|
|
|
|
2017-08-18 20:28:05 +05:30
|
|
|
test->test_reg_bar = 0;
|
2017-08-18 20:28:06 +05:30
|
|
|
test->alignment = 0;
|
2017-03-27 15:15:14 +05:30
|
|
|
test->pdev = pdev;
|
2025-03-10 12:10:19 +01:00
|
|
|
test->irq_type = PCITEST_IRQ_TYPE_UNDEFINED;
|
2017-08-18 20:28:05 +05:30
|
|
|
|
|
|
|
data = (struct pci_endpoint_test_data *)ent->driver_data;
|
2017-08-18 20:28:06 +05:30
|
|
|
if (data) {
|
2017-08-18 20:28:05 +05:30
|
|
|
test_reg_bar = data->test_reg_bar;
|
2019-03-25 15:09:47 +05:30
|
|
|
test->test_reg_bar = test_reg_bar;
|
2017-08-18 20:28:06 +05:30
|
|
|
test->alignment = data->alignment;
|
|
|
|
}
|
2017-08-18 20:28:05 +05:30
|
|
|
|
2017-03-27 15:15:14 +05:30
|
|
|
init_completion(&test->irq_raised);
|
|
|
|
mutex_init(&test->mutex);
|
|
|
|
|
2024-05-02 15:59:03 -04:00
|
|
|
dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48));
|
2020-03-16 16:54:22 +05:30
|
|
|
|
2025-01-16 22:46:48 +05:30
|
|
|
ret = pci_enable_device(pdev);
|
|
|
|
if (ret) {
|
2017-03-27 15:15:14 +05:30
|
|
|
dev_err(dev, "Cannot enable PCI device\n");
|
2025-01-16 22:46:48 +05:30
|
|
|
return ret;
|
2017-03-27 15:15:14 +05:30
|
|
|
}
|
|
|
|
|
2025-01-16 22:46:48 +05:30
|
|
|
ret = pci_request_regions(pdev, DRV_MODULE_NAME);
|
|
|
|
if (ret) {
|
2017-03-27 15:15:14 +05:30
|
|
|
dev_err(dev, "Cannot obtain PCI resources\n");
|
|
|
|
goto err_disable_pdev;
|
|
|
|
}
|
|
|
|
|
|
|
|
pci_set_master(pdev);
|
|
|
|
|
2019-09-28 02:43:08 +03:00
|
|
|
for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
|
2018-03-28 13:50:17 +02:00
|
|
|
if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
|
|
|
|
base = pci_ioremap_bar(pdev, bar);
|
|
|
|
if (!base) {
|
2018-05-14 17:56:23 +01:00
|
|
|
dev_err(dev, "Failed to read BAR%d\n", bar);
|
2018-03-28 13:50:17 +02:00
|
|
|
WARN_ON(bar == test_reg_bar);
|
|
|
|
}
|
|
|
|
test->bar[bar] = base;
|
2017-03-27 15:15:14 +05:30
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-08-18 20:28:05 +05:30
|
|
|
test->base = test->bar[test_reg_bar];
|
2017-03-27 15:15:14 +05:30
|
|
|
if (!test->base) {
|
2025-01-16 22:46:48 +05:30
|
|
|
ret = -ENOMEM;
|
2017-08-18 20:28:05 +05:30
|
|
|
dev_err(dev, "Cannot perform PCI test without BAR%d\n",
|
|
|
|
test_reg_bar);
|
2017-03-27 15:15:14 +05:30
|
|
|
goto err_iounmap;
|
|
|
|
}
|
|
|
|
|
|
|
|
pci_set_drvdata(pdev, test);
|
|
|
|
|
2023-12-19 07:15:37 +01:00
|
|
|
id = ida_alloc(&pci_endpoint_test_ida, GFP_KERNEL);
|
2017-03-27 15:15:14 +05:30
|
|
|
if (id < 0) {
|
2025-01-16 22:46:48 +05:30
|
|
|
ret = id;
|
2018-05-14 17:56:23 +01:00
|
|
|
dev_err(dev, "Unable to get id\n");
|
2017-03-27 15:15:14 +05:30
|
|
|
goto err_iounmap;
|
|
|
|
}
|
|
|
|
|
|
|
|
snprintf(name, sizeof(name), DRV_MODULE_NAME ".%d", id);
|
2020-03-17 15:31:58 +05:30
|
|
|
test->name = kstrdup(name, GFP_KERNEL);
|
|
|
|
if (!test->name) {
|
2025-01-16 22:46:48 +05:30
|
|
|
ret = -ENOMEM;
|
2020-03-17 15:31:58 +05:30
|
|
|
goto err_ida_remove;
|
|
|
|
}
|
|
|
|
|
2024-12-03 07:38:54 +01:00
|
|
|
pci_endpoint_test_get_capabilities(test);
|
|
|
|
|
2017-03-27 15:15:14 +05:30
|
|
|
misc_device = &test->miscdev;
|
|
|
|
misc_device->minor = MISC_DYNAMIC_MINOR;
|
2017-10-11 14:14:37 +05:30
|
|
|
misc_device->name = kstrdup(name, GFP_KERNEL);
|
|
|
|
if (!misc_device->name) {
|
2025-01-16 22:46:48 +05:30
|
|
|
ret = -ENOMEM;
|
		goto err_kfree_test_name;
	}
	misc_device->parent = &pdev->dev;
	misc_device->fops = &pci_endpoint_test_fops;

	ret = misc_register(misc_device);
	if (ret) {
		dev_err(dev, "Failed to register device\n");
		goto err_kfree_name;
	}

	return 0;

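	/* Error unwind: release resources in the reverse order of acquisition. */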
err_kfree_name:
	kfree(misc_device->name);

err_kfree_test_name:
	kfree(test->name);

err_ida_remove:
	ida_free(&pci_endpoint_test_ida, id);

err_iounmap:
	for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
		if (test->bar[bar])
			pci_iounmap(pdev, test->bar[bar]);
	}

	pci_release_regions(pdev);

err_disable_pdev:
	pci_disable_device(pdev);

	return ret;
}

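/*
 * Tear down in the reverse order of pci_endpoint_test_probe().  The instance
 * id is recovered from the misc device name ("pci-endpoint-test.<id>") so it
 * can be returned to the ida.
 */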
static void pci_endpoint_test_remove(struct pci_dev *pdev)
{
	int id;
	enum pci_barno bar;
	struct pci_endpoint_test *test = pci_get_drvdata(pdev);
	struct miscdevice *misc_device = &test->miscdev;

	if (sscanf(misc_device->name, DRV_MODULE_NAME ".%d", &id) != 1)
		return;
	if (id < 0)
		return;

	pci_endpoint_test_release_irq(test);
	pci_endpoint_test_free_irq_vectors(test);

	misc_deregister(&test->miscdev);
	kfree(misc_device->name);
	kfree(test->name);
	ida_free(&pci_endpoint_test_ida, id);
	for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
		if (test->bar[bar])
			pci_iounmap(pdev, test->bar[bar]);
	}

	pci_release_regions(pdev);
	pci_disable_device(pdev);
}

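/*
 * Per-platform data: which BAR holds the endpoint's test registers and the
 * buffer alignment the endpoint controller expects for the read/write/copy
 * tests.
 */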
static const struct pci_endpoint_test_data default_data = {
	.test_reg_bar = BAR_0,
	.alignment = SZ_4K,
};

static const struct pci_endpoint_test_data am654_data = {
	.test_reg_bar = BAR_2,
	.alignment = SZ_64K,
};

static const struct pci_endpoint_test_data j721e_data = {
	.alignment = 256,
};

static const struct pci_endpoint_test_data rk3588_data = {
	.alignment = SZ_64K,
};
/*
 * If the controller's Vendor/Device ID are programmable, you may be able to
 * use one of the existing entries for testing instead of adding a new one.
 */
static const struct pci_device_id pci_endpoint_test_tbl[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_DRA74x),
	  .driver_data = (kernel_ulong_t)&default_data,
	},
	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_DRA72x),
	  .driver_data = (kernel_ulong_t)&default_data,
	},
	{ PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, 0x81c0),
	  .driver_data = (kernel_ulong_t)&default_data,
	},
	{ PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, PCI_DEVICE_ID_IMX8),},
	{ PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, PCI_DEVICE_ID_LS1088A),
	  .driver_data = (kernel_ulong_t)&default_data,
	},
	{ PCI_DEVICE_DATA(SYNOPSYS, EDDA, NULL) },
	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_AM654),
	  .driver_data = (kernel_ulong_t)&am654_data
	},
	{ PCI_DEVICE(PCI_VENDOR_ID_RENESAS, PCI_DEVICE_ID_RENESAS_R8A774A1),},
	{ PCI_DEVICE(PCI_VENDOR_ID_RENESAS, PCI_DEVICE_ID_RENESAS_R8A774B1),},
	{ PCI_DEVICE(PCI_VENDOR_ID_RENESAS, PCI_DEVICE_ID_RENESAS_R8A774C0),},
	{ PCI_DEVICE(PCI_VENDOR_ID_RENESAS, PCI_DEVICE_ID_RENESAS_R8A774E1),},
	{ PCI_DEVICE(PCI_VENDOR_ID_RENESAS, PCI_DEVICE_ID_RENESAS_R8A779F0),
	  .driver_data = (kernel_ulong_t)&default_data,
	},
	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_J721E),
	  .driver_data = (kernel_ulong_t)&j721e_data,
	},
	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_J7200),
	  .driver_data = (kernel_ulong_t)&j721e_data,
	},
	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_AM64),
	  .driver_data = (kernel_ulong_t)&j721e_data,
	},
	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_J721S2),
	  .driver_data = (kernel_ulong_t)&j721e_data,
	},
	{ PCI_DEVICE(PCI_VENDOR_ID_ROCKCHIP, PCI_DEVICE_ID_ROCKCHIP_RK3588),
	  .driver_data = (kernel_ulong_t)&rk3588_data,
	},
	{ }
};
MODULE_DEVICE_TABLE(pci, pci_endpoint_test_tbl);

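/*
 * Standard PCI driver glue.  pci_sriov_configure_simple() lets userspace
 * enable or disable virtual functions through sysfs without any
 * device-specific handling in this driver.
 */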
static struct pci_driver pci_endpoint_test_driver = {
	.name = DRV_MODULE_NAME,
	.id_table = pci_endpoint_test_tbl,
	.probe = pci_endpoint_test_probe,
	.remove = pci_endpoint_test_remove,
	.sriov_configure = pci_sriov_configure_simple,
};
module_pci_driver(pci_endpoint_test_driver);

MODULE_DESCRIPTION("PCI ENDPOINT TEST HOST DRIVER");
MODULE_AUTHOR("Kishon Vijay Abraham I <kishon@ti.com>");
MODULE_LICENSE("GPL v2");