// SPDX-License-Identifier: GPL-2.0
/*
 * Test driver to test endpoint functionality
 *
 * Copyright (C) 2017 Texas Instruments
 * Author: Kishon Vijay Abraham I <kishon@ti.com>
 */

#include <linux/crc32.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/msi.h>
#include <linux/slab.h>
#include <linux/pci_ids.h>
#include <linux/random.h>

#include <linux/pci-epc.h>
#include <linux/pci-epf.h>
#include <linux/pci-ep-msi.h>
#include <linux/pci_regs.h>

#define IRQ_TYPE_INTX			0
#define IRQ_TYPE_MSI			1
#define IRQ_TYPE_MSIX			2

#define COMMAND_RAISE_INTX_IRQ		BIT(0)
#define COMMAND_RAISE_MSI_IRQ		BIT(1)
#define COMMAND_RAISE_MSIX_IRQ		BIT(2)
#define COMMAND_READ			BIT(3)
#define COMMAND_WRITE			BIT(4)
#define COMMAND_COPY			BIT(5)
#define COMMAND_ENABLE_DOORBELL		BIT(6)
#define COMMAND_DISABLE_DOORBELL	BIT(7)

#define STATUS_READ_SUCCESS		BIT(0)
#define STATUS_READ_FAIL		BIT(1)
#define STATUS_WRITE_SUCCESS		BIT(2)
#define STATUS_WRITE_FAIL		BIT(3)
#define STATUS_COPY_SUCCESS		BIT(4)
#define STATUS_COPY_FAIL		BIT(5)
#define STATUS_IRQ_RAISED		BIT(6)
#define STATUS_SRC_ADDR_INVALID		BIT(7)
#define STATUS_DST_ADDR_INVALID		BIT(8)
#define STATUS_DOORBELL_SUCCESS		BIT(9)
#define STATUS_DOORBELL_ENABLE_SUCCESS	BIT(10)
#define STATUS_DOORBELL_ENABLE_FAIL	BIT(11)
#define STATUS_DOORBELL_DISABLE_SUCCESS	BIT(12)
#define STATUS_DOORBELL_DISABLE_FAIL	BIT(13)

#define FLAG_USE_DMA			BIT(0)

#define TIMER_RESOLUTION		1

#define CAP_UNALIGNED_ACCESS		BIT(0)
#define CAP_MSI				BIT(1)
#define CAP_MSIX			BIT(2)
#define CAP_INTX			BIT(3)

static struct workqueue_struct *kpcitest_workqueue;

struct pci_epf_test {
	void			*reg[PCI_STD_NUM_BARS];
	struct pci_epf		*epf;
	enum pci_barno		test_reg_bar;
	size_t			msix_table_offset;
	struct delayed_work	cmd_handler;
	struct dma_chan		*dma_chan_tx;
	struct dma_chan		*dma_chan_rx;
	struct dma_chan		*transfer_chan;
	dma_cookie_t		transfer_cookie;
	enum dma_status		transfer_status;
	struct completion	transfer_complete;
	bool			dma_supported;
	bool			dma_private;
	const struct pci_epc_features *epc_features;
	struct pci_epf_bar	db_bar;
};

struct pci_epf_test_reg {
	__le32	magic;
	__le32	command;
	__le32	status;
	__le64	src_addr;
	__le64	dst_addr;
	__le32	size;
	__le32	checksum;
	__le32	irq_type;
	__le32	irq_number;
	__le32	flags;
	__le32	caps;
	__le32	doorbell_bar;
	__le32	doorbell_offset;
	__le32	doorbell_data;
} __packed;

static struct pci_epf_header test_header = {
	.vendorid	= PCI_ANY_ID,
	.deviceid	= PCI_ANY_ID,
	.baseclass_code = PCI_CLASS_OTHERS,
	.interrupt_pin	= PCI_INTERRUPT_INTA,
};

static size_t bar_size[] = { 512, 512, 1024, 16384, 131072, 1048576 };

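/*
 * DMA completion callback: record the final state of the transfer and wake
 * the waiter only once the channel reports a terminal state (complete or
 * error), so intermediate states do not end the wait prematurely.
 */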
static void pci_epf_test_dma_callback(void *param)
{
	struct pci_epf_test *epf_test = param;
	struct dma_tx_state state;

	epf_test->transfer_status =
		dmaengine_tx_status(epf_test->transfer_chan,
				    epf_test->transfer_cookie, &state);
	if (epf_test->transfer_status == DMA_COMPLETE ||
	    epf_test->transfer_status == DMA_ERROR)
		complete(&epf_test->transfer_complete);
}

/**
 * pci_epf_test_data_transfer() - Function that uses dmaengine API to transfer
 *				  data between PCIe EP and remote PCIe RC
 * @epf_test: the EPF test device that performs the data transfer operation
 * @dma_dst: The destination address of the data transfer. It can be a physical
 *	     address given by pci_epc_mem_alloc_addr or DMA mapping APIs.
 * @dma_src: The source address of the data transfer. It can be a physical
 *	     address given by pci_epc_mem_alloc_addr or DMA mapping APIs.
 * @len: The size of the data transfer
 * @dma_remote: remote RC physical address
 * @dir: DMA transfer direction
 *
 * Function that uses dmaengine API to transfer data between PCIe EP and remote
 * PCIe RC. The source and destination address can be a physical address given
 * by pci_epc_mem_alloc_addr or the one obtained using DMA mapping APIs.
 *
 * The function returns '0' on success and a negative value on failure.
 */
static int pci_epf_test_data_transfer(struct pci_epf_test *epf_test,
				      dma_addr_t dma_dst, dma_addr_t dma_src,
				      size_t len, dma_addr_t dma_remote,
				      enum dma_transfer_direction dir)
{
	struct dma_chan *chan = (dir == DMA_MEM_TO_DEV) ?
				 epf_test->dma_chan_tx : epf_test->dma_chan_rx;
	dma_addr_t dma_local = (dir == DMA_MEM_TO_DEV) ? dma_src : dma_dst;
	enum dma_ctrl_flags flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
	struct pci_epf *epf = epf_test->epf;
	struct dma_async_tx_descriptor *tx;
	struct dma_slave_config sconf = {};
	struct device *dev = &epf->dev;
	int ret;

	if (IS_ERR_OR_NULL(chan)) {
		dev_err(dev, "Invalid DMA memcpy channel\n");
		return -EINVAL;
	}

	if (epf_test->dma_private) {
		sconf.direction = dir;
		if (dir == DMA_MEM_TO_DEV)
			sconf.dst_addr = dma_remote;
		else
			sconf.src_addr = dma_remote;

		if (dmaengine_slave_config(chan, &sconf)) {
			dev_err(dev, "DMA slave config fail\n");
			return -EIO;
		}
		tx = dmaengine_prep_slave_single(chan, dma_local, len, dir,
						 flags);
	} else {
		tx = dmaengine_prep_dma_memcpy(chan, dma_dst, dma_src, len,
					       flags);
	}

	if (!tx) {
		dev_err(dev, "Failed to prepare DMA memcpy\n");
		return -EIO;
	}

	reinit_completion(&epf_test->transfer_complete);
	epf_test->transfer_chan = chan;
	tx->callback = pci_epf_test_dma_callback;
	tx->callback_param = epf_test;
	epf_test->transfer_cookie = dmaengine_submit(tx);

	ret = dma_submit_error(epf_test->transfer_cookie);
	if (ret) {
		dev_err(dev, "Failed to do DMA tx_submit %d\n", ret);
		goto terminate;
	}

	dma_async_issue_pending(chan);
	ret = wait_for_completion_interruptible(&epf_test->transfer_complete);
	if (ret < 0) {
		dev_err(dev, "DMA wait_for_completion interrupted\n");
		goto terminate;
	}

	if (epf_test->transfer_status == DMA_ERROR) {
		dev_err(dev, "DMA transfer failed\n");
		ret = -EIO;
	}

terminate:
	dmaengine_terminate_sync(chan);

	return ret;
}

struct epf_dma_filter {
	struct device *dev;
	u32 dma_mask;
};

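/*
 * Filter for dma_request_channel(): accept only channels provided by the
 * EPC's parent device that support the requested transfer direction.
 */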
static bool epf_dma_filter_fn(struct dma_chan *chan, void *node)
{
	struct epf_dma_filter *filter = node;
	struct dma_slave_caps caps;

	memset(&caps, 0, sizeof(caps));
	dma_get_slave_caps(chan, &caps);

	return chan->device->dev == filter->dev
		&& (filter->dma_mask & caps.directions);
}

/**
 * pci_epf_test_init_dma_chan() - Function to initialize EPF test DMA channel
 * @epf_test: the EPF test device that performs data transfer operation
 *
 * Function to initialize EPF test DMA channel.
 */
static int pci_epf_test_init_dma_chan(struct pci_epf_test *epf_test)
{
	struct pci_epf *epf = epf_test->epf;
	struct device *dev = &epf->dev;
	struct epf_dma_filter filter;
	struct dma_chan *dma_chan;
	dma_cap_mask_t mask;
	int ret;

	filter.dev = epf->epc->dev.parent;
	filter.dma_mask = BIT(DMA_DEV_TO_MEM);

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	dma_chan = dma_request_channel(mask, epf_dma_filter_fn, &filter);
	if (!dma_chan) {
		dev_info(dev, "Failed to get private DMA rx channel. Falling back to generic one\n");
		goto fail_back_tx;
	}

	epf_test->dma_chan_rx = dma_chan;

	filter.dma_mask = BIT(DMA_MEM_TO_DEV);
	dma_chan = dma_request_channel(mask, epf_dma_filter_fn, &filter);

	if (!dma_chan) {
		dev_info(dev, "Failed to get private DMA tx channel. Falling back to generic one\n");
		goto fail_back_rx;
	}

	epf_test->dma_chan_tx = dma_chan;
	epf_test->dma_private = true;

	init_completion(&epf_test->transfer_complete);

	return 0;

fail_back_rx:
	dma_release_channel(epf_test->dma_chan_rx);
	epf_test->dma_chan_rx = NULL;

fail_back_tx:
	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);

	dma_chan = dma_request_chan_by_mask(&mask);
	if (IS_ERR(dma_chan)) {
		ret = PTR_ERR(dma_chan);
		if (ret != -EPROBE_DEFER)
			dev_err(dev, "Failed to get DMA channel\n");
		return ret;
	}
	init_completion(&epf_test->transfer_complete);

	epf_test->dma_chan_tx = epf_test->dma_chan_rx = dma_chan;

	return 0;
}

/**
 * pci_epf_test_clean_dma_chan() - Function to cleanup EPF test DMA channel
 * @epf_test: the EPF test device that performs data transfer operation
 *
 * Helper to cleanup EPF test DMA channel.
 */
static void pci_epf_test_clean_dma_chan(struct pci_epf_test *epf_test)
{
	if (!epf_test->dma_supported)
		return;

	dma_release_channel(epf_test->dma_chan_tx);
	if (epf_test->dma_chan_tx == epf_test->dma_chan_rx) {
		epf_test->dma_chan_tx = NULL;
		epf_test->dma_chan_rx = NULL;
		return;
	}

	dma_release_channel(epf_test->dma_chan_rx);
	epf_test->dma_chan_rx = NULL;
}

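/*
 * Log the result of a transfer: size, whether DMA was used, elapsed time
 * and the resulting throughput in KB/s.
 */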
static void pci_epf_test_print_rate(struct pci_epf_test *epf_test,
				    const char *op, u64 size,
				    struct timespec64 *start,
				    struct timespec64 *end, bool dma)
{
	struct timespec64 ts = timespec64_sub(*end, *start);
	u64 rate = 0, ns;

	/* calculate the rate */
	ns = timespec64_to_ns(&ts);
	if (ns)
		rate = div64_u64(size * NSEC_PER_SEC, ns * 1000);

	dev_info(&epf_test->epf->dev,
		 "%s => Size: %llu B, DMA: %s, Time: %llu.%09u s, Rate: %llu KB/s\n",
		 op, size, dma ? "YES" : "NO",
		 (u64)ts.tv_sec, (u32)ts.tv_nsec, rate);
}

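/*
 * Handle COMMAND_COPY: map the host source and destination areas window by
 * window (each mapping may cover less than the remaining size) and copy the
 * data either with the memcpy DMA channel or through a CPU bounce buffer
 * using the IO-safe memcpy_fromio()/memcpy_toio() helpers.
 */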
static void pci_epf_test_copy(struct pci_epf_test *epf_test,
			      struct pci_epf_test_reg *reg)
{
	int ret = 0;
	struct timespec64 start, end;
	struct pci_epf *epf = epf_test->epf;
	struct pci_epc *epc = epf->epc;
	struct device *dev = &epf->dev;
	struct pci_epc_map src_map, dst_map;
	u64 src_addr = le64_to_cpu(reg->src_addr);
	u64 dst_addr = le64_to_cpu(reg->dst_addr);
	size_t orig_size, copy_size;
	ssize_t map_size = 0;
	u32 flags = le32_to_cpu(reg->flags);
	u32 status = 0;
	void *copy_buf = NULL, *buf;

	orig_size = copy_size = le32_to_cpu(reg->size);

	if (flags & FLAG_USE_DMA) {
		if (!dma_has_cap(DMA_MEMCPY, epf_test->dma_chan_tx->device->cap_mask)) {
			dev_err(dev, "DMA controller doesn't support MEMCPY\n");
			ret = -EINVAL;
			goto set_status;
		}
	} else {
		copy_buf = kzalloc(copy_size, GFP_KERNEL);
		if (!copy_buf) {
			ret = -ENOMEM;
			goto set_status;
		}
		buf = copy_buf;
	}

	while (copy_size) {
		ret = pci_epc_mem_map(epc, epf->func_no, epf->vfunc_no,
				      src_addr, copy_size, &src_map);
		if (ret) {
			dev_err(dev, "Failed to map source address\n");
			status = STATUS_SRC_ADDR_INVALID;
			goto free_buf;
		}

		ret = pci_epc_mem_map(epf->epc, epf->func_no, epf->vfunc_no,
				      dst_addr, copy_size, &dst_map);
		if (ret) {
			dev_err(dev, "Failed to map destination address\n");
			status = STATUS_DST_ADDR_INVALID;
			pci_epc_mem_unmap(epc, epf->func_no, epf->vfunc_no,
					  &src_map);
			goto free_buf;
		}

		map_size = min_t(size_t, dst_map.pci_size, src_map.pci_size);

		ktime_get_ts64(&start);
		if (flags & FLAG_USE_DMA) {
			ret = pci_epf_test_data_transfer(epf_test,
					dst_map.phys_addr, src_map.phys_addr,
					map_size, 0, DMA_MEM_TO_MEM);
			if (ret) {
				dev_err(dev, "Data transfer failed\n");
				goto unmap;
			}
		} else {
			memcpy_fromio(buf, src_map.virt_addr, map_size);
			memcpy_toio(dst_map.virt_addr, buf, map_size);
			buf += map_size;
		}
		ktime_get_ts64(&end);

		copy_size -= map_size;
		src_addr += map_size;
		dst_addr += map_size;

		pci_epc_mem_unmap(epc, epf->func_no, epf->vfunc_no, &dst_map);
		pci_epc_mem_unmap(epc, epf->func_no, epf->vfunc_no, &src_map);
		map_size = 0;
	}

	pci_epf_test_print_rate(epf_test, "COPY", orig_size, &start, &end,
				flags & FLAG_USE_DMA);

unmap:
	if (map_size) {
		pci_epc_mem_unmap(epc, epf->func_no, epf->vfunc_no, &dst_map);
		pci_epc_mem_unmap(epc, epf->func_no, epf->vfunc_no, &src_map);
	}

free_buf:
	kfree(copy_buf);

set_status:
	if (!ret)
		status |= STATUS_COPY_SUCCESS;
	else
		status |= STATUS_COPY_FAIL;
	reg->status = cpu_to_le32(status);
}

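/*
 * Handle COMMAND_READ: fetch reg->size bytes from the host buffer at
 * reg->src_addr into a local buffer (via DMA or memcpy_fromio()) and
 * validate the data against the CRC32 checksum published by the host.
 */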
static void pci_epf_test_read(struct pci_epf_test *epf_test,
			      struct pci_epf_test_reg *reg)
{
	int ret = 0;
	void *src_buf, *buf;
	u32 crc32;
	struct pci_epc_map map;
	phys_addr_t dst_phys_addr;
	struct timespec64 start, end;
	struct pci_epf *epf = epf_test->epf;
	struct pci_epc *epc = epf->epc;
	struct device *dev = &epf->dev;
	struct device *dma_dev = epf->epc->dev.parent;
	u64 src_addr = le64_to_cpu(reg->src_addr);
	size_t orig_size, src_size;
	ssize_t map_size = 0;
	u32 flags = le32_to_cpu(reg->flags);
	u32 checksum = le32_to_cpu(reg->checksum);
	u32 status = 0;

	orig_size = src_size = le32_to_cpu(reg->size);

	src_buf = kzalloc(src_size, GFP_KERNEL);
	if (!src_buf) {
		ret = -ENOMEM;
		goto set_status;
	}
	buf = src_buf;

	while (src_size) {
		ret = pci_epc_mem_map(epc, epf->func_no, epf->vfunc_no,
				      src_addr, src_size, &map);
		if (ret) {
			dev_err(dev, "Failed to map address\n");
			status = STATUS_SRC_ADDR_INVALID;
			goto free_buf;
		}

		map_size = map.pci_size;
		if (flags & FLAG_USE_DMA) {
			dst_phys_addr = dma_map_single(dma_dev, buf, map_size,
						       DMA_FROM_DEVICE);
			if (dma_mapping_error(dma_dev, dst_phys_addr)) {
				dev_err(dev,
					"Failed to map destination buffer addr\n");
				ret = -ENOMEM;
				goto unmap;
			}

			ktime_get_ts64(&start);
			ret = pci_epf_test_data_transfer(epf_test,
					dst_phys_addr, map.phys_addr,
					map_size, src_addr, DMA_DEV_TO_MEM);
			if (ret)
				dev_err(dev, "Data transfer failed\n");
			ktime_get_ts64(&end);

			dma_unmap_single(dma_dev, dst_phys_addr, map_size,
					 DMA_FROM_DEVICE);

			if (ret)
				goto unmap;
		} else {
			ktime_get_ts64(&start);
			memcpy_fromio(buf, map.virt_addr, map_size);
			ktime_get_ts64(&end);
		}

		src_size -= map_size;
		src_addr += map_size;
		buf += map_size;

		pci_epc_mem_unmap(epc, epf->func_no, epf->vfunc_no, &map);
		map_size = 0;
	}

	pci_epf_test_print_rate(epf_test, "READ", orig_size, &start, &end,
				flags & FLAG_USE_DMA);

	crc32 = crc32_le(~0, src_buf, orig_size);
	if (crc32 != checksum)
		ret = -EIO;

unmap:
	if (map_size)
		pci_epc_mem_unmap(epc, epf->func_no, epf->vfunc_no, &map);

free_buf:
	kfree(src_buf);

set_status:
	if (!ret)
		status |= STATUS_READ_SUCCESS;
	else
		status |= STATUS_READ_FAIL;
	reg->status = cpu_to_le32(status);
}

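/*
 * Handle COMMAND_WRITE: fill a local buffer with random bytes, publish its
 * CRC32 in reg->checksum so the host can verify the data, and write the
 * buffer to the host address at reg->dst_addr (via DMA or memcpy_toio()).
 */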
static void pci_epf_test_write(struct pci_epf_test *epf_test,
			       struct pci_epf_test_reg *reg)
{
	int ret = 0;
	void *dst_buf, *buf;
	struct pci_epc_map map;
	phys_addr_t src_phys_addr;
	struct timespec64 start, end;
	struct pci_epf *epf = epf_test->epf;
	struct pci_epc *epc = epf->epc;
	struct device *dev = &epf->dev;
	struct device *dma_dev = epf->epc->dev.parent;
	u64 dst_addr = le64_to_cpu(reg->dst_addr);
	size_t orig_size, dst_size;
	ssize_t map_size = 0;
	u32 flags = le32_to_cpu(reg->flags);
	u32 status = 0;

	orig_size = dst_size = le32_to_cpu(reg->size);

	dst_buf = kzalloc(dst_size, GFP_KERNEL);
	if (!dst_buf) {
		ret = -ENOMEM;
		goto set_status;
	}
	get_random_bytes(dst_buf, dst_size);
	reg->checksum = cpu_to_le32(crc32_le(~0, dst_buf, dst_size));
	buf = dst_buf;

	while (dst_size) {
		ret = pci_epc_mem_map(epc, epf->func_no, epf->vfunc_no,
				      dst_addr, dst_size, &map);
		if (ret) {
			dev_err(dev, "Failed to map address\n");
			status = STATUS_DST_ADDR_INVALID;
			goto free_buf;
		}

		map_size = map.pci_size;
		if (flags & FLAG_USE_DMA) {
			src_phys_addr = dma_map_single(dma_dev, buf, map_size,
						       DMA_TO_DEVICE);
			if (dma_mapping_error(dma_dev, src_phys_addr)) {
				dev_err(dev,
					"Failed to map source buffer addr\n");
				ret = -ENOMEM;
				goto unmap;
			}

			ktime_get_ts64(&start);

			ret = pci_epf_test_data_transfer(epf_test,
						map.phys_addr, src_phys_addr,
						map_size, dst_addr,
						DMA_MEM_TO_DEV);
			if (ret)
				dev_err(dev, "Data transfer failed\n");
			ktime_get_ts64(&end);

			dma_unmap_single(dma_dev, src_phys_addr, map_size,
					 DMA_TO_DEVICE);

			if (ret)
				goto unmap;
		} else {
			ktime_get_ts64(&start);
			memcpy_toio(map.virt_addr, buf, map_size);
			ktime_get_ts64(&end);
		}

		dst_size -= map_size;
		dst_addr += map_size;
		buf += map_size;

		pci_epc_mem_unmap(epc, epf->func_no, epf->vfunc_no, &map);
		map_size = 0;
	}

	pci_epf_test_print_rate(epf_test, "WRITE", orig_size, &start, &end,
				flags & FLAG_USE_DMA);

	/*
	 * Wait 1ms in order for the write to complete. Without this delay,
	 * an L3 error is observed in the host system.
	 */
	usleep_range(1000, 2000);

unmap:
	if (map_size)
		pci_epc_mem_unmap(epc, epf->func_no, epf->vfunc_no, &map);

free_buf:
	kfree(dst_buf);

set_status:
	if (!ret)
		status |= STATUS_WRITE_SUCCESS;
	else
		status |= STATUS_WRITE_FAIL;
	reg->status = cpu_to_le32(status);
}

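/*
 * Raise an interrupt toward the host using the mechanism selected in
 * reg->irq_type (INTx, MSI or MSI-X), after validating the requested
 * vector number against what the controller actually has configured.
 */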
static void pci_epf_test_raise_irq(struct pci_epf_test *epf_test,
				   struct pci_epf_test_reg *reg)
{
	struct pci_epf *epf = epf_test->epf;
	struct device *dev = &epf->dev;
	struct pci_epc *epc = epf->epc;
	u32 status = le32_to_cpu(reg->status);
	u32 irq_number = le32_to_cpu(reg->irq_number);
	u32 irq_type = le32_to_cpu(reg->irq_type);
	int count;

	/*
	 * Set the status before raising the IRQ to ensure that the host sees
	 * the updated value when it gets the IRQ.
	 */
	status |= STATUS_IRQ_RAISED;
	WRITE_ONCE(reg->status, cpu_to_le32(status));

	switch (irq_type) {
	case IRQ_TYPE_INTX:
		pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no,
				  PCI_IRQ_INTX, 0);
		break;
	case IRQ_TYPE_MSI:
		count = pci_epc_get_msi(epc, epf->func_no, epf->vfunc_no);
		if (irq_number > count || count <= 0) {
			dev_err(dev, "Invalid MSI IRQ number %d / %d\n",
				irq_number, count);
			return;
		}
		pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no,
				  PCI_IRQ_MSI, irq_number);
		break;
	case IRQ_TYPE_MSIX:
		count = pci_epc_get_msix(epc, epf->func_no, epf->vfunc_no);
		if (irq_number > count || count <= 0) {
			dev_err(dev, "Invalid MSI-X IRQ number %d / %d\n",
				irq_number, count);
			return;
		}
		pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no,
				  PCI_IRQ_MSIX, irq_number);
		break;
	default:
		dev_err(dev, "Failed to raise IRQ, unknown type\n");
		break;
	}
}

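/*
 * Doorbell interrupt handler: the host rang the doorbell by writing the
 * advertised data to the doorbell BAR, so flag success in reg->status and
 * notify the host back with an interrupt.
 */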
static irqreturn_t pci_epf_test_doorbell_handler(int irq, void *data)
{
	struct pci_epf_test *epf_test = data;
	enum pci_barno test_reg_bar = epf_test->test_reg_bar;
	struct pci_epf_test_reg *reg = epf_test->reg[test_reg_bar];
	u32 status = le32_to_cpu(reg->status);

	status |= STATUS_DOORBELL_SUCCESS;
	reg->status = cpu_to_le32(status);
	pci_epf_test_raise_irq(epf_test, reg);

	return IRQ_HANDLED;
}

static void pci_epf_test_doorbell_cleanup(struct pci_epf_test *epf_test)
{
	struct pci_epf_test_reg *reg = epf_test->reg[epf_test->test_reg_bar];
	struct pci_epf *epf = epf_test->epf;

	free_irq(epf->db_msg[0].virq, epf_test);
	reg->doorbell_bar = cpu_to_le32(NO_BAR);

	pci_epf_free_doorbell(epf);
}

static void pci_epf_test_enable_doorbell(struct pci_epf_test *epf_test,
					 struct pci_epf_test_reg *reg)
{
	u32 status = le32_to_cpu(reg->status);
	struct pci_epf *epf = epf_test->epf;
	struct pci_epc *epc = epf->epc;
	struct msi_msg *msg;
	enum pci_barno bar;
	size_t offset;
	int ret;

	ret = pci_epf_alloc_doorbell(epf, 1);
	if (ret)
		goto set_status_err;

	msg = &epf->db_msg[0].msg;
	bar = pci_epc_get_next_free_bar(epf_test->epc_features, epf_test->test_reg_bar + 1);
	if (bar < BAR_0)
		goto err_doorbell_cleanup;

	ret = request_irq(epf->db_msg[0].virq, pci_epf_test_doorbell_handler, 0,
			  "pci-ep-test-doorbell", epf_test);
	if (ret) {
		dev_err(&epf->dev,
			"Failed to request doorbell IRQ: %d\n",
			epf->db_msg[0].virq);
		goto err_doorbell_cleanup;
	}

	reg->doorbell_data = cpu_to_le32(msg->data);
	reg->doorbell_bar = cpu_to_le32(bar);

	msg = &epf->db_msg[0].msg;
	ret = pci_epf_align_inbound_addr(epf, bar, ((u64)msg->address_hi << 32) | msg->address_lo,
					 &epf_test->db_bar.phys_addr, &offset);

	if (ret)
		goto err_doorbell_cleanup;

	reg->doorbell_offset = cpu_to_le32(offset);

	epf_test->db_bar.barno = bar;
	epf_test->db_bar.size = epf->bar[bar].size;
	epf_test->db_bar.flags = epf->bar[bar].flags;

	ret = pci_epc_set_bar(epc, epf->func_no, epf->vfunc_no, &epf_test->db_bar);
	if (ret)
		goto err_doorbell_cleanup;

	status |= STATUS_DOORBELL_ENABLE_SUCCESS;
	reg->status = cpu_to_le32(status);
	return;

err_doorbell_cleanup:
	pci_epf_test_doorbell_cleanup(epf_test);
set_status_err:
	status |= STATUS_DOORBELL_ENABLE_FAIL;
	reg->status = cpu_to_le32(status);
}

static void pci_epf_test_disable_doorbell(struct pci_epf_test *epf_test,
					  struct pci_epf_test_reg *reg)
{
	enum pci_barno bar = le32_to_cpu(reg->doorbell_bar);
	u32 status = le32_to_cpu(reg->status);
	struct pci_epf *epf = epf_test->epf;
	struct pci_epc *epc = epf->epc;

	if (bar < BAR_0)
		goto set_status_err;

	pci_epf_test_doorbell_cleanup(epf_test);
	pci_epc_clear_bar(epc, epf->func_no, epf->vfunc_no, &epf_test->db_bar);

	status |= STATUS_DOORBELL_DISABLE_SUCCESS;
	reg->status = cpu_to_le32(status);

	return;

set_status_err:
	status |= STATUS_DOORBELL_DISABLE_FAIL;
	reg->status = cpu_to_le32(status);
}

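/*
 * Command handler work item: poll the command register, dispatch the
 * requested test operation, then re-queue itself with a 1ms delay. The
 * command and status registers are cleared before handling so the host can
 * post the next command once it sees the status update.
 */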
static void pci_epf_test_cmd_handler(struct work_struct *work)
{
	u32 command;
	struct pci_epf_test *epf_test = container_of(work, struct pci_epf_test,
						     cmd_handler.work);
	struct pci_epf *epf = epf_test->epf;
	struct device *dev = &epf->dev;
	enum pci_barno test_reg_bar = epf_test->test_reg_bar;
	struct pci_epf_test_reg *reg = epf_test->reg[test_reg_bar];
	u32 irq_type = le32_to_cpu(reg->irq_type);

	command = le32_to_cpu(READ_ONCE(reg->command));
	if (!command)
		goto reset_handler;

	WRITE_ONCE(reg->command, 0);
	WRITE_ONCE(reg->status, 0);

	if ((le32_to_cpu(READ_ONCE(reg->flags)) & FLAG_USE_DMA) &&
	    !epf_test->dma_supported) {
		dev_err(dev, "Cannot transfer data using DMA\n");
		goto reset_handler;
	}

	if (irq_type > IRQ_TYPE_MSIX) {
		dev_err(dev, "Failed to detect IRQ type\n");
		goto reset_handler;
	}

	switch (command) {
	case COMMAND_RAISE_INTX_IRQ:
	case COMMAND_RAISE_MSI_IRQ:
	case COMMAND_RAISE_MSIX_IRQ:
		pci_epf_test_raise_irq(epf_test, reg);
		break;
	case COMMAND_WRITE:
		pci_epf_test_write(epf_test, reg);
		pci_epf_test_raise_irq(epf_test, reg);
		break;
	case COMMAND_READ:
		pci_epf_test_read(epf_test, reg);
		pci_epf_test_raise_irq(epf_test, reg);
		break;
	case COMMAND_COPY:
		pci_epf_test_copy(epf_test, reg);
		pci_epf_test_raise_irq(epf_test, reg);
		break;
	case COMMAND_ENABLE_DOORBELL:
		pci_epf_test_enable_doorbell(epf_test, reg);
		pci_epf_test_raise_irq(epf_test, reg);
		break;
	case COMMAND_DISABLE_DOORBELL:
		pci_epf_test_disable_doorbell(epf_test, reg);
		pci_epf_test_raise_irq(epf_test, reg);
		break;
	default:
		dev_err(dev, "Invalid command 0x%x\n", command);
		break;
	}

reset_handler:
	queue_delayed_work(kpcitest_workqueue, &epf_test->cmd_handler,
			   msecs_to_jiffies(1));
}

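/*
 * Program every BAR for which backing memory was allocated. A failure on
 * an optional BAR is tolerated (its memory is simply released); only a
 * failure on the test register BAR aborts initialization, since the host
 * driver cannot work without it.
 */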
static int pci_epf_test_set_bar(struct pci_epf *epf)
{
	int bar, ret;
	struct pci_epc *epc = epf->epc;
	struct device *dev = &epf->dev;
	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
	enum pci_barno test_reg_bar = epf_test->test_reg_bar;

	for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
		if (!epf_test->reg[bar])
			continue;

		ret = pci_epc_set_bar(epc, epf->func_no, epf->vfunc_no,
				      &epf->bar[bar]);
		if (ret) {
			pci_epf_free_space(epf, epf_test->reg[bar], bar,
					   PRIMARY_INTERFACE);
			epf_test->reg[bar] = NULL;
			dev_err(dev, "Failed to set BAR%d\n", bar);
			if (bar == test_reg_bar)
				return ret;
		}
	}

	return 0;
}

static void pci_epf_test_clear_bar(struct pci_epf *epf)
{
	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
	struct pci_epc *epc = epf->epc;
	int bar;

	for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
		if (!epf_test->reg[bar])
			continue;

		pci_epc_clear_bar(epc, epf->func_no, epf->vfunc_no,
				  &epf->bar[bar]);
	}
}

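/*
 * Advertise what this endpoint controller can do (unaligned access, MSI,
 * MSI-X, INTx) in the caps register so the host test driver can skip
 * unsupported test cases.
 */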
static void pci_epf_test_set_capabilities(struct pci_epf *epf)
{
	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
	enum pci_barno test_reg_bar = epf_test->test_reg_bar;
	struct pci_epf_test_reg *reg = epf_test->reg[test_reg_bar];
	struct pci_epc *epc = epf->epc;
	u32 caps = 0;

	if (epc->ops->align_addr)
		caps |= CAP_UNALIGNED_ACCESS;

	if (epf_test->epc_features->msi_capable)
		caps |= CAP_MSI;

	if (epf_test->epc_features->msix_capable)
		caps |= CAP_MSIX;

	if (epf_test->epc_features->intx_capable)
		caps |= CAP_INTX;

	reg->caps = cpu_to_le32(caps);
}

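/*
 * epc_init event handler: set up DMA channels, write the configuration
 * space header, advertise capabilities, program the BARs and configure
 * MSI/MSI-X. When the controller cannot signal link-up, start polling for
 * commands immediately instead of waiting for the link_up event.
 */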
static int pci_epf_test_epc_init(struct pci_epf *epf)
{
	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
	struct pci_epf_header *header = epf->header;
	const struct pci_epc_features *epc_features = epf_test->epc_features;
	struct pci_epc *epc = epf->epc;
	struct device *dev = &epf->dev;
	bool linkup_notifier = false;
	int ret;

	epf_test->dma_supported = true;

	ret = pci_epf_test_init_dma_chan(epf_test);
	if (ret)
		epf_test->dma_supported = false;

	if (epf->vfunc_no <= 1) {
		ret = pci_epc_write_header(epc, epf->func_no, epf->vfunc_no, header);
		if (ret) {
			dev_err(dev, "Configuration header write failed\n");
			return ret;
		}
	}

	pci_epf_test_set_capabilities(epf);

	ret = pci_epf_test_set_bar(epf);
	if (ret)
		return ret;

	if (epc_features->msi_capable) {
		ret = pci_epc_set_msi(epc, epf->func_no, epf->vfunc_no,
				      epf->msi_interrupts);
		if (ret) {
			dev_err(dev, "MSI configuration failed\n");
			return ret;
		}
	}

	if (epc_features->msix_capable) {
		ret = pci_epc_set_msix(epc, epf->func_no, epf->vfunc_no,
				       epf->msix_interrupts,
				       epf_test->test_reg_bar,
				       epf_test->msix_table_offset);
		if (ret) {
			dev_err(dev, "MSI-X configuration failed\n");
			return ret;
		}
	}

	linkup_notifier = epc_features->linkup_notifier;
	if (!linkup_notifier)
		queue_work(kpcitest_workqueue, &epf_test->cmd_handler.work);

	return 0;
}

static void pci_epf_test_epc_deinit(struct pci_epf *epf)
{
	struct pci_epf_test *epf_test = epf_get_drvdata(epf);

	cancel_delayed_work_sync(&epf_test->cmd_handler);
	pci_epf_test_clean_dma_chan(epf_test);
	pci_epf_test_clear_bar(epf);
}

static int pci_epf_test_link_up(struct pci_epf *epf)
{
	struct pci_epf_test *epf_test = epf_get_drvdata(epf);

	queue_delayed_work(kpcitest_workqueue, &epf_test->cmd_handler,
			   msecs_to_jiffies(1));

	return 0;
}

static int pci_epf_test_link_down(struct pci_epf *epf)
{
	struct pci_epf_test *epf_test = epf_get_drvdata(epf);

	cancel_delayed_work_sync(&epf_test->cmd_handler);

	return 0;
}

static const struct pci_epc_event_ops pci_epf_test_event_ops = {
	.epc_init = pci_epf_test_epc_init,
	.epc_deinit = pci_epf_test_epc_deinit,
	.link_up = pci_epf_test_link_up,
	.link_down = pci_epf_test_link_down,
};

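/*
 * Allocate backing memory for the BARs. The test register BAR carries the
 * pci_epf_test_reg block followed, when MSI-X is supported, by the MSI-X
 * table and the pending bit array (PBA); the remaining free BARs get the
 * fixed sizes from bar_size[].
 */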
static int pci_epf_test_alloc_space(struct pci_epf *epf)
{
	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
	struct device *dev = &epf->dev;
	size_t msix_table_size = 0;
	size_t test_reg_bar_size;
	size_t pba_size = 0;
	void *base;
	enum pci_barno test_reg_bar = epf_test->test_reg_bar;
	enum pci_barno bar;
	const struct pci_epc_features *epc_features = epf_test->epc_features;
	size_t test_reg_size;

	test_reg_bar_size = ALIGN(sizeof(struct pci_epf_test_reg), 128);

	if (epc_features->msix_capable) {
		msix_table_size = PCI_MSIX_ENTRY_SIZE * epf->msix_interrupts;
		epf_test->msix_table_offset = test_reg_bar_size;
		/* Align to QWORD or 8 Bytes */
		pba_size = ALIGN(DIV_ROUND_UP(epf->msix_interrupts, 8), 8);
	}
	test_reg_size = test_reg_bar_size + msix_table_size + pba_size;

	base = pci_epf_alloc_space(epf, test_reg_size, test_reg_bar,
				   epc_features, PRIMARY_INTERFACE);
	if (!base) {
		dev_err(dev, "Failed to allocate register space\n");
		return -ENOMEM;
	}
	epf_test->reg[test_reg_bar] = base;

	for (bar = BAR_0; bar < PCI_STD_NUM_BARS; bar++) {
		bar = pci_epc_get_next_free_bar(epc_features, bar);
		if (bar == NO_BAR)
			break;

		if (bar == test_reg_bar)
			continue;

		base = pci_epf_alloc_space(epf, bar_size[bar], bar,
					   epc_features, PRIMARY_INTERFACE);
		if (!base)
			dev_err(dev, "Failed to allocate space for BAR%d\n",
				bar);
		epf_test->reg[bar] = base;
	}

	return 0;
}

static void pci_epf_test_free_space(struct pci_epf *epf)
{
	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
	int bar;

	for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
		if (!epf_test->reg[bar])
			continue;

		pci_epf_free_space(epf, epf_test->reg[bar], bar,
				   PRIMARY_INTERFACE);
		epf_test->reg[bar] = NULL;
	}
}

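/*
 * Bind the function driver to an endpoint controller: query the
 * controller's features, pick the first free BAR for the test registers
 * and allocate backing memory for all BARs.
 */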
static int pci_epf_test_bind(struct pci_epf *epf)
{
	int ret;
	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
	const struct pci_epc_features *epc_features;
	enum pci_barno test_reg_bar = BAR_0;
	struct pci_epc *epc = epf->epc;

	if (WARN_ON_ONCE(!epc))
		return -EINVAL;

	epc_features = pci_epc_get_features(epc, epf->func_no, epf->vfunc_no);
	if (!epc_features) {
		dev_err(&epf->dev, "epc_features not implemented\n");
		return -EOPNOTSUPP;
	}

	test_reg_bar = pci_epc_get_first_free_bar(epc_features);
	if (test_reg_bar < 0)
		return -EINVAL;

	epf_test->test_reg_bar = test_reg_bar;
	epf_test->epc_features = epc_features;

	ret = pci_epf_test_alloc_space(epf);
	if (ret)
		return ret;

	return 0;
}

static void pci_epf_test_unbind(struct pci_epf *epf)
{
	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
	struct pci_epc *epc = epf->epc;

	cancel_delayed_work_sync(&epf_test->cmd_handler);
	if (epc->init_complete) {
		pci_epf_test_clean_dma_chan(epf_test);
		pci_epf_test_clear_bar(epf);
	}
	pci_epf_test_free_space(epf);
}

static const struct pci_epf_device_id pci_epf_test_ids[] = {
	{
		.name = "pci_epf_test",
	},
	{},
};

static int pci_epf_test_probe(struct pci_epf *epf,
			      const struct pci_epf_device_id *id)
{
	struct pci_epf_test *epf_test;
	struct device *dev = &epf->dev;

	epf_test = devm_kzalloc(dev, sizeof(*epf_test), GFP_KERNEL);
	if (!epf_test)
		return -ENOMEM;

	epf->header = &test_header;
	epf_test->epf = epf;

	INIT_DELAYED_WORK(&epf_test->cmd_handler, pci_epf_test_cmd_handler);

	epf->event_ops = &pci_epf_test_event_ops;

	epf_set_drvdata(epf, epf_test);
	return 0;
}

static const struct pci_epf_ops ops = {
	.unbind	= pci_epf_test_unbind,
	.bind	= pci_epf_test_bind,
};

static struct pci_epf_driver test_driver = {
	.driver.name	= "pci_epf_test",
	.probe		= pci_epf_test_probe,
	.id_table	= pci_epf_test_ids,
	.ops		= &ops,
	.owner		= THIS_MODULE,
};

static int __init pci_epf_test_init(void)
{
	int ret;

	kpcitest_workqueue = alloc_workqueue("kpcitest",
					     WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
	if (!kpcitest_workqueue) {
		pr_err("Failed to allocate the kpcitest work queue\n");
		return -ENOMEM;
	}

	ret = pci_epf_register_driver(&test_driver);
	if (ret) {
		destroy_workqueue(kpcitest_workqueue);
		pr_err("Failed to register pci epf test driver --> %d\n", ret);
		return ret;
	}

	return 0;
}
module_init(pci_epf_test_init);

static void __exit pci_epf_test_exit(void)
{
	if (kpcitest_workqueue)
		destroy_workqueue(kpcitest_workqueue);
	pci_epf_unregister_driver(&test_driver);
}
module_exit(pci_epf_test_exit);

MODULE_DESCRIPTION("PCI EPF TEST DRIVER");
MODULE_AUTHOR("Kishon Vijay Abraham I <kishon@ti.com>");
MODULE_LICENSE("GPL v2");