2021-09-14 12:14:22 -07:00
|
|
|
// SPDX-License-Identifier: GPL-2.0-only
|
|
|
|
// Copyright(c) 2021 Intel Corporation. All rights reserved.
|
|
|
|
|
|
|
|
#include <linux/platform_device.h>
|
2025-05-27 16:34:51 +01:00
|
|
|
#include <linux/memory_hotplug.h>
|
2021-09-14 12:14:22 -07:00
|
|
|
#include <linux/genalloc.h>
|
|
|
|
#include <linux/module.h>
|
|
|
|
#include <linux/mutex.h>
|
|
|
|
#include <linux/acpi.h>
|
|
|
|
#include <linux/pci.h>
|
|
|
|
#include <linux/mm.h>
|
2022-02-01 12:24:30 -08:00
|
|
|
#include <cxlmem.h>
|
2022-12-13 08:44:24 -08:00
|
|
|
|
|
|
|
#include "../watermark.h"
|
2021-09-14 12:14:22 -07:00
|
|
|
#include "mock.h"
|
|
|
|
|
2022-11-30 14:47:26 -08:00
|
|
|
/*
 * Selects which CFMWS interleave-math variant the mock CEDT publishes;
 * presumably wired up as a module parameter elsewhere in this file —
 * see cfmws_start/cfmws_end below.
 */
static int interleave_arithmetic;

/* Arbitrary non-zero QoS Throttling Group id stamped into every CFMWS */
#define FAKE_QTG_ID 42

/* Size knobs for the emulated CXL topology */
#define NR_CXL_HOST_BRIDGES 2
#define NR_CXL_SINGLE_HOST 1
#define NR_CXL_RCH 1
#define NR_CXL_ROOT_PORTS 2
#define NR_CXL_SWITCH_PORTS 2
#define NR_CXL_PORT_DECODERS 8
#define NR_BRIDGES (NR_CXL_HOST_BRIDGES + NR_CXL_SINGLE_HOST + NR_CXL_RCH)

/* Mock ACPI0017 root platform device */
static struct platform_device *cxl_acpi;
/* Multi-port host bridge topology: bridges, root ports, switches */
static struct platform_device *cxl_host_bridge[NR_CXL_HOST_BRIDGES];
#define NR_MULTI_ROOT (NR_CXL_HOST_BRIDGES * NR_CXL_ROOT_PORTS)
static struct platform_device *cxl_root_port[NR_MULTI_ROOT];
static struct platform_device *cxl_switch_uport[NR_MULTI_ROOT];
#define NR_MEM_MULTI \
	(NR_CXL_HOST_BRIDGES * NR_CXL_ROOT_PORTS * NR_CXL_SWITCH_PORTS)
static struct platform_device *cxl_switch_dport[NR_MEM_MULTI];

/* Single-port host bridge topology */
static struct platform_device *cxl_hb_single[NR_CXL_SINGLE_HOST];
static struct platform_device *cxl_root_single[NR_CXL_SINGLE_HOST];
static struct platform_device *cxl_swu_single[NR_CXL_SINGLE_HOST];
#define NR_MEM_SINGLE (NR_CXL_SINGLE_HOST * NR_CXL_SWITCH_PORTS)
static struct platform_device *cxl_swd_single[NR_MEM_SINGLE];

/* Mock memory devices; non-static — shared with other cxl_test files */
struct platform_device *cxl_mem[NR_MEM_MULTI];
struct platform_device *cxl_mem_single[NR_MEM_SINGLE];

/* Restricted CXL (1.1) host bridge and its directly-attached device */
static struct platform_device *cxl_rch[NR_CXL_RCH];
static struct platform_device *cxl_rcd[NR_CXL_RCH];
|
2022-11-03 17:30:48 -07:00
|
|
|
|
|
|
|
static inline bool is_multi_bridge(struct device *dev)
|
|
|
|
{
|
|
|
|
int i;
|
|
|
|
|
|
|
|
for (i = 0; i < ARRAY_SIZE(cxl_host_bridge); i++)
|
|
|
|
if (&cxl_host_bridge[i]->dev == dev)
|
|
|
|
return true;
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline bool is_single_bridge(struct device *dev)
|
|
|
|
{
|
|
|
|
int i;
|
|
|
|
|
|
|
|
for (i = 0; i < ARRAY_SIZE(cxl_hb_single); i++)
|
|
|
|
if (&cxl_hb_single[i]->dev == dev)
|
|
|
|
return true;
|
|
|
|
return false;
|
|
|
|
}
|
2021-09-14 12:14:22 -07:00
|
|
|
|
|
|
|
/* Mock ACPI0017 object that stands in for the platform CXL root */
static struct acpi_device acpi0017_mock;

/*
 * One mock ACPI device per emulated host bridge.  Each entry's handle
 * points at the entry itself so find_host_bridge() can map an
 * acpi_handle back to its acpi_device, and the _UID strings line up
 * with the CHBS uid values in mock_cedt.
 */
static struct acpi_device host_bridge[NR_BRIDGES] = {
	[0] = {
		.handle = &host_bridge[0],
		.pnp.unique_id = "0",
	},
	[1] = {
		.handle = &host_bridge[1],
		.pnp.unique_id = "1",
	},
	[2] = {
		.handle = &host_bridge[2],
		.pnp.unique_id = "2",
	},
	[3] = {
		.handle = &host_bridge[3],
		.pnp.unique_id = "3",
	},
};
|
|
|
|
|
|
|
|
static bool is_mock_dev(struct device *dev)
|
|
|
|
{
|
2021-09-08 22:13:21 -07:00
|
|
|
int i;
|
|
|
|
|
|
|
|
for (i = 0; i < ARRAY_SIZE(cxl_mem); i++)
|
|
|
|
if (dev == &cxl_mem[i]->dev)
|
|
|
|
return true;
|
2022-11-03 17:30:48 -07:00
|
|
|
for (i = 0; i < ARRAY_SIZE(cxl_mem_single); i++)
|
|
|
|
if (dev == &cxl_mem_single[i]->dev)
|
|
|
|
return true;
|
2022-12-01 13:34:21 -08:00
|
|
|
for (i = 0; i < ARRAY_SIZE(cxl_rcd); i++)
|
|
|
|
if (dev == &cxl_rcd[i]->dev)
|
|
|
|
return true;
|
2021-09-14 12:14:22 -07:00
|
|
|
if (dev == &cxl_acpi->dev)
|
|
|
|
return true;
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool is_mock_adev(struct acpi_device *adev)
|
|
|
|
{
|
|
|
|
int i;
|
|
|
|
|
|
|
|
if (adev == &acpi0017_mock)
|
|
|
|
return true;
|
|
|
|
|
|
|
|
for (i = 0; i < ARRAY_SIZE(host_bridge); i++)
|
|
|
|
if (adev == &host_bridge[i])
|
|
|
|
return true;
|
|
|
|
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Mock CEDT (CXL Early Discovery Table) published in place of platform
 * firmware: one CHBS per mock host bridge, nine CFMWS windows covering
 * both modulo- and XOR-based interleave arithmetic, and one CXIMS
 * carrying the XOR maps.  __packed keeps the subtables contiguous so
 * header-length walking works.
 */
static struct {
	struct acpi_table_cedt cedt;
	struct acpi_cedt_chbs chbs[NR_BRIDGES];
	struct {
		struct acpi_cedt_cfmws cfmws;
		u32 target[1];
	} cfmws0;
	struct {
		struct acpi_cedt_cfmws cfmws;
		u32 target[2];
	} cfmws1;
	struct {
		struct acpi_cedt_cfmws cfmws;
		u32 target[1];
	} cfmws2;
	struct {
		struct acpi_cedt_cfmws cfmws;
		u32 target[2];
	} cfmws3;
	struct {
		struct acpi_cedt_cfmws cfmws;
		u32 target[1];
	} cfmws4;
	struct {
		struct acpi_cedt_cfmws cfmws;
		u32 target[1];
	} cfmws5;
	struct {
		struct acpi_cedt_cfmws cfmws;
		u32 target[1];
	} cfmws6;
	struct {
		struct acpi_cedt_cfmws cfmws;
		u32 target[2];
	} cfmws7;
	struct {
		struct acpi_cedt_cfmws cfmws;
		u32 target[3];
	} cfmws8;
	struct {
		struct acpi_cedt_cxims cxims;
		u64 xormap_list[2];
	} cxims0;
} __packed mock_cedt = {
	.cedt = {
		.header = {
			.signature = "CEDT",
			.length = sizeof(mock_cedt),
			.revision = 1,
		},
	},
	/* One CHBS per host bridge; uid matches host_bridge[] _UID */
	.chbs[0] = {
		.header = {
			.type = ACPI_CEDT_TYPE_CHBS,
			.length = sizeof(mock_cedt.chbs[0]),
		},
		.uid = 0,
		.cxl_version = ACPI_CEDT_CHBS_VERSION_CXL20,
	},
	.chbs[1] = {
		.header = {
			.type = ACPI_CEDT_TYPE_CHBS,
			.length = sizeof(mock_cedt.chbs[0]),
		},
		.uid = 1,
		.cxl_version = ACPI_CEDT_CHBS_VERSION_CXL20,
	},
	.chbs[2] = {
		.header = {
			.type = ACPI_CEDT_TYPE_CHBS,
			.length = sizeof(mock_cedt.chbs[0]),
		},
		.uid = 2,
		.cxl_version = ACPI_CEDT_CHBS_VERSION_CXL20,
	},
	/* chbs[3] models the CXL 1.1 (restricted) host bridge */
	.chbs[3] = {
		.header = {
			.type = ACPI_CEDT_TYPE_CHBS,
			.length = sizeof(mock_cedt.chbs[0]),
		},
		.uid = 3,
		.cxl_version = ACPI_CEDT_CHBS_VERSION_CXL11,
	},
	.cfmws0 = {
		.cfmws = {
			.header = {
				.type = ACPI_CEDT_TYPE_CFMWS,
				.length = sizeof(mock_cedt.cfmws0),
			},
			.interleave_ways = 0,
			.granularity = 4,
			.restrictions = ACPI_CEDT_CFMWS_RESTRICT_TYPE3 |
					ACPI_CEDT_CFMWS_RESTRICT_VOLATILE,
			.qtg_id = FAKE_QTG_ID,
			.window_size = SZ_256M * 4UL,
		},
		.target = { 0 },
	},
	.cfmws1 = {
		.cfmws = {
			.header = {
				.type = ACPI_CEDT_TYPE_CFMWS,
				.length = sizeof(mock_cedt.cfmws1),
			},
			.interleave_ways = 1,
			.granularity = 4,
			.restrictions = ACPI_CEDT_CFMWS_RESTRICT_TYPE3 |
					ACPI_CEDT_CFMWS_RESTRICT_VOLATILE,
			.qtg_id = FAKE_QTG_ID,
			.window_size = SZ_256M * 8UL,
		},
		.target = { 0, 1, },
	},
	.cfmws2 = {
		.cfmws = {
			.header = {
				.type = ACPI_CEDT_TYPE_CFMWS,
				.length = sizeof(mock_cedt.cfmws2),
			},
			.interleave_ways = 0,
			.granularity = 4,
			.restrictions = ACPI_CEDT_CFMWS_RESTRICT_TYPE3 |
					ACPI_CEDT_CFMWS_RESTRICT_PMEM,
			.qtg_id = FAKE_QTG_ID,
			.window_size = SZ_256M * 4UL,
		},
		.target = { 0 },
	},
	.cfmws3 = {
		.cfmws = {
			.header = {
				.type = ACPI_CEDT_TYPE_CFMWS,
				.length = sizeof(mock_cedt.cfmws3),
			},
			.interleave_ways = 1,
			.granularity = 4,
			.restrictions = ACPI_CEDT_CFMWS_RESTRICT_TYPE3 |
					ACPI_CEDT_CFMWS_RESTRICT_PMEM,
			.qtg_id = FAKE_QTG_ID,
			.window_size = SZ_256M * 8UL,
		},
		.target = { 0, 1, },
	},
	.cfmws4 = {
		.cfmws = {
			.header = {
				.type = ACPI_CEDT_TYPE_CFMWS,
				.length = sizeof(mock_cedt.cfmws4),
			},
			.interleave_ways = 0,
			.granularity = 4,
			.restrictions = ACPI_CEDT_CFMWS_RESTRICT_TYPE3 |
					ACPI_CEDT_CFMWS_RESTRICT_PMEM,
			.qtg_id = FAKE_QTG_ID,
			.window_size = SZ_256M * 4UL,
		},
		.target = { 2 },
	},
	.cfmws5 = {
		.cfmws = {
			.header = {
				.type = ACPI_CEDT_TYPE_CFMWS,
				.length = sizeof(mock_cedt.cfmws5),
			},
			.interleave_ways = 0,
			.granularity = 4,
			.restrictions = ACPI_CEDT_CFMWS_RESTRICT_TYPE3 |
					ACPI_CEDT_CFMWS_RESTRICT_VOLATILE,
			.qtg_id = FAKE_QTG_ID,
			.window_size = SZ_256M,
		},
		.target = { 3 },
	},
	/* .cfmws6,7,8 use ACPI_CEDT_CFMWS_ARITHMETIC_XOR */
	.cfmws6 = {
		.cfmws = {
			.header = {
				.type = ACPI_CEDT_TYPE_CFMWS,
				.length = sizeof(mock_cedt.cfmws6),
			},
			.interleave_arithmetic = ACPI_CEDT_CFMWS_ARITHMETIC_XOR,
			.interleave_ways = 0,
			.granularity = 4,
			.restrictions = ACPI_CEDT_CFMWS_RESTRICT_TYPE3 |
					ACPI_CEDT_CFMWS_RESTRICT_PMEM,
			.qtg_id = FAKE_QTG_ID,
			.window_size = SZ_256M * 8UL,
		},
		.target = { 0, },
	},
	.cfmws7 = {
		.cfmws = {
			.header = {
				.type = ACPI_CEDT_TYPE_CFMWS,
				.length = sizeof(mock_cedt.cfmws7),
			},
			.interleave_arithmetic = ACPI_CEDT_CFMWS_ARITHMETIC_XOR,
			.interleave_ways = 1,
			.granularity = 0,
			.restrictions = ACPI_CEDT_CFMWS_RESTRICT_TYPE3 |
					ACPI_CEDT_CFMWS_RESTRICT_PMEM,
			.qtg_id = FAKE_QTG_ID,
			.window_size = SZ_256M * 8UL,
		},
		.target = { 0, 1, },
	},
	.cfmws8 = {
		.cfmws = {
			.header = {
				.type = ACPI_CEDT_TYPE_CFMWS,
				.length = sizeof(mock_cedt.cfmws8),
			},
			.interleave_arithmetic = ACPI_CEDT_CFMWS_ARITHMETIC_XOR,
			.interleave_ways = 8,
			.granularity = 1,
			.restrictions = ACPI_CEDT_CFMWS_RESTRICT_TYPE3 |
					ACPI_CEDT_CFMWS_RESTRICT_PMEM,
			.qtg_id = FAKE_QTG_ID,
			.window_size = SZ_512M * 6UL,
		},
		.target = { 0, 1, 2, },
	},
	.cxims0 = {
		.cxims = {
			.header = {
				.type = ACPI_CEDT_TYPE_CXIMS,
				.length = sizeof(mock_cedt.cxims0),
			},
			.hbig = 0,
			.nr_xormaps = 2,
		},
		.xormap_list = { 0x404100, 0x808200, },
	},
};
|
|
|
|
|
2022-11-03 17:30:48 -07:00
|
|
|
/*
 * Flat index of the CFMWS subtables in mock_cedt; the active span is
 * selected by cfmws_start/cfmws_end.
 */
struct acpi_cedt_cfmws *mock_cfmws[] = {
	[0] = &mock_cedt.cfmws0.cfmws,
	[1] = &mock_cedt.cfmws1.cfmws,
	[2] = &mock_cedt.cfmws2.cfmws,
	[3] = &mock_cedt.cfmws3.cfmws,
	[4] = &mock_cedt.cfmws4.cfmws,
	[5] = &mock_cedt.cfmws5.cfmws,
	/* Modulo Math above, XOR Math below */
	[6] = &mock_cedt.cfmws6.cfmws,
	[7] = &mock_cedt.cfmws7.cfmws,
	[8] = &mock_cedt.cfmws8.cfmws,
};
|
|
|
|
|
|
|
|
/* Inclusive bounds into mock_cfmws[] for the currently published set */
static int cfmws_start;
static int cfmws_end;
/* Modulo-math windows occupy mock_cfmws[0..5] */
#define CFMWS_MOD_ARRAY_START 0
#define CFMWS_MOD_ARRAY_END 5
/* XOR-math windows occupy mock_cfmws[6..8] */
#define CFMWS_XOR_ARRAY_START 6
#define CFMWS_XOR_ARRAY_END 8
|
2022-11-30 14:47:26 -08:00
|
|
|
|
|
|
|
/* CXIMS subtable list backing the XOR-interleave CFMWS entries */
struct acpi_cedt_cxims *mock_cxims[1] = {
	[0] = &mock_cedt.cxims0.cxims,
};
|
|
|
|
|
2021-09-14 12:14:22 -07:00
|
|
|
/*
 * Bookkeeping for a fake physical address range handed out from
 * cxl_mock_pool; kept on mock_res so it can be returned to the pool
 * at teardown.
 */
struct cxl_mock_res {
	struct list_head list;	/* membership in mock_res */
	struct range range;	/* [start, end] carved from the pool */
};

static LIST_HEAD(mock_res);		/* all outstanding allocations */
static DEFINE_MUTEX(mock_res_lock);	/* protects mock_res */
static struct gen_pool *cxl_mock_pool;
|
|
|
|
|
|
|
|
static void depopulate_all_mock_resources(void)
|
|
|
|
{
|
|
|
|
struct cxl_mock_res *res, *_res;
|
|
|
|
|
|
|
|
mutex_lock(&mock_res_lock);
|
|
|
|
list_for_each_entry_safe(res, _res, &mock_res, list) {
|
|
|
|
gen_pool_free(cxl_mock_pool, res->range.start,
|
|
|
|
range_len(&res->range));
|
|
|
|
list_del(&res->list);
|
|
|
|
kfree(res);
|
|
|
|
}
|
|
|
|
mutex_unlock(&mock_res_lock);
|
|
|
|
}
|
|
|
|
|
2022-12-01 13:34:21 -08:00
|
|
|
/*
 * Carve an @align-aligned range of @size bytes out of cxl_mock_pool
 * and track it on mock_res for later teardown.
 *
 * Returns the tracking object, or NULL if either the tracker or the
 * pool allocation fails.
 *
 * Fixes vs. prior version: the kzalloc() result was dereferenced
 * without a NULL check, and the tracker leaked when the pool
 * allocation failed.
 */
static struct cxl_mock_res *alloc_mock_res(resource_size_t size, int align)
{
	struct cxl_mock_res *res = kzalloc(sizeof(*res), GFP_KERNEL);
	struct genpool_data_align data = {
		.align = align,
	};
	unsigned long phys;

	if (!res)
		return NULL;

	INIT_LIST_HEAD(&res->list);
	phys = gen_pool_alloc_algo(cxl_mock_pool, size,
				   gen_pool_first_fit_align, &data);
	if (!phys) {
		/* do not leak the tracker on pool exhaustion */
		kfree(res);
		return NULL;
	}

	res->range = (struct range) {
		.start = phys,
		.end = phys + size - 1,
	};
	mutex_lock(&mock_res_lock);
	list_add(&res->list, &mock_res);
	mutex_unlock(&mock_res_lock);

	return res;
}
|
|
|
|
|
|
|
|
/*
 * Assign fake physical addresses, carved from cxl_mock_pool, to the
 * CHBS register blocks and the active CFMWS windows in mock_cedt.
 * Returns 0 on success, -ENOMEM if the pool is exhausted.
 */
static int populate_cedt(void)
{
	struct cxl_mock_res *res;
	int i;

	for (i = 0; i < ARRAY_SIZE(mock_cedt.chbs); i++) {
		struct acpi_cedt_chbs *chbs = &mock_cedt.chbs[i];
		resource_size_t size;

		/* register block size depends on the CHBS CXL version */
		if (chbs->cxl_version == ACPI_CEDT_CHBS_VERSION_CXL20)
			size = ACPI_CEDT_CHBS_LENGTH_CXL20;
		else
			size = ACPI_CEDT_CHBS_LENGTH_CXL11;

		/* naturally-align the register block (align == size) */
		res = alloc_mock_res(size, size);
		if (!res)
			return -ENOMEM;
		chbs->base = res->range.start;
		chbs->length = size;
	}

	/* only the windows in [cfmws_start, cfmws_end] get an HPA base */
	for (i = cfmws_start; i <= cfmws_end; i++) {
		struct acpi_cedt_cfmws *window = mock_cfmws[i];

		res = alloc_mock_res(window->window_size, SZ_256M);
		if (!res)
			return -ENOMEM;
		window->base_hpa = res->range.start;
	}

	return 0;
}
|
|
|
|
|
2022-12-01 13:33:54 -08:00
|
|
|
/* Forward declaration: needed by mock_acpi_table_parse_cedt() below */
static bool is_mock_port(struct device *dev);

/*
 * WARNING, this hack assumes the format of 'struct cxl_cfmws_context'
 * and 'struct cxl_chbs_context' share the property that the first
 * struct member is a cxl_test device being probed by the cxl_acpi
 * driver.
 */
struct cxl_cedt_context {
	struct device *dev;	/* device driving the CEDT parse */
};
|
|
|
|
|
|
|
|
/*
 * Stand-in for acpi_table_parse_cedt(): for cxl_test mock devices,
 * walk the mock_cedt subtables of the requested @id and invoke
 * @handler_arg on each, mimicking the ACPI core's subtable iteration.
 * Non-mock callers are forwarded to the real parser.
 */
static int mock_acpi_table_parse_cedt(enum acpi_cedt_type id,
				      acpi_tbl_entry_handler_arg handler_arg,
				      void *arg)
{
	struct cxl_cedt_context *ctx = arg;
	struct device *dev = ctx->dev;
	union acpi_subtable_headers *h;
	unsigned long end;
	int i;

	if (!is_mock_port(dev) && !is_mock_dev(dev))
		return acpi_table_parse_cedt(id, handler_arg, arg);

	if (id == ACPI_CEDT_TYPE_CHBS)
		for (i = 0; i < ARRAY_SIZE(mock_cedt.chbs); i++) {
			h = (union acpi_subtable_headers *)&mock_cedt.chbs[i];
			/* entries are adjacent, so next entry bounds this one */
			end = (unsigned long)&mock_cedt.chbs[i + 1];
			handler_arg(h, arg, end);
		}

	if (id == ACPI_CEDT_TYPE_CFMWS)
		/* only the currently published window span */
		for (i = cfmws_start; i <= cfmws_end; i++) {
			h = (union acpi_subtable_headers *) mock_cfmws[i];
			end = (unsigned long) h + mock_cfmws[i]->header.length;
			handler_arg(h, arg, end);
		}

	if (id == ACPI_CEDT_TYPE_CXIMS)
		for (i = 0; i < ARRAY_SIZE(mock_cxims); i++) {
			h = (union acpi_subtable_headers *)mock_cxims[i];
			end = (unsigned long)h + mock_cxims[i]->header.length;
			handler_arg(h, arg, end);
		}

	return 0;
}
|
|
|
|
|
|
|
|
static bool is_mock_bridge(struct device *dev)
|
|
|
|
{
|
|
|
|
int i;
|
|
|
|
|
|
|
|
for (i = 0; i < ARRAY_SIZE(cxl_host_bridge); i++)
|
|
|
|
if (dev == &cxl_host_bridge[i]->dev)
|
|
|
|
return true;
|
2022-11-03 17:30:48 -07:00
|
|
|
for (i = 0; i < ARRAY_SIZE(cxl_hb_single); i++)
|
|
|
|
if (dev == &cxl_hb_single[i]->dev)
|
|
|
|
return true;
|
2022-12-01 13:34:21 -08:00
|
|
|
for (i = 0; i < ARRAY_SIZE(cxl_rch); i++)
|
|
|
|
if (dev == &cxl_rch[i]->dev)
|
|
|
|
return true;
|
|
|
|
|
2022-01-31 18:10:04 -08:00
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool is_mock_port(struct device *dev)
|
|
|
|
{
|
|
|
|
int i;
|
|
|
|
|
|
|
|
if (is_mock_bridge(dev))
|
|
|
|
return true;
|
|
|
|
|
|
|
|
for (i = 0; i < ARRAY_SIZE(cxl_root_port); i++)
|
|
|
|
if (dev == &cxl_root_port[i]->dev)
|
|
|
|
return true;
|
2021-09-14 12:14:22 -07:00
|
|
|
|
2022-01-23 16:32:01 -08:00
|
|
|
for (i = 0; i < ARRAY_SIZE(cxl_switch_uport); i++)
|
|
|
|
if (dev == &cxl_switch_uport[i]->dev)
|
|
|
|
return true;
|
|
|
|
|
|
|
|
for (i = 0; i < ARRAY_SIZE(cxl_switch_dport); i++)
|
|
|
|
if (dev == &cxl_switch_dport[i]->dev)
|
|
|
|
return true;
|
|
|
|
|
2022-11-03 17:30:48 -07:00
|
|
|
for (i = 0; i < ARRAY_SIZE(cxl_root_single); i++)
|
|
|
|
if (dev == &cxl_root_single[i]->dev)
|
|
|
|
return true;
|
|
|
|
|
|
|
|
for (i = 0; i < ARRAY_SIZE(cxl_swu_single); i++)
|
|
|
|
if (dev == &cxl_swu_single[i]->dev)
|
|
|
|
return true;
|
|
|
|
|
|
|
|
for (i = 0; i < ARRAY_SIZE(cxl_swd_single); i++)
|
|
|
|
if (dev == &cxl_swd_single[i]->dev)
|
|
|
|
return true;
|
|
|
|
|
2022-01-23 16:32:01 -08:00
|
|
|
if (is_cxl_memdev(dev))
|
|
|
|
return is_mock_dev(dev->parent);
|
|
|
|
|
2021-09-14 12:14:22 -07:00
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Map a mock host-bridge acpi_device back to its index in
 * host_bridge[]; the index doubles as the bridge's _UID / CHBS uid.
 */
static int host_bridge_index(struct acpi_device *adev)
{
	return adev - host_bridge;
}
|
|
|
|
|
|
|
|
/* Resolve @handle to a mock host-bridge acpi_device, or NULL */
static struct acpi_device *find_host_bridge(acpi_handle handle)
{
	struct acpi_device *adev;

	for (adev = host_bridge;
	     adev < host_bridge + ARRAY_SIZE(host_bridge); adev++) {
		if (adev->handle == handle)
			return adev;
	}

	return NULL;
}
|
|
|
|
|
|
|
|
static acpi_status
|
|
|
|
mock_acpi_evaluate_integer(acpi_handle handle, acpi_string pathname,
|
|
|
|
struct acpi_object_list *arguments,
|
|
|
|
unsigned long long *data)
|
|
|
|
{
|
|
|
|
struct acpi_device *adev = find_host_bridge(handle);
|
|
|
|
|
|
|
|
if (!adev || strcmp(pathname, METHOD_NAME__UID) != 0)
|
|
|
|
return acpi_evaluate_integer(handle, pathname, arguments, data);
|
|
|
|
|
|
|
|
*data = host_bridge_index(adev);
|
|
|
|
return AE_OK;
|
|
|
|
}
|
|
|
|
|
2022-12-01 13:34:21 -08:00
|
|
|
/* One mock pci_bus + acpi_pci_root pair per mock host bridge */
static struct pci_bus mock_pci_bus[NR_BRIDGES];
static struct acpi_pci_root mock_pci_root[ARRAY_SIZE(mock_pci_bus)] = {
	[0] = {
		.bus = &mock_pci_bus[0],
	},
	[1] = {
		.bus = &mock_pci_bus[1],
	},
	[2] = {
		.bus = &mock_pci_bus[2],
	},
	[3] = {
		.bus = &mock_pci_bus[3],
	},
};
|
|
|
|
|
|
|
|
static bool is_mock_bus(struct pci_bus *bus)
|
|
|
|
{
|
|
|
|
int i;
|
|
|
|
|
|
|
|
for (i = 0; i < ARRAY_SIZE(mock_pci_bus); i++)
|
|
|
|
if (bus == &mock_pci_bus[i])
|
|
|
|
return true;
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Return the mock acpi_pci_root for a mock host-bridge handle,
 * deferring real handles to acpi_pci_find_root().
 */
static struct acpi_pci_root *mock_acpi_pci_find_root(acpi_handle handle)
{
	struct acpi_device *adev = find_host_bridge(handle);

	if (adev)
		return &mock_pci_root[host_bridge_index(adev)];

	return acpi_pci_find_root(handle);
}
|
|
|
|
|
2023-02-14 11:41:30 -08:00
|
|
|
static struct cxl_hdm *mock_cxl_setup_hdm(struct cxl_port *port,
|
|
|
|
struct cxl_endpoint_dvsec_info *info)
|
2022-02-01 12:24:30 -08:00
|
|
|
{
|
|
|
|
struct cxl_hdm *cxlhdm = devm_kzalloc(&port->dev, sizeof(*cxlhdm), GFP_KERNEL);
|
cxl/region: check interleave capability
Since interleave capability is not verified, if the interleave
capability of a target does not match the region need, committing decoder
should have failed at the device end.
In order to checkout this error as quickly as possible, driver needs
to check the interleave capability of target during attaching it to
region.
Per CXL specification r3.1(8.2.4.20.1 CXL HDM Decoder Capability Register),
bits 11 and 12 indicate the capability to establish interleaving in 3, 6,
12 and 16 ways. If these bits are not set, the target cannot be attached to
a region utilizing such interleave ways.
Additionally, bits 8 and 9 represent the capability of the bits used for
interleaving in the address, Linux tracks this in the cxl_port
interleave_mask.
Per CXL specification r3.1(8.2.4.20.13 Decoder Protection):
eIW means encoded Interleave Ways.
eIG means encoded Interleave Granularity.
in HPA:
if eIW is 0 or 8 (interleave ways: 1, 3), all the bits of HPA are used,
the interleave bits are none, the following check is ignored.
if eIW is less than 8 (interleave ways: 2, 4, 8, 16), the interleave bits
start at bit position eIG + 8 and end at eIG + eIW + 8 - 1.
if eIW is greater than 8 (interleave ways: 6, 12), the interleave bits
start at bit position eIG + 8 and end at eIG + eIW - 1.
if the interleave mask is insufficient to cover the required interleave
bits, the target cannot be attached to the region.
Fixes: 384e624bb211 ("cxl/region: Attach endpoint decoders")
Signed-off-by: Yao Xingtao <yaoxt.fnst@fujitsu.com>
Reviewed-by: Dan Williams <dan.j.williams@intel.com>
Reviewed-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
Link: https://patch.msgid.link/20240614084755.59503-2-yaoxt.fnst@fujitsu.com
Signed-off-by: Dave Jiang <dave.jiang@intel.com>
2024-06-14 04:47:54 -04:00
|
|
|
struct device *dev = &port->dev;
|
2022-02-01 12:24:30 -08:00
|
|
|
|
|
|
|
if (!cxlhdm)
|
|
|
|
return ERR_PTR(-ENOMEM);
|
|
|
|
|
|
|
|
cxlhdm->port = port;
|
cxl/region: check interleave capability
Since interleave capability is not verified, if the interleave
capability of a target does not match the region need, committing decoder
should have failed at the device end.
In order to checkout this error as quickly as possible, driver needs
to check the interleave capability of target during attaching it to
region.
Per CXL specification r3.1(8.2.4.20.1 CXL HDM Decoder Capability Register),
bits 11 and 12 indicate the capability to establish interleaving in 3, 6,
12 and 16 ways. If these bits are not set, the target cannot be attached to
a region utilizing such interleave ways.
Additionally, bits 8 and 9 represent the capability of the bits used for
interleaving in the address, Linux tracks this in the cxl_port
interleave_mask.
Per CXL specification r3.1(8.2.4.20.13 Decoder Protection):
eIW means encoded Interleave Ways.
eIG means encoded Interleave Granularity.
in HPA:
if eIW is 0 or 8 (interleave ways: 1, 3), all the bits of HPA are used,
the interleave bits are none, the following check is ignored.
if eIW is less than 8 (interleave ways: 2, 4, 8, 16), the interleave bits
start at bit position eIG + 8 and end at eIG + eIW + 8 - 1.
if eIW is greater than 8 (interleave ways: 6, 12), the interleave bits
start at bit position eIG + 8 and end at eIG + eIW - 1.
if the interleave mask is insufficient to cover the required interleave
bits, the target cannot be attached to the region.
Fixes: 384e624bb211 ("cxl/region: Attach endpoint decoders")
Signed-off-by: Yao Xingtao <yaoxt.fnst@fujitsu.com>
Reviewed-by: Dan Williams <dan.j.williams@intel.com>
Reviewed-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
Link: https://patch.msgid.link/20240614084755.59503-2-yaoxt.fnst@fujitsu.com
Signed-off-by: Dave Jiang <dave.jiang@intel.com>
2024-06-14 04:47:54 -04:00
|
|
|
cxlhdm->interleave_mask = ~0U;
|
|
|
|
cxlhdm->iw_cap_mask = ~0UL;
|
|
|
|
dev_set_drvdata(dev, cxlhdm);
|
2022-02-01 12:24:30 -08:00
|
|
|
return cxlhdm;
|
|
|
|
}
|
|
|
|
|
2022-02-01 13:23:14 -08:00
|
|
|
/*
 * cxl_test always publishes explicit decoders, so a passthrough
 * decoder request indicates a topology bug — loudly refuse it.
 */
static int mock_cxl_add_passthrough_decoder(struct cxl_port *port)
{
	dev_err(&port->dev, "unexpected passthrough decoder for cxl_test\n");
	return -EOPNOTSUPP;
}
|
|
|
|
|
2022-01-23 16:32:07 -08:00
|
|
|
|
|
|
|
/* Iteration state threaded through map_targets() */
struct target_map_ctx {
	int *target_map;	/* out: platform device id per target slot */
	int index;		/* next slot to fill */
	int target_count;	/* capacity of target_map */
};
|
|
|
|
|
|
|
|
static int map_targets(struct device *dev, void *data)
|
2022-02-01 12:24:30 -08:00
|
|
|
{
|
2022-01-23 16:32:07 -08:00
|
|
|
struct platform_device *pdev = to_platform_device(dev);
|
|
|
|
struct target_map_ctx *ctx = data;
|
|
|
|
|
|
|
|
ctx->target_map[ctx->index++] = pdev->id;
|
|
|
|
|
|
|
|
if (ctx->index > ctx->target_count) {
|
|
|
|
dev_WARN_ONCE(dev, 1, "too many targets found?\n");
|
|
|
|
return -ENXIO;
|
|
|
|
}
|
|
|
|
|
2022-02-01 12:24:30 -08:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2022-06-08 22:56:37 -07:00
|
|
|
/*
 * Mock HDM decoder commit.  Hardware enforces in-order commit within a
 * port, so emulate that by requiring the decoder id to equal the
 * port's current committed count; otherwise fail with -EBUSY.
 */
static int mock_decoder_commit(struct cxl_decoder *cxld)
{
	struct cxl_port *port = to_cxl_port(cxld->dev.parent);
	int id = cxld->id;

	/* already committed: nothing to do */
	if (cxld->flags & CXL_DECODER_F_ENABLE)
		return 0;

	dev_dbg(&port->dev, "%s commit\n", dev_name(&cxld->dev));
	if (cxl_num_decoders_committed(port) != id) {
		dev_dbg(&port->dev,
			"%s: out of order commit, expected decoder%d.%d\n",
			dev_name(&cxld->dev), port->id,
			cxl_num_decoders_committed(port));
		return -EBUSY;
	}

	port->commit_end++;
	cxld->flags |= CXL_DECODER_F_ENABLE;

	return 0;
}
|
|
|
|
|
cxl/port: Fix use-after-free, permit out-of-order decoder shutdown
In support of investigating an initialization failure report [1],
cxl_test was updated to register mock memory-devices after the mock
root-port/bus device had been registered. That led to cxl_test crashing
with a use-after-free bug with the following signature:
cxl_port_attach_region: cxl region3: cxl_host_bridge.0:port3 decoder3.0 add: mem0:decoder7.0 @ 0 next: cxl_switch_uport.0 nr_eps: 1 nr_targets: 1
cxl_port_attach_region: cxl region3: cxl_host_bridge.0:port3 decoder3.0 add: mem4:decoder14.0 @ 1 next: cxl_switch_uport.0 nr_eps: 2 nr_targets: 1
cxl_port_setup_targets: cxl region3: cxl_switch_uport.0:port6 target[0] = cxl_switch_dport.0 for mem0:decoder7.0 @ 0
1) cxl_port_setup_targets: cxl region3: cxl_switch_uport.0:port6 target[1] = cxl_switch_dport.4 for mem4:decoder14.0 @ 1
[..]
cxld_unregister: cxl decoder14.0:
cxl_region_decode_reset: cxl_region region3:
mock_decoder_reset: cxl_port port3: decoder3.0 reset
2) mock_decoder_reset: cxl_port port3: decoder3.0: out of order reset, expected decoder3.1
cxl_endpoint_decoder_release: cxl decoder14.0:
[..]
cxld_unregister: cxl decoder7.0:
3) cxl_region_decode_reset: cxl_region region3:
Oops: general protection fault, probably for non-canonical address 0x6b6b6b6b6b6b6bc3: 0000 [#1] PREEMPT SMP PTI
[..]
RIP: 0010:to_cxl_port+0x8/0x60 [cxl_core]
[..]
Call Trace:
<TASK>
cxl_region_decode_reset+0x69/0x190 [cxl_core]
cxl_region_detach+0xe8/0x210 [cxl_core]
cxl_decoder_kill_region+0x27/0x40 [cxl_core]
cxld_unregister+0x5d/0x60 [cxl_core]
At 1) a region has been established with 2 endpoint decoders (7.0 and
14.0). Those endpoints share a common switch-decoder in the topology
(3.0). At teardown, 2), decoder14.0 is the first to be removed and hits
the "out of order reset case" in the switch decoder. The effect though
is that region3 cleanup is aborted leaving it in-tact and
referencing decoder14.0. At 3) the second attempt to teardown region3
trips over the stale decoder14.0 object which has long since been
deleted.
The fix here is to recognize that the CXL specification places no
mandate on in-order shutdown of switch-decoders, the driver enforces
in-order allocation, and hardware enforces in-order commit. So, rather
than fail and leave objects dangling, always remove them.
In support of making cxl_region_decode_reset() always succeed,
cxl_region_invalidate_memregion() failures are turned into warnings.
Crashing the kernel is ok there since system integrity is at risk if
caches cannot be managed around physical address mutation events like
CXL region destruction.
A new device_for_each_child_reverse_from() is added to cleanup
port->commit_end after all dependent decoders have been disabled. In
other words if decoders are allocated 0->1->2 and disabled 1->2->0 then
port->commit_end only decrements from 2 after 2 has been disabled, and
it decrements all the way to zero since 1 was disabled previously.
Link: http://lore.kernel.org/20241004212504.1246-1-gourry@gourry.net [1]
Cc: stable@vger.kernel.org
Fixes: 176baefb2eb5 ("cxl/hdm: Commit decoder state to hardware")
Reviewed-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Davidlohr Bueso <dave@stgolabs.net>
Cc: Dave Jiang <dave.jiang@intel.com>
Cc: Alison Schofield <alison.schofield@intel.com>
Cc: Ira Weiny <ira.weiny@intel.com>
Cc: Zijun Hu <quic_zijuhu@quicinc.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Reviewed-by: Ira Weiny <ira.weiny@intel.com>
Link: https://patch.msgid.link/172964782781.81806.17902885593105284330.stgit@dwillia2-xfh.jf.intel.com
Signed-off-by: Ira Weiny <ira.weiny@intel.com>
2024-10-22 18:43:49 -07:00
|
|
|
static void mock_decoder_reset(struct cxl_decoder *cxld)
|
2022-06-08 22:56:37 -07:00
|
|
|
{
|
|
|
|
struct cxl_port *port = to_cxl_port(cxld->dev.parent);
|
|
|
|
int id = cxld->id;
|
|
|
|
|
|
|
|
if ((cxld->flags & CXL_DECODER_F_ENABLE) == 0)
|
cxl/port: Fix use-after-free, permit out-of-order decoder shutdown
In support of investigating an initialization failure report [1],
cxl_test was updated to register mock memory-devices after the mock
root-port/bus device had been registered. That led to cxl_test crashing
with a use-after-free bug with the following signature:
cxl_port_attach_region: cxl region3: cxl_host_bridge.0:port3 decoder3.0 add: mem0:decoder7.0 @ 0 next: cxl_switch_uport.0 nr_eps: 1 nr_targets: 1
cxl_port_attach_region: cxl region3: cxl_host_bridge.0:port3 decoder3.0 add: mem4:decoder14.0 @ 1 next: cxl_switch_uport.0 nr_eps: 2 nr_targets: 1
cxl_port_setup_targets: cxl region3: cxl_switch_uport.0:port6 target[0] = cxl_switch_dport.0 for mem0:decoder7.0 @ 0
1) cxl_port_setup_targets: cxl region3: cxl_switch_uport.0:port6 target[1] = cxl_switch_dport.4 for mem4:decoder14.0 @ 1
[..]
cxld_unregister: cxl decoder14.0:
cxl_region_decode_reset: cxl_region region3:
mock_decoder_reset: cxl_port port3: decoder3.0 reset
2) mock_decoder_reset: cxl_port port3: decoder3.0: out of order reset, expected decoder3.1
cxl_endpoint_decoder_release: cxl decoder14.0:
[..]
cxld_unregister: cxl decoder7.0:
3) cxl_region_decode_reset: cxl_region region3:
Oops: general protection fault, probably for non-canonical address 0x6b6b6b6b6b6b6bc3: 0000 [#1] PREEMPT SMP PTI
[..]
RIP: 0010:to_cxl_port+0x8/0x60 [cxl_core]
[..]
Call Trace:
<TASK>
cxl_region_decode_reset+0x69/0x190 [cxl_core]
cxl_region_detach+0xe8/0x210 [cxl_core]
cxl_decoder_kill_region+0x27/0x40 [cxl_core]
cxld_unregister+0x5d/0x60 [cxl_core]
At 1) a region has been established with 2 endpoint decoders (7.0 and
14.0). Those endpoints share a common switch-decoder in the topology
(3.0). At teardown, 2), decoder14.0 is the first to be removed and hits
the "out of order reset case" in the switch decoder. The effect though
is that region3 cleanup is aborted leaving it in-tact and
referencing decoder14.0. At 3) the second attempt to teardown region3
trips over the stale decoder14.0 object which has long since been
deleted.
The fix here is to recognize that the CXL specification places no
mandate on in-order shutdown of switch-decoders, the driver enforces
in-order allocation, and hardware enforces in-order commit. So, rather
than fail and leave objects dangling, always remove them.
In support of making cxl_region_decode_reset() always succeed,
cxl_region_invalidate_memregion() failures are turned into warnings.
Crashing the kernel is ok there since system integrity is at risk if
caches cannot be managed around physical address mutation events like
CXL region destruction.
A new device_for_each_child_reverse_from() is added to cleanup
port->commit_end after all dependent decoders have been disabled. In
other words if decoders are allocated 0->1->2 and disabled 1->2->0 then
port->commit_end only decrements from 2 after 2 has been disabled, and
it decrements all the way to zero since 1 was disabled previously.
Link: http://lore.kernel.org/20241004212504.1246-1-gourry@gourry.net [1]
Cc: stable@vger.kernel.org
Fixes: 176baefb2eb5 ("cxl/hdm: Commit decoder state to hardware")
Reviewed-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Davidlohr Bueso <dave@stgolabs.net>
Cc: Dave Jiang <dave.jiang@intel.com>
Cc: Alison Schofield <alison.schofield@intel.com>
Cc: Ira Weiny <ira.weiny@intel.com>
Cc: Zijun Hu <quic_zijuhu@quicinc.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Reviewed-by: Ira Weiny <ira.weiny@intel.com>
Link: https://patch.msgid.link/172964782781.81806.17902885593105284330.stgit@dwillia2-xfh.jf.intel.com
Signed-off-by: Ira Weiny <ira.weiny@intel.com>
2024-10-22 18:43:49 -07:00
|
|
|
return;
|
2022-06-08 22:56:37 -07:00
|
|
|
|
|
|
|
dev_dbg(&port->dev, "%s reset\n", dev_name(&cxld->dev));
|
cxl/port: Fix use-after-free, permit out-of-order decoder shutdown
In support of investigating an initialization failure report [1],
cxl_test was updated to register mock memory-devices after the mock
root-port/bus device had been registered. That led to cxl_test crashing
with a use-after-free bug with the following signature:
cxl_port_attach_region: cxl region3: cxl_host_bridge.0:port3 decoder3.0 add: mem0:decoder7.0 @ 0 next: cxl_switch_uport.0 nr_eps: 1 nr_targets: 1
cxl_port_attach_region: cxl region3: cxl_host_bridge.0:port3 decoder3.0 add: mem4:decoder14.0 @ 1 next: cxl_switch_uport.0 nr_eps: 2 nr_targets: 1
cxl_port_setup_targets: cxl region3: cxl_switch_uport.0:port6 target[0] = cxl_switch_dport.0 for mem0:decoder7.0 @ 0
1) cxl_port_setup_targets: cxl region3: cxl_switch_uport.0:port6 target[1] = cxl_switch_dport.4 for mem4:decoder14.0 @ 1
[..]
cxld_unregister: cxl decoder14.0:
cxl_region_decode_reset: cxl_region region3:
mock_decoder_reset: cxl_port port3: decoder3.0 reset
2) mock_decoder_reset: cxl_port port3: decoder3.0: out of order reset, expected decoder3.1
cxl_endpoint_decoder_release: cxl decoder14.0:
[..]
cxld_unregister: cxl decoder7.0:
3) cxl_region_decode_reset: cxl_region region3:
Oops: general protection fault, probably for non-canonical address 0x6b6b6b6b6b6b6bc3: 0000 [#1] PREEMPT SMP PTI
[..]
RIP: 0010:to_cxl_port+0x8/0x60 [cxl_core]
[..]
Call Trace:
<TASK>
cxl_region_decode_reset+0x69/0x190 [cxl_core]
cxl_region_detach+0xe8/0x210 [cxl_core]
cxl_decoder_kill_region+0x27/0x40 [cxl_core]
cxld_unregister+0x5d/0x60 [cxl_core]
At 1) a region has been established with 2 endpoint decoders (7.0 and
14.0). Those endpoints share a common switch-decoder in the topology
(3.0). At teardown, 2), decoder14.0 is the first to be removed and hits
the "out of order reset case" in the switch decoder. The effect though
is that region3 cleanup is aborted leaving it in-tact and
referencing decoder14.0. At 3) the second attempt to teardown region3
trips over the stale decoder14.0 object which has long since been
deleted.
The fix here is to recognize that the CXL specification places no
mandate on in-order shutdown of switch-decoders, the driver enforces
in-order allocation, and hardware enforces in-order commit. So, rather
than fail and leave objects dangling, always remove them.
In support of making cxl_region_decode_reset() always succeed,
cxl_region_invalidate_memregion() failures are turned into warnings.
Crashing the kernel is ok there since system integrity is at risk if
caches cannot be managed around physical address mutation events like
CXL region destruction.
A new device_for_each_child_reverse_from() is added to cleanup
port->commit_end after all dependent decoders have been disabled. In
other words if decoders are allocated 0->1->2 and disabled 1->2->0 then
port->commit_end only decrements from 2 after 2 has been disabled, and
it decrements all the way to zero since 1 was disabled previously.
Link: http://lore.kernel.org/20241004212504.1246-1-gourry@gourry.net [1]
Cc: stable@vger.kernel.org
Fixes: 176baefb2eb5 ("cxl/hdm: Commit decoder state to hardware")
Reviewed-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Davidlohr Bueso <dave@stgolabs.net>
Cc: Dave Jiang <dave.jiang@intel.com>
Cc: Alison Schofield <alison.schofield@intel.com>
Cc: Ira Weiny <ira.weiny@intel.com>
Cc: Zijun Hu <quic_zijuhu@quicinc.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Reviewed-by: Ira Weiny <ira.weiny@intel.com>
Link: https://patch.msgid.link/172964782781.81806.17902885593105284330.stgit@dwillia2-xfh.jf.intel.com
Signed-off-by: Ira Weiny <ira.weiny@intel.com>
2024-10-22 18:43:49 -07:00
|
|
|
if (port->commit_end == id)
|
|
|
|
cxl_port_commit_reap(cxld);
|
|
|
|
else
|
2022-06-08 22:56:37 -07:00
|
|
|
dev_dbg(&port->dev,
|
|
|
|
"%s: out of order reset, expected decoder%d.%d\n",
|
|
|
|
dev_name(&cxld->dev), port->id, port->commit_end);
|
|
|
|
cxld->flags &= ~CXL_DECODER_F_ENABLE;
|
|
|
|
}
|
|
|
|
|
2023-02-10 01:06:45 -08:00
|
|
|
static void default_mock_decoder(struct cxl_decoder *cxld)
|
|
|
|
{
|
|
|
|
cxld->hpa_range = (struct range){
|
|
|
|
.start = 0,
|
|
|
|
.end = -1,
|
|
|
|
};
|
|
|
|
|
|
|
|
cxld->interleave_ways = 1;
|
|
|
|
cxld->interleave_granularity = 256;
|
2023-06-14 18:30:13 -07:00
|
|
|
cxld->target_type = CXL_DECODER_HOSTONLYMEM;
|
2023-02-10 01:06:45 -08:00
|
|
|
cxld->commit = mock_decoder_commit;
|
|
|
|
cxld->reset = mock_decoder_reset;
|
|
|
|
}
|
|
|
|
|
driver core: Constify API device_find_child() and adapt for various usages
Constify the following API:
struct device *device_find_child(struct device *dev, void *data,
int (*match)(struct device *dev, void *data));
To :
struct device *device_find_child(struct device *dev, const void *data,
device_match_t match);
typedef int (*device_match_t)(struct device *dev, const void *data);
with the following reasons:
- Protect caller's match data @*data which is for comparison and lookup
and the API does not actually need to modify @*data.
- Make the API's parameters (@match)() and @data have the same type as
all of other device finding APIs (bus|class|driver)_find_device().
- All kinds of existing device match functions can be directly taken
as the API's argument, they were exported by driver core.
Constify the API and adapt for various existing usages.
BTW, various subsystem changes are squashed into this commit to meet
'git bisect' requirement, and this commit has the minimal and simplest
changes to complement squashing shortcoming, and that may bring extra
code improvement.
Reviewed-by: Alison Schofield <alison.schofield@intel.com>
Reviewed-by: Takashi Sakamoto <o-takashi@sakamocchi.jp>
Acked-by: Uwe Kleine-König <ukleinek@kernel.org> # for drivers/pwm
Signed-off-by: Zijun Hu <quic_zijuhu@quicinc.com>
Reviewed-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
Reviewed-by: Mathieu Poirier <mathieu.poirier@linaro.org>
Link: https://lore.kernel.org/r/20241224-const_dfc_done-v5-4-6623037414d4@quicinc.com
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
2024-12-24 21:05:03 +08:00
|
|
|
static int first_decoder(struct device *dev, const void *data)
|
2023-02-10 01:06:45 -08:00
|
|
|
{
|
|
|
|
struct cxl_decoder *cxld;
|
|
|
|
|
|
|
|
if (!is_switch_decoder(dev))
|
|
|
|
return 0;
|
|
|
|
cxld = to_cxl_decoder(dev);
|
|
|
|
if (cxld->id == 0)
|
|
|
|
return 1;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Pre-program ("auto-commit") a small set of decoders so cxl_test can
 * exercise region-assembly of a pre-existing configuration.  Endpoint
 * decoder 0 on cxl_mem.0 and cxl_mem.4 (both under host-bridge0) is
 * mapped to a fake 512M RAM region backed by mock_cfmws[0]; the
 * matching switch and host-bridge decoders up the hierarchy are
 * programmed to target it.  Every other decoder gets the disabled
 * default via default_mock_decoder().
 */
static void mock_init_hdm_decoder(struct cxl_decoder *cxld)
{
	struct acpi_cedt_cfmws *window = mock_cfmws[0];
	struct platform_device *pdev = NULL;
	struct cxl_endpoint_decoder *cxled;
	struct cxl_switch_decoder *cxlsd;
	struct cxl_port *port, *iter;
	const int size = SZ_512M;
	struct cxl_memdev *cxlmd;
	struct cxl_dport *dport;
	struct device *dev;
	bool hb0 = false;
	u64 base;
	int i;

	if (is_endpoint_decoder(&cxld->dev)) {
		cxled = to_cxl_endpoint_decoder(&cxld->dev);
		cxlmd = cxled_to_memdev(cxled);
		WARN_ON(!dev_is_platform(cxlmd->dev.parent));
		pdev = to_platform_device(cxlmd->dev.parent);

		/* check if the endpoint is attached to host-bridge0 */
		port = cxled_to_port(cxled);
		do {
			if (port->uport_dev == &cxl_host_bridge[0]->dev) {
				hb0 = true;
				break;
			}
			/* walk up until the port parent is no longer a CXL port */
			if (is_cxl_port(port->dev.parent))
				port = to_cxl_port(port->dev.parent);
			else
				port = NULL;
		} while (port);
		/* restore @port to the endpoint's own port for use below */
		port = cxled_to_port(cxled);
	}

	/*
	 * The first decoder on the first 2 devices on the first switch
	 * attached to host-bridge0 mock a fake / static RAM region. All
	 * other decoders are default disabled. Given the round robin
	 * assignment those devices are named cxl_mem.0, and cxl_mem.4.
	 *
	 * See 'cxl list -BMPu -m cxl_mem.0,cxl_mem.4'
	 */
	if (!hb0 || pdev->id % 4 || pdev->id > 4 || cxld->id > 0) {
		default_mock_decoder(cxld);
		return;
	}

	/* Endpoint decoder: map the first 512M of the CFMWS window */
	base = window->base_hpa;
	cxld->hpa_range = (struct range) {
		.start = base,
		.end = base + size - 1,
	};

	/* x2 interleave: cxl_mem.0 and cxl_mem.4 share the region */
	cxld->interleave_ways = 2;
	eig_to_granularity(window->granularity, &cxld->interleave_granularity);
	cxld->target_type = CXL_DECODER_HOSTONLYMEM;
	cxld->flags = CXL_DECODER_F_ENABLE;
	cxled->state = CXL_DECODER_STATE_AUTO;
	port->commit_end = cxld->id;
	devm_cxl_dpa_reserve(cxled, 0, size / cxld->interleave_ways, 0);
	cxld->commit = mock_decoder_commit;
	cxld->reset = mock_decoder_reset;

	/*
	 * Now that endpoint decoder is set up, walk up the hierarchy
	 * and setup the switch and root port decoders targeting @cxlmd.
	 */
	iter = port;
	for (i = 0; i < 2; i++) {
		dport = iter->parent_dport;
		iter = dport->port;
		dev = device_find_child(&iter->dev, NULL, first_decoder);
		/*
		 * Ancestor ports are guaranteed to be enumerated before
		 * @port, and all ports have at least one decoder.
		 */
		if (WARN_ON(!dev))
			continue;
		cxlsd = to_cxl_switch_decoder(dev);
		if (i == 0) {
			/* put cxl_mem.4 second in the decode order */
			if (pdev->id == 4)
				cxlsd->target[1] = dport;
			else
				cxlsd->target[0] = dport;
		} else
			cxlsd->target[0] = dport;
		cxld = &cxlsd->cxld;
		cxld->target_type = CXL_DECODER_HOSTONLYMEM;
		cxld->flags = CXL_DECODER_F_ENABLE;
		iter->commit_end = 0;
		/*
		 * Switch targets 2 endpoints, while host bridge targets
		 * one root port
		 */
		if (i == 0)
			cxld->interleave_ways = 2;
		else
			cxld->interleave_ways = 1;
		cxld->interleave_granularity = 4096;
		cxld->hpa_range = (struct range) {
			.start = base,
			.end = base + size - 1,
		};
		/* drop the reference taken by device_find_child() */
		put_device(dev);
	}
}
|
|
|
|
|
2023-02-14 11:41:24 -08:00
|
|
|
/*
 * Mock replacement for devm_cxl_enumerate_decoders(): instantiate
 * NR_CXL_PORT_DECODERS decoders on @cxlhdm->port.  Endpoint ports get
 * endpoint decoders (no targets); ports directly under the root get
 * NR_CXL_ROOT_PORTS-way switch decoders; all other switch ports get
 * NR_CXL_SWITCH_PORTS-way switch decoders.  @info is unused by the
 * mock.  Returns 0 on success or a negative errno.
 */
static int mock_cxl_enumerate_decoders(struct cxl_hdm *cxlhdm,
				       struct cxl_endpoint_dvsec_info *info)
{
	struct cxl_port *port = cxlhdm->port;
	struct cxl_port *parent_port = to_cxl_port(port->dev.parent);
	int target_count, i;

	if (is_cxl_endpoint(port))
		target_count = 0;
	else if (is_cxl_root(parent_port))
		target_count = NR_CXL_ROOT_PORTS;
	else
		target_count = NR_CXL_SWITCH_PORTS;

	for (i = 0; i < NR_CXL_PORT_DECODERS; i++) {
		int target_map[CXL_DECODER_MAX_INTERLEAVE] = { 0 };
		struct target_map_ctx ctx = {
			.target_map = target_map,
			.target_count = target_count,
		};
		struct cxl_decoder *cxld;
		int rc;

		/* target_count != 0 implies a switch (or root) decoder */
		if (target_count) {
			struct cxl_switch_decoder *cxlsd;

			cxlsd = cxl_switch_decoder_alloc(port, target_count);
			if (IS_ERR(cxlsd)) {
				dev_warn(&port->dev,
					 "Failed to allocate the decoder\n");
				return PTR_ERR(cxlsd);
			}
			cxld = &cxlsd->cxld;
		} else {
			struct cxl_endpoint_decoder *cxled;

			cxled = cxl_endpoint_decoder_alloc(port);

			if (IS_ERR(cxled)) {
				dev_warn(&port->dev,
					 "Failed to allocate the decoder\n");
				return PTR_ERR(cxled);
			}
			cxld = &cxled->cxld;
		}

		/* optionally pre-program this decoder (fake RAM region) */
		mock_init_hdm_decoder(cxld);

		if (target_count) {
			rc = device_for_each_child(port->uport_dev, &ctx,
						   map_targets);
			if (rc) {
				/* decoder not yet added: drop our reference */
				put_device(&cxld->dev);
				return rc;
			}
		}

		rc = cxl_decoder_add_locked(cxld, target_map);
		if (rc) {
			put_device(&cxld->dev);
			dev_err(&port->dev, "Failed to add decoder\n");
			return rc;
		}

		/* unregister the decoder automatically with @port */
		rc = cxl_decoder_autoremove(&port->dev, cxld);
		if (rc)
			return rc;
		dev_dbg(&cxld->dev, "Added to port %s\n", dev_name(&port->dev));
	}

	return 0;
}
|
2022-01-31 18:10:04 -08:00
|
|
|
|
2022-01-23 16:32:07 -08:00
|
|
|
/*
 * Mock replacement for devm_cxl_port_enumerate_dports(): register the
 * mock platform devices that act as downstream ports of @port.  The
 * candidate array is selected by port depth (1 == host bridge, 2 ==
 * switch) and by whether the relevant bridge is the multi-port or
 * single-port variant; entries whose parent is not @port's uport are
 * skipped.  Returns 0 on success or a negative errno.
 */
static int mock_cxl_port_enumerate_dports(struct cxl_port *port)
{
	struct platform_device **array;
	int i, array_size;

	if (port->depth == 1) {
		if (is_multi_bridge(port->uport_dev)) {
			array_size = ARRAY_SIZE(cxl_root_port);
			array = cxl_root_port;
		} else if (is_single_bridge(port->uport_dev)) {
			array_size = ARRAY_SIZE(cxl_root_single);
			array = cxl_root_single;
		} else {
			dev_dbg(&port->dev, "%s: unknown bridge type\n",
				dev_name(port->uport_dev));
			return -ENXIO;
		}
	} else if (port->depth == 2) {
		struct cxl_port *parent = to_cxl_port(port->dev.parent);

		/* bridge type is determined by the parent (host bridge) port */
		if (is_multi_bridge(parent->uport_dev)) {
			array_size = ARRAY_SIZE(cxl_switch_dport);
			array = cxl_switch_dport;
		} else if (is_single_bridge(parent->uport_dev)) {
			array_size = ARRAY_SIZE(cxl_swd_single);
			array = cxl_swd_single;
		} else {
			dev_dbg(&port->dev, "%s: unknown bridge type\n",
				dev_name(port->uport_dev));
			return -ENXIO;
		}
	} else {
		/* the mock topology only has ports at depth 1 and 2 */
		dev_WARN_ONCE(&port->dev, 1, "unexpected depth %d\n",
			      port->depth);
		return -ENXIO;
	}

	for (i = 0; i < array_size; i++) {
		struct platform_device *pdev = array[i];
		struct cxl_dport *dport;

		/* only pick up dports parented by this port's uport */
		if (pdev->dev.parent != port->uport_dev) {
			dev_dbg(&port->dev, "%s: mismatch parent %s\n",
				dev_name(port->uport_dev),
				dev_name(pdev->dev.parent));
			continue;
		}

		dport = devm_cxl_add_dport(port, &pdev->dev, pdev->id,
					   CXL_RESOURCE_NONE);

		if (IS_ERR(dport))
			return PTR_ERR(dport);
	}

	return 0;
}
|
|
|
|
|
2024-02-06 12:03:40 -07:00
|
|
|
/*
|
|
|
|
* Faking the cxl_dpa_perf for the memdev when appropriate.
|
|
|
|
*/
|
|
|
|
static void dpa_perf_setup(struct cxl_port *endpoint, struct range *range,
|
|
|
|
struct cxl_dpa_perf *dpa_perf)
|
|
|
|
{
|
|
|
|
dpa_perf->qos_class = FAKE_QTG_ID;
|
|
|
|
dpa_perf->dpa_range = *range;
|
2024-04-03 08:47:15 -07:00
|
|
|
for (int i = 0; i < ACCESS_COORDINATE_MAX; i++) {
|
|
|
|
dpa_perf->coord[i].read_latency = 500;
|
|
|
|
dpa_perf->coord[i].write_latency = 500;
|
|
|
|
dpa_perf->coord[i].read_bandwidth = 1000;
|
|
|
|
dpa_perf->coord[i].write_bandwidth = 1000;
|
|
|
|
}
|
2024-02-06 12:03:40 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Mock replacement for CDAT parsing on an endpoint port: instead of
 * reading a CDAT table, fabricate cxl_dpa_perf data for each DPA
 * partition of the memdev behind @port, then refresh the memdev's
 * sysfs perf attributes.  Silently returns if no CXL root is found.
 */
static void mock_cxl_endpoint_parse_cdat(struct cxl_port *port)
{
	/* reference dropped automatically at end of scope via __free() */
	struct cxl_root *cxl_root __free(put_cxl_root) =
		find_cxl_root(port);
	struct cxl_memdev *cxlmd = to_cxl_memdev(port->uport_dev);
	struct cxl_dev_state *cxlds = cxlmd->cxlds;
	struct access_coordinate ep_c[ACCESS_COORDINATE_MAX];

	if (!cxl_root)
		return;

	/* fabricate perf data for every DPA partition */
	for (int i = 0; i < cxlds->nr_partitions; i++) {
		struct resource *res = &cxlds->part[i].res;
		struct cxl_dpa_perf *perf = &cxlds->part[i].perf;
		struct range range = {
			.start = res->start,
			.end = res->end,
		};

		dpa_perf_setup(port, &range, perf);
	}

	cxl_memdev_update_perf(cxlmd);

	/*
	 * This function is here to only test the topology iterator. It serves
	 * no other purpose.
	 */
	cxl_endpoint_get_perf_coordinates(port, ep_c);
}
|
|
|
|
|
2021-09-14 12:14:22 -07:00
|
|
|
/*
 * Table of mock operations registered with the cxl_test mock
 * infrastructure; these override the corresponding ACPI and cxl_core
 * entry points for devices that belong to the mock topology.
 */
static struct cxl_mock_ops cxl_mock_ops = {
	.is_mock_adev = is_mock_adev,
	.is_mock_bridge = is_mock_bridge,
	.is_mock_bus = is_mock_bus,
	.is_mock_port = is_mock_port,
	.is_mock_dev = is_mock_dev,
	.acpi_table_parse_cedt = mock_acpi_table_parse_cedt,
	.acpi_evaluate_integer = mock_acpi_evaluate_integer,
	.acpi_pci_find_root = mock_acpi_pci_find_root,
	.devm_cxl_port_enumerate_dports = mock_cxl_port_enumerate_dports,
	.devm_cxl_setup_hdm = mock_cxl_setup_hdm,
	.devm_cxl_add_passthrough_decoder = mock_cxl_add_passthrough_decoder,
	.devm_cxl_enumerate_decoders = mock_cxl_enumerate_decoders,
	.cxl_endpoint_parse_cdat = mock_cxl_endpoint_parse_cdat,
	.list = LIST_HEAD_INIT(cxl_mock_ops.list),
};
|
|
|
|
|
|
|
|
static void mock_companion(struct acpi_device *adev, struct device *dev)
|
|
|
|
{
|
|
|
|
device_initialize(&adev->dev);
|
|
|
|
fwnode_init(&adev->fwnode, NULL);
|
|
|
|
dev->fwnode = &adev->fwnode;
|
|
|
|
adev->fwnode.dev = dev;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Fallback for configurations whose size headers do not define SZ_64G. */
#ifndef SZ_64G
#define SZ_64G (SZ_32G * 2)
#endif
|
|
|
|
|
2024-10-22 18:44:06 -07:00
|
|
|
static __init int cxl_rch_topo_init(void)
|
2022-12-01 13:34:21 -08:00
|
|
|
{
|
|
|
|
int rc, i;
|
|
|
|
|
|
|
|
for (i = 0; i < ARRAY_SIZE(cxl_rch); i++) {
|
|
|
|
int idx = NR_CXL_HOST_BRIDGES + NR_CXL_SINGLE_HOST + i;
|
|
|
|
struct acpi_device *adev = &host_bridge[idx];
|
|
|
|
struct platform_device *pdev;
|
|
|
|
|
|
|
|
pdev = platform_device_alloc("cxl_host_bridge", idx);
|
|
|
|
if (!pdev)
|
|
|
|
goto err_bridge;
|
|
|
|
|
|
|
|
mock_companion(adev, &pdev->dev);
|
|
|
|
rc = platform_device_add(pdev);
|
|
|
|
if (rc) {
|
|
|
|
platform_device_put(pdev);
|
|
|
|
goto err_bridge;
|
|
|
|
}
|
|
|
|
|
|
|
|
cxl_rch[i] = pdev;
|
|
|
|
mock_pci_bus[idx].bridge = &pdev->dev;
|
|
|
|
rc = sysfs_create_link(&pdev->dev.kobj, &pdev->dev.kobj,
|
|
|
|
"firmware_node");
|
|
|
|
if (rc)
|
|
|
|
goto err_bridge;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
err_bridge:
|
|
|
|
for (i = ARRAY_SIZE(cxl_rch) - 1; i >= 0; i--) {
|
|
|
|
struct platform_device *pdev = cxl_rch[i];
|
|
|
|
|
|
|
|
if (!pdev)
|
|
|
|
continue;
|
|
|
|
sysfs_remove_link(&pdev->dev.kobj, "firmware_node");
|
|
|
|
platform_device_unregister(cxl_rch[i]);
|
|
|
|
}
|
|
|
|
|
|
|
|
return rc;
|
|
|
|
}
|
|
|
|
|
2024-10-22 18:44:06 -07:00
|
|
|
static void cxl_rch_topo_exit(void)
|
2022-12-01 13:34:21 -08:00
|
|
|
{
|
|
|
|
int i;
|
|
|
|
|
|
|
|
for (i = ARRAY_SIZE(cxl_rch) - 1; i >= 0; i--) {
|
|
|
|
struct platform_device *pdev = cxl_rch[i];
|
|
|
|
|
|
|
|
if (!pdev)
|
|
|
|
continue;
|
|
|
|
sysfs_remove_link(&pdev->dev.kobj, "firmware_node");
|
|
|
|
platform_device_unregister(cxl_rch[i]);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2024-10-22 18:44:06 -07:00
|
|
|
static __init int cxl_single_topo_init(void)
|
2022-11-03 17:30:48 -07:00
|
|
|
{
|
|
|
|
int i, rc;
|
|
|
|
|
|
|
|
for (i = 0; i < ARRAY_SIZE(cxl_hb_single); i++) {
|
|
|
|
struct acpi_device *adev =
|
|
|
|
&host_bridge[NR_CXL_HOST_BRIDGES + i];
|
|
|
|
struct platform_device *pdev;
|
|
|
|
|
|
|
|
pdev = platform_device_alloc("cxl_host_bridge",
|
|
|
|
NR_CXL_HOST_BRIDGES + i);
|
|
|
|
if (!pdev)
|
|
|
|
goto err_bridge;
|
|
|
|
|
|
|
|
mock_companion(adev, &pdev->dev);
|
|
|
|
rc = platform_device_add(pdev);
|
|
|
|
if (rc) {
|
|
|
|
platform_device_put(pdev);
|
|
|
|
goto err_bridge;
|
|
|
|
}
|
|
|
|
|
|
|
|
cxl_hb_single[i] = pdev;
|
2022-11-14 10:29:52 -08:00
|
|
|
mock_pci_bus[i + NR_CXL_HOST_BRIDGES].bridge = &pdev->dev;
|
2022-11-03 17:30:48 -07:00
|
|
|
rc = sysfs_create_link(&pdev->dev.kobj, &pdev->dev.kobj,
|
|
|
|
"physical_node");
|
|
|
|
if (rc)
|
|
|
|
goto err_bridge;
|
|
|
|
}
|
|
|
|
|
|
|
|
for (i = 0; i < ARRAY_SIZE(cxl_root_single); i++) {
|
|
|
|
struct platform_device *bridge =
|
|
|
|
cxl_hb_single[i % ARRAY_SIZE(cxl_hb_single)];
|
|
|
|
struct platform_device *pdev;
|
|
|
|
|
|
|
|
pdev = platform_device_alloc("cxl_root_port",
|
|
|
|
NR_MULTI_ROOT + i);
|
|
|
|
if (!pdev)
|
|
|
|
goto err_port;
|
|
|
|
pdev->dev.parent = &bridge->dev;
|
|
|
|
|
|
|
|
rc = platform_device_add(pdev);
|
|
|
|
if (rc) {
|
|
|
|
platform_device_put(pdev);
|
|
|
|
goto err_port;
|
|
|
|
}
|
|
|
|
cxl_root_single[i] = pdev;
|
|
|
|
}
|
|
|
|
|
|
|
|
for (i = 0; i < ARRAY_SIZE(cxl_swu_single); i++) {
|
|
|
|
struct platform_device *root_port = cxl_root_single[i];
|
|
|
|
struct platform_device *pdev;
|
|
|
|
|
|
|
|
pdev = platform_device_alloc("cxl_switch_uport",
|
|
|
|
NR_MULTI_ROOT + i);
|
|
|
|
if (!pdev)
|
|
|
|
goto err_uport;
|
|
|
|
pdev->dev.parent = &root_port->dev;
|
|
|
|
|
|
|
|
rc = platform_device_add(pdev);
|
|
|
|
if (rc) {
|
|
|
|
platform_device_put(pdev);
|
|
|
|
goto err_uport;
|
|
|
|
}
|
|
|
|
cxl_swu_single[i] = pdev;
|
|
|
|
}
|
|
|
|
|
|
|
|
for (i = 0; i < ARRAY_SIZE(cxl_swd_single); i++) {
|
|
|
|
struct platform_device *uport =
|
|
|
|
cxl_swu_single[i % ARRAY_SIZE(cxl_swu_single)];
|
|
|
|
struct platform_device *pdev;
|
|
|
|
|
|
|
|
pdev = platform_device_alloc("cxl_switch_dport",
|
|
|
|
i + NR_MEM_MULTI);
|
|
|
|
if (!pdev)
|
|
|
|
goto err_dport;
|
|
|
|
pdev->dev.parent = &uport->dev;
|
|
|
|
|
|
|
|
rc = platform_device_add(pdev);
|
|
|
|
if (rc) {
|
|
|
|
platform_device_put(pdev);
|
|
|
|
goto err_dport;
|
|
|
|
}
|
|
|
|
cxl_swd_single[i] = pdev;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
err_dport:
|
|
|
|
for (i = ARRAY_SIZE(cxl_swd_single) - 1; i >= 0; i--)
|
|
|
|
platform_device_unregister(cxl_swd_single[i]);
|
|
|
|
err_uport:
|
|
|
|
for (i = ARRAY_SIZE(cxl_swu_single) - 1; i >= 0; i--)
|
|
|
|
platform_device_unregister(cxl_swu_single[i]);
|
|
|
|
err_port:
|
|
|
|
for (i = ARRAY_SIZE(cxl_root_single) - 1; i >= 0; i--)
|
|
|
|
platform_device_unregister(cxl_root_single[i]);
|
|
|
|
err_bridge:
|
|
|
|
for (i = ARRAY_SIZE(cxl_hb_single) - 1; i >= 0; i--) {
|
|
|
|
struct platform_device *pdev = cxl_hb_single[i];
|
|
|
|
|
|
|
|
if (!pdev)
|
|
|
|
continue;
|
|
|
|
sysfs_remove_link(&pdev->dev.kobj, "physical_node");
|
|
|
|
platform_device_unregister(cxl_hb_single[i]);
|
|
|
|
}
|
|
|
|
|
|
|
|
return rc;
|
|
|
|
}
|
|
|
|
|
2024-10-22 18:44:06 -07:00
|
|
|
static void cxl_single_topo_exit(void)
|
2022-11-03 17:30:48 -07:00
|
|
|
{
|
|
|
|
int i;
|
|
|
|
|
|
|
|
for (i = ARRAY_SIZE(cxl_swd_single) - 1; i >= 0; i--)
|
|
|
|
platform_device_unregister(cxl_swd_single[i]);
|
|
|
|
for (i = ARRAY_SIZE(cxl_swu_single) - 1; i >= 0; i--)
|
|
|
|
platform_device_unregister(cxl_swu_single[i]);
|
|
|
|
for (i = ARRAY_SIZE(cxl_root_single) - 1; i >= 0; i--)
|
|
|
|
platform_device_unregister(cxl_root_single[i]);
|
|
|
|
for (i = ARRAY_SIZE(cxl_hb_single) - 1; i >= 0; i--) {
|
|
|
|
struct platform_device *pdev = cxl_hb_single[i];
|
|
|
|
|
|
|
|
if (!pdev)
|
|
|
|
continue;
|
|
|
|
sysfs_remove_link(&pdev->dev.kobj, "physical_node");
|
|
|
|
platform_device_unregister(cxl_hb_single[i]);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2024-10-22 18:44:06 -07:00
|
|
|
static void cxl_mem_exit(void)
|
|
|
|
{
|
|
|
|
int i;
|
|
|
|
|
|
|
|
for (i = ARRAY_SIZE(cxl_rcd) - 1; i >= 0; i--)
|
|
|
|
platform_device_unregister(cxl_rcd[i]);
|
|
|
|
for (i = ARRAY_SIZE(cxl_mem_single) - 1; i >= 0; i--)
|
|
|
|
platform_device_unregister(cxl_mem_single[i]);
|
|
|
|
for (i = ARRAY_SIZE(cxl_mem) - 1; i >= 0; i--)
|
|
|
|
platform_device_unregister(cxl_mem[i]);
|
|
|
|
}
|
|
|
|
|
|
|
|
static int cxl_mem_init(void)
|
|
|
|
{
|
|
|
|
int i, rc;
|
|
|
|
|
|
|
|
for (i = 0; i < ARRAY_SIZE(cxl_mem); i++) {
|
|
|
|
struct platform_device *dport = cxl_switch_dport[i];
|
|
|
|
struct platform_device *pdev;
|
|
|
|
|
|
|
|
pdev = platform_device_alloc("cxl_mem", i);
|
|
|
|
if (!pdev)
|
|
|
|
goto err_mem;
|
|
|
|
pdev->dev.parent = &dport->dev;
|
|
|
|
set_dev_node(&pdev->dev, i % 2);
|
|
|
|
|
|
|
|
rc = platform_device_add(pdev);
|
|
|
|
if (rc) {
|
|
|
|
platform_device_put(pdev);
|
|
|
|
goto err_mem;
|
|
|
|
}
|
|
|
|
cxl_mem[i] = pdev;
|
|
|
|
}
|
|
|
|
|
|
|
|
for (i = 0; i < ARRAY_SIZE(cxl_mem_single); i++) {
|
|
|
|
struct platform_device *dport = cxl_swd_single[i];
|
|
|
|
struct platform_device *pdev;
|
|
|
|
|
|
|
|
pdev = platform_device_alloc("cxl_mem", NR_MEM_MULTI + i);
|
|
|
|
if (!pdev)
|
|
|
|
goto err_single;
|
|
|
|
pdev->dev.parent = &dport->dev;
|
|
|
|
set_dev_node(&pdev->dev, i % 2);
|
|
|
|
|
|
|
|
rc = platform_device_add(pdev);
|
|
|
|
if (rc) {
|
|
|
|
platform_device_put(pdev);
|
|
|
|
goto err_single;
|
|
|
|
}
|
|
|
|
cxl_mem_single[i] = pdev;
|
|
|
|
}
|
|
|
|
|
|
|
|
for (i = 0; i < ARRAY_SIZE(cxl_rcd); i++) {
|
|
|
|
int idx = NR_MEM_MULTI + NR_MEM_SINGLE + i;
|
|
|
|
struct platform_device *rch = cxl_rch[i];
|
|
|
|
struct platform_device *pdev;
|
|
|
|
|
|
|
|
pdev = platform_device_alloc("cxl_rcd", idx);
|
|
|
|
if (!pdev)
|
|
|
|
goto err_rcd;
|
|
|
|
pdev->dev.parent = &rch->dev;
|
|
|
|
set_dev_node(&pdev->dev, i % 2);
|
|
|
|
|
|
|
|
rc = platform_device_add(pdev);
|
|
|
|
if (rc) {
|
|
|
|
platform_device_put(pdev);
|
|
|
|
goto err_rcd;
|
|
|
|
}
|
|
|
|
cxl_rcd[i] = pdev;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
err_rcd:
|
|
|
|
for (i = ARRAY_SIZE(cxl_rcd) - 1; i >= 0; i--)
|
|
|
|
platform_device_unregister(cxl_rcd[i]);
|
|
|
|
err_single:
|
|
|
|
for (i = ARRAY_SIZE(cxl_mem_single) - 1; i >= 0; i--)
|
|
|
|
platform_device_unregister(cxl_mem_single[i]);
|
|
|
|
err_mem:
|
|
|
|
for (i = ARRAY_SIZE(cxl_mem) - 1; i >= 0; i--)
|
|
|
|
platform_device_unregister(cxl_mem[i]);
|
|
|
|
return rc;
|
|
|
|
}
|
|
|
|
|
2021-09-14 12:14:22 -07:00
|
|
|
static __init int cxl_test_init(void)
|
|
|
|
{
|
|
|
|
int rc, i;
|
2025-05-27 16:34:51 +01:00
|
|
|
struct range mappable;
|
2021-09-14 12:14:22 -07:00
|
|
|
|
2022-12-13 08:44:24 -08:00
|
|
|
cxl_acpi_test();
|
|
|
|
cxl_core_test();
|
|
|
|
cxl_mem_test();
|
|
|
|
cxl_pmem_test();
|
|
|
|
cxl_port_test();
|
|
|
|
|
2021-09-14 12:14:22 -07:00
|
|
|
register_cxl_mock_ops(&cxl_mock_ops);
|
|
|
|
|
|
|
|
cxl_mock_pool = gen_pool_create(ilog2(SZ_2M), NUMA_NO_NODE);
|
|
|
|
if (!cxl_mock_pool) {
|
|
|
|
rc = -ENOMEM;
|
|
|
|
goto err_gen_pool_create;
|
|
|
|
}
|
2025-05-27 16:34:51 +01:00
|
|
|
mappable = mhp_get_pluggable_range(true);
|
2021-09-14 12:14:22 -07:00
|
|
|
|
2025-05-27 16:34:51 +01:00
|
|
|
rc = gen_pool_add(cxl_mock_pool,
|
|
|
|
min(iomem_resource.end + 1 - SZ_64G,
|
|
|
|
mappable.end + 1 - SZ_64G),
|
2022-06-22 18:02:37 -07:00
|
|
|
SZ_64G, NUMA_NO_NODE);
|
2021-09-14 12:14:22 -07:00
|
|
|
if (rc)
|
|
|
|
goto err_gen_pool_add;
|
|
|
|
|
2022-11-30 14:47:26 -08:00
|
|
|
if (interleave_arithmetic == 1) {
|
|
|
|
cfmws_start = CFMWS_XOR_ARRAY_START;
|
|
|
|
cfmws_end = CFMWS_XOR_ARRAY_END;
|
|
|
|
} else {
|
|
|
|
cfmws_start = CFMWS_MOD_ARRAY_START;
|
|
|
|
cfmws_end = CFMWS_MOD_ARRAY_END;
|
|
|
|
}
|
|
|
|
|
2021-09-14 12:14:22 -07:00
|
|
|
rc = populate_cedt();
|
|
|
|
if (rc)
|
|
|
|
goto err_populate;
|
|
|
|
|
|
|
|
for (i = 0; i < ARRAY_SIZE(cxl_host_bridge); i++) {
|
|
|
|
struct acpi_device *adev = &host_bridge[i];
|
|
|
|
struct platform_device *pdev;
|
|
|
|
|
|
|
|
pdev = platform_device_alloc("cxl_host_bridge", i);
|
|
|
|
if (!pdev)
|
|
|
|
goto err_bridge;
|
|
|
|
|
|
|
|
mock_companion(adev, &pdev->dev);
|
|
|
|
rc = platform_device_add(pdev);
|
|
|
|
if (rc) {
|
|
|
|
platform_device_put(pdev);
|
|
|
|
goto err_bridge;
|
|
|
|
}
|
2022-01-23 16:32:12 -08:00
|
|
|
|
2021-09-14 12:14:22 -07:00
|
|
|
cxl_host_bridge[i] = pdev;
|
2022-11-14 10:29:52 -08:00
|
|
|
mock_pci_bus[i].bridge = &pdev->dev;
|
2022-01-23 16:32:12 -08:00
|
|
|
rc = sysfs_create_link(&pdev->dev.kobj, &pdev->dev.kobj,
|
|
|
|
"physical_node");
|
|
|
|
if (rc)
|
|
|
|
goto err_bridge;
|
2021-09-14 12:14:22 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
for (i = 0; i < ARRAY_SIZE(cxl_root_port); i++) {
|
|
|
|
struct platform_device *bridge =
|
2022-01-23 16:31:56 -08:00
|
|
|
cxl_host_bridge[i % ARRAY_SIZE(cxl_host_bridge)];
|
2021-09-14 12:14:22 -07:00
|
|
|
struct platform_device *pdev;
|
|
|
|
|
|
|
|
pdev = platform_device_alloc("cxl_root_port", i);
|
|
|
|
if (!pdev)
|
|
|
|
goto err_port;
|
|
|
|
pdev->dev.parent = &bridge->dev;
|
|
|
|
|
|
|
|
rc = platform_device_add(pdev);
|
|
|
|
if (rc) {
|
|
|
|
platform_device_put(pdev);
|
|
|
|
goto err_port;
|
|
|
|
}
|
|
|
|
cxl_root_port[i] = pdev;
|
|
|
|
}
|
|
|
|
|
2022-01-23 16:32:01 -08:00
|
|
|
BUILD_BUG_ON(ARRAY_SIZE(cxl_switch_uport) != ARRAY_SIZE(cxl_root_port));
|
|
|
|
for (i = 0; i < ARRAY_SIZE(cxl_switch_uport); i++) {
|
|
|
|
struct platform_device *root_port = cxl_root_port[i];
|
|
|
|
struct platform_device *pdev;
|
|
|
|
|
|
|
|
pdev = platform_device_alloc("cxl_switch_uport", i);
|
|
|
|
if (!pdev)
|
2022-11-03 17:30:42 -07:00
|
|
|
goto err_uport;
|
2022-01-23 16:32:01 -08:00
|
|
|
pdev->dev.parent = &root_port->dev;
|
|
|
|
|
|
|
|
rc = platform_device_add(pdev);
|
|
|
|
if (rc) {
|
|
|
|
platform_device_put(pdev);
|
|
|
|
goto err_uport;
|
|
|
|
}
|
|
|
|
cxl_switch_uport[i] = pdev;
|
|
|
|
}
|
|
|
|
|
|
|
|
for (i = 0; i < ARRAY_SIZE(cxl_switch_dport); i++) {
|
|
|
|
struct platform_device *uport =
|
|
|
|
cxl_switch_uport[i % ARRAY_SIZE(cxl_switch_uport)];
|
|
|
|
struct platform_device *pdev;
|
|
|
|
|
|
|
|
pdev = platform_device_alloc("cxl_switch_dport", i);
|
|
|
|
if (!pdev)
|
2022-11-03 17:30:42 -07:00
|
|
|
goto err_dport;
|
2022-01-23 16:32:01 -08:00
|
|
|
pdev->dev.parent = &uport->dev;
|
|
|
|
|
|
|
|
rc = platform_device_add(pdev);
|
|
|
|
if (rc) {
|
|
|
|
platform_device_put(pdev);
|
|
|
|
goto err_dport;
|
|
|
|
}
|
|
|
|
cxl_switch_dport[i] = pdev;
|
|
|
|
}
|
|
|
|
|
2024-10-22 18:44:06 -07:00
|
|
|
rc = cxl_single_topo_init();
|
2022-11-03 17:30:48 -07:00
|
|
|
if (rc)
|
2024-10-22 18:44:06 -07:00
|
|
|
goto err_dport;
|
2022-11-03 17:30:48 -07:00
|
|
|
|
2024-10-22 18:44:06 -07:00
|
|
|
rc = cxl_rch_topo_init();
|
2022-12-01 13:34:21 -08:00
|
|
|
if (rc)
|
|
|
|
goto err_single;
|
|
|
|
|
2021-09-14 12:14:22 -07:00
|
|
|
cxl_acpi = platform_device_alloc("cxl_acpi", 0);
|
|
|
|
if (!cxl_acpi)
|
2022-12-01 13:34:21 -08:00
|
|
|
goto err_rch;
|
2021-09-14 12:14:22 -07:00
|
|
|
|
|
|
|
mock_companion(&acpi0017_mock, &cxl_acpi->dev);
|
|
|
|
acpi0017_mock.dev.bus = &platform_bus_type;
|
|
|
|
|
|
|
|
rc = platform_device_add(cxl_acpi);
|
|
|
|
if (rc)
|
2024-10-22 18:44:06 -07:00
|
|
|
goto err_root;
|
|
|
|
|
|
|
|
rc = cxl_mem_init();
|
|
|
|
if (rc)
|
|
|
|
goto err_root;
|
2021-09-14 12:14:22 -07:00
|
|
|
|
|
|
|
return 0;
|
|
|
|
|
2024-10-22 18:44:06 -07:00
|
|
|
err_root:
|
2021-09-14 12:14:22 -07:00
|
|
|
platform_device_put(cxl_acpi);
|
2022-12-01 13:34:21 -08:00
|
|
|
err_rch:
|
2024-10-22 18:44:06 -07:00
|
|
|
cxl_rch_topo_exit();
|
2022-11-03 17:30:48 -07:00
|
|
|
err_single:
|
2024-10-22 18:44:06 -07:00
|
|
|
cxl_single_topo_exit();
|
2022-01-23 16:32:01 -08:00
|
|
|
err_dport:
|
|
|
|
for (i = ARRAY_SIZE(cxl_switch_dport) - 1; i >= 0; i--)
|
|
|
|
platform_device_unregister(cxl_switch_dport[i]);
|
|
|
|
err_uport:
|
|
|
|
for (i = ARRAY_SIZE(cxl_switch_uport) - 1; i >= 0; i--)
|
|
|
|
platform_device_unregister(cxl_switch_uport[i]);
|
2021-09-14 12:14:22 -07:00
|
|
|
err_port:
|
|
|
|
for (i = ARRAY_SIZE(cxl_root_port) - 1; i >= 0; i--)
|
|
|
|
platform_device_unregister(cxl_root_port[i]);
|
|
|
|
err_bridge:
|
2022-01-23 16:32:12 -08:00
|
|
|
for (i = ARRAY_SIZE(cxl_host_bridge) - 1; i >= 0; i--) {
|
|
|
|
struct platform_device *pdev = cxl_host_bridge[i];
|
|
|
|
|
|
|
|
if (!pdev)
|
|
|
|
continue;
|
|
|
|
sysfs_remove_link(&pdev->dev.kobj, "physical_node");
|
2021-09-14 12:14:22 -07:00
|
|
|
platform_device_unregister(cxl_host_bridge[i]);
|
2022-01-23 16:32:12 -08:00
|
|
|
}
|
2021-09-14 12:14:22 -07:00
|
|
|
err_populate:
|
|
|
|
depopulate_all_mock_resources();
|
|
|
|
err_gen_pool_add:
|
|
|
|
gen_pool_destroy(cxl_mock_pool);
|
|
|
|
err_gen_pool_create:
|
|
|
|
unregister_cxl_mock_ops(&cxl_mock_ops);
|
|
|
|
return rc;
|
|
|
|
}
|
|
|
|
|
|
|
|
static __exit void cxl_test_exit(void)
|
|
|
|
{
|
|
|
|
int i;
|
|
|
|
|
2024-10-22 18:44:06 -07:00
|
|
|
cxl_mem_exit();
|
2021-09-14 12:14:22 -07:00
|
|
|
platform_device_unregister(cxl_acpi);
|
2024-10-22 18:44:06 -07:00
|
|
|
cxl_rch_topo_exit();
|
|
|
|
cxl_single_topo_exit();
|
2022-01-23 16:32:01 -08:00
|
|
|
for (i = ARRAY_SIZE(cxl_switch_dport) - 1; i >= 0; i--)
|
|
|
|
platform_device_unregister(cxl_switch_dport[i]);
|
|
|
|
for (i = ARRAY_SIZE(cxl_switch_uport) - 1; i >= 0; i--)
|
|
|
|
platform_device_unregister(cxl_switch_uport[i]);
|
2021-09-14 12:14:22 -07:00
|
|
|
for (i = ARRAY_SIZE(cxl_root_port) - 1; i >= 0; i--)
|
|
|
|
platform_device_unregister(cxl_root_port[i]);
|
2022-01-23 16:32:12 -08:00
|
|
|
for (i = ARRAY_SIZE(cxl_host_bridge) - 1; i >= 0; i--) {
|
|
|
|
struct platform_device *pdev = cxl_host_bridge[i];
|
|
|
|
|
|
|
|
if (!pdev)
|
|
|
|
continue;
|
|
|
|
sysfs_remove_link(&pdev->dev.kobj, "physical_node");
|
2021-09-14 12:14:22 -07:00
|
|
|
platform_device_unregister(cxl_host_bridge[i]);
|
2022-01-23 16:32:12 -08:00
|
|
|
}
|
2021-09-14 12:14:22 -07:00
|
|
|
depopulate_all_mock_resources();
|
|
|
|
gen_pool_destroy(cxl_mock_pool);
|
|
|
|
unregister_cxl_mock_ops(&cxl_mock_ops);
|
|
|
|
}
|
|
|
|
|
2023-01-26 09:05:55 -08:00
|
|
|
/* Select CFMWS interleave math at load time: 0 = modulo, 1 = XOR. */
module_param(interleave_arithmetic, int, 0444);
MODULE_PARM_DESC(interleave_arithmetic, "Modulo:0, XOR:1");
module_init(cxl_test_init);
module_exit(cxl_test_exit);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("cxl_test: setup module");
|
module: Convert symbol namespace to string literal
Clean up the existing export namespace code along the same lines as
commit 33def8498fdd ("treewide: Convert macro and uses of __section(foo)
to __section("foo")") and for the same reason, it is not desired for the
namespace argument to be a macro expansion itself.
Scripted using
git grep -l -e MODULE_IMPORT_NS -e EXPORT_SYMBOL_NS | while read file;
do
awk -i inplace '
/^#define EXPORT_SYMBOL_NS/ {
gsub(/__stringify\(ns\)/, "ns");
print;
next;
}
/^#define MODULE_IMPORT_NS/ {
gsub(/__stringify\(ns\)/, "ns");
print;
next;
}
/MODULE_IMPORT_NS/ {
$0 = gensub(/MODULE_IMPORT_NS\(([^)]*)\)/, "MODULE_IMPORT_NS(\"\\1\")", "g");
}
/EXPORT_SYMBOL_NS/ {
if ($0 ~ /(EXPORT_SYMBOL_NS[^(]*)\(([^,]+),/) {
if ($0 !~ /(EXPORT_SYMBOL_NS[^(]*)\(([^,]+), ([^)]+)\)/ &&
$0 !~ /(EXPORT_SYMBOL_NS[^(]*)\(\)/ &&
$0 !~ /^my/) {
getline line;
gsub(/[[:space:]]*\\$/, "");
gsub(/[[:space:]]/, "", line);
$0 = $0 " " line;
}
$0 = gensub(/(EXPORT_SYMBOL_NS[^(]*)\(([^,]+), ([^)]+)\)/,
"\\1(\\2, \"\\3\")", "g");
}
}
{ print }' $file;
done
Requested-by: Masahiro Yamada <masahiroy@kernel.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://mail.google.com/mail/u/2/#inbox/FMfcgzQXKWgMmjdFwwdsfgxzKpVHWPlc
Acked-by: Greg KH <gregkh@linuxfoundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2024-12-02 15:59:47 +01:00
|
|
|
/* Symbol namespaces this test module consumes mocked exports from. */
MODULE_IMPORT_NS("ACPI");
MODULE_IMPORT_NS("CXL");
|