// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2020 Intel Corporation. All rights reserved. */
#include <linux/security.h>
#include <linux/debugfs.h>
#include <linux/ktime.h>
#include <linux/mutex.h>
#include <linux/unaligned.h>
#include <cxlpci.h>
#include <cxlmem.h>
#include <cxl.h>

#include "core.h"
#include "trace.h"
#include "mce.h"

static bool cxl_raw_allow_all;

/**
 * DOC: cxl mbox
 *
 * Core implementation of the CXL 2.0 Type-3 Memory Device Mailbox. The
 * implementation is used by the cxl_pci driver to initialize the device
 * and implement the cxl_mem.h IOCTL UAPI. It also implements the
 * backend of the cxl_pmem_ctl() transport for LIBNVDIMM.
 */

#define cxl_for_each_cmd(cmd) \
	for ((cmd) = &cxl_mem_commands[0]; \
	     ((cmd) - cxl_mem_commands) < ARRAY_SIZE(cxl_mem_commands); (cmd)++)

#define CXL_CMD(_id, sin, sout, _flags) \
	[CXL_MEM_COMMAND_ID_##_id] = { \
	.info =	{ \
			.id = CXL_MEM_COMMAND_ID_##_id, \
			.size_in = sin, \
			.size_out = sout, \
		}, \
	.opcode = CXL_MBOX_OP_##_id, \
	.flags = _flags, \
	}
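
/*
 * Illustrative sketch (not compiled into the driver): with the usual
 * ## token pasting of the _id argument, the CXL_CMD() entry for
 * IDENTIFY in the table below expands to roughly:
 *
 *	[CXL_MEM_COMMAND_ID_IDENTIFY] = {
 *		.info = {
 *			.id = CXL_MEM_COMMAND_ID_IDENTIFY,
 *			.size_in = 0,
 *			.size_out = 0x43,
 *		},
 *		.opcode = CXL_MBOX_OP_IDENTIFY,
 *		.flags = CXL_CMD_FLAG_FORCE_ENABLE,
 *	}
 */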

#define CXL_VARIABLE_PAYLOAD	~0U
/*
 * This table defines the supported mailbox commands for the driver. This table
 * is made up of a UAPI structure. Non-negative values as parameters in the
 * table will be validated against the user's input. For example, if size_in is
 * 0, and the user passed in 1, it is an error.
 */
static struct cxl_mem_command cxl_mem_commands[CXL_MEM_COMMAND_ID_MAX] = {
	CXL_CMD(IDENTIFY, 0, 0x43, CXL_CMD_FLAG_FORCE_ENABLE),
#ifdef CONFIG_CXL_MEM_RAW_COMMANDS
	CXL_CMD(RAW, CXL_VARIABLE_PAYLOAD, CXL_VARIABLE_PAYLOAD, 0),
#endif
	CXL_CMD(GET_SUPPORTED_LOGS, 0, CXL_VARIABLE_PAYLOAD, CXL_CMD_FLAG_FORCE_ENABLE),
	CXL_CMD(GET_FW_INFO, 0, 0x50, 0),
	CXL_CMD(GET_PARTITION_INFO, 0, 0x20, 0),
	CXL_CMD(GET_LSA, 0x8, CXL_VARIABLE_PAYLOAD, 0),
	CXL_CMD(GET_HEALTH_INFO, 0, 0x12, 0),
	CXL_CMD(GET_LOG, 0x18, CXL_VARIABLE_PAYLOAD, CXL_CMD_FLAG_FORCE_ENABLE),
	CXL_CMD(GET_LOG_CAPS, 0x10, 0x4, 0),
	CXL_CMD(CLEAR_LOG, 0x10, 0, 0),
	CXL_CMD(GET_SUP_LOG_SUBLIST, 0x2, CXL_VARIABLE_PAYLOAD, 0),
	CXL_CMD(SET_PARTITION_INFO, 0x0a, 0, 0),
	CXL_CMD(SET_LSA, CXL_VARIABLE_PAYLOAD, 0, 0),
	CXL_CMD(GET_ALERT_CONFIG, 0, 0x10, 0),
	CXL_CMD(SET_ALERT_CONFIG, 0xc, 0, 0),
	CXL_CMD(GET_SHUTDOWN_STATE, 0, 0x1, 0),
	CXL_CMD(SET_SHUTDOWN_STATE, 0x1, 0, 0),
	CXL_CMD(GET_SCAN_MEDIA_CAPS, 0x10, 0x4, 0),
	CXL_CMD(GET_TIMESTAMP, 0, 0x8, 0),
};

/*
 * Commands that RAW doesn't permit. The rationale for each:
 *
 * CXL_MBOX_OP_ACTIVATE_FW: Firmware activation requires adjustment /
 * coordination of transaction timeout values at the root bridge level.
 *
 * CXL_MBOX_OP_SET_PARTITION_INFO: The device memory map may change live
 * and needs to be coordinated with HDM updates.
 *
 * CXL_MBOX_OP_SET_LSA: The label storage area may be cached by the
 * driver and any writes from userspace invalidate those contents.
 *
 * CXL_MBOX_OP_SET_SHUTDOWN_STATE: Set shutdown state assumes no writes
 * to the device after it is marked clean; userspace cannot make that
 * assertion.
 *
 * CXL_MBOX_OP_[GET_]SCAN_MEDIA: The kernel provides a native error list that
 * is kept up to date with patrol notifications and error management.
 *
 * CXL_MBOX_OP_[GET_,INJECT_,CLEAR_]POISON: These commands require kernel
 * driver orchestration for safety.
 */
static u16 cxl_disabled_raw_commands[] = {
	CXL_MBOX_OP_ACTIVATE_FW,
	CXL_MBOX_OP_SET_PARTITION_INFO,
	CXL_MBOX_OP_SET_LSA,
	CXL_MBOX_OP_SET_SHUTDOWN_STATE,
	CXL_MBOX_OP_SCAN_MEDIA,
	CXL_MBOX_OP_GET_SCAN_MEDIA,
	CXL_MBOX_OP_GET_POISON,
	CXL_MBOX_OP_INJECT_POISON,
	CXL_MBOX_OP_CLEAR_POISON,
};

/*
 * Command sets that RAW doesn't permit. All opcodes in this set are
 * disabled because they pass plain text security payloads over the
 * user/kernel boundary. This functionality is intended to be wrapped
 * behind the keys ABI, which allows for encrypted payloads in the UAPI.
 */
static u8 security_command_sets[] = {
	0x44, /* Sanitize */
	0x45, /* Persistent Memory Data-at-rest Security */
	0x46, /* Security Passthrough */
};

static bool cxl_is_security_command(u16 opcode)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(security_command_sets); i++)
		if (security_command_sets[i] == (opcode >> 8))
			return true;
	return false;
}
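
/*
 * Illustrative only: mailbox opcodes carry their command set in the
 * high byte, so an opcode such as 0x4500 (a Persistent Memory Security
 * command) has (0x4500 >> 8) == 0x45, matches that entry in
 * security_command_sets[] above, and is classified as a security
 * command.
 */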

static void cxl_set_security_cmd_enabled(struct cxl_security_state *security,
					 u16 opcode)
{
	switch (opcode) {
	case CXL_MBOX_OP_SANITIZE:
		set_bit(CXL_SEC_ENABLED_SANITIZE, security->enabled_cmds);
		break;
	case CXL_MBOX_OP_SECURE_ERASE:
		set_bit(CXL_SEC_ENABLED_SECURE_ERASE,
			security->enabled_cmds);
		break;
	case CXL_MBOX_OP_GET_SECURITY_STATE:
		set_bit(CXL_SEC_ENABLED_GET_SECURITY_STATE,
			security->enabled_cmds);
		break;
	case CXL_MBOX_OP_SET_PASSPHRASE:
		set_bit(CXL_SEC_ENABLED_SET_PASSPHRASE,
			security->enabled_cmds);
		break;
	case CXL_MBOX_OP_DISABLE_PASSPHRASE:
		set_bit(CXL_SEC_ENABLED_DISABLE_PASSPHRASE,
			security->enabled_cmds);
		break;
	case CXL_MBOX_OP_UNLOCK:
		set_bit(CXL_SEC_ENABLED_UNLOCK, security->enabled_cmds);
		break;
	case CXL_MBOX_OP_FREEZE_SECURITY:
		set_bit(CXL_SEC_ENABLED_FREEZE_SECURITY,
			security->enabled_cmds);
		break;
	case CXL_MBOX_OP_PASSPHRASE_SECURE_ERASE:
		set_bit(CXL_SEC_ENABLED_PASSPHRASE_SECURE_ERASE,
			security->enabled_cmds);
		break;
	default:
		break;
	}
}

static bool cxl_is_poison_command(u16 opcode)
{
#define CXL_MBOX_OP_POISON_CMDS 0x43

	if ((opcode >> 8) == CXL_MBOX_OP_POISON_CMDS)
		return true;

	return false;
}

static void cxl_set_poison_cmd_enabled(struct cxl_poison_state *poison,
				       u16 opcode)
{
	switch (opcode) {
	case CXL_MBOX_OP_GET_POISON:
		set_bit(CXL_POISON_ENABLED_LIST, poison->enabled_cmds);
		break;
	case CXL_MBOX_OP_INJECT_POISON:
		set_bit(CXL_POISON_ENABLED_INJECT, poison->enabled_cmds);
		break;
	case CXL_MBOX_OP_CLEAR_POISON:
		set_bit(CXL_POISON_ENABLED_CLEAR, poison->enabled_cmds);
		break;
	case CXL_MBOX_OP_GET_SCAN_MEDIA_CAPS:
		set_bit(CXL_POISON_ENABLED_SCAN_CAPS, poison->enabled_cmds);
		break;
	case CXL_MBOX_OP_SCAN_MEDIA:
		set_bit(CXL_POISON_ENABLED_SCAN_MEDIA, poison->enabled_cmds);
		break;
	case CXL_MBOX_OP_GET_SCAN_MEDIA:
		set_bit(CXL_POISON_ENABLED_SCAN_RESULTS, poison->enabled_cmds);
		break;
	default:
		break;
	}
}
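
/*
 * Illustrative sketch: later consumers gate functionality on these
 * bitmaps; e.g. a hypothetical caller could check
 *
 *	if (test_bit(CXL_POISON_ENABLED_LIST, mds->poison.enabled_cmds))
 *		// the CEL advertised Get Poison List support
 */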

static struct cxl_mem_command *cxl_mem_find_command(u16 opcode)
{
	struct cxl_mem_command *c;

	cxl_for_each_cmd(c)
		if (c->opcode == opcode)
			return c;

	return NULL;
}

static const char *cxl_mem_opcode_to_name(u16 opcode)
{
	struct cxl_mem_command *c;

	c = cxl_mem_find_command(opcode);
	if (!c)
		return NULL;

	return cxl_command_names[c->info.id].name;
}

/**
 * cxl_internal_send_cmd() - Kernel internal interface to send a mailbox command
 * @cxl_mbox: CXL mailbox context
 * @mbox_cmd: initialized command to execute
 *
 * Context: Any context.
 * Return:
 *  * %>=0	- Number of bytes returned in the output payload.
 *  * %-E2BIG	- Payload is too large for hardware.
 *  * %-EBUSY	- Couldn't acquire exclusive mailbox access.
 *  * %-EFAULT	- Hardware error occurred.
 *  * %-ENXIO	- Command completed, but device reported an error.
 *  * %-EIO	- Unexpected output size.
 *
 * Mailbox commands may execute successfully yet the device itself reported an
 * error. While this distinction can be useful for commands from userspace, the
 * kernel will only be able to use results when both are successful.
 */
int cxl_internal_send_cmd(struct cxl_mailbox *cxl_mbox,
			  struct cxl_mbox_cmd *mbox_cmd)
{
	size_t out_size, min_out;
	int rc;

	if (mbox_cmd->size_in > cxl_mbox->payload_size ||
	    mbox_cmd->size_out > cxl_mbox->payload_size)
		return -E2BIG;

	out_size = mbox_cmd->size_out;
	min_out = mbox_cmd->min_out;
	rc = cxl_mbox->mbox_send(cxl_mbox, mbox_cmd);
	/*
	 * EIO is reserved for a payload size mismatch and mbox_send()
	 * may not return this error.
	 */
	if (WARN_ONCE(rc == -EIO, "Bad return code: -EIO"))
		return -ENXIO;
	if (rc)
		return rc;

	if (mbox_cmd->return_code != CXL_MBOX_CMD_RC_SUCCESS &&
	    mbox_cmd->return_code != CXL_MBOX_CMD_RC_BACKGROUND)
		return cxl_mbox_cmd_rc2errno(mbox_cmd);

	if (!out_size)
		return 0;

	/*
	 * Variable sized output needs to at least satisfy the caller's
	 * minimum if not the fully requested size.
	 */
	if (min_out == 0)
		min_out = out_size;

	if (mbox_cmd->size_out < min_out)
		return -EIO;
	return 0;
}
EXPORT_SYMBOL_NS_GPL(cxl_internal_send_cmd, "CXL");
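
/*
 * Example (illustrative only; 'buf' is a hypothetical caller-owned
 * buffer): a typical kernel-internal caller fills a struct cxl_mbox_cmd
 * on the stack and hands it to cxl_internal_send_cmd(), as the
 * GET_SUPPORTED_LOGS path later in this file does:
 *
 *	struct cxl_mbox_cmd mbox_cmd = (struct cxl_mbox_cmd) {
 *		.opcode = CXL_MBOX_OP_GET_SUPPORTED_LOGS,
 *		.size_out = cxl_mbox->payload_size,
 *		.payload_out = buf,
 *		.min_out = 2,
 *	};
 *	int rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
 */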

static bool cxl_mem_raw_command_allowed(u16 opcode)
{
	int i;

	if (!IS_ENABLED(CONFIG_CXL_MEM_RAW_COMMANDS))
		return false;

	if (security_locked_down(LOCKDOWN_PCI_ACCESS))
		return false;

	if (cxl_raw_allow_all)
		return true;

	if (cxl_is_security_command(opcode))
		return false;

	for (i = 0; i < ARRAY_SIZE(cxl_disabled_raw_commands); i++)
		if (cxl_disabled_raw_commands[i] == opcode)
			return false;

	return true;
}

/**
 * cxl_payload_from_user_allowed() - Check contents of in_payload.
 * @opcode: The mailbox command opcode.
 * @payload_in: Pointer to the input payload passed in from user space.
 *
 * Return:
 *  * true	- payload_in passes check for @opcode.
 *  * false	- payload_in contains invalid or unsupported values.
 *
 * The driver may inspect payload contents before sending a mailbox
 * command from user space to the device. The intent is to reject
 * commands with input payloads that are known to be unsafe. This
 * check is not intended to replace the user's careful selection of
 * mailbox command parameters and makes no guarantee that the user
 * command will succeed, nor that it is appropriate.
 *
 * The specific checks are determined by the opcode.
 */
static bool cxl_payload_from_user_allowed(u16 opcode, void *payload_in)
{
	switch (opcode) {
	case CXL_MBOX_OP_SET_PARTITION_INFO: {
		struct cxl_mbox_set_partition_info *pi = payload_in;

		if (pi->flags & CXL_SET_PARTITION_IMMEDIATE_FLAG)
			return false;
		break;
	}
	case CXL_MBOX_OP_CLEAR_LOG: {
		const uuid_t *uuid = (uuid_t *)payload_in;

		/*
		 * Restrict the 'Clear Log' action to only apply to
		 * Vendor debug logs.
		 */
		return uuid_equal(uuid, &DEFINE_CXL_VENDOR_DEBUG_UUID);
	}
	default:
		break;
	}
	return true;
}

static int cxl_mbox_cmd_ctor(struct cxl_mbox_cmd *mbox_cmd,
			     struct cxl_mailbox *cxl_mbox, u16 opcode,
			     size_t in_size, size_t out_size, u64 in_payload)
{
	*mbox_cmd = (struct cxl_mbox_cmd) {
		.opcode = opcode,
		.size_in = in_size,
	};

	if (in_size) {
		mbox_cmd->payload_in = vmemdup_user(u64_to_user_ptr(in_payload),
						    in_size);
		if (IS_ERR(mbox_cmd->payload_in))
			return PTR_ERR(mbox_cmd->payload_in);

		if (!cxl_payload_from_user_allowed(opcode, mbox_cmd->payload_in)) {
			dev_dbg(cxl_mbox->host, "%s: input payload not allowed\n",
				cxl_mem_opcode_to_name(opcode));
			kvfree(mbox_cmd->payload_in);
			return -EBUSY;
		}
	}

	/* Prepare to handle a full payload for variable sized output */
	if (out_size == CXL_VARIABLE_PAYLOAD)
		mbox_cmd->size_out = cxl_mbox->payload_size;
	else
		mbox_cmd->size_out = out_size;

	if (mbox_cmd->size_out) {
		mbox_cmd->payload_out = kvzalloc(mbox_cmd->size_out, GFP_KERNEL);
		if (!mbox_cmd->payload_out) {
			kvfree(mbox_cmd->payload_in);
			return -ENOMEM;
		}
	}
	return 0;
}

static void cxl_mbox_cmd_dtor(struct cxl_mbox_cmd *mbox)
{
	kvfree(mbox->payload_in);
	kvfree(mbox->payload_out);
}

static int cxl_to_mem_cmd_raw(struct cxl_mem_command *mem_cmd,
			      const struct cxl_send_command *send_cmd,
			      struct cxl_mailbox *cxl_mbox)
{
	if (send_cmd->raw.rsvd)
		return -EINVAL;

	/*
	 * Unlike supported commands, the output size of RAW commands
	 * gets passed along without further checking, so it must be
	 * validated here.
	 */
	if (send_cmd->out.size > cxl_mbox->payload_size)
		return -EINVAL;

	if (!cxl_mem_raw_command_allowed(send_cmd->raw.opcode))
		return -EPERM;

	dev_WARN_ONCE(cxl_mbox->host, true, "raw command path used\n");

	*mem_cmd = (struct cxl_mem_command) {
		.info = {
			.id = CXL_MEM_COMMAND_ID_RAW,
			.size_in = send_cmd->in.size,
			.size_out = send_cmd->out.size,
		},
		.opcode = send_cmd->raw.opcode
	};

	return 0;
}

static int cxl_to_mem_cmd(struct cxl_mem_command *mem_cmd,
			  const struct cxl_send_command *send_cmd,
			  struct cxl_mailbox *cxl_mbox)
{
	struct cxl_mem_command *c = &cxl_mem_commands[send_cmd->id];
	const struct cxl_command_info *info = &c->info;

	if (send_cmd->flags & ~CXL_MEM_COMMAND_FLAG_MASK)
		return -EINVAL;

	if (send_cmd->rsvd)
		return -EINVAL;

	if (send_cmd->in.rsvd || send_cmd->out.rsvd)
		return -EINVAL;

	/* Check that the command is enabled for hardware */
	if (!test_bit(info->id, cxl_mbox->enabled_cmds))
		return -ENOTTY;

	/* Check that the command is not claimed for exclusive kernel use */
	if (test_bit(info->id, cxl_mbox->exclusive_cmds))
		return -EBUSY;

	/* Check the input buffer is the expected size */
	if ((info->size_in != CXL_VARIABLE_PAYLOAD) &&
	    (info->size_in != send_cmd->in.size))
		return -ENOMEM;

	/* Check the output buffer is at least large enough */
	if ((info->size_out != CXL_VARIABLE_PAYLOAD) &&
	    (send_cmd->out.size < info->size_out))
		return -ENOMEM;

	*mem_cmd = (struct cxl_mem_command) {
		.info = {
			.id = info->id,
			.flags = info->flags,
			.size_in = send_cmd->in.size,
			.size_out = send_cmd->out.size,
		},
		.opcode = c->opcode
	};

	return 0;
}

/**
 * cxl_validate_cmd_from_user() - Check fields for CXL_MEM_SEND_COMMAND.
 * @mbox_cmd: Sanitized and populated &struct cxl_mbox_cmd.
 * @cxl_mbox: CXL mailbox context
 * @send_cmd: &struct cxl_send_command copied in from userspace.
 *
 * Return:
 *  * %0	- @mbox_cmd is ready to send.
 *  * %-ENOTTY	- Invalid command specified.
 *  * %-EINVAL	- Reserved fields or invalid values were used.
 *  * %-ENOMEM	- Input or output buffer wasn't sized properly.
 *  * %-EPERM	- Attempted to use a protected command.
 *  * %-EBUSY	- Kernel has claimed exclusive access to this opcode.
 *
 * The result of this command is a fully validated command in @mbox_cmd that is
 * safe to send to the hardware.
 */
static int cxl_validate_cmd_from_user(struct cxl_mbox_cmd *mbox_cmd,
				      struct cxl_mailbox *cxl_mbox,
				      const struct cxl_send_command *send_cmd)
{
	struct cxl_mem_command mem_cmd;
	int rc;

	if (send_cmd->id == 0 || send_cmd->id >= CXL_MEM_COMMAND_ID_MAX)
		return -ENOTTY;

	/*
	 * The user can never specify an input payload larger than what hardware
	 * supports, but output can be arbitrarily large (simply write out as
	 * much data as the hardware provides).
	 */
	if (send_cmd->in.size > cxl_mbox->payload_size)
		return -EINVAL;

	/* Sanitize and construct a cxl_mem_command */
	if (send_cmd->id == CXL_MEM_COMMAND_ID_RAW)
		rc = cxl_to_mem_cmd_raw(&mem_cmd, send_cmd, cxl_mbox);
	else
		rc = cxl_to_mem_cmd(&mem_cmd, send_cmd, cxl_mbox);

	if (rc)
		return rc;

	/* Sanitize and construct a cxl_mbox_cmd */
	return cxl_mbox_cmd_ctor(mbox_cmd, cxl_mbox, mem_cmd.opcode,
				 mem_cmd.info.size_in, mem_cmd.info.size_out,
				 send_cmd->in.payload);
}

int cxl_query_cmd(struct cxl_mailbox *cxl_mbox,
		  struct cxl_mem_query_commands __user *q)
{
	struct device *dev = cxl_mbox->host;
	struct cxl_mem_command *cmd;
	u32 n_commands;
	int j = 0;

	dev_dbg(dev, "Query IOCTL\n");

	if (get_user(n_commands, &q->n_commands))
		return -EFAULT;

	/* returns the total number if 0 elements are requested. */
	if (n_commands == 0)
		return put_user(ARRAY_SIZE(cxl_mem_commands), &q->n_commands);

	/*
	 * otherwise, return min(n_commands, total commands) cxl_command_info
	 * structures.
	 */
	cxl_for_each_cmd(cmd) {
		struct cxl_command_info info = cmd->info;

		if (test_bit(info.id, cxl_mbox->enabled_cmds))
			info.flags |= CXL_MEM_COMMAND_FLAG_ENABLED;
		if (test_bit(info.id, cxl_mbox->exclusive_cmds))
			info.flags |= CXL_MEM_COMMAND_FLAG_EXCLUSIVE;

		if (copy_to_user(&q->commands[j++], &info, sizeof(info)))
			return -EFAULT;

		if (j == n_commands)
			break;
	}

	return 0;
}
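
/*
 * Userspace sketch (illustrative; assumes the cxl_mem.h UAPI and a
 * hypothetical /dev/cxl/mem0 node): call once with n_commands == 0 to
 * learn the total, then again with a sized array:
 *
 *	struct cxl_mem_query_commands *q = calloc(1, sizeof(*q));
 *	int fd = open("/dev/cxl/mem0", O_RDWR);
 *
 *	ioctl(fd, CXL_MEM_QUERY_COMMANDS, q);	// writes total to q->n_commands
 *	__u32 n = q->n_commands;
 *	q = realloc(q, sizeof(*q) + n * sizeof(q->commands[0]));
 *	q->n_commands = n;
 *	ioctl(fd, CXL_MEM_QUERY_COMMANDS, q);	// fills q->commands[0..n-1]
 */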

/**
 * handle_mailbox_cmd_from_user() - Dispatch a mailbox command for userspace.
 * @cxl_mbox: The mailbox context for the operation.
 * @mbox_cmd: The validated mailbox command.
 * @out_payload: Pointer to userspace's output payload.
 * @size_out: (Input) Max payload size to copy out.
 *            (Output) Payload size hardware generated.
 * @retval: Hardware generated return code from the operation.
 *
 * Return:
 *  * %0	- Mailbox transaction succeeded. This implies the mailbox
 *		  protocol completed successfully, not that the operation itself
 *		  was successful.
 *  * %-ENOMEM	- Couldn't allocate a bounce buffer.
 *  * %-EFAULT	- Something happened with copy_to/from_user.
 *  * %-EINTR	- Mailbox acquisition interrupted.
 *  * %-EXXX	- Transaction level failures.
 *
 * Dispatches a mailbox command on behalf of a userspace request.
 * The output payload is copied to userspace.
 *
 * See cxl_send_cmd().
 */
static int handle_mailbox_cmd_from_user(struct cxl_mailbox *cxl_mbox,
					struct cxl_mbox_cmd *mbox_cmd,
					u64 out_payload, s32 *size_out,
					u32 *retval)
{
	struct device *dev = cxl_mbox->host;
	int rc;

	dev_dbg(dev,
		"Submitting %s command for user\n"
		"\topcode: %x\n"
		"\tsize: %zx\n",
		cxl_mem_opcode_to_name(mbox_cmd->opcode),
		mbox_cmd->opcode, mbox_cmd->size_in);

	rc = cxl_mbox->mbox_send(cxl_mbox, mbox_cmd);
	if (rc)
		goto out;

	/*
	 * @size_out contains the max size that's allowed to be written back out
	 * to userspace. While the command may have generated more output than
	 * this, anything beyond that size is ignored.
	 */
	if (mbox_cmd->size_out) {
		dev_WARN_ONCE(dev, mbox_cmd->size_out > *size_out,
			      "Invalid return size\n");
		if (copy_to_user(u64_to_user_ptr(out_payload),
				 mbox_cmd->payload_out, mbox_cmd->size_out)) {
			rc = -EFAULT;
			goto out;
		}
	}

	*size_out = mbox_cmd->size_out;
	*retval = mbox_cmd->return_code;

out:
	cxl_mbox_cmd_dtor(mbox_cmd);
	return rc;
}

int cxl_send_cmd(struct cxl_mailbox *cxl_mbox, struct cxl_send_command __user *s)
{
	struct device *dev = cxl_mbox->host;
	struct cxl_send_command send;
	struct cxl_mbox_cmd mbox_cmd;
	int rc;

	dev_dbg(dev, "Send IOCTL\n");

	if (copy_from_user(&send, s, sizeof(send)))
		return -EFAULT;

	rc = cxl_validate_cmd_from_user(&mbox_cmd, cxl_mbox, &send);
	if (rc)
		return rc;

	rc = handle_mailbox_cmd_from_user(cxl_mbox, &mbox_cmd, send.out.payload,
					  &send.out.size, &send.retval);
	if (rc)
		return rc;

	if (copy_to_user(s, &send, sizeof(send)))
		return -EFAULT;

	return 0;
}
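
/*
 * Userspace sketch (illustrative; assumes the cxl_mem.h UAPI; 'fd' and
 * 'id_buf' are hypothetical caller-owned objects): send IDENTIFY, which
 * takes no input and returns a 0x43-byte payload:
 *
 *	struct cxl_send_command cmd = {
 *		.id = CXL_MEM_COMMAND_ID_IDENTIFY,
 *		.out.size = 0x43,
 *		.out.payload = (__u64)(uintptr_t)id_buf,
 *	};
 *
 *	if (ioctl(fd, CXL_MEM_SEND_COMMAND, &cmd) == 0)
 *		// cmd.retval holds the device's mailbox return code and
 *		// cmd.out.size the number of bytes written to id_buf.
 */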

static int cxl_xfer_log(struct cxl_memdev_state *mds, uuid_t *uuid,
			u32 *size, u8 *out)
{
	struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
	u32 remaining = *size;
	u32 offset = 0;

	while (remaining) {
		u32 xfer_size = min_t(u32, remaining, cxl_mbox->payload_size);
		struct cxl_mbox_cmd mbox_cmd;
		struct cxl_mbox_get_log log;
		int rc;

		log = (struct cxl_mbox_get_log) {
			.uuid = *uuid,
			.offset = cpu_to_le32(offset),
			.length = cpu_to_le32(xfer_size),
		};

		mbox_cmd = (struct cxl_mbox_cmd) {
			.opcode = CXL_MBOX_OP_GET_LOG,
			.size_in = sizeof(log),
			.payload_in = &log,
			.size_out = xfer_size,
			.payload_out = out,
		};

		rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);

		/*
		 * The output payload length that indicates the number
		 * of valid bytes can be smaller than the Log buffer
		 * size.
		 */
		if (rc == -EIO && mbox_cmd.size_out < xfer_size) {
			offset += mbox_cmd.size_out;
			break;
		}

		if (rc < 0)
			return rc;

		out += xfer_size;
		remaining -= xfer_size;
		offset += xfer_size;
	}

	*size = offset;

	return 0;
}
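
/*
 * Worked example (hypothetical numbers): with a 1 MiB mailbox payload
 * limit and a 2.5 MiB log, cxl_xfer_log() issues three Get Log commands
 * at offsets 0, 1 MiB and 2 MiB with lengths 1 MiB, 1 MiB and 0.5 MiB,
 * advancing @out by each transfer so the log reassembles contiguously.
 */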

static int check_features_opcodes(u16 opcode, int *ro_cmds, int *wr_cmds)
{
	switch (opcode) {
	case CXL_MBOX_OP_GET_SUPPORTED_FEATURES:
	case CXL_MBOX_OP_GET_FEATURE:
		(*ro_cmds)++;
		return 1;
	case CXL_MBOX_OP_SET_FEATURE:
		(*wr_cmds)++;
		return 1;
	default:
		return 0;
	}
}

/* 'Get Supported Features' and 'Get Feature' */
#define MAX_FEATURES_READ_CMDS 2
static void set_features_cap(struct cxl_mailbox *cxl_mbox,
			     int ro_cmds, int wr_cmds)
{
	/* Setting up Features capability while walking the CEL */
	if (ro_cmds == MAX_FEATURES_READ_CMDS) {
		if (wr_cmds)
			cxl_mbox->feat_cap = CXL_FEATURES_RW;
		else
			cxl_mbox->feat_cap = CXL_FEATURES_RO;
	}
}

/**
 * cxl_walk_cel() - Walk through the Command Effects Log.
 * @mds: The driver data for the operation
 * @size: Length of the Command Effects Log.
 * @cel: CEL
 *
 * Iterate over each entry in the CEL and determine if the driver supports the
 * command. If so, the command is enabled for the device and can be used later.
 */
static void cxl_walk_cel(struct cxl_memdev_state *mds, size_t size, u8 *cel)
{
	struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
	struct cxl_cel_entry *cel_entry;
	const int cel_entries = size / sizeof(*cel_entry);
	struct device *dev = mds->cxlds.dev;
	int i, ro_cmds = 0, wr_cmds = 0;

	cel_entry = (struct cxl_cel_entry *) cel;

	for (i = 0; i < cel_entries; i++) {
		u16 opcode = le16_to_cpu(cel_entry[i].opcode);
		struct cxl_mem_command *cmd = cxl_mem_find_command(opcode);
		int enabled = 0;

		if (cmd) {
			set_bit(cmd->info.id, cxl_mbox->enabled_cmds);
			enabled++;
		}

		enabled += check_features_opcodes(opcode, &ro_cmds,
						  &wr_cmds);

		if (cxl_is_poison_command(opcode)) {
			cxl_set_poison_cmd_enabled(&mds->poison, opcode);
			enabled++;
		}

		if (cxl_is_security_command(opcode)) {
			cxl_set_security_cmd_enabled(&mds->security, opcode);
			enabled++;
		}

		dev_dbg(dev, "Opcode 0x%04x %s\n", opcode,
			enabled ? "enabled" : "unsupported by driver");
	}

	set_features_cap(cxl_mbox, ro_cmds, wr_cmds);
}
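
/*
 * For reference (see the struct cxl_cel_entry definition in cxlmem.h),
 * each CEL entry is a fixed-size little-endian pair:
 *
 *	struct cxl_cel_entry {
 *		__le16 opcode;
 *		__le16 effect;
 *	};
 *
 * so a 256-byte CEL yields 64 entries for the walk above.
 */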

static struct cxl_mbox_get_supported_logs *cxl_get_gsl(struct cxl_memdev_state *mds)
{
	struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
	struct cxl_mbox_get_supported_logs *ret;
	struct cxl_mbox_cmd mbox_cmd;
	int rc;

	ret = kvmalloc(cxl_mbox->payload_size, GFP_KERNEL);
	if (!ret)
		return ERR_PTR(-ENOMEM);

	mbox_cmd = (struct cxl_mbox_cmd) {
		.opcode = CXL_MBOX_OP_GET_SUPPORTED_LOGS,
		.size_out = cxl_mbox->payload_size,
		.payload_out = ret,
		/* At least the record number field must be valid */
		.min_out = 2,
	};
	rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
	if (rc < 0) {
		kvfree(ret);
		return ERR_PTR(rc);
	}

	return ret;
}

enum {
	CEL_UUID,
	VENDOR_DEBUG_UUID,
};

/* See CXL 2.0 Table 170. Get Log Input Payload */
static const uuid_t log_uuid[] = {
	[CEL_UUID] = DEFINE_CXL_CEL_UUID,
	[VENDOR_DEBUG_UUID] = DEFINE_CXL_VENDOR_DEBUG_UUID,
};

/**
 * cxl_enumerate_cmds() - Enumerate commands for a device.
 * @mds: The driver data for the operation
 *
 * Returns 0 if enumeration completed successfully.
 *
 * CXL devices have optional support for certain commands. This function will
 * determine the set of supported commands for the hardware and update the
 * enabled_cmds bitmap in @mds.
 */
int cxl_enumerate_cmds(struct cxl_memdev_state *mds)
{
	struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
	struct cxl_mbox_get_supported_logs *gsl;
	struct device *dev = mds->cxlds.dev;
	struct cxl_mem_command *cmd;
	int i, rc;

	gsl = cxl_get_gsl(mds);
	if (IS_ERR(gsl))
		return PTR_ERR(gsl);

	rc = -ENOENT;
	for (i = 0; i < le16_to_cpu(gsl->entries); i++) {
		u32 size = le32_to_cpu(gsl->entry[i].size);
		uuid_t uuid = gsl->entry[i].uuid;
		u8 *log;

		dev_dbg(dev, "Found LOG type %pU of size %d", &uuid, size);

		if (!uuid_equal(&uuid, &log_uuid[CEL_UUID]))
			continue;

		log = kvmalloc(size, GFP_KERNEL);
		if (!log) {
			rc = -ENOMEM;
			goto out;
		}

		rc = cxl_xfer_log(mds, &uuid, &size, log);
		if (rc) {
			kvfree(log);
			goto out;
		}

		cxl_walk_cel(mds, size, log);
		kvfree(log);

		/* In case CEL was bogus, enable some default commands. */
		cxl_for_each_cmd(cmd)
			if (cmd->flags & CXL_CMD_FLAG_FORCE_ENABLE)
				set_bit(cmd->info.id, cxl_mbox->enabled_cmds);

		/* Found the required CEL */
		rc = 0;
	}
out:
	kvfree(gsl);
	return rc;
}
EXPORT_SYMBOL_NS_GPL(cxl_enumerate_cmds, "CXL");
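
/*
 * Orientation note: cxl_enumerate_cmds() fetches the device's supported
 * logs, streams the Command Effects Log in payload-sized chunks via
 * cxl_xfer_log(), and cxl_walk_cel() then populates the enabled_cmds,
 * poison and security bitmaps that cxl_query_cmd() and cxl_to_mem_cmd()
 * consult above.
 */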

void cxl_event_trace_record(const struct cxl_memdev *cxlmd,
			    enum cxl_event_log_type type,
			    enum cxl_event_type event_type,
			    const uuid_t *uuid, union cxl_event *evt)
{
	if (event_type == CXL_CPER_EVENT_MEM_MODULE) {
		trace_cxl_memory_module(cxlmd, type, &evt->mem_module);
		return;
	}
	if (event_type == CXL_CPER_EVENT_GENERIC) {
		trace_cxl_generic_event(cxlmd, type, uuid, &evt->generic);
		return;
	}
	if (event_type == CXL_CPER_EVENT_MEM_SPARING) {
		trace_cxl_memory_sparing(cxlmd, type, &evt->mem_sparing);
		return;
	}

	if (trace_cxl_general_media_enabled() || trace_cxl_dram_enabled()) {
		u64 dpa, hpa = ULLONG_MAX, hpa_alias = ULLONG_MAX;
		struct cxl_region *cxlr;

		/*
		 * These trace points are annotated with HPA and region
		 * translations. Take topology mutation locks and lookup
		 * { HPA, REGION } from { DPA, MEMDEV } in the event record.
		 */
		guard(rwsem_read)(&cxl_rwsem.region);
		guard(rwsem_read)(&cxl_rwsem.dpa);

		dpa = le64_to_cpu(evt->media_hdr.phys_addr) & CXL_DPA_MASK;
		cxlr = cxl_dpa_to_region(cxlmd, dpa);
		if (cxlr) {
			u64 cache_size = cxlr->params.cache_size;

			hpa = cxl_dpa_to_hpa(cxlr, cxlmd, dpa);
			if (cache_size)
				hpa_alias = hpa - cache_size;
		}

		if (event_type == CXL_CPER_EVENT_GEN_MEDIA) {
			if (cxl_store_rec_gen_media((struct cxl_memdev *)cxlmd, evt))
				dev_dbg(&cxlmd->dev, "CXL store rec_gen_media failed\n");

			if (evt->gen_media.media_hdr.descriptor &
			    CXL_GMER_EVT_DESC_THRESHOLD_EVENT)
				WARN_ON_ONCE((evt->gen_media.media_hdr.type &
					      CXL_GMER_MEM_EVT_TYPE_AP_CME_COUNTER_EXPIRE) &&
					     !get_unaligned_le24(evt->gen_media.cme_count));
			else
				WARN_ON_ONCE(evt->gen_media.media_hdr.type &
					     CXL_GMER_MEM_EVT_TYPE_AP_CME_COUNTER_EXPIRE);

			trace_cxl_general_media(cxlmd, type, cxlr, hpa,
						hpa_alias, &evt->gen_media);
		} else if (event_type == CXL_CPER_EVENT_DRAM) {
			if (cxl_store_rec_dram((struct cxl_memdev *)cxlmd, evt))
				dev_dbg(&cxlmd->dev, "CXL store rec_dram failed\n");

			if (evt->dram.media_hdr.descriptor &
			    CXL_GMER_EVT_DESC_THRESHOLD_EVENT)
				WARN_ON_ONCE((evt->dram.media_hdr.type &
					      CXL_DER_MEM_EVT_TYPE_AP_CME_COUNTER_EXPIRE) &&
					     !get_unaligned_le24(evt->dram.cvme_count));
			else
				WARN_ON_ONCE(evt->dram.media_hdr.type &
					     CXL_DER_MEM_EVT_TYPE_AP_CME_COUNTER_EXPIRE);

			trace_cxl_dram(cxlmd, type, cxlr, hpa, hpa_alias,
				       &evt->dram);
		}
	}
}
EXPORT_SYMBOL_NS_GPL(cxl_event_trace_record, "CXL");

static void __cxl_event_trace_record(const struct cxl_memdev *cxlmd,
				     enum cxl_event_log_type type,
				     struct cxl_event_record_raw *record)
{
	enum cxl_event_type ev_type = CXL_CPER_EVENT_GENERIC;
	const uuid_t *uuid = &record->id;

	if (uuid_equal(uuid, &CXL_EVENT_GEN_MEDIA_UUID))
		ev_type = CXL_CPER_EVENT_GEN_MEDIA;
	else if (uuid_equal(uuid, &CXL_EVENT_DRAM_UUID))
		ev_type = CXL_CPER_EVENT_DRAM;
	else if (uuid_equal(uuid, &CXL_EVENT_MEM_MODULE_UUID))
		ev_type = CXL_CPER_EVENT_MEM_MODULE;
	else if (uuid_equal(uuid, &CXL_EVENT_MEM_SPARING_UUID))
		ev_type = CXL_CPER_EVENT_MEM_SPARING;

	cxl_event_trace_record(cxlmd, type, ev_type, uuid, &record->event);
}

static int cxl_clear_event_record(struct cxl_memdev_state *mds,
				  enum cxl_event_log_type log,
				  struct cxl_get_event_payload *get_pl)
{
	struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
	struct cxl_mbox_clear_event_payload *payload;
	u16 total = le16_to_cpu(get_pl->record_count);
	u8 max_handles = CXL_CLEAR_EVENT_MAX_HANDLES;
	size_t pl_size = struct_size(payload, handles, max_handles);
	struct cxl_mbox_cmd mbox_cmd;
	u16 cnt;
	int rc = 0;
	int i;

	/* Payload size may limit the max handles */
	if (pl_size > cxl_mbox->payload_size) {
		max_handles = (cxl_mbox->payload_size - sizeof(*payload)) /
			      sizeof(__le16);
		pl_size = struct_size(payload, handles, max_handles);
	}

	payload = kvzalloc(pl_size, GFP_KERNEL);
	if (!payload)
		return -ENOMEM;

	*payload = (struct cxl_mbox_clear_event_payload) {
		.event_log = log,
	};

	mbox_cmd = (struct cxl_mbox_cmd) {
		.opcode = CXL_MBOX_OP_CLEAR_EVENT_RECORD,
		.payload_in = payload,
		.size_in = pl_size,
	};

	/*
	 * Clear Event Records uses u8 for the handle cnt while Get Event
	 * Record can return up to 0xffff records.
	 */
	i = 0;
	for (cnt = 0; cnt < total; cnt++) {
		struct cxl_event_record_raw *raw = &get_pl->records[cnt];
		struct cxl_event_generic *gen = &raw->event.generic;

		payload->handles[i++] = gen->hdr.handle;
		dev_dbg(mds->cxlds.dev, "Event log '%d': Clearing %u\n", log,
			le16_to_cpu(payload->handles[i - 1]));

		if (i == max_handles) {
			payload->nr_recs = i;
			rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
			if (rc)
				goto free_pl;
			i = 0;
		}
	}

	/* Clear what is left if any */
	if (i) {
		payload->nr_recs = i;
		mbox_cmd.size_in = struct_size(payload, handles, i);
		rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
		if (rc)
			goto free_pl;
	}

free_pl:
	kvfree(payload);
	return rc;
}
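
/*
 * Worked example (hypothetical numbers): if the mailbox payload limit
 * were 256 bytes and the clear payload header 8 bytes, max_handles
 * would be clamped to (256 - 8) / sizeof(__le16) = 124, so clearing
 * 300 records takes three Clear Event Records commands covering
 * 124 + 124 + 52 handles.
 */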

static void cxl_mem_get_records_log(struct cxl_memdev_state *mds,
				    enum cxl_event_log_type type)
{
	struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
	struct cxl_memdev *cxlmd = mds->cxlds.cxlmd;
	struct device *dev = mds->cxlds.dev;
	struct cxl_get_event_payload *payload;
	u8 log_type = type;
	u16 nr_rec;

	mutex_lock(&mds->event.log_lock);
	payload = mds->event.buf;

	do {
		int rc, i;
		struct cxl_mbox_cmd mbox_cmd = (struct cxl_mbox_cmd) {
			.opcode = CXL_MBOX_OP_GET_EVENT_RECORD,
			.payload_in = &log_type,
			.size_in = sizeof(log_type),
			.payload_out = payload,
			.size_out = cxl_mbox->payload_size,
			.min_out = struct_size(payload, records, 0),
		};

		rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
		if (rc) {
			dev_err_ratelimited(dev,
				"Event log '%d': Failed to query event records : %d",
				type, rc);
			break;
		}

		nr_rec = le16_to_cpu(payload->record_count);
		if (!nr_rec)
			break;

		for (i = 0; i < nr_rec; i++)
			__cxl_event_trace_record(cxlmd, type,
						 &payload->records[i]);

		if (payload->flags & CXL_GET_EVENT_FLAG_OVERFLOW)
			trace_cxl_overflow(cxlmd, type, payload);

		rc = cxl_clear_event_record(mds, type, payload);
		if (rc) {
			dev_err_ratelimited(dev,
				"Event log '%d': Failed to clear events : %d",
				type, rc);
			break;
		}
	} while (nr_rec);

	mutex_unlock(&mds->event.log_lock);
}

/**
 * cxl_mem_get_event_records - Get Event Records from the device
 * @mds: The driver data for the operation
 * @status: Event Status register value identifying which events are available.
 *
 * Retrieve all event records available on the device, report them as trace
 * events, and clear them.
 *
 * See CXL rev 3.0 @8.2.9.2.2 Get Event Records
 * See CXL rev 3.0 @8.2.9.2.3 Clear Event Records
 */
void cxl_mem_get_event_records(struct cxl_memdev_state *mds, u32 status)
{
	dev_dbg(mds->cxlds.dev, "Reading event logs: %x\n", status);

	if (status & CXLDEV_EVENT_STATUS_FATAL)
		cxl_mem_get_records_log(mds, CXL_EVENT_TYPE_FATAL);
	if (status & CXLDEV_EVENT_STATUS_FAIL)
		cxl_mem_get_records_log(mds, CXL_EVENT_TYPE_FAIL);
	if (status & CXLDEV_EVENT_STATUS_WARN)
		cxl_mem_get_records_log(mds, CXL_EVENT_TYPE_WARN);
	if (status & CXLDEV_EVENT_STATUS_INFO)
		cxl_mem_get_records_log(mds, CXL_EVENT_TYPE_INFO);
}
EXPORT_SYMBOL_NS_GPL(cxl_mem_get_event_records, "CXL");
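
/*
 * Example caller (comment-only sketch): a transport driver's event
 * interrupt thread would typically forward the Event Status register
 * value here. read_event_status() is a hypothetical helper, not an API
 * from this file:
 *
 *	static irqreturn_t cxl_event_thread(int irq, void *id)
 *	{
 *		struct cxl_memdev_state *mds = id;
 *		u32 status = read_event_status(mds);
 *
 *		cxl_mem_get_event_records(mds, status);
 *		return IRQ_HANDLED;
 *	}
 */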

/**
 * cxl_mem_get_partition_info - Get partition info
 * @mds: The driver data for the operation
 *
 * Retrieve the current partition info for the device specified. The active
 * values are the current capacity in bytes. If not 0, the 'next' values are
 * the pending values, in bytes, which take effect on the next cold reset.
 *
 * Return: 0 if no error; otherwise the result of the mailbox command.
 *
 * See CXL @8.2.9.5.2.1 Get Partition Info
 */
static int cxl_mem_get_partition_info(struct cxl_memdev_state *mds)
{
	struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
	struct cxl_mbox_get_partition_info pi;
	struct cxl_mbox_cmd mbox_cmd;
	int rc;

	mbox_cmd = (struct cxl_mbox_cmd) {
		.opcode = CXL_MBOX_OP_GET_PARTITION_INFO,
		.size_out = sizeof(pi),
		.payload_out = &pi,
	};
	rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
	if (rc)
		return rc;

	mds->active_volatile_bytes =
		le64_to_cpu(pi.active_volatile_cap) * CXL_CAPACITY_MULTIPLIER;
	mds->active_persistent_bytes =
		le64_to_cpu(pi.active_persistent_cap) * CXL_CAPACITY_MULTIPLIER;

	return 0;
}
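
/*
 * Worked example: capacities are reported in units of
 * CXL_CAPACITY_MULTIPLIER (256MB). A returned active_volatile_cap of
 * 0x10 therefore decodes to 16 * 256MB = 4GB of active volatile
 * capacity.
 */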

/**
 * cxl_dev_state_identify() - Send the IDENTIFY command to the device.
 * @mds: The driver data for the operation
 *
 * Return: 0 if identify was executed successfully or media not ready.
 *
 * This will dispatch the identify command to the device and on success populate
 * structures to be exported to sysfs.
 */
int cxl_dev_state_identify(struct cxl_memdev_state *mds)
{
	struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
	/* See CXL 2.0 Table 175 Identify Memory Device Output Payload */
	struct cxl_mbox_identify id;
	struct cxl_mbox_cmd mbox_cmd;
	u32 val;
	int rc;

	if (!mds->cxlds.media_ready)
		return 0;

	mbox_cmd = (struct cxl_mbox_cmd) {
		.opcode = CXL_MBOX_OP_IDENTIFY,
		.size_out = sizeof(id),
		.payload_out = &id,
	};
	rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
	if (rc < 0)
		return rc;

	mds->total_bytes =
		le64_to_cpu(id.total_capacity) * CXL_CAPACITY_MULTIPLIER;
	mds->volatile_only_bytes =
		le64_to_cpu(id.volatile_capacity) * CXL_CAPACITY_MULTIPLIER;
	mds->persistent_only_bytes =
		le64_to_cpu(id.persistent_capacity) * CXL_CAPACITY_MULTIPLIER;
	mds->partition_align_bytes =
		le64_to_cpu(id.partition_align) * CXL_CAPACITY_MULTIPLIER;

	mds->lsa_size = le32_to_cpu(id.lsa_size);
	memcpy(mds->firmware_version, id.fw_revision,
	       sizeof(id.fw_revision));

	if (test_bit(CXL_POISON_ENABLED_LIST, mds->poison.enabled_cmds)) {
		val = get_unaligned_le24(id.poison_list_max_mer);
		mds->poison.max_errors = min_t(u32, val, CXL_POISON_LIST_MAX);
	}

	return 0;
}
EXPORT_SYMBOL_NS_GPL(cxl_dev_state_identify, "CXL");

static int __cxl_mem_sanitize(struct cxl_memdev_state *mds, u16 cmd)
{
	struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
	int rc;
	u32 sec_out = 0;
	struct cxl_get_security_output {
		__le32 flags;
	} out;
	struct cxl_mbox_cmd sec_cmd = {
		.opcode = CXL_MBOX_OP_GET_SECURITY_STATE,
		.payload_out = &out,
		.size_out = sizeof(out),
	};
	struct cxl_mbox_cmd mbox_cmd = { .opcode = cmd };

	if (cmd != CXL_MBOX_OP_SANITIZE && cmd != CXL_MBOX_OP_SECURE_ERASE)
		return -EINVAL;

	rc = cxl_internal_send_cmd(cxl_mbox, &sec_cmd);
	if (rc < 0) {
		dev_err(cxl_mbox->host, "Failed to get security state: %d", rc);
		return rc;
	}

	/*
	 * Prior to using these commands, any security applied to
	 * the user data areas of the device shall be DISABLED (or
	 * UNLOCKED for the secure erase case).
	 */
	sec_out = le32_to_cpu(out.flags);
	if (sec_out & CXL_PMEM_SEC_STATE_USER_PASS_SET)
		return -EINVAL;

	if (cmd == CXL_MBOX_OP_SECURE_ERASE &&
	    sec_out & CXL_PMEM_SEC_STATE_LOCKED)
		return -EINVAL;

	rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
	if (rc < 0) {
		dev_err(cxl_mbox->host, "Failed to sanitize device: %d", rc);
		return rc;
	}

	return 0;
}

/**
 * cxl_mem_sanitize() - Send a sanitization command to the device.
 * @cxlmd: The device for the operation
 * @cmd: The specific sanitization command opcode
 *
 * Return: 0 if the command was executed successfully, regardless of
 * whether or not the actual security operation is done in the background,
 * such as for the Sanitize case.
 * Error return values can be the result of the mailbox command, -EINVAL
 * when security requirements are not met or invalid contexts, or -EBUSY
 * if the sanitize operation is already in flight.
 *
 * See CXL 3.0 @8.2.9.8.5.1 Sanitize and @8.2.9.8.5.2 Secure Erase.
 */
int cxl_mem_sanitize(struct cxl_memdev *cxlmd, u16 cmd)
{
	struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
	struct cxl_port *endpoint;

	/* synchronize with cxl_mem_probe() and decoder write operations */
	guard(device)(&cxlmd->dev);
	endpoint = cxlmd->endpoint;
	guard(rwsem_read)(&cxl_rwsem.region);
	/*
	 * Require an endpoint to be safe otherwise the driver can not
	 * be sure that the device is unmapped.
	 */
	if (endpoint && cxl_num_decoders_committed(endpoint) == 0)
		return __cxl_mem_sanitize(mds, cmd);

	return -EBUSY;
}
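
/*
 * Example trigger (comment-only sketch, assuming a sysfs attribute store
 * wired up by the memdev code; the attribute itself is illustrative):
 *
 *	static ssize_t sanitize_store(struct device *dev,
 *				      struct device_attribute *attr,
 *				      const char *buf, size_t len)
 *	{
 *		struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
 *		int rc = cxl_mem_sanitize(cxlmd, CXL_MBOX_OP_SANITIZE);
 *
 *		return rc ? rc : len;
 *	}
 */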

static void add_part(struct cxl_dpa_info *info, u64 start, u64 size,
		     enum cxl_partition_mode mode)
{
	int i = info->nr_partitions;

	if (size == 0)
		return;

	info->part[i].range = (struct range) {
		.start = start,
		.end = start + size - 1,
	};
	info->part[i].mode = mode;
	info->nr_partitions++;
}
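
/*
 * Illustrative layout: for a device with 1GB of RAM capacity followed by
 * 1GB of PMEM capacity, the two calls
 *
 *	add_part(info, 0, SZ_1G, CXL_PARTMODE_RAM);
 *	add_part(info, SZ_1G, SZ_1G, CXL_PARTMODE_PMEM);
 *
 * yield part[0].range = [0, SZ_1G - 1] and
 * part[1].range = [SZ_1G, 2 * SZ_1G - 1]. A zero-sized partition is
 * skipped and does not consume an index.
 */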

int cxl_mem_dpa_fetch(struct cxl_memdev_state *mds, struct cxl_dpa_info *info)
{
	struct cxl_dev_state *cxlds = &mds->cxlds;
	struct device *dev = cxlds->dev;
	int rc;

	if (!cxlds->media_ready) {
		info->size = 0;
		return 0;
	}

	info->size = mds->total_bytes;

	if (mds->partition_align_bytes == 0) {
		add_part(info, 0, mds->volatile_only_bytes, CXL_PARTMODE_RAM);
		add_part(info, mds->volatile_only_bytes,
			 mds->persistent_only_bytes, CXL_PARTMODE_PMEM);
		return 0;
	}

	rc = cxl_mem_get_partition_info(mds);
	if (rc) {
		dev_err(dev, "Failed to query partition information\n");
		return rc;
	}

	add_part(info, 0, mds->active_volatile_bytes, CXL_PARTMODE_RAM);
	add_part(info, mds->active_volatile_bytes, mds->active_persistent_bytes,
		 CXL_PARTMODE_PMEM);

	return 0;
}
EXPORT_SYMBOL_NS_GPL(cxl_mem_dpa_fetch, "CXL");
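
/*
 * Typical probe-time flow (comment-only sketch; cxl_dpa_setup() lives
 * elsewhere in cxl_core and is named here only for orientation):
 *
 *	struct cxl_dpa_info range_info = { 0 };
 *
 *	rc = cxl_mem_dpa_fetch(mds, &range_info);
 *	if (rc)
 *		return rc;
 *	rc = cxl_dpa_setup(cxlds, &range_info);
 */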

int cxl_get_dirty_count(struct cxl_memdev_state *mds, u32 *count)
{
	struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
	struct cxl_mbox_get_health_info_out hi;
	struct cxl_mbox_cmd mbox_cmd;
	int rc;

	mbox_cmd = (struct cxl_mbox_cmd) {
		.opcode = CXL_MBOX_OP_GET_HEALTH_INFO,
		.size_out = sizeof(hi),
		.payload_out = &hi,
	};

	rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
	if (!rc)
		*count = le32_to_cpu(hi.dirty_shutdown_cnt);

	return rc;
}
EXPORT_SYMBOL_NS_GPL(cxl_get_dirty_count, "CXL");
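
/*
 * Example consumer (comment-only sketch): read the Health Info dirty
 * shutdown count, e.g. to seed a sysfs attribute at probe time:
 *
 *	u32 count;
 *
 *	if (!cxl_get_dirty_count(mds, &count))
 *		dev_dbg(mds->cxlds.dev, "dirty shutdown count: %u\n", count);
 */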

int cxl_arm_dirty_shutdown(struct cxl_memdev_state *mds)
{
	struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
	struct cxl_mbox_cmd mbox_cmd;
	struct cxl_mbox_set_shutdown_state_in in = {
		.state = 1
	};

	mbox_cmd = (struct cxl_mbox_cmd) {
		.opcode = CXL_MBOX_OP_SET_SHUTDOWN_STATE,
		.size_in = sizeof(in),
		.payload_in = &in,
	};

	return cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
}
EXPORT_SYMBOL_NS_GPL(cxl_arm_dirty_shutdown, "CXL");
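
/*
 * Design note: arming writes shutdown state 1 ("dirty") up front, so an
 * unexpected power loss is recorded as a dirty shutdown by default. The
 * expectation is that a driver then writes the clean state on an orderly
 * shutdown path; that clean-path helper is outside this file.
 */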

int cxl_set_timestamp(struct cxl_memdev_state *mds)
{
	struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
	struct cxl_mbox_cmd mbox_cmd;
	struct cxl_mbox_set_timestamp_in pi;
	int rc;

	pi.timestamp = cpu_to_le64(ktime_get_real_ns());
	mbox_cmd = (struct cxl_mbox_cmd) {
		.opcode = CXL_MBOX_OP_SET_TIMESTAMP,
		.size_in = sizeof(pi),
		.payload_in = &pi,
	};

	rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
	/*
	 * The command is optional. Devices may have another way of providing
	 * a timestamp, or may return all 0s in timestamp fields. Don't report
	 * an error if this command isn't supported.
	 */
	if (rc && (mbox_cmd.return_code != CXL_MBOX_CMD_RC_UNSUPPORTED))
		return rc;

	return 0;
}
EXPORT_SYMBOL_NS_GPL(cxl_set_timestamp, "CXL");

int cxl_mem_get_poison(struct cxl_memdev *cxlmd, u64 offset, u64 len,
		       struct cxl_region *cxlr)
{
	struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
	struct cxl_mailbox *cxl_mbox = &cxlmd->cxlds->cxl_mbox;
	struct cxl_mbox_poison_out *po;
	struct cxl_mbox_poison_in pi;
	int nr_records = 0;
	int rc;

	ACQUIRE(mutex_intr, lock)(&mds->poison.mutex);
	if ((rc = ACQUIRE_ERR(mutex_intr, &lock)))
		return rc;

	po = mds->poison.list_out;
	pi.offset = cpu_to_le64(offset);
	pi.length = cpu_to_le64(len / CXL_POISON_LEN_MULT);

	do {
		struct cxl_mbox_cmd mbox_cmd = (struct cxl_mbox_cmd){
			.opcode = CXL_MBOX_OP_GET_POISON,
			.size_in = sizeof(pi),
			.payload_in = &pi,
			.size_out = cxl_mbox->payload_size,
			.payload_out = po,
			.min_out = struct_size(po, record, 0),
		};

		rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
		if (rc)
			break;

		for (int i = 0; i < le16_to_cpu(po->count); i++)
			trace_cxl_poison(cxlmd, cxlr, &po->record[i],
					 po->flags, po->overflow_ts,
					 CXL_POISON_TRACE_LIST);

		/* Protect against an uncleared _FLAG_MORE */
		nr_records = nr_records + le16_to_cpu(po->count);
		if (nr_records >= mds->poison.max_errors) {
			dev_dbg(&cxlmd->dev, "Max Error Records reached: %d\n",
				nr_records);
			break;
		}
	} while (po->flags & CXL_POISON_FLAG_MORE);

	return rc;
}
EXPORT_SYMBOL_NS_GPL(cxl_mem_get_poison, "CXL");
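
/*
 * Example invocation (comment-only sketch, assuming a whole-device scan
 * over the device's DPA resource; callers tracing a specific region pass
 * its struct cxl_region instead of NULL):
 *
 *	rc = cxl_mem_get_poison(cxlmd, 0, resource_size(&cxlds->dpa_res),
 *				NULL);
 */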

static void free_poison_buf(void *buf)
{
	kvfree(buf);
}

/* The Get Poison List output buffer is protected by mds->poison.mutex */
static int cxl_poison_alloc_buf(struct cxl_memdev_state *mds)
{
	struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;

	mds->poison.list_out = kvmalloc(cxl_mbox->payload_size, GFP_KERNEL);
	if (!mds->poison.list_out)
		return -ENOMEM;

	return devm_add_action_or_reset(mds->cxlds.dev, free_poison_buf,
					mds->poison.list_out);
}

int cxl_poison_state_init(struct cxl_memdev_state *mds)
{
	int rc;

	if (!test_bit(CXL_POISON_ENABLED_LIST, mds->poison.enabled_cmds))
		return 0;

	rc = cxl_poison_alloc_buf(mds);
	if (rc) {
		clear_bit(CXL_POISON_ENABLED_LIST, mds->poison.enabled_cmds);
		return rc;
	}

	mutex_init(&mds->poison.mutex);
	return 0;
}
EXPORT_SYMBOL_NS_GPL(cxl_poison_state_init, "CXL");

int cxl_mailbox_init(struct cxl_mailbox *cxl_mbox, struct device *host)
{
	if (!cxl_mbox || !host)
		return -EINVAL;

	cxl_mbox->host = host;
	mutex_init(&cxl_mbox->mbox_mutex);
	rcuwait_init(&cxl_mbox->mbox_wait);

	return 0;
}
EXPORT_SYMBOL_NS_GPL(cxl_mailbox_init, "CXL");
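
/*
 * Example initialization (comment-only sketch, assuming a transport
 * driver that has already sized the hardware payload and implemented a
 * send method; my_mbox_send and hw_payload_size are hypothetical):
 *
 *	struct cxl_mailbox *cxl_mbox = &cxlds->cxl_mbox;
 *
 *	rc = cxl_mailbox_init(cxl_mbox, cxlds->dev);
 *	if (rc)
 *		return rc;
 *	cxl_mbox->mbox_send = my_mbox_send;
 *	cxl_mbox->payload_size = hw_payload_size;
 */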

struct cxl_memdev_state *cxl_memdev_state_create(struct device *dev)
{
	struct cxl_memdev_state *mds;
	int rc;

	mds = devm_kzalloc(dev, sizeof(*mds), GFP_KERNEL);
	if (!mds) {
		dev_err(dev, "No memory available\n");
		return ERR_PTR(-ENOMEM);
	}

	mutex_init(&mds->event.log_lock);
	mds->cxlds.dev = dev;
	mds->cxlds.reg_map.host = dev;
	mds->cxlds.cxl_mbox.host = dev;
	mds->cxlds.reg_map.resource = CXL_RESOURCE_NONE;
	mds->cxlds.type = CXL_DEVTYPE_CLASSMEM;

	rc = devm_cxl_register_mce_notifier(dev, &mds->mce_notifier);
	if (rc == -EOPNOTSUPP)
		dev_warn(dev, "CXL MCE unsupported\n");
	else if (rc)
		return ERR_PTR(rc);

	return mds;
}
EXPORT_SYMBOL_NS_GPL(cxl_memdev_state_create, "CXL");
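
/*
 * Putting it together (comment-only sketch of a probe sequence using the
 * helpers exported above; exact ordering and error handling follow the
 * transport driver, and are elided here):
 *
 *	mds = cxl_memdev_state_create(dev);
 *	cxl_mailbox_init(&mds->cxlds.cxl_mbox, dev);
 *	cxl_set_timestamp(mds);
 *	cxl_poison_state_init(mds);
 *	cxl_dev_state_identify(mds);
 *	cxl_mem_dpa_fetch(mds, &range_info);
 */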

void __init cxl_mbox_init(void)
{
	struct dentry *mbox_debugfs;

	mbox_debugfs = cxl_debugfs_create_dir("mbox");
	debugfs_create_bool("raw_allow_all", 0600, mbox_debugfs,
			    &cxl_raw_allow_all);
}