mirror of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git (synced 2025-08-05 16:54:27 +00:00)

A "DPA invalidation event" is any scenario where the contents of a DPA (Device Physical Address) is modified in a way that is incoherent with CPU caches, or if the HPA (Host Physical Address) to DPA association changes due to a remapping event. PMEM security events like Unlock and Passphrase Secure Erase already manage caches through LIBNVDIMM, so that leaves HPA to DPA remap events that need cache management by the CXL core. Those only happen when the boot time CXL configuration has changed. That event occurs when userspace attaches an endpoint decoder to a region configuration, and that region is subsequently activated. The implications of not invalidating caches between remap events is that reads from the region at different points in time may return different results due to stale cached data from the previous HPA to DPA mapping. Without a guarantee that the region contents after cxl_region_probe() are written before being read (a layering-violation assumption that cxl_region_probe() can not make) the CXL subsystem needs to ensure that reads that precede writes see consistent results. A CONFIG_CXL_REGION_INVALIDATION_TEST option is added to support debug and unit testing of the CXL implementation in QEMU or other environments where cpu_cache_has_invalidate_memregion() returns false. This may prove too restrictive for QEMU where the HDM decoders are emulated, but in that case the CXL subsystem needs some new mechanism / indication that the HDM decoder is emulated and not a passthrough of real hardware. Reviewed-by: Dave Jiang <dave.jiang@intel.com> Link: https://lore.kernel.org/r/166993222098.1995348.16604163596374520890.stgit@dwillia2-xfh.jf.intel.com Signed-off-by: Dan Williams <dan.j.williams@intel.com>
163 lines
5.1 KiB
C
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2022 Intel Corporation. All rights reserved. */
#include <linux/libnvdimm.h>
#include <asm/unaligned.h>
#include <linux/module.h>
#include <linux/async.h>
#include <linux/slab.h>
#include <linux/memregion.h>
#include "cxlmem.h"
#include "cxl.h"

/*
 * Translate the output of the Get Security State mailbox command into
 * LIBNVDIMM security flags.
 */
static unsigned long cxl_pmem_get_security_flags(struct nvdimm *nvdimm,
						 enum nvdimm_passphrase_type ptype)
{
	struct cxl_nvdimm *cxl_nvd = nvdimm_provider_data(nvdimm);
	struct cxl_memdev *cxlmd = cxl_nvd->cxlmd;
	struct cxl_dev_state *cxlds = cxlmd->cxlds;
	unsigned long security_flags = 0;
	u32 sec_out;
	int rc;

	rc = cxl_mbox_send_cmd(cxlds, CXL_MBOX_OP_GET_SECURITY_STATE, NULL, 0,
			       &sec_out, sizeof(sec_out));
	if (rc < 0)
		return 0;

	if (ptype == NVDIMM_MASTER) {
		if (sec_out & CXL_PMEM_SEC_STATE_MASTER_PASS_SET)
			set_bit(NVDIMM_SECURITY_UNLOCKED, &security_flags);
		else
			set_bit(NVDIMM_SECURITY_DISABLED, &security_flags);
		if (sec_out & CXL_PMEM_SEC_STATE_MASTER_PLIMIT)
			set_bit(NVDIMM_SECURITY_FROZEN, &security_flags);
		return security_flags;
	}

	if (sec_out & CXL_PMEM_SEC_STATE_USER_PASS_SET) {
		if (sec_out & CXL_PMEM_SEC_STATE_FROZEN ||
		    sec_out & CXL_PMEM_SEC_STATE_USER_PLIMIT)
			set_bit(NVDIMM_SECURITY_FROZEN, &security_flags);

		if (sec_out & CXL_PMEM_SEC_STATE_LOCKED)
			set_bit(NVDIMM_SECURITY_LOCKED, &security_flags);
		else
			set_bit(NVDIMM_SECURITY_UNLOCKED, &security_flags);
	} else {
		set_bit(NVDIMM_SECURITY_DISABLED, &security_flags);
	}

	return security_flags;
}

static int cxl_pmem_security_change_key(struct nvdimm *nvdimm,
					const struct nvdimm_key_data *old_data,
					const struct nvdimm_key_data *new_data,
					enum nvdimm_passphrase_type ptype)
{
	struct cxl_nvdimm *cxl_nvd = nvdimm_provider_data(nvdimm);
	struct cxl_memdev *cxlmd = cxl_nvd->cxlmd;
	struct cxl_dev_state *cxlds = cxlmd->cxlds;
	struct cxl_set_pass set_pass;
	int rc;

	set_pass.type = ptype == NVDIMM_MASTER ?
		CXL_PMEM_SEC_PASS_MASTER : CXL_PMEM_SEC_PASS_USER;
	memcpy(set_pass.old_pass, old_data->data, NVDIMM_PASSPHRASE_LEN);
	memcpy(set_pass.new_pass, new_data->data, NVDIMM_PASSPHRASE_LEN);

	rc = cxl_mbox_send_cmd(cxlds, CXL_MBOX_OP_SET_PASSPHRASE,
			       &set_pass, sizeof(set_pass), NULL, 0);
	return rc;
}

/* Disable either the user or the master passphrase, per @ptype */
static int __cxl_pmem_security_disable(struct nvdimm *nvdimm,
				       const struct nvdimm_key_data *key_data,
				       enum nvdimm_passphrase_type ptype)
{
	struct cxl_nvdimm *cxl_nvd = nvdimm_provider_data(nvdimm);
	struct cxl_memdev *cxlmd = cxl_nvd->cxlmd;
	struct cxl_dev_state *cxlds = cxlmd->cxlds;
	struct cxl_disable_pass dis_pass;
	int rc;

	dis_pass.type = ptype == NVDIMM_MASTER ?
		CXL_PMEM_SEC_PASS_MASTER : CXL_PMEM_SEC_PASS_USER;
	memcpy(dis_pass.pass, key_data->data, NVDIMM_PASSPHRASE_LEN);

	rc = cxl_mbox_send_cmd(cxlds, CXL_MBOX_OP_DISABLE_PASSPHRASE,
			       &dis_pass, sizeof(dis_pass), NULL, 0);
	return rc;
}

static int cxl_pmem_security_disable(struct nvdimm *nvdimm,
				     const struct nvdimm_key_data *key_data)
{
	return __cxl_pmem_security_disable(nvdimm, key_data, NVDIMM_USER);
}

static int cxl_pmem_security_disable_master(struct nvdimm *nvdimm,
					    const struct nvdimm_key_data *key_data)
{
	return __cxl_pmem_security_disable(nvdimm, key_data, NVDIMM_MASTER);
}

static int cxl_pmem_security_freeze(struct nvdimm *nvdimm)
{
	struct cxl_nvdimm *cxl_nvd = nvdimm_provider_data(nvdimm);
	struct cxl_memdev *cxlmd = cxl_nvd->cxlmd;
	struct cxl_dev_state *cxlds = cxlmd->cxlds;

	return cxl_mbox_send_cmd(cxlds, CXL_MBOX_OP_FREEZE_SECURITY, NULL, 0, NULL, 0);
}

static int cxl_pmem_security_unlock(struct nvdimm *nvdimm,
				    const struct nvdimm_key_data *key_data)
{
	struct cxl_nvdimm *cxl_nvd = nvdimm_provider_data(nvdimm);
	struct cxl_memdev *cxlmd = cxl_nvd->cxlmd;
	struct cxl_dev_state *cxlds = cxlmd->cxlds;
	u8 pass[NVDIMM_PASSPHRASE_LEN];
	int rc;

	memcpy(pass, key_data->data, NVDIMM_PASSPHRASE_LEN);
	rc = cxl_mbox_send_cmd(cxlds, CXL_MBOX_OP_UNLOCK,
			       pass, NVDIMM_PASSPHRASE_LEN, NULL, 0);
	if (rc < 0)
		return rc;

	return 0;
}

static int cxl_pmem_security_passphrase_erase(struct nvdimm *nvdimm,
					      const struct nvdimm_key_data *key,
					      enum nvdimm_passphrase_type ptype)
{
	struct cxl_nvdimm *cxl_nvd = nvdimm_provider_data(nvdimm);
	struct cxl_memdev *cxlmd = cxl_nvd->cxlmd;
	struct cxl_dev_state *cxlds = cxlmd->cxlds;
	struct cxl_pass_erase erase;
	int rc;

	erase.type = ptype == NVDIMM_MASTER ?
		CXL_PMEM_SEC_PASS_MASTER : CXL_PMEM_SEC_PASS_USER;
	memcpy(erase.pass, key->data, NVDIMM_PASSPHRASE_LEN);
	rc = cxl_mbox_send_cmd(cxlds, CXL_MBOX_OP_PASSPHRASE_SECURE_ERASE,
			       &erase, sizeof(erase), NULL, 0);
	if (rc < 0)
		return rc;

	return 0;
}

/*
 * Security operations table handed to LIBNVDIMM when the cxl_pmem
 * driver creates the nvdimm device.
 */
static const struct nvdimm_security_ops __cxl_security_ops = {
	.get_flags = cxl_pmem_get_security_flags,
	.change_key = cxl_pmem_security_change_key,
	.disable = cxl_pmem_security_disable,
	.freeze = cxl_pmem_security_freeze,
	.unlock = cxl_pmem_security_unlock,
	.erase = cxl_pmem_security_passphrase_erase,
	.disable_master = cxl_pmem_security_disable_master,
};

const struct nvdimm_security_ops *cxl_security_ops = &__cxl_security_ops;