Commit 95dddcb5 authored by Dan Williams's avatar Dan Williams
Browse files

Merge branch 'for-6.2/cxl-security' into for-6.2/cxl

Pick CXL PMEM security commands for v6.2. Resolve conflicts with the
removal of the cxl_pmem_wq.
parents da8380bb d18bc74a
Loading
Loading
Loading
Loading
+14 −0
Original line number Diff line number Diff line
@@ -41,3 +41,17 @@ KernelVersion: 5.18
Contact:        Kajol Jain <kjain@linux.ibm.com>
Description:	(RO) This sysfs file exposes the cpumask which is designated
		to retrieve nvdimm pmu event counter data.

What:		/sys/bus/nd/devices/nmemX/cxl/id
Date:		November 2022
KernelVersion:	6.2
Contact:	Dave Jiang <dave.jiang@intel.com>
Description:	(RO) Show the id (serial) of the device. This is CXL specific.

What:		/sys/bus/nd/devices/nmemX/cxl/provider
Date:		November 2022
KernelVersion:	6.2
Contact:	Dave Jiang <dave.jiang@intel.com>
Description:	(RO) Shows the CXL bridge device that ties a CXL memory device
		to this NVDIMM device. I.e. the parent of the device returned is
		a /sys/bus/cxl/devices/memX instance.
+0 −25
Original line number Diff line number Diff line
@@ -212,9 +212,6 @@ static int __maybe_unused intel_security_unlock(struct nvdimm *nvdimm,
	if (!test_bit(NVDIMM_INTEL_UNLOCK_UNIT, &nfit_mem->dsm_mask))
		return -ENOTTY;

	if (!cpu_cache_has_invalidate_memregion())
		return -EINVAL;

	memcpy(nd_cmd.cmd.passphrase, key_data->data,
			sizeof(nd_cmd.cmd.passphrase));
	rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL);
@@ -229,9 +226,6 @@ static int __maybe_unused intel_security_unlock(struct nvdimm *nvdimm,
		return -EIO;
	}

	/* DIMM unlocked, invalidate all CPU caches before we read it */
	cpu_cache_invalidate_memregion(IORES_DESC_PERSISTENT_MEMORY);

	return 0;
}

@@ -299,11 +293,6 @@ static int __maybe_unused intel_security_erase(struct nvdimm *nvdimm,
	if (!test_bit(cmd, &nfit_mem->dsm_mask))
		return -ENOTTY;

	if (!cpu_cache_has_invalidate_memregion())
		return -EINVAL;

	/* flush all cache before we erase DIMM */
	cpu_cache_invalidate_memregion(IORES_DESC_PERSISTENT_MEMORY);
	memcpy(nd_cmd.cmd.passphrase, key->data,
			sizeof(nd_cmd.cmd.passphrase));
	rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL);
@@ -322,8 +311,6 @@ static int __maybe_unused intel_security_erase(struct nvdimm *nvdimm,
		return -ENXIO;
	}

	/* DIMM erased, invalidate all CPU caches before we read it */
	cpu_cache_invalidate_memregion(IORES_DESC_PERSISTENT_MEMORY);
	return 0;
}

@@ -346,9 +333,6 @@ static int __maybe_unused intel_security_query_overwrite(struct nvdimm *nvdimm)
	if (!test_bit(NVDIMM_INTEL_QUERY_OVERWRITE, &nfit_mem->dsm_mask))
		return -ENOTTY;

	if (!cpu_cache_has_invalidate_memregion())
		return -EINVAL;

	rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL);
	if (rc < 0)
		return rc;
@@ -362,8 +346,6 @@ static int __maybe_unused intel_security_query_overwrite(struct nvdimm *nvdimm)
		return -ENXIO;
	}

	/* flush all cache before we make the nvdimms available */
	cpu_cache_invalidate_memregion(IORES_DESC_PERSISTENT_MEMORY);
	return 0;
}

@@ -388,11 +370,6 @@ static int __maybe_unused intel_security_overwrite(struct nvdimm *nvdimm,
	if (!test_bit(NVDIMM_INTEL_OVERWRITE, &nfit_mem->dsm_mask))
		return -ENOTTY;

	if (!cpu_cache_has_invalidate_memregion())
		return -EINVAL;

	/* flush all cache before we erase DIMM */
	cpu_cache_invalidate_memregion(IORES_DESC_PERSISTENT_MEMORY);
	memcpy(nd_cmd.cmd.passphrase, nkey->data,
			sizeof(nd_cmd.cmd.passphrase));
	rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL);
@@ -770,5 +747,3 @@ static const struct nvdimm_fw_ops __intel_fw_ops = {
};

const struct nvdimm_fw_ops *intel_fw_ops = &__intel_fw_ops;

MODULE_IMPORT_NS(DEVMEM);
+18 −0
Original line number Diff line number Diff line
@@ -111,4 +111,22 @@ config CXL_REGION
	select MEMREGION
	select GET_FREE_REGION

config CXL_REGION_INVALIDATION_TEST
	bool "CXL: Region Cache Management Bypass (TEST)"
	depends on CXL_REGION
	help
	  CXL Region management and security operations potentially invalidate
	  the content of CPU caches without notifying those caches to
	  invalidate the affected cachelines. The CXL Region driver attempts
	  to invalidate caches when those events occur.  If that invalidation
	  fails the region will fail to enable.  Cache invalidation can
	  fail when the CPU does not provide a cache invalidation
	  mechanism. For example, usage of wbinvd is restricted to
	  bare metal x86. However, for testing purposes toggling this option
	  can disable that data integrity safety and proceed with enabling
	  regions when there might be conflicting contents in the CPU cache.

	  If unsure, or if this kernel is meant for production environments,
	  say N.

endif
+1 −1
Original line number Diff line number Diff line
@@ -9,5 +9,5 @@ obj-$(CONFIG_CXL_PORT) += cxl_port.o
cxl_mem-y := mem.o
cxl_pci-y := pci.o
cxl_acpi-y := acpi.o
cxl_pmem-y := pmem.o
cxl_pmem-y := pmem.o security.o
cxl_port-y := port.o
+16 −0
Original line number Diff line number Diff line
@@ -65,6 +65,12 @@ static struct cxl_mem_command cxl_mem_commands[CXL_MEM_COMMAND_ID_MAX] = {
	CXL_CMD(GET_SCAN_MEDIA_CAPS, 0x10, 0x4, 0),
	CXL_CMD(SCAN_MEDIA, 0x11, 0, 0),
	CXL_CMD(GET_SCAN_MEDIA, 0, CXL_VARIABLE_PAYLOAD, 0),
	CXL_CMD(GET_SECURITY_STATE, 0, 0x4, 0),
	CXL_CMD(SET_PASSPHRASE, 0x60, 0, 0),
	CXL_CMD(DISABLE_PASSPHRASE, 0x40, 0, 0),
	CXL_CMD(FREEZE_SECURITY, 0, 0, 0),
	CXL_CMD(UNLOCK, 0x20, 0, 0),
	CXL_CMD(PASSPHRASE_SECURE_ERASE, 0x40, 0, 0),
};

/*
@@ -698,6 +704,16 @@ int cxl_enumerate_cmds(struct cxl_dev_state *cxlds)
		rc = 0;
	}

	/*
	 * Setup permanently kernel exclusive commands, i.e. the
	 * mechanism is driven through sysfs, keyctl, etc...
	 */
	set_bit(CXL_MEM_COMMAND_ID_SET_PASSPHRASE, cxlds->exclusive_cmds);
	set_bit(CXL_MEM_COMMAND_ID_DISABLE_PASSPHRASE, cxlds->exclusive_cmds);
	set_bit(CXL_MEM_COMMAND_ID_UNLOCK, cxlds->exclusive_cmds);
	set_bit(CXL_MEM_COMMAND_ID_PASSPHRASE_SECURE_ERASE,
		cxlds->exclusive_cmds);

out:
	kvfree(gsl);
	return rc;
Loading