Commit dd72945c authored by Linus Torvalds's avatar Linus Torvalds
Browse files
Pull cxl updates from Dan Williams:
 "More preparation and plumbing work in the CXL subsystem.

  From an end user perspective the highlight here is lighting up the CXL
  Persistent Memory related commands (label read / write) with the
  generic ioctl() front-end in LIBNVDIMM.

  Otherwise, the ability to instantiate new persistent and volatile
  memory regions is still on track for v5.17.

  Summary:

   - Fix support for platforms that do not enumerate every ACPI0016 (CXL
     Host Bridge) in the CHBS (CXL Host Bridge Structure).

   - Introduce a common pci_find_dvsec_capability() helper, clean up
     open coded implementations in various drivers.

   - Add 'cxl_test' for regression testing CXL subsystem ABIs.
     'cxl_test' is a module built from tools/testing/cxl/ that mocks up
     a CXL topology to augment the nascent support for emulation of CXL
     devices in QEMU.

   - Convert libnvdimm to use the uuid API.

   - Complete the definition of CXL namespace labels in libnvdimm.

   - Tunnel libnvdimm label operations from nd_ioctl() back to the CXL
     mailbox driver. Enable 'ndctl {read,write}-labels' for CXL.

   - Continue to sort and refactor functionality into distinct driver
     and core-infrastructure buckets. For example, mailbox handling is
     now a generic core capability consumed by the PCI and cxl_test
     drivers"

* tag 'cxl-for-5.16' of git://git.kernel.org/pub/scm/linux/kernel/git/cxl/cxl: (34 commits)
  ocxl: Use pci core's DVSEC functionality
  cxl/pci: Use pci core's DVSEC functionality
  PCI: Add pci_find_dvsec_capability to find designated VSEC
  cxl/pci: Split cxl_pci_setup_regs()
  cxl/pci: Add @base to cxl_register_map
  cxl/pci: Make more use of cxl_register_map
  cxl/pci: Remove pci request/release regions
  cxl/pci: Fix NULL vs ERR_PTR confusion
  cxl/pci: Remove dev_dbg for unknown register blocks
  cxl/pci: Convert register block identifiers to an enum
  cxl/acpi: Do not fail cxl_acpi_probe() based on a missing CHBS
  cxl/pci: Disambiguate cxl_pci further from cxl_mem
  Documentation/cxl: Add bus internal docs
  cxl/core: Split decoder setup into alloc + add
  tools/testing/cxl: Introduce a mock memory device + driver
  cxl/mbox: Move command definitions to common location
  cxl/bus: Populate the target list at decoder create
  tools/testing/cxl: Introduce a mocked-up CXL port hierarchy
  cxl/pmem: Add support for multiple nvdimm-bridge objects
  cxl/pmem: Translate NVDIMM label commands to CXL label commands
  ...
parents dab334c9 c6d7e134
Loading
Loading
Loading
Loading
+6 −0
Original line number Diff line number Diff line
@@ -39,12 +39,18 @@ CXL Core
.. kernel-doc:: drivers/cxl/core/bus.c
   :doc: cxl core

.. kernel-doc:: drivers/cxl/core/bus.c
   :identifiers:

.. kernel-doc:: drivers/cxl/core/pmem.c
   :doc: cxl pmem

.. kernel-doc:: drivers/cxl/core/regs.c
   :doc: cxl registers

.. kernel-doc:: drivers/cxl/core/mbox.c
   :doc: cxl mbox

External Interfaces
===================

+2 −1
Original line number Diff line number Diff line
@@ -107,7 +107,8 @@ static int get_max_afu_index(struct pci_dev *dev, int *afu_idx)
	int pos;
	u32 val;

	pos = find_dvsec_from_pos(dev, OCXL_DVSEC_FUNC_ID, 0);
	pos = pci_find_dvsec_capability(dev, PCI_VENDOR_ID_IBM,
					OCXL_DVSEC_FUNC_ID);
	if (!pos)
		return -ESRCH;

+97 −42
Original line number Diff line number Diff line
@@ -52,6 +52,12 @@ static int cxl_acpi_cfmws_verify(struct device *dev,
		return -EINVAL;
	}

	if (CFMWS_INTERLEAVE_WAYS(cfmws) > CXL_DECODER_MAX_INTERLEAVE) {
		dev_err(dev, "CFMWS Interleave Ways (%d) too large\n",
			CFMWS_INTERLEAVE_WAYS(cfmws));
		return -EINVAL;
	}

	expected_len = struct_size((cfmws), interleave_targets,
				   CFMWS_INTERLEAVE_WAYS(cfmws));

@@ -71,11 +77,11 @@ static int cxl_acpi_cfmws_verify(struct device *dev,
static void cxl_add_cfmws_decoders(struct device *dev,
				   struct cxl_port *root_port)
{
	int target_map[CXL_DECODER_MAX_INTERLEAVE];
	struct acpi_cedt_cfmws *cfmws;
	struct cxl_decoder *cxld;
	acpi_size len, cur = 0;
	void *cedt_subtable;
	unsigned long flags;
	int rc;

	len = acpi_cedt->length - sizeof(*acpi_cedt);
@@ -83,6 +89,7 @@ static void cxl_add_cfmws_decoders(struct device *dev,

	while (cur < len) {
		struct acpi_cedt_header *c = cedt_subtable + cur;
		int i;

		if (c->type != ACPI_CEDT_TYPE_CFMWS) {
			cur += c->length;
@@ -108,24 +115,39 @@ static void cxl_add_cfmws_decoders(struct device *dev,
			continue;
		}

		flags = cfmws_to_decoder_flags(cfmws->restrictions);
		cxld = devm_cxl_add_decoder(dev, root_port,
					    CFMWS_INTERLEAVE_WAYS(cfmws),
					    cfmws->base_hpa, cfmws->window_size,
					    CFMWS_INTERLEAVE_WAYS(cfmws),
					    CFMWS_INTERLEAVE_GRANULARITY(cfmws),
					    CXL_DECODER_EXPANDER,
					    flags);
		for (i = 0; i < CFMWS_INTERLEAVE_WAYS(cfmws); i++)
			target_map[i] = cfmws->interleave_targets[i];

		cxld = cxl_decoder_alloc(root_port,
					 CFMWS_INTERLEAVE_WAYS(cfmws));
		if (IS_ERR(cxld))
			goto next;

		if (IS_ERR(cxld)) {
		cxld->flags = cfmws_to_decoder_flags(cfmws->restrictions);
		cxld->target_type = CXL_DECODER_EXPANDER;
		cxld->range = (struct range) {
			.start = cfmws->base_hpa,
			.end = cfmws->base_hpa + cfmws->window_size - 1,
		};
		cxld->interleave_ways = CFMWS_INTERLEAVE_WAYS(cfmws);
		cxld->interleave_granularity =
			CFMWS_INTERLEAVE_GRANULARITY(cfmws);

		rc = cxl_decoder_add(cxld, target_map);
		if (rc)
			put_device(&cxld->dev);
		else
			rc = cxl_decoder_autoremove(dev, cxld);
		if (rc) {
			dev_err(dev, "Failed to add decoder for %#llx-%#llx\n",
				cfmws->base_hpa, cfmws->base_hpa +
				cfmws->window_size - 1);
		} else {
			goto next;
		}
		dev_dbg(dev, "add: %s range %#llx-%#llx\n",
			dev_name(&cxld->dev), cfmws->base_hpa,
			cfmws->base_hpa + cfmws->window_size - 1);
		}
next:
		cur += c->length;
	}
}
@@ -182,15 +204,7 @@ static resource_size_t get_chbcr(struct acpi_cedt_chbs *chbs)
	return IS_ERR(chbs) ? CXL_RESOURCE_NONE : chbs->base;
}

/*
 * Context threaded through the PCI-bus walk callback (see
 * match_add_root_ports) while scanning a host bridge for CXL root ports.
 */
struct cxl_walk_context {
	struct device *dev;
	struct pci_bus *root;	/* root bus the walked devices are matched against */
	struct cxl_port *port;	/* presumably the port receiving discovered dports — confirm in match_add_root_ports */
	int error;		/* error status reported back to the walk's caller */
	int count;		/* number of matching root ports found */
};

static int match_add_root_ports(struct pci_dev *pdev, void *data)
__mock int match_add_root_ports(struct pci_dev *pdev, void *data)
{
	struct cxl_walk_context *ctx = data;
	struct pci_bus *root_bus = ctx->root;
@@ -239,7 +253,8 @@ static struct cxl_dport *find_dport_by_dev(struct cxl_port *port, struct device
	return NULL;
}

static struct acpi_device *to_cxl_host_bridge(struct device *dev)
__mock struct acpi_device *to_cxl_host_bridge(struct device *host,
					      struct device *dev)
{
	struct acpi_device *adev = to_acpi_device(dev);

@@ -257,11 +272,12 @@ static struct acpi_device *to_cxl_host_bridge(struct device *dev)
 */
static int add_host_bridge_uport(struct device *match, void *arg)
{
	struct acpi_device *bridge = to_cxl_host_bridge(match);
	struct cxl_port *root_port = arg;
	struct device *host = root_port->dev.parent;
	struct acpi_device *bridge = to_cxl_host_bridge(host, match);
	struct acpi_pci_root *pci_root;
	struct cxl_walk_context ctx;
	int single_port_map[1], rc;
	struct cxl_decoder *cxld;
	struct cxl_dport *dport;
	struct cxl_port *port;
@@ -272,7 +288,7 @@ static int add_host_bridge_uport(struct device *match, void *arg)
	dport = find_dport_by_dev(root_port, match);
	if (!dport) {
		dev_dbg(host, "host bridge expected and not found\n");
		return -ENODEV;
		return 0;
	}

	port = devm_cxl_add_port(host, match, dport->component_reg_phys,
@@ -297,22 +313,46 @@ static int add_host_bridge_uport(struct device *match, void *arg)
		return -ENODEV;
	if (ctx.error)
		return ctx.error;
	if (ctx.count > 1)
		return 0;

	/* TODO: Scan CHBCR for HDM Decoder resources */

	/*
	 * In the single-port host-bridge case there are no HDM decoders
	 * in the CHBCR and a 1:1 passthrough decode is implied.
	 * Per the CXL specification (8.2.5.12 CXL HDM Decoder Capability
	 * Structure) single ported host-bridges need not publish a decoder
	 * capability when a passthrough decode can be assumed, i.e. all
	 * transactions that the uport sees are claimed and passed to the single
	 * dport. Disable the range until the first CXL region is enumerated /
	 * activated.
	 */
	if (ctx.count == 1) {
		cxld = devm_cxl_add_passthrough_decoder(host, port);
	cxld = cxl_decoder_alloc(port, 1);
	if (IS_ERR(cxld))
		return PTR_ERR(cxld);

		dev_dbg(host, "add: %s\n", dev_name(&cxld->dev));
	}
	cxld->interleave_ways = 1;
	cxld->interleave_granularity = PAGE_SIZE;
	cxld->target_type = CXL_DECODER_EXPANDER;
	cxld->range = (struct range) {
		.start = 0,
		.end = -1,
	};

	return 0;
	device_lock(&port->dev);
	dport = list_first_entry(&port->dports, typeof(*dport), list);
	device_unlock(&port->dev);

	single_port_map[0] = dport->port_id;

	rc = cxl_decoder_add(cxld, single_port_map);
	if (rc)
		put_device(&cxld->dev);
	else
		rc = cxl_decoder_autoremove(host, cxld);

	if (rc == 0)
		dev_dbg(host, "add: %s\n", dev_name(&cxld->dev));
	return rc;
}

static int add_host_bridge_dport(struct device *match, void *arg)
@@ -323,7 +363,7 @@ static int add_host_bridge_dport(struct device *match, void *arg)
	struct acpi_cedt_chbs *chbs;
	struct cxl_port *root_port = arg;
	struct device *host = root_port->dev.parent;
	struct acpi_device *bridge = to_cxl_host_bridge(match);
	struct acpi_device *bridge = to_cxl_host_bridge(host, match);

	if (!bridge)
		return 0;
@@ -337,9 +377,11 @@ static int add_host_bridge_dport(struct device *match, void *arg)
	}

	chbs = cxl_acpi_match_chbs(host, uid);
	if (IS_ERR(chbs))
		dev_dbg(host, "No CHBS found for Host Bridge: %s\n",
	if (IS_ERR(chbs)) {
		dev_warn(host, "No CHBS found for Host Bridge: %s\n",
			 dev_name(match));
		return 0;
	}

	rc = cxl_add_dport(root_port, match, uid, get_chbcr(chbs));
	if (rc) {
@@ -375,6 +417,17 @@ static int add_root_nvdimm_bridge(struct device *match, void *data)
	return 1;
}

/*
 * Pick which CEDT table instance to hand to acpi_get_table(). When the
 * ACPI match data flags a native ACPI0017 device (real platform
 * firmware), request the canonical instance 0; otherwise request a
 * non-canonical instance so cxl_test can interpose a mocked CEDT.
 */
static u32 cedt_instance(struct platform_device *pdev)
{
	const bool *native_acpi0017 = acpi_device_get_match_data(&pdev->dev);

	if (native_acpi0017 && *native_acpi0017)
		return 0;

	/* for cxl_test request a non-canonical instance */
	return U32_MAX;
}

static int cxl_acpi_probe(struct platform_device *pdev)
{
	int rc;
@@ -388,7 +441,7 @@ static int cxl_acpi_probe(struct platform_device *pdev)
		return PTR_ERR(root_port);
	dev_dbg(host, "add: %s\n", dev_name(&root_port->dev));

	status = acpi_get_table(ACPI_SIG_CEDT, 0, &acpi_cedt);
	status = acpi_get_table(ACPI_SIG_CEDT, cedt_instance(pdev), &acpi_cedt);
	if (ACPI_FAILURE(status))
		return -ENXIO;

@@ -419,9 +472,11 @@ static int cxl_acpi_probe(struct platform_device *pdev)
	return 0;
}

static bool native_acpi0017 = true;

static const struct acpi_device_id cxl_acpi_ids[] = {
	{ "ACPI0017", 0 },
	{ "", 0 },
	{ "ACPI0017", (unsigned long) &native_acpi0017 },
	{ },
};
MODULE_DEVICE_TABLE(acpi, cxl_acpi_ids);

+1 −0
Original line number Diff line number Diff line
@@ -6,3 +6,4 @@ cxl_core-y := bus.o
cxl_core-y += pmem.o
cxl_core-y += regs.o
cxl_core-y += memdev.o
cxl_core-y += mbox.o
+68 −51
Original line number Diff line number Diff line
@@ -453,50 +453,57 @@ int cxl_add_dport(struct cxl_port *port, struct device *dport_dev, int port_id,
}
EXPORT_SYMBOL_GPL(cxl_add_dport);

static struct cxl_decoder *
cxl_decoder_alloc(struct cxl_port *port, int nr_targets, resource_size_t base,
		  resource_size_t len, int interleave_ways,
		  int interleave_granularity, enum cxl_decoder_type type,
		  unsigned long flags)
static int decoder_populate_targets(struct cxl_decoder *cxld,
				    struct cxl_port *port, int *target_map)
{
	struct cxl_decoder *cxld;
	struct device *dev;
	int rc = 0;
	int rc = 0, i;

	if (interleave_ways < 1)
		return ERR_PTR(-EINVAL);
	if (!target_map)
		return 0;

	device_lock(&port->dev);
	if (list_empty(&port->dports))
	if (list_empty(&port->dports)) {
		rc = -EINVAL;
		goto out_unlock;
	}

	for (i = 0; i < cxld->nr_targets; i++) {
		struct cxl_dport *dport = find_dport(port, target_map[i]);

		if (!dport) {
			rc = -ENXIO;
			goto out_unlock;
		}
		cxld->target[i] = dport;
	}

out_unlock:
	device_unlock(&port->dev);
	if (rc)
		return ERR_PTR(rc);

	return rc;
}

struct cxl_decoder *cxl_decoder_alloc(struct cxl_port *port, int nr_targets)
{
	struct cxl_decoder *cxld, cxld_const_init = {
		.nr_targets = nr_targets,
	};
	struct device *dev;
	int rc = 0;

	if (nr_targets > CXL_DECODER_MAX_INTERLEAVE || nr_targets < 1)
		return ERR_PTR(-EINVAL);

	cxld = kzalloc(struct_size(cxld, target, nr_targets), GFP_KERNEL);
	if (!cxld)
		return ERR_PTR(-ENOMEM);
	memcpy(cxld, &cxld_const_init, sizeof(cxld_const_init));

	rc = ida_alloc(&port->decoder_ida, GFP_KERNEL);
	if (rc < 0)
		goto err;

	*cxld = (struct cxl_decoder) {
		.id = rc,
		.range = {
			.start = base,
			.end = base + len - 1,
		},
		.flags = flags,
		.interleave_ways = interleave_ways,
		.interleave_granularity = interleave_granularity,
		.target_type = type,
	};

	/* handle implied target_list */
	if (interleave_ways == 1)
		cxld->target[0] =
			list_first_entry(&port->dports, struct cxl_dport, list);
	cxld->id = rc;
	dev = &cxld->dev;
	device_initialize(dev);
	device_set_pm_not_required(dev);
@@ -514,41 +521,47 @@ cxl_decoder_alloc(struct cxl_port *port, int nr_targets, resource_size_t base,
	kfree(cxld);
	return ERR_PTR(rc);
}
EXPORT_SYMBOL_GPL(cxl_decoder_alloc);

struct cxl_decoder *
devm_cxl_add_decoder(struct device *host, struct cxl_port *port, int nr_targets,
		     resource_size_t base, resource_size_t len,
		     int interleave_ways, int interleave_granularity,
		     enum cxl_decoder_type type, unsigned long flags)
int cxl_decoder_add(struct cxl_decoder *cxld, int *target_map)
{
	struct cxl_decoder *cxld;
	struct cxl_port *port;
	struct device *dev;
	int rc;

	cxld = cxl_decoder_alloc(port, nr_targets, base, len, interleave_ways,
				 interleave_granularity, type, flags);
	if (IS_ERR(cxld))
		return cxld;
	if (WARN_ON_ONCE(!cxld))
		return -EINVAL;

	if (WARN_ON_ONCE(IS_ERR(cxld)))
		return PTR_ERR(cxld);

	if (cxld->interleave_ways < 1)
		return -EINVAL;

	port = to_cxl_port(cxld->dev.parent);
	rc = decoder_populate_targets(cxld, port, target_map);
	if (rc)
		return rc;

	dev = &cxld->dev;
	rc = dev_set_name(dev, "decoder%d.%d", port->id, cxld->id);
	if (rc)
		goto err;
		return rc;

	rc = device_add(dev);
	if (rc)
		goto err;
	return device_add(dev);
}
EXPORT_SYMBOL_GPL(cxl_decoder_add);

	rc = devm_add_action_or_reset(host, unregister_cxl_dev, dev);
	if (rc)
		return ERR_PTR(rc);
	return cxld;
/* devm action callback: unregister the decoder's struct device */
static void cxld_unregister(void *dev)
{
	device_unregister(dev);
}

err:
	put_device(dev);
	return ERR_PTR(rc);
/*
 * Schedule @cxld's device to be unregistered automatically when @host
 * is unbound, via a devm action (or run the action immediately on
 * registration failure, per devm_add_action_or_reset semantics).
 * Returns 0 on success or a negative errno.
 */
int cxl_decoder_autoremove(struct device *host, struct cxl_decoder *cxld)
{
	return devm_add_action_or_reset(host, cxld_unregister, &cxld->dev);
}
EXPORT_SYMBOL_GPL(devm_cxl_add_decoder);
EXPORT_SYMBOL_GPL(cxl_decoder_autoremove);

/**
 * __cxl_driver_register - register a driver for the cxl bus
@@ -635,6 +648,8 @@ static __init int cxl_core_init(void)
{
	int rc;

	cxl_mbox_init();

	rc = cxl_memdev_init();
	if (rc)
		return rc;
@@ -646,6 +661,7 @@ static __init int cxl_core_init(void)

err:
	cxl_memdev_exit();
	cxl_mbox_exit();
	return rc;
}

@@ -653,6 +669,7 @@ static void cxl_core_exit(void)
{
	bus_unregister(&cxl_bus_type);
	cxl_memdev_exit();
	cxl_mbox_exit();
}

module_init(cxl_core_init);
Loading