Unverified Commit 72870c57 authored by openeuler-ci-bot's avatar openeuler-ci-bot Committed by Gitee
Browse files

!10797 Intel: Backport SPR/EMR CXL and HBM support to kernel 5.10

Merge Pull Request from: @yunyingsun 
 
Content:
Some unknown uncore PMON types (like /sys/devices/uncore_type_<x>_<y>) can be found in both SPR (Sapphire Rapids, 4th Gen Xeon) and EMR (Emerald Rapids, 5th Gen Xeon) with HBM or CXL.

The unknown PMON types are HBM and CXL PMON. Except for the name, the other information regarding the HBM and CXL PMON counters can be retrieved via the discovery table. Add them into the uncores tables for SPR and EMR.

The first 7 patches update the generic support of the discovery table. The last patch enables the HBM and CXL PMON uncore counters for SPR and EMR.

Upstream commits from mainline kernel v6.11-rc1:
f8a86a9bb5f7 perf/x86/intel/uncore: Support HBM and CXL PMON counters
15a4bd51853b perf/x86/uncore: Cleanup unused unit structure
f76a8420444b perf/x86/uncore: Apply the unit control RB tree to PCI uncore units
b1d9ea2e1ca4 perf/x86/uncore: Apply the unit control RB tree to MSR uncore units
80580dae65b9 perf/x86/uncore: Apply the unit control RB tree to MMIO uncore units
585463fee642 perf/x86/uncore: Retrieve the unit ID from the unit control RB tree
c74443d92f68 perf/x86/uncore: Support per PMU cpumask
0007f3932592 perf/x86/uncore: Save the unit control address of all units

Intel-kernel issue:
https://gitee.com/openeuler/intel-kernel/issues/IAIFSR

Test:
Before the backport, on SPR or EMR systems that have HBM or CXL devices, there are "uncore_type_<x>_<y>" devices available under "/sys/devices/".
After the backport, on SPR or EMR systems that have HBM or CXL devices, there are "uncore_hbm_x" and/or "uncore_{cxlcm,cxldp}_x" devices under "/sys/devices/".

Known issue:
N/A

Configs:
N/A

Note:
1. The backported patches have no deviations from the mainline upstream version.
2. The backported patches have been validated on Intel SPR/EMR servers.
3. The patch set only contains CXL/HBM perfmon support for SPR/EMR. Support for the next-generation Xeon, Granite Rapids, is not included and will be added later in a separate patch set.
 
Link: https://gitee.com/openeuler/kernel/pulls/10797

 

Reviewed-by: default avatarJason Zeng <jason.zeng@intel.com>
Signed-off-by: default avatarYang Yingliang <yangyingliang@huawei.com>
parents ed7595f1 72dd74d2
Loading
Loading
Loading
Loading
+59 −38
Original line number Diff line number Diff line
@@ -251,6 +251,9 @@ static void uncore_assign_hw_event(struct intel_uncore_box *box,
		return;
	}

	if (intel_generic_uncore_assign_hw_event(event, box))
		return;

	hwc->config_base = uncore_event_ctl(box, hwc->idx);
	hwc->event_base  = uncore_perf_ctr(box, hwc->idx);
}
@@ -835,7 +838,9 @@ static void uncore_pmu_disable(struct pmu *pmu)
static ssize_t uncore_get_attr_cpumask(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	return cpumap_print_to_pagebuf(true, buf, &uncore_cpu_mask);
	struct intel_uncore_pmu *pmu = container_of(dev_get_drvdata(dev), struct intel_uncore_pmu, pmu);

	return cpumap_print_to_pagebuf(true, buf, &pmu->cpu_mask);
}

static DEVICE_ATTR(cpumask, S_IRUGO, uncore_get_attr_cpumask, NULL);
@@ -852,7 +857,10 @@ static const struct attribute_group uncore_pmu_attr_group = {
static inline int uncore_get_box_id(struct intel_uncore_type *type,
				    struct intel_uncore_pmu *pmu)
{
	return type->box_ids ? type->box_ids[pmu->pmu_idx] : pmu->pmu_idx;
	if (type->boxes)
		return intel_uncore_find_discovery_unit_id(type->boxes, -1, pmu->pmu_idx);

	return pmu->pmu_idx;
}

void uncore_get_alias_name(char *pmu_name, struct intel_uncore_pmu *pmu)
@@ -953,6 +961,9 @@ static void uncore_type_exit(struct intel_uncore_type *type)
	if (type->cleanup_mapping)
		type->cleanup_mapping(type);

	if (type->cleanup_extra_boxes)
		type->cleanup_extra_boxes(type);

	if (pmu) {
		for (i = 0; i < type->num_boxes; i++, pmu++) {
			uncore_pmu_unregister(pmu);
@@ -961,10 +972,7 @@ static void uncore_type_exit(struct intel_uncore_type *type)
		kfree(type->pmus);
		type->pmus = NULL;
	}
	if (type->box_ids) {
		kfree(type->box_ids);
		type->box_ids = NULL;
	}

	kfree(type->events_group);
	type->events_group = NULL;
}
@@ -1068,22 +1076,19 @@ static struct intel_uncore_pmu *
uncore_pci_find_dev_pmu_from_types(struct pci_dev *pdev)
{
	struct intel_uncore_type **types = uncore_pci_uncores;
	struct intel_uncore_discovery_unit *unit;
	struct intel_uncore_type *type;
	u64 box_ctl;
	int i, die;
	struct rb_node *node;

	for (; *types; types++) {
		type = *types;
		for (die = 0; die < __uncore_max_dies; die++) {
			for (i = 0; i < type->num_boxes; i++) {
				if (!type->box_ctls[die])
					continue;
				box_ctl = type->box_ctls[die] + type->pci_offsets[i];
				if (pdev->devfn == UNCORE_DISCOVERY_PCI_DEVFN(box_ctl) &&
				    pdev->bus->number == UNCORE_DISCOVERY_PCI_BUS(box_ctl) &&
				    pci_domain_nr(pdev->bus) == UNCORE_DISCOVERY_PCI_DOMAIN(box_ctl))
					return &type->pmus[i];
			}

		for (node = rb_first(type->boxes); node; node = rb_next(node)) {
			unit = rb_entry(node, struct intel_uncore_discovery_unit, node);
			if (pdev->devfn == UNCORE_DISCOVERY_PCI_DEVFN(unit->addr) &&
			    pdev->bus->number == UNCORE_DISCOVERY_PCI_BUS(unit->addr) &&
			    pci_domain_nr(pdev->bus) == UNCORE_DISCOVERY_PCI_DOMAIN(unit->addr))
				return &type->pmus[unit->pmu_idx];
		}
	}

@@ -1359,28 +1364,25 @@ static struct notifier_block uncore_pci_notifier = {
static void uncore_pci_pmus_register(void)
{
	struct intel_uncore_type **types = uncore_pci_uncores;
	struct intel_uncore_discovery_unit *unit;
	struct intel_uncore_type *type;
	struct intel_uncore_pmu *pmu;
	struct rb_node *node;
	struct pci_dev *pdev;
	u64 box_ctl;
	int i, die;

	for (; *types; types++) {
		type = *types;
		for (die = 0; die < __uncore_max_dies; die++) {
			for (i = 0; i < type->num_boxes; i++) {
				if (!type->box_ctls[die])
					continue;
				box_ctl = type->box_ctls[die] + type->pci_offsets[i];
				pdev = pci_get_domain_bus_and_slot(UNCORE_DISCOVERY_PCI_DOMAIN(box_ctl),
								   UNCORE_DISCOVERY_PCI_BUS(box_ctl),
								   UNCORE_DISCOVERY_PCI_DEVFN(box_ctl));

		for (node = rb_first(type->boxes); node; node = rb_next(node)) {
			unit = rb_entry(node, struct intel_uncore_discovery_unit, node);
			pdev = pci_get_domain_bus_and_slot(UNCORE_DISCOVERY_PCI_DOMAIN(unit->addr),
							   UNCORE_DISCOVERY_PCI_BUS(unit->addr),
							   UNCORE_DISCOVERY_PCI_DEVFN(unit->addr));

			if (!pdev)
				continue;
				pmu = &type->pmus[i];

				uncore_pci_pmu_register(pdev, type, pmu, die);
			}
			pmu = &type->pmus[unit->pmu_idx];
			uncore_pci_pmu_register(pdev, type, pmu, unit->die);
		}
	}

@@ -1445,6 +1447,18 @@ static void uncore_pci_exit(void)
	}
}

/*
 * Check whether PMU instance @pmu_idx of @type has a unit on @die.
 *
 * Types without a discovery-unit RB tree predate per-unit tracking and
 * are treated as present on every die.
 */
static bool uncore_die_has_box(struct intel_uncore_type *type,
			       int die, unsigned int pmu_idx)
{
	/* No unit tree: legacy type, assume the box exists. */
	if (!type->boxes)
		return true;

	/* A negative unit ID means no unit for this PMU on this die. */
	return intel_uncore_find_discovery_unit_id(type->boxes, die, pmu_idx) >= 0;
}

static void uncore_change_type_ctx(struct intel_uncore_type *type, int old_cpu,
				   int new_cpu)
{
@@ -1460,18 +1474,25 @@ static void uncore_change_type_ctx(struct intel_uncore_type *type, int old_cpu,

		if (old_cpu < 0) {
			WARN_ON_ONCE(box->cpu != -1);
			if (uncore_die_has_box(type, die, pmu->pmu_idx)) {
				box->cpu = new_cpu;
				cpumask_set_cpu(new_cpu, &pmu->cpu_mask);
			}
			continue;
		}

		WARN_ON_ONCE(box->cpu != old_cpu);
		WARN_ON_ONCE(box->cpu != -1 && box->cpu != old_cpu);
		box->cpu = -1;
		cpumask_clear_cpu(old_cpu, &pmu->cpu_mask);
		if (new_cpu < 0)
			continue;

		if (!uncore_die_has_box(type, die, pmu->pmu_idx))
			continue;
		uncore_pmu_cancel_hrtimer(box);
		perf_pmu_migrate_context(&pmu->pmu, old_cpu, new_cpu);
		box->cpu = new_cpu;
		cpumask_set_cpu(new_cpu, &pmu->cpu_mask);
	}
}

@@ -1494,7 +1515,7 @@ static void uncore_box_unref(struct intel_uncore_type **types, int id)
		pmu = type->pmus;
		for (i = 0; i < type->num_boxes; i++, pmu++) {
			box = pmu->boxes[id];
			if (box && atomic_dec_return(&box->refcnt) == 0)
			if (box && box->cpu >= 0 && atomic_dec_return(&box->refcnt) == 0)
				uncore_box_exit(box);
		}
	}
@@ -1584,7 +1605,7 @@ static int uncore_box_ref(struct intel_uncore_type **types,
		pmu = type->pmus;
		for (i = 0; i < type->num_boxes; i++, pmu++) {
			box = pmu->boxes[id];
			if (box && atomic_inc_return(&box->refcnt) == 1)
			if (box && box->cpu >= 0 && atomic_inc_return(&box->refcnt) == 1)
				uncore_box_init(box);
		}
	}
+6 −2
Original line number Diff line number Diff line
@@ -60,7 +60,6 @@ struct intel_uncore_type {
	unsigned fixed_ctr;
	unsigned fixed_ctl;
	unsigned box_ctl;
	u64 *box_ctls;	/* Unit ctrl addr of the first box of each die */
	union {
		unsigned msr_offset;
		unsigned mmio_offset;
@@ -74,7 +73,6 @@ struct intel_uncore_type {
		u64 *pci_offsets;
		u64 *mmio_offsets;
	};
	unsigned *box_ids;
	struct event_constraint unconstrainted;
	struct event_constraint *constraints;
	struct intel_uncore_pmu *pmus;
@@ -84,6 +82,7 @@ struct intel_uncore_type {
	const struct attribute_group *attr_groups[4];
	const struct attribute_group **attr_update;
	struct pmu *pmu; /* for custom pmu ops */
	struct rb_root *boxes;
	/*
	 * Uncore PMU would store relevant platform topology configuration here
	 * to identify which platform component each PMON block of that type is
@@ -96,6 +95,10 @@ struct intel_uncore_type {
	int (*get_topology)(struct intel_uncore_type *type);
	int (*set_mapping)(struct intel_uncore_type *type);
	void (*cleanup_mapping)(struct intel_uncore_type *type);
	/*
	 * Optional callbacks for extra uncore units cleanup
	 */
	void (*cleanup_extra_boxes)(struct intel_uncore_type *type);
};

#define pmu_group attr_groups[0]
@@ -123,6 +126,7 @@ struct intel_uncore_pmu {
	int				func_id;
	bool				registered;
	atomic_t			activeboxes;
	cpumask_t			cpu_mask;
	struct intel_uncore_type	*type;
	struct intel_uncore_box		**boxes;
};
+200 −106
Original line number Diff line number Diff line
@@ -89,9 +89,7 @@ add_uncore_discovery_type(struct uncore_unit_discovery *unit)
	if (!type)
		return NULL;

	type->box_ctrl_die = kcalloc(__uncore_max_dies, sizeof(u64), GFP_KERNEL);
	if (!type->box_ctrl_die)
		goto free_type;
	type->units = RB_ROOT;

	type->access_type = unit->access_type;
	num_discovered_types[type->access_type]++;
@@ -100,12 +98,6 @@ add_uncore_discovery_type(struct uncore_unit_discovery *unit)
	rb_add(&type->node, &discovery_tables, __type_less);

	return type;

free_type:
	kfree(type);

	return NULL;

}

static struct intel_uncore_discovery_type *
@@ -120,14 +112,118 @@ get_uncore_discovery_type(struct uncore_unit_discovery *unit)
	return add_uncore_discovery_type(unit);
}

/*
 * rb_find_first() comparator: order tree nodes by pmu_idx against the
 * key. Returns <0 when the key sorts before the node, >0 when after,
 * and 0 on a pmu_idx match.
 */
static inline int pmu_idx_cmp(const void *key, const struct rb_node *b)
{
	const unsigned int *id = key;
	struct intel_uncore_discovery_unit *unit;

	unit = rb_entry(b, struct intel_uncore_discovery_unit, node);

	if (*id < unit->pmu_idx)
		return -1;
	if (*id > unit->pmu_idx)
		return 1;

	return 0;
}

/*
 * Find the discovery unit for (@die, @pmu_idx) in @units.
 *
 * The tree is ordered by pmu_idx first, then die (see unit_less()), so
 * all units of one PMU form a contiguous run. A negative @die returns
 * the first (leftmost) unit of the PMU regardless of die.
 *
 * Returns NULL when @units is NULL or no matching unit exists.
 */
static struct intel_uncore_discovery_unit *
intel_uncore_find_discovery_unit(struct rb_root *units, int die,
				 unsigned int pmu_idx)
{
	struct intel_uncore_discovery_unit *unit;
	struct rb_node *pos;

	if (!units)
		return NULL;

	/* Leftmost node with matching pmu_idx, i.e. the start of the run. */
	pos = rb_find_first(&pmu_idx, units, pmu_idx_cmp);
	if (!pos)
		return NULL;
	unit = rb_entry(pos, struct intel_uncore_discovery_unit, node);

	/* Any die will do: hand back the first unit of the PMU. */
	if (die < 0)
		return unit;

	/* Walk the pmu_idx run in die order looking for the exact die. */
	for (; pos; pos = rb_next(pos)) {
		unit = rb_entry(pos, struct intel_uncore_discovery_unit, node);

		if (unit->pmu_idx != pmu_idx)
			break;

		if (unit->die == die)
			return unit;
	}

	return NULL;
}

/*
 * Return the unit ID for (@die, @pmu_idx), or -1 when no such unit
 * exists. A negative @die matches the PMU's first unit on any die.
 */
int intel_uncore_find_discovery_unit_id(struct rb_root *units, int die,
					unsigned int pmu_idx)
{
	struct intel_uncore_discovery_unit *unit =
		intel_uncore_find_discovery_unit(units, die, pmu_idx);

	return unit ? unit->id : -1;
}

/*
 * rb_add() ordering for discovery units: sort by pmu_idx first, then by
 * die, so all units belonging to one PMU form a contiguous, die-ordered
 * run in the tree.
 */
static inline bool unit_less(struct rb_node *a, const struct rb_node *b)
{
	struct intel_uncore_discovery_unit *a_node, *b_node;

	a_node = rb_entry(a, struct intel_uncore_discovery_unit, node);
	b_node = rb_entry(b, struct intel_uncore_discovery_unit, node);

	if (a_node->pmu_idx < b_node->pmu_idx)
		return true;
	if (a_node->pmu_idx > b_node->pmu_idx)
		return false;

	if (a_node->die < b_node->die)
		return true;
	if (a_node->die > b_node->die)
		return false;

	/* Equal keys are "not less"; use the bool literal, not a bare 0. */
	return false;
}

/*
 * Linear scan of @root for a unit whose unit ID equals @id; the tree is
 * keyed on (pmu_idx, die), not ID, so an in-order walk is required.
 * Returns NULL when no unit carries that ID.
 */
static inline struct intel_uncore_discovery_unit *
uncore_find_unit(struct rb_root *root, unsigned int id)
{
	struct rb_node *pos = rb_first(root);

	while (pos) {
		struct intel_uncore_discovery_unit *unit;

		unit = rb_entry(pos, struct intel_uncore_discovery_unit, node);
		if (unit->id == id)
			return unit;

		pos = rb_next(pos);
	}

	return NULL;
}

/*
 * Insert @node into @root. Units that share a unit ID (one instance per
 * die) share a single pmu_idx: a previously seen ID reuses the existing
 * index, while a first-seen ID consumes the next free index from
 * @num_units (which is incremented when non-NULL).
 */
void uncore_find_add_unit(struct intel_uncore_discovery_unit *node,
			  struct rb_root *root, u16 *num_units)
{
	struct intel_uncore_discovery_unit *match;

	match = uncore_find_unit(root, node->id);
	if (match) {
		/* Same unit on another die: reuse its PMU index. */
		node->pmu_idx = match->pmu_idx;
	} else if (num_units) {
		/* New unit ID: allocate the next PMU index. */
		node->pmu_idx = (*num_units)++;
	}

	rb_add(&node->node, root, unit_less);
}

static void
uncore_insert_box_info(struct uncore_unit_discovery *unit,
		       int die, bool parsed)
		       int die)
{
	struct intel_uncore_discovery_unit *node;
	struct intel_uncore_discovery_type *type;
	unsigned int *ids;
	u64 *box_offset;
	int i;

	if (!unit->ctl || !unit->ctl_offset || !unit->ctr_offset) {
		pr_info("Invalid address is detected for uncore type %d box %d, "
@@ -136,72 +232,30 @@ uncore_insert_box_info(struct uncore_unit_discovery *unit,
		return;
	}

	if (parsed) {
		type = search_uncore_discovery_type(unit->box_type);
		if (!type) {
			pr_info("A spurious uncore type %d is detected, "
				"Disable the uncore type.\n",
				unit->box_type);
			return;
		}
		/* Store the first box of each die */
		if (!type->box_ctrl_die[die])
			type->box_ctrl_die[die] = unit->ctl;
	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (!node)
		return;
	}

	type = get_uncore_discovery_type(unit);
	if (!type)
		return;
	node->die = die;
	node->id = unit->box_id;
	node->addr = unit->ctl;

	box_offset = kcalloc(type->num_boxes + 1, sizeof(u64), GFP_KERNEL);
	if (!box_offset)
	type = get_uncore_discovery_type(unit);
	if (!type) {
		kfree(node);
		return;
	}

	ids = kcalloc(type->num_boxes + 1, sizeof(unsigned int), GFP_KERNEL);
	if (!ids)
		goto free_box_offset;
	uncore_find_add_unit(node, &type->units, &type->num_units);

	/* Store generic information for the first box */
	if (!type->num_boxes) {
		type->box_ctrl = unit->ctl;
		type->box_ctrl_die[die] = unit->ctl;
	if (type->num_units == 1) {
		type->num_counters = unit->num_regs;
		type->counter_width = unit->bit_width;
		type->ctl_offset = unit->ctl_offset;
		type->ctr_offset = unit->ctr_offset;
		*ids = unit->box_id;
		goto end;
	}

	for (i = 0; i < type->num_boxes; i++) {
		ids[i] = type->ids[i];
		box_offset[i] = type->box_offset[i];

		if (unit->box_id == ids[i]) {
			pr_info("Duplicate uncore type %d box ID %d is detected, "
				"Drop the duplicate uncore unit.\n",
				unit->box_type, unit->box_id);
			goto free_ids;
	}
}
	ids[i] = unit->box_id;
	box_offset[i] = unit->ctl - type->box_ctrl;
	kfree(type->ids);
	kfree(type->box_offset);
end:
	type->ids = ids;
	type->box_offset = box_offset;
	type->num_boxes++;
	return;

free_ids:
	kfree(ids);

free_box_offset:
	kfree(box_offset);

}

static bool
uncore_ignore_unit(struct uncore_unit_discovery *unit, int *ignore)
@@ -271,7 +325,7 @@ static int parse_discovery_table(struct pci_dev *dev, int die,
		if (uncore_ignore_unit(&unit, ignore))
			continue;

		uncore_insert_box_info(&unit, die, *parsed);
		uncore_insert_box_info(&unit, die);
	}

	*parsed = true;
@@ -331,9 +385,16 @@ bool intel_uncore_has_discovery_tables(int *ignore)
void intel_uncore_clear_discovery_tables(void)
{
	struct intel_uncore_discovery_type *type, *next;
	struct intel_uncore_discovery_unit *pos;
	struct rb_node *node;

	rbtree_postorder_for_each_entry_safe(type, next, &discovery_tables, node) {
		kfree(type->box_ctrl_die);
		while (!RB_EMPTY_ROOT(&type->units)) {
			node = rb_first(&type->units);
			pos = rb_entry(node, struct intel_uncore_discovery_unit, node);
			rb_erase(node, &type->units);
			kfree(pos);
		}
		kfree(type);
	}
}
@@ -358,19 +419,31 @@ static const struct attribute_group generic_uncore_format_group = {
	.attrs = generic_uncore_formats_attr,
};

static u64 intel_generic_uncore_box_ctl(struct intel_uncore_box *box)
{
	struct intel_uncore_discovery_unit *unit;

	unit = intel_uncore_find_discovery_unit(box->pmu->type->boxes,
						-1, box->pmu->pmu_idx);
	if (WARN_ON_ONCE(!unit))
		return 0;

	return unit->addr;
}

void intel_generic_uncore_msr_init_box(struct intel_uncore_box *box)
{
	wrmsrl(uncore_msr_box_ctl(box), GENERIC_PMON_BOX_CTL_INT);
	wrmsrl(intel_generic_uncore_box_ctl(box), GENERIC_PMON_BOX_CTL_INT);
}

void intel_generic_uncore_msr_disable_box(struct intel_uncore_box *box)
{
	wrmsrl(uncore_msr_box_ctl(box), GENERIC_PMON_BOX_CTL_FRZ);
	wrmsrl(intel_generic_uncore_box_ctl(box), GENERIC_PMON_BOX_CTL_FRZ);
}

void intel_generic_uncore_msr_enable_box(struct intel_uncore_box *box)
{
	wrmsrl(uncore_msr_box_ctl(box), 0);
	wrmsrl(intel_generic_uncore_box_ctl(box), 0);
}

static void intel_generic_uncore_msr_enable_event(struct intel_uncore_box *box,
@@ -398,10 +471,47 @@ static struct intel_uncore_ops generic_uncore_msr_ops = {
	.read_counter		= uncore_msr_read_counter,
};

/*
 * Program hwc->config_base/event_base for @event from the discovery
 * information, dispatching on the box's access type.
 *
 * Returns false when the type has no discovery-unit tree (caller falls
 * back to the legacy offset scheme) or the unit control address is
 * unknown; true when the bases were assigned.
 */
bool intel_generic_uncore_assign_hw_event(struct perf_event *event,
					  struct intel_uncore_box *box)
{
	struct hw_perf_event *hwc = &event->hw;
	u64 box_ctl;

	/* No discovery tree: not a generic discovery-based type. */
	if (!box->pmu->type->boxes)
		return false;

	/* MMIO box: offsets are relative to the mapped region. */
	if (box->io_addr) {
		hwc->config_base = uncore_pci_event_ctl(box, hwc->idx);
		hwc->event_base  = uncore_pci_perf_ctr(box, hwc->idx);
		return true;
	}

	box_ctl = intel_generic_uncore_box_ctl(box);
	if (!box_ctl)
		return false;

	/* PCI box: offsets are relative to the config-space box control. */
	if (box->pci_dev) {
		box_ctl = UNCORE_DISCOVERY_PCI_BOX_CTRL(box_ctl);
		hwc->config_base = box_ctl + uncore_pci_event_ctl(box, hwc->idx);
		hwc->event_base  = box_ctl + uncore_pci_perf_ctr(box, hwc->idx);
		return true;
	}

	/* MSR box: base address plus per-counter MSR offsets. */
	hwc->config_base = box_ctl + box->pmu->type->event_ctl + hwc->idx;
	hwc->event_base  = box_ctl + box->pmu->type->perf_ctr + hwc->idx;

	return true;
}

static inline int intel_pci_uncore_box_ctl(struct intel_uncore_box *box)
{
	return UNCORE_DISCOVERY_PCI_BOX_CTRL(intel_generic_uncore_box_ctl(box));
}

void intel_generic_uncore_pci_init_box(struct intel_uncore_box *box)
{
	struct pci_dev *pdev = box->pci_dev;
	int box_ctl = uncore_pci_box_ctl(box);
	int box_ctl = intel_pci_uncore_box_ctl(box);

	__set_bit(UNCORE_BOX_FLAG_CTL_OFFS8, &box->flags);
	pci_write_config_dword(pdev, box_ctl, GENERIC_PMON_BOX_CTL_INT);
@@ -410,7 +520,7 @@ void intel_generic_uncore_pci_init_box(struct intel_uncore_box *box)
void intel_generic_uncore_pci_disable_box(struct intel_uncore_box *box)
{
	struct pci_dev *pdev = box->pci_dev;
	int box_ctl = uncore_pci_box_ctl(box);
	int box_ctl = intel_pci_uncore_box_ctl(box);

	pci_write_config_dword(pdev, box_ctl, GENERIC_PMON_BOX_CTL_FRZ);
}
@@ -418,7 +528,7 @@ void intel_generic_uncore_pci_disable_box(struct intel_uncore_box *box)
void intel_generic_uncore_pci_enable_box(struct intel_uncore_box *box)
{
	struct pci_dev *pdev = box->pci_dev;
	int box_ctl = uncore_pci_box_ctl(box);
	int box_ctl = intel_pci_uncore_box_ctl(box);

	pci_write_config_dword(pdev, box_ctl, 0);
}
@@ -465,34 +575,30 @@ static struct intel_uncore_ops generic_uncore_pci_ops = {

#define UNCORE_GENERIC_MMIO_SIZE		0x4000

static unsigned int generic_uncore_mmio_box_ctl(struct intel_uncore_box *box)
{
	struct intel_uncore_type *type = box->pmu->type;

	if (!type->box_ctls || !type->box_ctls[box->dieid] || !type->mmio_offsets)
		return 0;

	return type->box_ctls[box->dieid] + type->mmio_offsets[box->pmu->pmu_idx];
}

void intel_generic_uncore_mmio_init_box(struct intel_uncore_box *box)
{
	unsigned int box_ctl = generic_uncore_mmio_box_ctl(box);
	static struct intel_uncore_discovery_unit *unit;
	struct intel_uncore_type *type = box->pmu->type;
	resource_size_t addr;

	if (!box_ctl) {
	unit = intel_uncore_find_discovery_unit(type->boxes, box->dieid, box->pmu->pmu_idx);
	if (!unit) {
		pr_warn("Uncore type %d id %d: Cannot find box control address.\n",
			type->type_id, box->pmu->pmu_idx);
		return;
	}

	if (!unit->addr) {
		pr_warn("Uncore type %d box %d: Invalid box control address.\n",
			type->type_id, type->box_ids[box->pmu->pmu_idx]);
			type->type_id, unit->id);
		return;
	}

	addr = box_ctl;
	addr = unit->addr;
	box->io_addr = ioremap(addr, UNCORE_GENERIC_MMIO_SIZE);
	if (!box->io_addr) {
		pr_warn("Uncore type %d box %d: ioremap error for 0x%llx.\n",
			type->type_id, type->box_ids[box->pmu->pmu_idx],
			(unsigned long long)addr);
			type->type_id, unit->id, (unsigned long long)addr);
		return;
	}

@@ -552,34 +658,22 @@ static bool uncore_update_uncore_type(enum uncore_access_type type_id,
				      struct intel_uncore_discovery_type *type)
{
	uncore->type_id = type->type;
	uncore->num_boxes = type->num_boxes;
	uncore->num_counters = type->num_counters;
	uncore->perf_ctr_bits = type->counter_width;
	uncore->box_ids = type->ids;
	uncore->perf_ctr = (unsigned int)type->ctr_offset;
	uncore->event_ctl = (unsigned int)type->ctl_offset;
	uncore->boxes = &type->units;
	uncore->num_boxes = type->num_units;

	switch (type_id) {
	case UNCORE_ACCESS_MSR:
		uncore->ops = &generic_uncore_msr_ops;
		uncore->perf_ctr = (unsigned int)type->box_ctrl + type->ctr_offset;
		uncore->event_ctl = (unsigned int)type->box_ctrl + type->ctl_offset;
		uncore->box_ctl = (unsigned int)type->box_ctrl;
		uncore->msr_offsets = type->box_offset;
		break;
	case UNCORE_ACCESS_PCI:
		uncore->ops = &generic_uncore_pci_ops;
		uncore->perf_ctr = (unsigned int)UNCORE_DISCOVERY_PCI_BOX_CTRL(type->box_ctrl) + type->ctr_offset;
		uncore->event_ctl = (unsigned int)UNCORE_DISCOVERY_PCI_BOX_CTRL(type->box_ctrl) + type->ctl_offset;
		uncore->box_ctl = (unsigned int)UNCORE_DISCOVERY_PCI_BOX_CTRL(type->box_ctrl);
		uncore->box_ctls = type->box_ctrl_die;
		uncore->pci_offsets = type->box_offset;
		break;
	case UNCORE_ACCESS_MMIO:
		uncore->ops = &generic_uncore_mmio_ops;
		uncore->perf_ctr = (unsigned int)type->ctr_offset;
		uncore->event_ctl = (unsigned int)type->ctl_offset;
		uncore->box_ctl = (unsigned int)type->box_ctrl;
		uncore->box_ctls = type->box_ctrl_die;
		uncore->mmio_offsets = type->box_offset;
		uncore->mmio_map_size = UNCORE_GENERIC_MMIO_SIZE;
		break;
	default:
+17 −5
Original line number Diff line number Diff line
@@ -115,19 +115,24 @@ struct uncore_unit_discovery {
	};
};

/*
 * One uncore PMON unit parsed from the discovery table. Units live in a
 * per-type RB tree ordered by (pmu_idx, die); units that share a unit
 * ID across dies share a single pmu_idx.
 */
struct intel_uncore_discovery_unit {
	struct rb_node	node;
	unsigned int	pmu_idx;	/* The idx of the corresponding PMU */
	unsigned int	id;		/* Unit ID */
	unsigned int	die;		/* Die ID */
	u64		addr;		/* Unit Control Address */
};

struct intel_uncore_discovery_type {
	struct rb_node	node;
	enum uncore_access_type	access_type;
	u64		box_ctrl;	/* Unit ctrl addr of the first box */
	u64		*box_ctrl_die;	/* Unit ctrl addr of the first box of each die */
	struct rb_root	units;		/* Unit ctrl addr for all units */
	u16		type;		/* Type ID of the uncore block */
	u8		num_counters;
	u8		counter_width;
	u8		ctl_offset;	/* Counter Control 0 offset */
	u8		ctr_offset;	/* Counter 0 offset */
	u16		num_boxes;	/* number of boxes for the uncore block */
	unsigned int	*ids;		/* Box IDs */
	u64		*box_offset;	/* Box offset */
	u16		num_units;	/* number of units */
};

bool intel_uncore_has_discovery_tables(int *ignore);
@@ -158,3 +163,10 @@ u64 intel_generic_uncore_pci_read_counter(struct intel_uncore_box *box,

struct intel_uncore_type **
intel_uncore_generic_init_uncores(enum uncore_access_type type_id, int num_extra);

int intel_uncore_find_discovery_unit_id(struct rb_root *units, int die,
					unsigned int pmu_idx);
bool intel_generic_uncore_assign_hw_event(struct perf_event *event,
					  struct intel_uncore_box *box);
void uncore_find_add_unit(struct intel_uncore_discovery_unit *node,
			  struct rb_root *root, u16 *num_units);
+123 −39

File changed.

Preview size limit exceeded, changes collapsed.