Commit 488e13a4 authored by Like Xu, committed by Peter Zijlstra

perf/x86/lbr: Remove cpuc->lbr_xsave allocation from atomic context



If the kernel is compiled with the CONFIG_LOCKDEP option, the conditional
might_sleep_if() deep in kmem_cache_alloc() will generate the following
trace, and potentially cause a deadlock when another LBR event is added:

  [] BUG: sleeping function called from invalid context at include/linux/sched/mm.h:196
  [] Call Trace:
  []  kmem_cache_alloc+0x36/0x250
  []  intel_pmu_lbr_add+0x152/0x170
  []  x86_pmu_add+0x83/0xd0
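
The check that fires is the might_sleep_if() assertion reached from the slab
allocator's pre-allocation hook; roughly (a simplified sketch of might_alloc()
from include/linux/sched/mm.h, exact line numbers vary by kernel version):

  static inline void might_alloc(gfp_t gfp_mask)
  {
  	/* simplified sketch; see include/linux/sched/mm.h */
  	fs_reclaim_acquire(gfp_mask);
  	fs_reclaim_release(gfp_mask);

  	might_sleep_if(gfpflags_allow_blocking(gfp_mask));
  }

GFP_KERNEL allows blocking, while the x86_pmu_add() path above runs in a
context that must not sleep, hence the splat.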

Move the lbr_xsave allocation out of the event-add path and into a new
reserve_lbr_buffers(), called from x86_reserve_hardware() where sleeping
is allowed. This makes it symmetric with the release_lbr_buffers() call
and mirrors how the existing DS buffers are reserved.

Fixes: c085fb87 ("perf/x86/intel/lbr: Support XSAVES for arch LBR read")
Signed-off-by: Like Xu <like.xu@linux.intel.com>
[peterz: simplified]
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Tested-by: Kan Liang <kan.liang@linux.intel.com>
Link: https://lkml.kernel.org/r/20210430052247.3079672-2-like.xu@linux.intel.com
parent 3317c26a
arch/x86/events/core.c  +4 −2
@@ -396,10 +396,12 @@ int x86_reserve_hardware(void)
 	if (!atomic_inc_not_zero(&pmc_refcount)) {
 		mutex_lock(&pmc_reserve_mutex);
 		if (atomic_read(&pmc_refcount) == 0) {
-			if (!reserve_pmc_hardware())
+			if (!reserve_pmc_hardware()) {
 				err = -EBUSY;
-			else
+			} else {
 				reserve_ds_buffers();
+				reserve_lbr_buffers();
+			}
 		}
 		if (!err)
 			atomic_inc(&pmc_refcount);
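
The new reserve_lbr_buffers() call sits next to reserve_ds_buffers(), and its
counterpart release_lbr_buffers() is already invoked on the release side
(presumably x86_release_hardware(), which this hunk does not touch). A sketch
of that release path around this kernel version, for reference only:

void x86_release_hardware(void)
{
	/* sketch for reference; not part of this diff */
	if (atomic_dec_and_mutex_lock(&pmc_refcount, &pmc_reserve_mutex)) {
		release_pmc_hardware();
		release_ds_buffers();
		release_lbr_buffers();
		mutex_unlock(&pmc_reserve_mutex);
	}
}

Both paths run under pmc_reserve_mutex, so GFP_KERNEL allocations are safe in
the reserve path.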
arch/x86/events/intel/lbr.c  +20 −6
@@ -658,7 +658,6 @@ static inline bool branch_user_callstack(unsigned br_sel)
 
 void intel_pmu_lbr_add(struct perf_event *event)
 {
-	struct kmem_cache *kmem_cache = event->pmu->task_ctx_cache;
 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 
 	if (!x86_pmu.lbr_nr)
@@ -696,11 +695,6 @@ void intel_pmu_lbr_add(struct perf_event *event)
 	perf_sched_cb_inc(event->ctx->pmu);
 	if (!cpuc->lbr_users++ && !event->total_time_running)
 		intel_pmu_lbr_reset();
-
-	if (static_cpu_has(X86_FEATURE_ARCH_LBR) &&
-	    kmem_cache && !cpuc->lbr_xsave &&
-	    (cpuc->lbr_users != cpuc->lbr_pebs_users))
-		cpuc->lbr_xsave = kmem_cache_alloc(kmem_cache, GFP_KERNEL);
 }
 
 void release_lbr_buffers(void)
@@ -722,6 +716,26 @@ void release_lbr_buffers(void)
 	}
 }
 
+void reserve_lbr_buffers(void)
+{
+	struct kmem_cache *kmem_cache;
+	struct cpu_hw_events *cpuc;
+	int cpu;
+
+	if (!static_cpu_has(X86_FEATURE_ARCH_LBR))
+		return;
+
+	for_each_possible_cpu(cpu) {
+		cpuc = per_cpu_ptr(&cpu_hw_events, cpu);
+		kmem_cache = x86_get_pmu(cpu)->task_ctx_cache;
+		if (!kmem_cache || cpuc->lbr_xsave)
+			continue;
+
+		cpuc->lbr_xsave = kmem_cache_alloc_node(kmem_cache, GFP_KERNEL,
+							cpu_to_node(cpu));
+	}
+}
+
 void intel_pmu_lbr_del(struct perf_event *event)
 {
 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
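
The new reserve_lbr_buffers() is the mirror image of the existing
release_lbr_buffers() shown as context above; for comparison, that function
looks roughly like this in the same file (sketch, not part of this diff):

void release_lbr_buffers(void)
{
	/* sketch of the existing function, for comparison only */
	struct kmem_cache *kmem_cache;
	struct cpu_hw_events *cpuc;
	int cpu;

	if (!static_cpu_has(X86_FEATURE_ARCH_LBR))
		return;

	for_each_possible_cpu(cpu) {
		cpuc = per_cpu_ptr(&cpu_hw_events, cpu);
		kmem_cache = x86_get_pmu(cpu)->task_ctx_cache;
		if (kmem_cache && cpuc->lbr_xsave) {
			kmem_cache_free(kmem_cache, cpuc->lbr_xsave);
			cpuc->lbr_xsave = NULL;
		}
	}
}

Note that the reserve side uses kmem_cache_alloc_node() with cpu_to_node(cpu):
since every possible CPU's buffer is now allocated up front from a single CPU,
the node hint keeps each buffer on its owner's NUMA node, which the old
on-demand kmem_cache_alloc() got for free by allocating locally.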
arch/x86/events/perf_event.h  +6 −0
@@ -1244,6 +1244,8 @@ void reserve_ds_buffers(void);
 
 void release_lbr_buffers(void);
 
+void reserve_lbr_buffers(void);
+
 extern struct event_constraint bts_constraint;
 extern struct event_constraint vlbr_constraint;
 
@@ -1393,6 +1395,10 @@ static inline void release_lbr_buffers(void)
 {
 }
 
+static inline void reserve_lbr_buffers(void)
+{
+}
+
 static inline int intel_pmu_init(void)
 {
 	return 0;
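
The second hunk adds an empty stub alongside the existing release_lbr_buffers()
stub so that common code can call the function unconditionally. Assuming the
usual CONFIG_CPU_SUP_INTEL guard around this part of perf_event.h, the
resulting pattern is:

/* sketch of the declaration/stub pairing; guard name assumed */
#ifdef CONFIG_CPU_SUP_INTEL
void reserve_lbr_buffers(void);
#else
static inline void reserve_lbr_buffers(void)
{
}
#endif

With the stub in place, the reserve_lbr_buffers() call added to
x86_reserve_hardware() compiles away on configurations without Intel PMU
support.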