Commit c93dc84c authored by Peter Zijlstra, committed by Ingo Molnar
Browse files

perf/x86: Add a microcode revision check for SNB-PEBS



Recent Intel microcode resolved the SNB-PEBS issues, so conditionally
enable PEBS on SNB hardware depending on the microcode revision.

Thanks to Stephane for figuring out the various microcode revisions.

Suggested-by: Stephane Eranian <eranian@google.com>
Acked-by: Borislav Petkov <borislav.petkov@amd.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/n/tip-v3672ziwh9damwqwh1uz3krm@git.kernel.org


Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent f285f92f
Loading
Loading
Loading
Loading
+2 −0
Original line number Original line Diff line number Diff line
@@ -232,6 +232,7 @@ struct perf_guest_switch_msr {


extern struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr);
extern struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr);
extern void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap);
extern void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap);
extern void perf_check_microcode(void);
#else
#else
static inline perf_guest_switch_msr *perf_guest_get_msrs(int *nr)
static inline perf_guest_switch_msr *perf_guest_get_msrs(int *nr)
{
{
@@ -245,6 +246,7 @@ static inline void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap)
}
}


static inline void perf_events_lapic_init(void)	{ }
static inline void perf_events_lapic_init(void)	{ }
static inline void perf_check_microcode(void) { }
#endif
#endif


#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_AMD)
#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_AMD)
+14 −7
Original line number Original line Diff line number Diff line
@@ -379,7 +379,7 @@ int x86_pmu_hw_config(struct perf_event *event)
		int precise = 0;
		int precise = 0;


		/* Support for constant skid */
		/* Support for constant skid */
		if (x86_pmu.pebs_active) {
		if (x86_pmu.pebs_active && !x86_pmu.pebs_broken) {
			precise++;
			precise++;


			/* Support for IP fixup */
			/* Support for IP fixup */
@@ -1650,6 +1650,13 @@ static void x86_pmu_flush_branch_stack(void)
		x86_pmu.flush_branch_stack();
		x86_pmu.flush_branch_stack();
}
}


/*
 * Re-evaluate PMU state after a microcode change.
 *
 * Called by the microcode loader (serialized under the microcode mutex
 * at the call sites) so vendor code can re-check errata that depend on
 * the running microcode revision.  A no-op when no vendor hook is set.
 */
void perf_check_microcode(void)
{
	void (*hook)(void) = x86_pmu.check_microcode;

	if (hook)
		hook();
}
EXPORT_SYMBOL_GPL(perf_check_microcode);

static struct pmu pmu = {
static struct pmu pmu = {
	.pmu_enable		= x86_pmu_enable,
	.pmu_enable		= x86_pmu_enable,
	.pmu_disable		= x86_pmu_disable,
	.pmu_disable		= x86_pmu_disable,
+3 −1
Original line number Original line Diff line number Diff line
@@ -361,6 +361,8 @@ struct x86_pmu {
	void		(*cpu_starting)(int cpu);
	void		(*cpu_starting)(int cpu);
	void		(*cpu_dying)(int cpu);
	void		(*cpu_dying)(int cpu);
	void		(*cpu_dead)(int cpu);
	void		(*cpu_dead)(int cpu);

	void		(*check_microcode)(void);
	void		(*flush_branch_stack)(void);
	void		(*flush_branch_stack)(void);


	/*
	/*
@@ -373,7 +375,7 @@ struct x86_pmu {
	 * Intel DebugStore bits
	 * Intel DebugStore bits
	 */
	 */
	int		bts, pebs;
	int		bts, pebs;
	int		bts_active, pebs_active;
	int		bts_active, pebs_active, pebs_broken;
	int		pebs_record_size;
	int		pebs_record_size;
	void		(*drain_pebs)(struct pt_regs *regs);
	void		(*drain_pebs)(struct pt_regs *regs);
	struct event_constraint *pebs_constraints;
	struct event_constraint *pebs_constraints;
+48 −3
Original line number Original line Diff line number Diff line
@@ -1712,11 +1712,56 @@ static __init void intel_clovertown_quirk(void)
	x86_pmu.pebs_constraints = NULL;
	x86_pmu.pebs_constraints = NULL;
}
}


/*
 * Decide whether PEBS is unusable on @cpu because of the SNB erratum.
 *
 * Compares the CPU's loaded microcode revision against the lowest
 * revision known to carry the fix for its model/stepping.  Models and
 * steppings without a known-good revision default to UINT_MAX, i.e.
 * "broken", so unknown hardware never silently enables PEBS.
 *
 * Returns non-zero when PEBS must stay disabled.
 */
static int intel_snb_pebs_broken(int cpu)
{
	u32 good_rev = UINT_MAX; /* default to broken for unknown models */
	u32 model = cpu_data(cpu).x86_model;

	if (model == 42) {			/* SNB */
		good_rev = 0x28;
	} else if (model == 45) {		/* SNB-EP */
		u32 stepping = cpu_data(cpu).x86_mask;

		if (stepping == 6)
			good_rev = 0x618;
		else if (stepping == 7)
			good_rev = 0x70c;
	}

	return cpu_data(cpu).microcode < good_rev;
}

/*
 * Vendor hook installed via x86_pmu.check_microcode for SNB parts.
 *
 * Scans all online CPUs; PEBS counts as broken if ANY of them still
 * runs a pre-fix microcode revision.  When the broken/fixed state
 * flips, x86_pmu.pebs_broken is updated and the transition is logged.
 */
static void intel_snb_check_microcode(void)
{
	int broken = 0;
	int cpu;

	get_online_cpus();
	for_each_online_cpu(cpu) {
		broken = intel_snb_pebs_broken(cpu);
		if (broken)
			break;
	}
	put_online_cpus();

	/* No state change: nothing to do, nothing to log. */
	if (broken == x86_pmu.pebs_broken)
		return;

	/*
	 * Serialized by the microcode lock..
	 */
	if (x86_pmu.pebs_broken) {
		pr_info("PEBS enabled due to microcode update\n");
		x86_pmu.pebs_broken = 0;
	} else {
		pr_info("PEBS disabled due to CPU errata, please upgrade microcode\n");
		x86_pmu.pebs_broken = 1;
	}
}

static __init void intel_sandybridge_quirk(void)
static __init void intel_sandybridge_quirk(void)
{
{
	printk(KERN_WARNING "PEBS disabled due to CPU errata.\n");
	x86_pmu.check_microcode = intel_snb_check_microcode;
	x86_pmu.pebs = 0;
	intel_snb_check_microcode();
	x86_pmu.pebs_constraints = NULL;
}
}


static const struct { int id; char *name; } intel_arch_events_map[] __initconst = {
static const struct { int id; char *name; } intel_arch_events_map[] __initconst = {
+7 −3
Original line number Original line Diff line number Diff line
@@ -87,6 +87,7 @@
#include <asm/microcode.h>
#include <asm/microcode.h>
#include <asm/processor.h>
#include <asm/processor.h>
#include <asm/cpu_device_id.h>
#include <asm/cpu_device_id.h>
#include <asm/perf_event.h>


MODULE_DESCRIPTION("Microcode Update Driver");
MODULE_DESCRIPTION("Microcode Update Driver");
MODULE_AUTHOR("Tigran Aivazian <tigran@aivazian.fsnet.co.uk>");
MODULE_AUTHOR("Tigran Aivazian <tigran@aivazian.fsnet.co.uk>");
@@ -277,7 +278,6 @@ static int reload_for_cpu(int cpu)
	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
	int err = 0;
	int err = 0;


	mutex_lock(&microcode_mutex);
	if (uci->valid) {
	if (uci->valid) {
		enum ucode_state ustate;
		enum ucode_state ustate;


@@ -288,7 +288,6 @@ static int reload_for_cpu(int cpu)
			if (ustate == UCODE_ERROR)
			if (ustate == UCODE_ERROR)
				err = -EINVAL;
				err = -EINVAL;
	}
	}
	mutex_unlock(&microcode_mutex);


	return err;
	return err;
}
}
@@ -309,6 +308,7 @@ static ssize_t reload_store(struct device *dev,
		return size;
		return size;


	get_online_cpus();
	get_online_cpus();
	mutex_lock(&microcode_mutex);
	for_each_online_cpu(cpu) {
	for_each_online_cpu(cpu) {
		tmp_ret = reload_for_cpu(cpu);
		tmp_ret = reload_for_cpu(cpu);
		if (tmp_ret != 0)
		if (tmp_ret != 0)
@@ -318,6 +318,9 @@ static ssize_t reload_store(struct device *dev,
		if (!ret)
		if (!ret)
			ret = tmp_ret;
			ret = tmp_ret;
	}
	}
	if (!ret)
		perf_check_microcode();
	mutex_unlock(&microcode_mutex);
	put_online_cpus();
	put_online_cpus();


	if (!ret)
	if (!ret)
@@ -557,7 +560,8 @@ static int __init microcode_init(void)
	mutex_lock(&microcode_mutex);
	mutex_lock(&microcode_mutex);


	error = subsys_interface_register(&mc_cpu_interface);
	error = subsys_interface_register(&mc_cpu_interface);

	if (!error)
		perf_check_microcode();
	mutex_unlock(&microcode_mutex);
	mutex_unlock(&microcode_mutex);
	put_online_cpus();
	put_online_cpus();