Commit 369461ce authored by Rob Herring, committed by Will Deacon

x86: perf: Move RDPMC event flag to a common definition



In preparation to enable user counter access on arm64 and to move some
of the user access handling to perf core, create a common event flag for
user counter access and convert x86 to use it.

Since the architecture-specific flags start at the LSB, start the common
flags at the MSB.
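
To illustrate the split (a sketch only, not part of this patch): common
flags grow down from the top bit while the architecture-specific mask
covers the low bits, so a new common flag must stay clear of
PERF_EVENT_FLAG_ARCH. A build-time check of that invariant in plain C:

  /* Sketch only: mirrors the flag layout this patch introduces. */
  #include <assert.h>

  /* Architecture-specific flag bits occupy the low 16 bits (LSB side). */
  #define PERF_EVENT_FLAG_ARCH		0x0000ffff
  /* Common flags are allocated from the top bit down (MSB side). */
  #define PERF_EVENT_FLAG_USER_READ_CNT	0x80000000

  /* A common flag must never overlap the architecture-specific mask. */
  static_assert((PERF_EVENT_FLAG_USER_READ_CNT & PERF_EVENT_FLAG_ARCH) == 0,
		"common flag collides with the arch-specific range");

  int main(void) { return 0; }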

Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Arnaldo Carvalho de Melo <acme@kernel.org>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Kan Liang <kan.liang@linux.intel.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Borislav Petkov <bp@alien8.de>
Cc: x86@kernel.org
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: linux-perf-users@vger.kernel.org
Reviewed-by: Mark Rutland <mark.rutland@arm.com>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Rob Herring <robh@kernel.org>
Link: https://lore.kernel.org/r/20211208201124.310740-2-robh@kernel.org


Signed-off-by: Will Deacon <will@kernel.org>
parent d58071a8
+5 −5
@@ -2476,7 +2476,7 @@ static int x86_pmu_event_init(struct perf_event *event)

	if (READ_ONCE(x86_pmu.attr_rdpmc) &&
	    !(event->hw.flags & PERF_X86_EVENT_LARGE_PEBS))
-		event->hw.flags |= PERF_X86_EVENT_RDPMC_ALLOWED;
+		event->hw.flags |= PERF_EVENT_FLAG_USER_READ_CNT;

	return err;
}
@@ -2510,7 +2510,7 @@ void perf_clear_dirty_counters(void)

static void x86_pmu_event_mapped(struct perf_event *event, struct mm_struct *mm)
{
-	if (!(event->hw.flags & PERF_X86_EVENT_RDPMC_ALLOWED))
+	if (!(event->hw.flags & PERF_EVENT_FLAG_USER_READ_CNT))
		return;

	/*
@@ -2531,7 +2531,7 @@ static void x86_pmu_event_mapped(struct perf_event *event, struct mm_struct *mm)

static void x86_pmu_event_unmapped(struct perf_event *event, struct mm_struct *mm)
{
-	if (!(event->hw.flags & PERF_X86_EVENT_RDPMC_ALLOWED))
+	if (!(event->hw.flags & PERF_EVENT_FLAG_USER_READ_CNT))
		return;

	if (atomic_dec_and_test(&mm->context.perf_rdpmc_allowed))
@@ -2542,7 +2542,7 @@ static int x86_pmu_event_idx(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

-	if (!(hwc->flags & PERF_X86_EVENT_RDPMC_ALLOWED))
+	if (!(hwc->flags & PERF_EVENT_FLAG_USER_READ_CNT))
		return 0;

	if (is_metric_idx(hwc->idx))
@@ -2725,7 +2725,7 @@ void arch_perf_update_userpage(struct perf_event *event,
	userpg->cap_user_time = 0;
	userpg->cap_user_time_zero = 0;
	userpg->cap_user_rdpmc =
-		!!(event->hw.flags & PERF_X86_EVENT_RDPMC_ALLOWED);
+		!!(event->hw.flags & PERF_EVENT_FLAG_USER_READ_CNT);
	userpg->pmc_width = x86_pmu.cntval_bits;

	if (!using_native_sched_clock() || !sched_clock_stable())
+1 −1
@@ -74,7 +74,7 @@ static inline bool constraint_match(struct event_constraint *c, u64 ecode)
#define PERF_X86_EVENT_PEBS_NA_HSW	0x0010 /* haswell style datala, unknown */
#define PERF_X86_EVENT_EXCL		0x0020 /* HT exclusivity on counter */
#define PERF_X86_EVENT_DYNAMIC		0x0040 /* dynamic alloc'd constraint */
-#define PERF_X86_EVENT_RDPMC_ALLOWED	0x0080 /* grant rdpmc permission */
+
#define PERF_X86_EVENT_EXCL_ACCT	0x0100 /* accounted EXCL event */
#define PERF_X86_EVENT_AUTO_RELOAD	0x0200 /* use PEBS auto-reload */
#define PERF_X86_EVENT_LARGE_PEBS	0x0400 /* use large PEBS */
+9 −0
@@ -129,6 +129,15 @@ struct hw_perf_event_extra {
	int		idx;	/* index in shared_regs->regs[] */
};

+/**
+ * hw_perf_event::flag values
+ *
+ * PERF_EVENT_FLAG_ARCH bits are reserved for architecture-specific
+ * usage.
+ */
+#define PERF_EVENT_FLAG_ARCH			0x0000ffff
+#define PERF_EVENT_FLAG_USER_READ_CNT		0x80000000
+
/**
 * struct hw_perf_event - performance event hardware details:
 */
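
For background (not part of this change): with user counter access
enabled, a task mmap()s its perf event fd and, if the exported
cap_user_rdpmc capability is set, reads the counter directly with the
RDPMC instruction. A simplified userspace sketch, assuming an x86
perf_event_open() fd for the current task and omitting the lock
seqcount and pmc_width sign-extension the real protocol requires:

  /* Sketch only: simplified self-monitoring read; real users must also
   * honour the lock seqcount and sign-extend by pmc_width. */
  #include <linux/perf_event.h>
  #include <stdint.h>
  #include <sys/mman.h>
  #include <unistd.h>

  static inline uint64_t rdpmc(uint32_t counter)
  {
  	uint32_t lo, hi;

  	__asm__ volatile("rdpmc" : "=a"(lo), "=d"(hi) : "c"(counter));
  	return ((uint64_t)hi << 32) | lo;
  }

  static uint64_t read_self_counter(int perf_fd)
  {
  	struct perf_event_mmap_page *pc =
  		mmap(NULL, sysconf(_SC_PAGESIZE), PROT_READ, MAP_SHARED,
  		     perf_fd, 0);

  	if (pc == MAP_FAILED || !pc->cap_user_rdpmc || !pc->index)
  		return 0;	/* fall back to read() on the fd */

  	/* index is the hardware counter number plus one */
  	return pc->offset + rdpmc(pc->index - 1);
  }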