Commit fb7d4948 authored by Peter Zijlstra

sched/clock: Provide local_clock_noinstr()



Now that all ARCH_WANTS_NO_INSTR architectures (arm64, loongarch,
s390, x86) provide sched_clock_noinstr(), use this to provide
local_clock_noinstr().

This local_clock_noinstr() will be safe to use from noinstr code with
the assumption that any such noinstr code is non-preemptible (it had
better be, entry code will have IRQs disabled while __cpuidle must
have preemption disabled).
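
As a rough illustration only (hypothetical, not part of this patch), a
noinstr caller would rely on that existing non-preemptibility instead of
taking preemption off itself:

  /* Hypothetical noinstr caller; my_idle_timestamp() is not in the tree. */
  #include <linux/sched/clock.h>

  static noinstr u64 my_idle_timestamp(void)
  {
  	/*
  	 * The surrounding context is assumed non-preemptible (entry code
  	 * has IRQs disabled, __cpuidle has preemption disabled), so no
  	 * preempt_disable_notrace()/preempt_enable_notrace() pair is
  	 * needed and nothing here calls out of .noinstr.text.
  	 */
  	return local_clock_noinstr();
  }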

Specifically, preempt_enable_notrace(), a common part of many a
sched_clock() implementation, calls out to schedule() -- even though,
per the above, it will never trigger -- which frustrates noinstr
validation:

  vmlinux.o: warning: objtool: local_clock+0xb5: call to preempt_schedule_notrace_thunk() leaves .noinstr.text section
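
For context, and only as a rough paraphrase of <linux/preempt.h> (which
this patch does not touch), the CONFIG_PREEMPTION flavour of the helper
looks approximately like:

  #define preempt_enable_notrace() \
  do { \
  	barrier(); \
  	if (unlikely(__preempt_count_dec_and_test())) \
  		__preempt_schedule_notrace(); \
  } while (0)

That __preempt_schedule_notrace() tail (reached via the thunk named in the
warning) is the call out of .noinstr.text, even though, per the above, it
will never actually trigger in noinstr context.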

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Tested-by: Michael Kelley <mikelley@microsoft.com>  # Hyper-V
Link: https://lore.kernel.org/r/20230519102715.978624636@infradead.org
parent 5c5e9a2b
+16 −1
@@ -12,7 +12,16 @@
  *
  * Please use one of the three interfaces below.
  */
-extern unsigned long long notrace sched_clock(void);
+extern u64 sched_clock(void);
+
+#if defined(CONFIG_ARCH_WANTS_NO_INSTR) || defined(CONFIG_GENERIC_SCHED_CLOCK)
+extern u64 sched_clock_noinstr(void);
+#else
+static __always_inline u64 sched_clock_noinstr(void)
+{
+	return sched_clock();
+}
+#endif

 /*
  * See the comment in kernel/sched/clock.c
@@ -45,6 +54,11 @@ static inline u64 cpu_clock(int cpu)
 	return sched_clock();
 }

+static __always_inline u64 local_clock_noinstr(void)
+{
+	return sched_clock_noinstr();
+}
+
 static __always_inline u64 local_clock(void)
 {
 	return sched_clock();
@@ -79,6 +93,7 @@ static inline u64 cpu_clock(int cpu)
 	return sched_clock_cpu(cpu);
 }

+extern u64 local_clock_noinstr(void);
 extern u64 local_clock(void);

 #endif
+13 −6
@@ -266,7 +266,7 @@ static __always_inline u64 sched_clock_local(struct sched_clock_data *scd)
 	s64 delta;

 again:
-	now = sched_clock();
+	now = sched_clock_noinstr();
 	delta = now - scd->tick_raw;
 	if (unlikely(delta < 0))
 		delta = 0;
@@ -293,22 +293,29 @@ static __always_inline u64 sched_clock_local(struct sched_clock_data *scd)
 	return clock;
 }

-noinstr u64 local_clock(void)
+noinstr u64 local_clock_noinstr(void)
 {
 	u64 clock;

 	if (static_branch_likely(&__sched_clock_stable))
-		return sched_clock() + __sched_clock_offset;
+		return sched_clock_noinstr() + __sched_clock_offset;

 	if (!static_branch_likely(&sched_clock_running))
-		return sched_clock();
+		return sched_clock_noinstr();

-	preempt_disable_notrace();
 	clock = sched_clock_local(this_scd());
-	preempt_enable_notrace();

 	return clock;
 }
+
+u64 local_clock(void)
+{
+	u64 now;
+	preempt_disable_notrace();
+	now = local_clock_noinstr();
+	preempt_enable_notrace();
+	return now;
+}
 EXPORT_SYMBOL_GPL(local_clock);

 static notrace u64 sched_clock_remote(struct sched_clock_data *scd)