Commit fa28abed authored by Ingo Molnar

sched/headers: sched/clock: Mark all functions 'notrace', remove CC_FLAGS_FTRACE build asymmetry



Mark all non-init functions in kernel/sched/clock.c as 'notrace', instead of
turning them all off via CC_FLAGS_FTRACE.
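
As a rough illustration of the difference (a userspace sketch, not kernel code; the file name, functions and build line below are made up): the kernel's 'notrace' marker is essentially GCC's no_instrument_function attribute, which suppresses the ftrace/mcount entry call for a single function, whereas a CFLAGS_REMOVE_*.o rule strips it for an entire object file.

/* notrace_demo.c -- illustrative only, not from this commit.
 * Build with profiling calls enabled for the whole file, e.g.:
 *   gcc -pg -O0 notrace_demo.c -o notrace_demo
 * The kernel's 'notrace' is roughly the attribute below.
 */
#include <stdio.h>

#define notrace __attribute__((no_instrument_function))

/* Compiled with -pg, this function gets an mcount/fentry call. */
static unsigned long long traced_read(void)
{
	return 1ULL;
}

/* The attribute opts this one function out of instrumentation,
 * without needing a per-object CFLAGS_REMOVE_*.o Makefile rule. */
notrace static unsigned long long untraced_read(void)
{
	return 2ULL;
}

int main(void)
{
	printf("%llu %llu\n", traced_read(), untraced_read());
	return 0;
}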

This allows the file to be treated like any other scheduler file, and it
can be #include-ed in compound compilation units as well.
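
Purely as a sketch of that compound-compilation-unit point (the combined file and the other includes here are hypothetical, not part of this commit): because the instrumentation opt-out now travels with the functions themselves rather than with a per-object Makefile rule, clock.c could be #include-ed into a larger translation unit without losing it:

/* sched_combined.c -- hypothetical compound compilation unit.
 * Each included file keeps its own per-function 'notrace' markings,
 * so this combined unit needs no ftrace-specific Makefile flags. */
#include "clock.c"
#include "loadavg.c"
#include "cputime.c"

A per-object CFLAGS_REMOVE rule, by contrast, applies to a whole .o file and could not exempt just the clock.c portion of such a combined object.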

Signed-off-by: Ingo Molnar <mingo@kernel.org>
Reviewed-by: Peter Zijlstra <peterz@infradead.org>
parent d90a2f16
kernel/sched/Makefile +0 −3
# SPDX-License-Identifier: GPL-2.0
-ifdef CONFIG_FUNCTION_TRACER
-CFLAGS_REMOVE_clock.o = $(CC_FLAGS_FTRACE)
-endif

# The compilers are complaining about unused variables inside an if(0) scope
# block. This is daft, shut them up.
kernel/sched/clock.c +21 −21
@@ -61,7 +61,7 @@
 * This is default implementation.
 * Architectures and sub-architectures can override this.
 */
-unsigned long long __weak sched_clock(void)
+notrace unsigned long long __weak sched_clock(void)
{
	return (unsigned long long)(jiffies - INITIAL_JIFFIES)
					* (NSEC_PER_SEC / HZ);
@@ -95,28 +95,28 @@ struct sched_clock_data {

static DEFINE_PER_CPU_SHARED_ALIGNED(struct sched_clock_data, sched_clock_data);

-static inline struct sched_clock_data *this_scd(void)
+notrace static inline struct sched_clock_data *this_scd(void)
{
	return this_cpu_ptr(&sched_clock_data);
}

-static inline struct sched_clock_data *cpu_sdc(int cpu)
+notrace static inline struct sched_clock_data *cpu_sdc(int cpu)
{
	return &per_cpu(sched_clock_data, cpu);
}

-int sched_clock_stable(void)
+notrace int sched_clock_stable(void)
{
	return static_branch_likely(&__sched_clock_stable);
}

-static void __scd_stamp(struct sched_clock_data *scd)
+notrace static void __scd_stamp(struct sched_clock_data *scd)
{
	scd->tick_gtod = ktime_get_ns();
	scd->tick_raw = sched_clock();
}

-static void __set_sched_clock_stable(void)
+notrace static void __set_sched_clock_stable(void)
{
	struct sched_clock_data *scd;

@@ -151,7 +151,7 @@ static void __set_sched_clock_stable(void)
 * The only way to fully avoid random clock jumps is to boot with:
 * "tsc=unstable".
 */
-static void __sched_clock_work(struct work_struct *work)
+notrace static void __sched_clock_work(struct work_struct *work)
{
	struct sched_clock_data *scd;
	int cpu;
@@ -177,7 +177,7 @@ static void __sched_clock_work(struct work_struct *work)

static DECLARE_WORK(sched_clock_work, __sched_clock_work);

-static void __clear_sched_clock_stable(void)
+notrace static void __clear_sched_clock_stable(void)
{
	if (!sched_clock_stable())
		return;
@@ -186,7 +186,7 @@ static void __clear_sched_clock_stable(void)
	schedule_work(&sched_clock_work);
}

-void clear_sched_clock_stable(void)
+notrace void clear_sched_clock_stable(void)
{
	__sched_clock_stable_early = 0;

@@ -196,7 +196,7 @@ void clear_sched_clock_stable(void)
		__clear_sched_clock_stable();
}

-static void __sched_clock_gtod_offset(void)
+notrace static void __sched_clock_gtod_offset(void)
{
	struct sched_clock_data *scd = this_scd();

@@ -246,12 +246,12 @@ late_initcall(sched_clock_init_late);
 * min, max except they take wrapping into account
 */

-static inline u64 wrap_min(u64 x, u64 y)
+notrace static inline u64 wrap_min(u64 x, u64 y)
{
	return (s64)(x - y) < 0 ? x : y;
}

-static inline u64 wrap_max(u64 x, u64 y)
+notrace static inline u64 wrap_max(u64 x, u64 y)
{
	return (s64)(x - y) > 0 ? x : y;
}
@@ -262,7 +262,7 @@ static inline u64 wrap_max(u64 x, u64 y)
 *  - filter out backward motion
 *  - use the GTOD tick value to create a window to filter crazy TSC values
 */
-static u64 sched_clock_local(struct sched_clock_data *scd)
+notrace static u64 sched_clock_local(struct sched_clock_data *scd)
{
	u64 now, clock, old_clock, min_clock, max_clock, gtod;
	s64 delta;
@@ -295,7 +295,7 @@ static u64 sched_clock_local(struct sched_clock_data *scd)
	return clock;
}

-static u64 sched_clock_remote(struct sched_clock_data *scd)
+notrace static u64 sched_clock_remote(struct sched_clock_data *scd)
{
	struct sched_clock_data *my_scd = this_scd();
	u64 this_clock, remote_clock;
@@ -362,7 +362,7 @@ static u64 sched_clock_remote(struct sched_clock_data *scd)
 *
 * See cpu_clock().
 */
-u64 sched_clock_cpu(int cpu)
+notrace u64 sched_clock_cpu(int cpu)
{
	struct sched_clock_data *scd;
	u64 clock;
@@ -386,7 +386,7 @@ u64 sched_clock_cpu(int cpu)
}
EXPORT_SYMBOL_GPL(sched_clock_cpu);

-void sched_clock_tick(void)
+notrace void sched_clock_tick(void)
{
	struct sched_clock_data *scd;

@@ -403,7 +403,7 @@ void sched_clock_tick(void)
	sched_clock_local(scd);
}

-void sched_clock_tick_stable(void)
+notrace void sched_clock_tick_stable(void)
{
	if (!sched_clock_stable())
		return;
@@ -423,7 +423,7 @@ void sched_clock_tick_stable(void)
/*
 * We are going deep-idle (irqs are disabled):
 */
-void sched_clock_idle_sleep_event(void)
+notrace void sched_clock_idle_sleep_event(void)
{
	sched_clock_cpu(smp_processor_id());
}
@@ -432,7 +432,7 @@ EXPORT_SYMBOL_GPL(sched_clock_idle_sleep_event);
/*
 * We just idled; resync with ktime.
 */
-void sched_clock_idle_wakeup_event(void)
+notrace void sched_clock_idle_wakeup_event(void)
{
	unsigned long flags;

@@ -458,7 +458,7 @@ void __init sched_clock_init(void)
	local_irq_enable();
}

-u64 sched_clock_cpu(int cpu)
+notrace u64 sched_clock_cpu(int cpu)
{
	if (!static_branch_likely(&sched_clock_running))
		return 0;
@@ -476,7 +476,7 @@ u64 sched_clock_cpu(int cpu)
 * On bare metal this function should return the same as local_clock.
 * Architectures and sub-architectures can override this.
 */
-u64 __weak running_clock(void)
+notrace u64 __weak running_clock(void)
{
	return local_clock();
}