Commit a0827713 authored by Chengming Zhou, committed by Peter Zijlstra

perf/core: Don't pass task around when ctx sched in



The current code passes the task around for ctx_sched_in(), only
to get the perf_cgroup of the task, then updates the timestamps
of it and its ancestors and sets them to active.

But we can use cpuctx->cgrp to get the active perf_cgroup and
its ancestors, since cpuctx->cgrp has already been set before
ctx_sched_in().

This patch removes the task argument from ctx_sched_in()
and cleans up the related code.
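
For reference, the reworked helper reads roughly as follows after this
change. This is a sketch assembled from the hunks below; the body of the
ancestor loop past container_of() is not part of this diff, so the
this_cpu_ptr()/__update_cgrp_time()/__store_release() lines are
assumptions based on the surrounding kernel code of that era:

static inline void
perf_cgroup_set_timestamp(struct perf_cpu_context *cpuctx)
{
	struct perf_event_context *ctx = &cpuctx->ctx;
	struct perf_cgroup *cgrp = cpuctx->cgrp;	/* set by perf_cgroup_switch() */
	struct perf_cgroup_info *info;
	struct cgroup_subsys_state *css;

	/* no active cgroup on this CPU: nothing to timestamp */
	if (!cgrp)
		return;

	WARN_ON_ONCE(!ctx->nr_cgroups);

	/* walk the active cgroup and all of its ancestors */
	for (css = &cgrp->css; css; css = css->parent) {
		cgrp = container_of(css, struct perf_cgroup, css);
		info = this_cpu_ptr(cgrp->info);			/* assumed loop body */
		__update_cgrp_time(info, ctx->timestamp, false);	/* assumed loop body */
		__store_release(&info->active, 1);			/* assumed loop body */
	}
}

The !cgrp check is sufficient because perf_cgroup_switch() stores
cpuctx->cgrp before it calls cpu_ctx_sched_in(), as the hunk for
perf_cgroup_switch() below shows.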

Signed-off-by: Chengming Zhou <zhouchengming@bytedance.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lore.kernel.org/r/20220329154523.86438-2-zhouchengming@bytedance.com
parent e590928d
+26 −32
@@ -574,8 +574,7 @@ static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
			      enum event_type_t event_type);

static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
-			     enum event_type_t event_type,
-			     struct task_struct *task);
+			     enum event_type_t event_type);

static void update_context_time(struct perf_event_context *ctx);
static u64 perf_event_time(struct perf_event *event);
@@ -801,10 +800,10 @@ static inline void update_cgrp_time_from_event(struct perf_event *event)
}

static inline void
-perf_cgroup_set_timestamp(struct task_struct *task,
-			  struct perf_event_context *ctx)
+perf_cgroup_set_timestamp(struct perf_cpu_context *cpuctx)
{
-	struct perf_cgroup *cgrp;
+	struct perf_event_context *ctx = &cpuctx->ctx;
+	struct perf_cgroup *cgrp = cpuctx->cgrp;
	struct perf_cgroup_info *info;
	struct cgroup_subsys_state *css;

@@ -813,10 +812,10 @@ perf_cgroup_set_timestamp(struct task_struct *task,
	 * ensure we do not access cgroup data
	 * unless we have the cgroup pinned (css_get)
	 */
-	if (!task || !ctx->nr_cgroups)
+	if (!cgrp)
		return;

-	cgrp = perf_cgroup_from_task(task, ctx);
+	WARN_ON_ONCE(!ctx->nr_cgroups);

	for (css = &cgrp->css; css; css = css->parent) {
		cgrp = container_of(css, struct perf_cgroup, css);
@@ -869,14 +868,14 @@ static void perf_cgroup_switch(struct task_struct *task, int mode)
			WARN_ON_ONCE(cpuctx->cgrp);
			/*
			 * set cgrp before ctxsw in to allow
-			 * event_filter_match() to not have to pass
-			 * task around
+			 * perf_cgroup_set_timestamp() in ctx_sched_in()
+			 * to not have to pass task around
			 * we pass the cpuctx->ctx to perf_cgroup_from_task()
			 * because cgroup events are only per-cpu
			 */
			cpuctx->cgrp = perf_cgroup_from_task(task,
							     &cpuctx->ctx);
-			cpu_ctx_sched_in(cpuctx, EVENT_ALL, task);
+			cpu_ctx_sched_in(cpuctx, EVENT_ALL);
		}
		perf_pmu_enable(cpuctx->ctx.pmu);
		perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
@@ -1118,8 +1117,7 @@ static inline int perf_cgroup_connect(pid_t pid, struct perf_event *event,
}

static inline void
-perf_cgroup_set_timestamp(struct task_struct *task,
-			  struct perf_event_context *ctx)
+perf_cgroup_set_timestamp(struct perf_cpu_context *cpuctx)
{
}

@@ -2713,8 +2711,7 @@ static void ctx_sched_out(struct perf_event_context *ctx,
static void
ctx_sched_in(struct perf_event_context *ctx,
	     struct perf_cpu_context *cpuctx,
-	     enum event_type_t event_type,
-	     struct task_struct *task);
+	     enum event_type_t event_type);

static void task_ctx_sched_out(struct perf_cpu_context *cpuctx,
			       struct perf_event_context *ctx,
@@ -2730,15 +2727,14 @@ static void task_ctx_sched_out(struct perf_cpu_context *cpuctx,
}

static void perf_event_sched_in(struct perf_cpu_context *cpuctx,
-				struct perf_event_context *ctx,
-				struct task_struct *task)
+				struct perf_event_context *ctx)
{
-	cpu_ctx_sched_in(cpuctx, EVENT_PINNED, task);
+	cpu_ctx_sched_in(cpuctx, EVENT_PINNED);
	if (ctx)
-		ctx_sched_in(ctx, cpuctx, EVENT_PINNED, task);
-	cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE, task);
+		ctx_sched_in(ctx, cpuctx, EVENT_PINNED);
+	cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE);
	if (ctx)
-		ctx_sched_in(ctx, cpuctx, EVENT_FLEXIBLE, task);
+		ctx_sched_in(ctx, cpuctx, EVENT_FLEXIBLE);
}

/*
@@ -2788,7 +2784,7 @@ static void ctx_resched(struct perf_cpu_context *cpuctx,
	else if (ctx_event_type & EVENT_PINNED)
		cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);

-	perf_event_sched_in(cpuctx, task_ctx, current);
+	perf_event_sched_in(cpuctx, task_ctx);
	perf_pmu_enable(cpuctx->ctx.pmu);
}

@@ -3011,7 +3007,7 @@ static void __perf_event_enable(struct perf_event *event,
		return;

	if (!event_filter_match(event)) {
-		ctx_sched_in(ctx, cpuctx, EVENT_TIME, current);
+		ctx_sched_in(ctx, cpuctx, EVENT_TIME);
		return;
	}

@@ -3020,7 +3016,7 @@ static void __perf_event_enable(struct perf_event *event,
	 * then don't put it on unless the group is on.
	 */
	if (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE) {
-		ctx_sched_in(ctx, cpuctx, EVENT_TIME, current);
+		ctx_sched_in(ctx, cpuctx, EVENT_TIME);
		return;
	}

@@ -3865,8 +3861,7 @@ ctx_flexible_sched_in(struct perf_event_context *ctx,
static void
ctx_sched_in(struct perf_event_context *ctx,
	     struct perf_cpu_context *cpuctx,
-	     enum event_type_t event_type,
-	     struct task_struct *task)
+	     enum event_type_t event_type)
{
	int is_active = ctx->is_active;

@@ -3878,7 +3873,7 @@ ctx_sched_in(struct perf_event_context *ctx,
	if (is_active ^ EVENT_TIME) {
		/* start ctx time */
		__update_context_time(ctx, false);
-		perf_cgroup_set_timestamp(task, ctx);
+		perf_cgroup_set_timestamp(cpuctx);
		/*
		 * CPU-release for the below ->is_active store,
		 * see __load_acquire() in perf_event_time_now()
@@ -3909,12 +3904,11 @@ ctx_sched_in(struct perf_event_context *ctx,
}

static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
-			     enum event_type_t event_type,
-			     struct task_struct *task)
+			     enum event_type_t event_type)
{
	struct perf_event_context *ctx = &cpuctx->ctx;

-	ctx_sched_in(ctx, cpuctx, event_type, task);
+	ctx_sched_in(ctx, cpuctx, event_type);
}

static void perf_event_context_sched_in(struct perf_event_context *ctx,
@@ -3956,7 +3950,7 @@ static void perf_event_context_sched_in(struct perf_event_context *ctx,
	 */
	if (!RB_EMPTY_ROOT(&ctx->pinned_groups.tree))
		cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
-	perf_event_sched_in(cpuctx, ctx, task);
+	perf_event_sched_in(cpuctx, ctx);

	if (cpuctx->sched_cb_usage && pmu->sched_task)
		pmu->sched_task(cpuctx->task_ctx, true);
@@ -4267,7 +4261,7 @@ static bool perf_rotate_context(struct perf_cpu_context *cpuctx)
	if (cpu_event)
		rotate_ctx(&cpuctx->ctx, cpu_event);

-	perf_event_sched_in(cpuctx, task_ctx, current);
+	perf_event_sched_in(cpuctx, task_ctx);

	perf_pmu_enable(cpuctx->ctx.pmu);
	perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
@@ -4339,7 +4333,7 @@ static void perf_event_enable_on_exec(int ctxn)
		clone_ctx = unclone_ctx(ctx);
		ctx_resched(cpuctx, ctx, event_type);
	} else {
-		ctx_sched_in(ctx, cpuctx, EVENT_TIME, current);
+		ctx_sched_in(ctx, cpuctx, EVENT_TIME);
	}
	perf_ctx_unlock(cpuctx, ctx);