kernel/events/core.c +21 −37

@@ -1986,6 +1986,12 @@ static int perf_get_aux_event(struct perf_event *event,
 	return 1;
 }
 
+static inline struct list_head *get_event_list(struct perf_event *event)
+{
+	struct perf_event_context *ctx = event->ctx;
+	return event->attr.pinned ? &ctx->pinned_active : &ctx->flexible_active;
+}
+
 static void perf_group_detach(struct perf_event *event)
 {
 	struct perf_event *sibling, *tmp;
@@ -2028,12 +2034,8 @@ static void perf_group_detach(struct perf_event *event)
 		if (!RB_EMPTY_NODE(&event->group_node)) {
 			add_event_to_groups(sibling, event->ctx);
 
-			if (sibling->state == PERF_EVENT_STATE_ACTIVE) {
-				struct list_head *list = sibling->attr.pinned ?
-					&ctx->pinned_active : &ctx->flexible_active;
-
-				list_add_tail(&sibling->active_list, list);
-			}
+			if (sibling->state == PERF_EVENT_STATE_ACTIVE)
+				list_add_tail(&sibling->active_list, get_event_list(sibling));
 		}
 
 		WARN_ON_ONCE(sibling->ctx != event->ctx);
@@ -2350,6 +2352,8 @@ event_sched_in(struct perf_event *event,
 {
 	int ret = 0;
 
+	WARN_ON_ONCE(event->ctx != ctx);
+
 	lockdep_assert_held(&ctx->lock);
 
 	if (event->state <= PERF_EVENT_STATE_OFF)
@@ -3425,10 +3429,12 @@ struct sched_in_data {
 	int can_add_hw;
 };
 
-static int pinned_sched_in(struct perf_event *event, void *data)
+static int merge_sched_in(struct perf_event *event, void *data)
 {
 	struct sched_in_data *sid = data;
 
+	WARN_ON_ONCE(event->ctx != sid->ctx);
+
 	if (event->state <= PERF_EVENT_STATE_OFF)
 		return 0;
 
@@ -3437,37 +3443,15 @@ static int pinned_sched_in(struct perf_event *event, void *data)
 
 	if (group_can_go_on(event, sid->cpuctx, sid->can_add_hw)) {
 		if (!group_sched_in(event, sid->cpuctx, sid->ctx))
-			list_add_tail(&event->active_list, &sid->ctx->pinned_active);
+			list_add_tail(&event->active_list, get_event_list(event));
 	}
 
-	/*
-	 * If this pinned group hasn't been scheduled,
-	 * put it in error state.
-	 */
-	if (event->state == PERF_EVENT_STATE_INACTIVE)
-		perf_event_set_state(event, PERF_EVENT_STATE_ERROR);
-
-	return 0;
-}
-
-static int flexible_sched_in(struct perf_event *event, void *data)
-{
-	struct sched_in_data *sid = data;
-
-	if (event->state <= PERF_EVENT_STATE_OFF)
-		return 0;
-
-	if (!event_filter_match(event))
-		return 0;
-
-	if (group_can_go_on(event, sid->cpuctx, sid->can_add_hw)) {
-		int ret = group_sched_in(event, sid->cpuctx, sid->ctx);
-		if (ret) {
-			sid->can_add_hw = 0;
-			sid->ctx->rotate_necessary = 1;
-			return 0;
-		}
-		list_add_tail(&event->active_list, &sid->ctx->flexible_active);
+	if (event->state == PERF_EVENT_STATE_INACTIVE) {
+		if (event->attr.pinned)
+			perf_event_set_state(event, PERF_EVENT_STATE_ERROR);
+
+		sid->can_add_hw = 0;
+		sid->ctx->rotate_necessary = 1;
 	}
 
 	return 0;
@@ -3485,7 +3469,7 @@ ctx_pinned_sched_in(struct perf_event_context *ctx,
 
 	visit_groups_merge(&ctx->pinned_groups,
 			   smp_processor_id(),
-			   pinned_sched_in, &sid);
+			   merge_sched_in, &sid);
 }
 
 static void
@@ -3500,7 +3484,7 @@ ctx_flexible_sched_in(struct perf_event_context *ctx,
 
 	visit_groups_merge(&ctx->flexible_groups,
 			   smp_processor_id(),
-			   flexible_sched_in, &sid);
+			   merge_sched_in, &sid);
 }
 
 static void
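The diff above collapses two near-duplicate visitor callbacks, pinned_sched_in() and flexible_sched_in(), into a single merge_sched_in(): the choice between the pinned and flexible active lists moves into the new get_event_list() helper, and the one pinned-specific consequence (an unschedulable pinned group goes to PERF_EVENT_STATE_ERROR) sits behind an event->attr.pinned check. The standalone C sketch below illustrates that refactoring pattern outside the kernel; item, get_active_list, try_schedule and merge_visit are illustrative stand-ins, not kernel APIs, and plain counters stand in for the kernel's list_heads.

#include <stdbool.h>
#include <stdio.h>

enum state { STATE_OFF, STATE_INACTIVE, STATE_ACTIVE, STATE_ERROR };

struct item {
	bool		pinned;
	enum state	state;
};

struct context {
	int pinned_active;	/* stand-ins for the two active lists; */
	int flexible_active;	/* counters instead of list_heads      */
};

/* Counterpart of get_event_list(): select the target by the item's flag. */
static int *get_active_list(struct context *ctx, const struct item *it)
{
	return it->pinned ? &ctx->pinned_active : &ctx->flexible_active;
}

/* Pretend scheduler: succeeds only while capacity remains. */
static bool try_schedule(struct item *it, int *capacity)
{
	if (*capacity <= 0)
		return false;
	(*capacity)--;
	it->state = STATE_ACTIVE;
	return true;
}

/*
 * One merged visitor instead of pinned_visit()/flexible_visit(); the
 * pinned-only consequence of a scheduling failure sits behind a flag
 * check, mirroring merge_sched_in().
 */
static int merge_visit(struct context *ctx, struct item *it, int *capacity)
{
	if (it->state <= STATE_OFF)
		return 0;

	if (try_schedule(it, capacity))
		(*get_active_list(ctx, it))++;

	if (it->state == STATE_INACTIVE && it->pinned)
		it->state = STATE_ERROR;	/* pinned-only error path */

	return 0;
}

int main(void)
{
	struct context ctx = { 0, 0 };
	struct item items[] = {
		{ .pinned = true,  .state = STATE_INACTIVE },
		{ .pinned = false, .state = STATE_INACTIVE },
		{ .pinned = true,  .state = STATE_INACTIVE },
	};
	int capacity = 2;	/* the third item fails and goes to ERROR */

	for (unsigned int i = 0; i < sizeof(items) / sizeof(items[0]); i++)
		merge_visit(&ctx, &items[i], &capacity);

	printf("pinned_active=%d flexible_active=%d last_state=%d\n",
	       ctx.pinned_active, ctx.flexible_active, items[2].state);
	return 0;
}

The payoff mirrors the patch: one callback body to maintain, with the list choice and the flag-gated branch making the pinned/flexible asymmetry explicit instead of duplicating all the surrounding boilerplate.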
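The patch also adds WARN_ON_ONCE(event->ctx != ctx) assertions to event_sched_in() and merge_sched_in(): cheap sanity checks that the event being scheduled actually belongs to the context being operated on, now that a single callback services both the pinned and flexible group trees. The kernel's WARN_ON_ONCE() evaluates to its condition and complains only the first time the condition holds at a given call site, which keeps an assertion on the context-switch path from flooding the log. A rough userspace approximation, offered only as a sketch (the real macro also dumps a stack trace and taints the kernel, both omitted here):

#include <stdio.h>

/*
 * Userspace approximation of the kernel's WARN_ON_ONCE(): report the
 * first time the condition is true at a given call site, then stay
 * quiet. Uses a GNU C statement expression, as the kernel itself does.
 */
#define WARN_ON_ONCE(cond) ({					\
	static int __warned;					\
	int __ret_warn = !!(cond);				\
	if (__ret_warn && !__warned) {				\
		__warned = 1;					\
		fprintf(stderr, "WARNING at %s:%d: %s\n",	\
			__FILE__, __LINE__, #cond);		\
	}							\
	__ret_warn;						\
})

int main(void)
{
	for (int i = 0; i < 3; i++)
		WARN_ON_ONCE(i > 0);	/* fires once, at i == 1 */
	return 0;
}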