kernel/hrtimer.c  +19 −1

@@ -1063,7 +1063,9 @@ void hrtimer_interrupt(struct clock_event_device *dev)
 		basenow = ktime_add(now, base->offset);

 		while ((node = base->first)) {
+			enum hrtimer_restart (*fn)(struct hrtimer *);
 			struct hrtimer *timer;
+			int restart;

 			timer = rb_entry(node, struct hrtimer, node);

@@ -1091,13 +1093,29 @@ void hrtimer_interrupt(struct clock_event_device *dev)
 					 HRTIMER_STATE_CALLBACK, 0);
 			timer_stats_account_hrtimer(timer);

+			fn = timer->function;
+			if (timer->cb_mode == HRTIMER_CB_IRQSAFE_NO_SOFTIRQ) {
+				/*
+				 * Used for scheduler timers, avoid lock
+				 * inversion with rq->lock and tasklist_lock.
+				 *
+				 * These timers are required to deal with
+				 * enqueue expiry themselves and are not
+				 * allowed to migrate.
+				 */
+				spin_unlock(&cpu_base->lock);
+				restart = fn(timer);
+				spin_lock(&cpu_base->lock);
+			} else
+				restart = fn(timer);
+
 			/*
 			 * Note: We clear the CALLBACK bit after
 			 * enqueue_hrtimer to avoid reprogramming of
 			 * the event hardware. This happens at the end
 			 * of this function anyway.
 			 */
-			if (timer->function(timer) != HRTIMER_NORESTART) {
+			if (restart != HRTIMER_NORESTART) {
 				BUG_ON(timer->state != HRTIMER_STATE_CALLBACK);
 				enqueue_hrtimer(timer, base, 0);
 			}
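The hunk above moves the callback invocation out from under cpu_base->lock for HRTIMER_CB_IRQSAFE_NO_SOFTIRQ timers, so a callback that takes rq->lock or tasklist_lock never does so while nested inside the timer base lock. Below is a minimal userspace sketch of that unlock-call-relock pattern; all names in it (base_lock, rq_lock, scheduler_callback, expire_one_timer) are invented for illustration and are not kernel code.

/* Illustrative userspace analogue of the pattern introduced above;
 * not kernel code. Compile with: cc -pthread sketch.c */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t base_lock = PTHREAD_MUTEX_INITIALIZER; /* stands in for cpu_base->lock */
static pthread_mutex_t rq_lock = PTHREAD_MUTEX_INITIALIZER;   /* stands in for rq->lock */

/* A callback that, like a scheduler timer, needs the "runqueue" lock.
 * Returns 0 for "no restart", mirroring HRTIMER_NORESTART. */
static int scheduler_callback(void)
{
	pthread_mutex_lock(&rq_lock);
	puts("callback ran: rq_lock taken while base_lock is NOT held");
	pthread_mutex_unlock(&rq_lock);
	return 0;
}

static void expire_one_timer(void)
{
	pthread_mutex_lock(&base_lock);
	/* ...dequeue the expired timer while the base is locked... */

	pthread_mutex_unlock(&base_lock);   /* drop the base lock ...          */
	int restart = scheduler_callback(); /* ...run the callback unlocked... */
	pthread_mutex_lock(&base_lock);     /* ...and retake it afterwards     */

	if (restart)
		; /* ...re-enqueue the timer here, still under base_lock... */
	pthread_mutex_unlock(&base_lock);
}

int main(void)
{
	expire_one_timer();
	return 0;
}

If the callback instead ran with base_lock held while another code path took rq_lock first and then tried to take base_lock, the two lock orders would cross and could deadlock; dropping the base lock around the call is what removes that inversion.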
kernel/time/tick-sched.c  +0 −8

@@ -514,7 +514,6 @@ static enum hrtimer_restart tick_sched_timer(struct hrtimer *timer)
 {
 	struct tick_sched *ts =
 		container_of(timer, struct tick_sched, sched_timer);
-	struct hrtimer_cpu_base *base = timer->base->cpu_base;
 	struct pt_regs *regs = get_irq_regs();
 	ktime_t now = ktime_get();
 	int cpu = smp_processor_id();

@@ -552,15 +551,8 @@ static enum hrtimer_restart tick_sched_timer(struct hrtimer *timer)
 			touch_softlockup_watchdog();
 			ts->idle_jiffies++;
 		}
-		/*
-		 * update_process_times() might take tasklist_lock, hence
-		 * drop the base lock. sched-tick hrtimers are per-CPU and
-		 * never accessible by userspace APIs, so this is safe to do.
-		 */
-		spin_unlock(&base->lock);
 		update_process_times(user_mode(regs));
 		profile_tick(CPU_PROFILING);
-		spin_lock(&base->lock);
 	}

 	/* Do not restart, when we are in the idle loop */
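With hrtimer_interrupt() now dropping cpu_base->lock around HRTIMER_CB_IRQSAFE_NO_SOFTIRQ callbacks, tick_sched_timer() no longer needs its own unlock/relock around update_process_times(), which is why the hunk above deletes that dance along with the now-unused base variable. A hedged sketch of how a per-CPU tick timer would opt into that path follows; cb_mode, HRTIMER_CB_IRQSAFE_NO_SOFTIRQ and tick_sched_timer come from the diffs above, while the helper name and surrounding setup are illustrative, not a verbatim kernel excerpt.

/* Sketch only: a per-CPU tick timer opting into direct execution from
 * hrtimer_interrupt(). The function name is hypothetical. */
static void sketch_setup_sched_timer(struct tick_sched *ts)
{
	hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);

	/* Run straight from the timer interrupt, not via softirq.
	 * hrtimer_interrupt() drops cpu_base->lock around this callback,
	 * so tick_sched_timer() may take tasklist_lock (through
	 * update_process_times()) without risking a lock inversion. */
	ts->sched_timer.cb_mode = HRTIMER_CB_IRQSAFE_NO_SOFTIRQ;
	ts->sched_timer.function = tick_sched_timer;
}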