Unverified commit a7c59ba6 authored by openeuler-ci-bot, committed by Gitee

!15510 v3 CVE-2025-21816

Merge Pull Request from: @ci-robot 
 
PR sync from: Xiongfeng Wang <wangxiongfeng2@huawei.com>
https://mailweb.openeuler.org/archives/list/kernel@openeuler.org/message/CE3YGRWZPLGH3ODTRGYRTHLW25AMWQ66/ 
Frederic Weisbecker (2):
  hrtimer: Report offline hrtimer enqueue
  hrtimers: Force migrate away hrtimers queued after
    CPUHP_AP_HRTIMERS_DYING

Xiongfeng Wang (1):
  hrtimer: Fix kabi broken of struct hrtimer_cpu_base

 
https://gitee.com/src-openeuler/kernel/issues/IBPLI0 
 
Link: https://gitee.com/openeuler/kernel/pulls/15510

 

Reviewed-by: Xie XiuQi <xiexiuqi@huawei.com>
Reviewed-by: Li Nan <linan122@huawei.com>
Signed-off-by: Li Nan <linan122@huawei.com>
parents 585988de bca02300
include/linux/hrtimer.h (+3 −0)
@@ -211,6 +211,7 @@ enum hrtimer_base_type {
  * @max_hang_time:	Maximum time spent in hrtimer_interrupt
  * @softirq_expiry_lock: Lock which is taken while softirq based hrtimer are
  *			 expired
+ * @online:		CPU is online from an hrtimers point of view
  * @timer_waiters:	A hrtimer_cancel() invocation waits for the timer
  *			callback to finish.
  * @expires_next:	absolute time of the next event, is required for remote
@@ -234,6 +235,7 @@ struct hrtimer_cpu_base {
 					in_hrtirq		: 1,
 					hang_detected		: 1,
 					softirq_activated       : 1;
+	KABI_FILL_HOLE(unsigned int	online:1)
 #ifdef CONFIG_HIGH_RES_TIMERS
 	unsigned int			nr_events;
 	unsigned short			nr_retries;
@@ -252,6 +254,7 @@ struct hrtimer_cpu_base {
 
 	KABI_RESERVE(1)
 	KABI_RESERVE(2)
+	KABI_EXTEND(call_single_data_t csd)
 } ____cacheline_aligned;
 
 static inline void hrtimer_set_expires(struct hrtimer *timer, ktime_t time)
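
The kabi patch in the series keeps the shipped layout of struct hrtimer_cpu_base intact: the new online bit is placed into an existing bitfield hole via KABI_FILL_HOLE(), and the csd member lands in extension space via KABI_EXTEND(). Below is a minimal userspace sketch of why hole-filling is ABI-safe, using hypothetical before/after structs rather than the real openEuler macros; on common ABIs the extra bit consumes existing padding, not new storage.

/*
 * Simplified model of the kABI argument above (hypothetical structs,
 * not the openEuler macros): adding a 1-bit flag inside an existing
 * bitfield storage unit moves no member and changes no size.
 */
#include <assert.h>
#include <stddef.h>
#include <stdio.h>

struct before {			/* shipped layout */
	unsigned int a : 1,
		     b : 1;	/* rest of the word is an unused hole */
	long next;
};

struct after {			/* patched layout */
	unsigned int a : 1,
		     b : 1,
		     online : 1;	/* fills part of the hole */
	long next;
};

int main(void)
{
	/* The new bit consumes padding, so the ABI is unchanged. */
	static_assert(sizeof(struct before) == sizeof(struct after),
		      "size changed");
	static_assert(offsetof(struct before, next) == offsetof(struct after, next),
		      "member moved");
	puts("layout preserved");
	return 0;
}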
kernel/time/hrtimer.c (+86 −19)
@@ -38,6 +38,7 @@
 #include <linux/sched/deadline.h>
 #include <linux/sched/nohz.h>
 #include <linux/sched/debug.h>
+#include <linux/sched/isolation.h>
 #include <linux/timer.h>
 #include <linux/freezer.h>
 #include <linux/compat.h>
@@ -57,6 +58,8 @@
 #define HRTIMER_ACTIVE_SOFT	(HRTIMER_ACTIVE_HARD << MASK_SHIFT)
 #define HRTIMER_ACTIVE_ALL	(HRTIMER_ACTIVE_SOFT | HRTIMER_ACTIVE_HARD)
 
+static void retrigger_next_event(void *arg);
+
 /*
  * The timer bases:
  *
@@ -110,7 +113,8 @@ DEFINE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases) =
 			.clockid = CLOCK_TAI,
 			.get_time = &ktime_get_clocktai,
 		},
-	}
+	},
+	.csd = CSD_INIT(retrigger_next_event, NULL)
 };
 
 static const int hrtimer_clock_to_base_table[MAX_CLOCKS] = {
@@ -123,6 +127,14 @@ static const int hrtimer_clock_to_base_table[MAX_CLOCKS] = {
 	[CLOCK_TAI]		= HRTIMER_BASE_TAI,
 };
 
+static inline bool hrtimer_base_is_online(struct hrtimer_cpu_base *base)
+{
+	if (!IS_ENABLED(CONFIG_HOTPLUG_CPU))
+		return true;
+	else
+		return likely(base->online);
+}
+
 /*
  * Functions and macros which are different for UP/SMP systems are kept in a
  * single place
@@ -181,27 +193,54 @@ struct hrtimer_clock_base *lock_hrtimer_base(const struct hrtimer *timer,
 }
 
 /*
- * We do not migrate the timer when it is expiring before the next
- * event on the target cpu. When high resolution is enabled, we cannot
- * reprogram the target cpu hardware and we would cause it to fire
- * late. To keep it simple, we handle the high resolution enabled and
- * disabled case similar.
+ * Check if the elected target is suitable considering its next
+ * event and the hotplug state of the current CPU.
+ *
+ * If the elected target is remote and its next event is after the timer
+ * to queue, then a remote reprogram is necessary. However there is no
+ * guarantee the IPI handling the operation would arrive in time to meet
+ * the high resolution deadline. In this case the local CPU becomes a
+ * preferred target, unless it is offline.
+ *
+ * High and low resolution modes are handled the same way for simplicity.
  *
  * Called with cpu_base->lock of target cpu held.
  */
-static int
-hrtimer_check_target(struct hrtimer *timer, struct hrtimer_clock_base *new_base)
+static bool hrtimer_suitable_target(struct hrtimer *timer, struct hrtimer_clock_base *new_base,
+				    struct hrtimer_cpu_base *new_cpu_base,
+				    struct hrtimer_cpu_base *this_cpu_base)
 {
 	ktime_t expires;
 
+	/*
+	 * The local CPU clockevent can be reprogrammed. Also get_target_base()
+	 * guarantees it is online.
+	 */
+	if (new_cpu_base == this_cpu_base)
+		return true;
+
+	/*
+	 * The offline local CPU can't be the default target if the
+	 * next remote target event is after this timer. Keep the
+	 * elected new base. An IPI will be issued to reprogram
+	 * it as a last resort.
+	 */
+	if (!hrtimer_base_is_online(this_cpu_base))
+		return true;
+
 	expires = ktime_sub(hrtimer_get_expires(timer), new_base->offset);
-	return expires < new_base->cpu_base->expires_next;
+
+	return expires >= new_base->cpu_base->expires_next;
 }
 
-static inline
-struct hrtimer_cpu_base *get_target_base(struct hrtimer_cpu_base *base,
-					 int pinned)
+static inline struct hrtimer_cpu_base *get_target_base(struct hrtimer_cpu_base *base, int pinned)
 {
+	if (!hrtimer_base_is_online(base)) {
+		int cpu = cpumask_any_and(cpu_online_mask, housekeeping_cpumask(HK_FLAG_TIMER));
+
+		return &per_cpu(hrtimer_bases, cpu);
+	}
+
 #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
 	if (static_branch_likely(&timers_migration_enabled) && !pinned)
 		return &per_cpu(hrtimer_bases, get_nohz_timer_target());
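
Taken together, hrtimer_base_is_online() and hrtimer_suitable_target() implement the policy described in the comment above: prefer the local CPU while it can still reprogram its own clockevent, and keep a remote base only when the timer would not become that base's first expiring event, or when the local CPU is already offline. A small userspace model of that decision follows; the cpu_base struct and plain integer timestamps are stand-ins for the kernel's per-CPU hrtimer_cpu_base and ktime_t.

/* Userspace model of the target-suitability check (illustrative only). */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct cpu_base {
	bool online;
	int64_t expires_next;	/* earliest event already programmed */
};

static bool suitable_target(int64_t expires, const struct cpu_base *target,
			    const struct cpu_base *local)
{
	if (target == local)	/* local clockevent can be reprogrammed */
		return true;
	if (!local->online)	/* no local fallback: keep the remote target */
		return true;
	/* suitable only if no remote reprogram would be needed */
	return expires >= target->expires_next;
}

int main(void)
{
	struct cpu_base local = { .online = true, .expires_next = 100 };
	struct cpu_base remote = { .online = true, .expires_next = 50 };

	/* Timer at t=70 would be remote's new first event: fall back locally. */
	printf("remote suitable: %d\n", suitable_target(70, &remote, &local));

	local.online = false;
	/* Local CPU offline: the elected remote base must be kept. */
	printf("remote suitable when local offline: %d\n",
	       suitable_target(70, &remote, &local));
	return 0;
}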
@@ -252,8 +291,8 @@ switch_hrtimer_base(struct hrtimer *timer, struct hrtimer_clock_base *base,
 		raw_spin_unlock(&base->cpu_base->lock);
 		raw_spin_lock(&new_base->cpu_base->lock);
 
-		if (new_cpu_base != this_cpu_base &&
-		    hrtimer_check_target(timer, new_base)) {
+		if (!hrtimer_suitable_target(timer, new_base, new_cpu_base,
+					     this_cpu_base)) {
 			raw_spin_unlock(&new_base->cpu_base->lock);
 			raw_spin_lock(&base->cpu_base->lock);
 			new_cpu_base = this_cpu_base;
@@ -262,8 +301,7 @@ switch_hrtimer_base(struct hrtimer *timer, struct hrtimer_clock_base *base,
 		}
 		WRITE_ONCE(timer->base, new_base);
 	} else {
-		if (new_cpu_base != this_cpu_base &&
-		    hrtimer_check_target(timer, new_base)) {
+		if (!hrtimer_suitable_target(timer, new_base, new_cpu_base, this_cpu_base)) {
 			new_cpu_base = this_cpu_base;
 			goto again;
 		}
@@ -980,6 +1018,7 @@ static int enqueue_hrtimer(struct hrtimer *timer,
 			   enum hrtimer_mode mode)
 {
 	debug_activate(timer, mode);
+	WARN_ON_ONCE(!base->cpu_base->online);
 
 	base->cpu_base->active_bases |= 1 << base->index;
@@ -1111,6 +1150,7 @@ static int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
 				    u64 delta_ns, const enum hrtimer_mode mode,
 				    struct hrtimer_clock_base *base)
 {
+	struct hrtimer_cpu_base *this_cpu_base = this_cpu_ptr(&hrtimer_bases);
 	struct hrtimer_clock_base *new_base;
 	bool force_local, first;
@@ -1122,9 +1162,15 @@ static int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
 	 * and enforce reprogramming after it is queued no matter whether
 	 * it is the new first expiring timer again or not.
 	 */
-	force_local = base->cpu_base == this_cpu_ptr(&hrtimer_bases);
+	force_local = base->cpu_base == this_cpu_base;
 	force_local &= base->cpu_base->next_timer == timer;
 
+	/*
+	 * Don't force local queuing if this enqueue happens on an unplugged
+	 * CPU after hrtimer_cpu_dying() has been invoked.
+	 */
+	force_local &= this_cpu_base->online;
+
 	/*
 	 * Remove an active timer from the queue. In case it is not queued
 	 * on the current CPU, make sure that remove_hrtimer() updates the
@@ -1154,9 +1200,28 @@ static int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
 	}
 
 	first = enqueue_hrtimer(timer, new_base, mode);
-	if (!force_local)
-		return first;
+	if (!force_local) {
+		/*
+		 * If the current CPU base is online, then the timer is
+		 * never queued on a remote CPU if it would be the first
+		 * expiring timer there.
+		 */
+		if (hrtimer_base_is_online(this_cpu_base))
+			return first;
+
+		/*
+		 * Timer was enqueued remote because the current base is
+		 * already offline. If the timer is the first to expire,
+		 * kick the remote CPU to reprogram the clock event.
+		 */
+		if (first) {
+			struct hrtimer_cpu_base *new_cpu_base = new_base->cpu_base;
+
+			smp_call_function_single_async(new_cpu_base->cpu, &new_cpu_base->csd);
+		}
+		return 0;
+	}
 
 	/*
 	 * Timer was forced to stay on the current CPU to avoid
 	 * reprogramming on removal and enqueue. Force reprogram the
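
When the enqueue happens from an offline CPU, the timer lands on a remote base and, if it became that base's first expiring timer, the remote CPU is kicked through the csd added to struct hrtimer_cpu_base so that retrigger_next_event() reprograms its clockevent. A rough pthread model of that asynchronous kick follows, with a condition variable standing in for the IPI; all names are illustrative, not kernel API.

/* Userspace sketch of the async "kick the remote CPU" path (illustrative). */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t kick = PTHREAD_COND_INITIALIZER;
static bool kicked;
static long expires_next = 100;	/* event already programmed remotely */

static void *remote_cpu(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	while (!kicked)
		pthread_cond_wait(&kick, &lock);	/* "IPI" arrives */
	printf("remote: reprogram clockevent to %ld\n", expires_next);
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t t;
	long timer = 70;	/* new timer queued from an "offline" CPU */

	pthread_create(&t, NULL, remote_cpu, NULL);

	pthread_mutex_lock(&lock);
	if (timer < expires_next) {	/* timer is the remote base's new first */
		expires_next = timer;
		kicked = true;		/* async kick: no completion wait */
		pthread_cond_signal(&kick);
	}
	pthread_mutex_unlock(&lock);

	pthread_join(t, NULL);
	return 0;
}

The asynchronous flavor matters here: like smp_call_function_single_async(), the queueing side does not wait for the remote handler to run, it only guarantees the kick is delivered.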
@@ -2078,6 +2143,7 @@ int hrtimers_prepare_cpu(unsigned int cpu)
 	cpu_base->softirq_next_timer = NULL;
 	cpu_base->expires_next = KTIME_MAX;
 	cpu_base->softirq_expires_next = KTIME_MAX;
+	cpu_base->online = 1;
 	hrtimer_cpu_base_init_expiry_lock(cpu_base);
 	return 0;
 }
@@ -2145,6 +2211,7 @@ int hrtimers_cpu_dying(unsigned int dying_cpu)
 	smp_call_function_single(ncpu, retrigger_next_event, NULL, 0);
 
 	raw_spin_unlock(&new_base->lock);
+	old_base->online = 0;
 	raw_spin_unlock(&old_base->lock);
 
 	return 0;