Unverified Commit 5e5ca013 authored by openeuler-ci-bot, committed by Gitee
Browse files

!15488 CVE-2025-21816

Merge Pull Request from: @ci-robot 
 
PR sync from: Xiongfeng Wang <wangxiongfeng2@huawei.com>
https://mailweb.openeuler.org/archives/list/kernel@openeuler.org/message/XTA3TWOUFIRPBHNA3TGD3XGJKWXND7GN/ 
Frederic Weisbecker (1):
  hrtimers: Force migrate away hrtimers queued after
    CPUHP_AP_HRTIMERS_DYING

Xiongfeng Wang (1):
  hrtimer: Fix kabi broken of struct hrtimer_cpu_base

 
https://gitee.com/src-openeuler/kernel/issues/IBPLI0 
 
Link: https://gitee.com/openeuler/kernel/pulls/15488

 

Reviewed-by: Xie XiuQi <xiexiuqi@huawei.com>
Signed-off-by: Zhang Peng <zhangpeng362@huawei.com>
parents c52c83a1 d8140a0e
Loading
Loading
Loading
Loading
+1 −0
Original line number Diff line number Diff line
@@ -247,6 +247,7 @@ struct hrtimer_cpu_base {

	KABI_RESERVE(1)
	KABI_RESERVE(2)
	KABI_EXTEND(call_single_data_t csd)
} ____cacheline_aligned;

static inline void hrtimer_set_expires(struct hrtimer *timer, ktime_t time)
+82 −21
Original line number Diff line number Diff line
@@ -58,6 +58,8 @@
#define HRTIMER_ACTIVE_SOFT	(HRTIMER_ACTIVE_HARD << MASK_SHIFT)
#define HRTIMER_ACTIVE_ALL	(HRTIMER_ACTIVE_SOFT | HRTIMER_ACTIVE_HARD)

static void retrigger_next_event(void *arg);

/*
 * The timer bases:
 *
@@ -111,7 +113,8 @@ DEFINE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases) =
			.clockid = CLOCK_TAI,
			.get_time = &ktime_get_clocktai,
		},
	}
	},
	.csd = CSD_INIT(retrigger_next_event, NULL)
};

static const int hrtimer_clock_to_base_table[MAX_CLOCKS] = {
@@ -124,6 +127,14 @@ static const int hrtimer_clock_to_base_table[MAX_CLOCKS] = {
	[CLOCK_TAI]		= HRTIMER_BASE_TAI,
};

/*
 * Tell whether timers may still be queued on @base's CPU.
 *
 * Without CPU hotplug support a base can never go offline, so the
 * answer is unconditionally true. Otherwise consult the base's
 * ->online flag, which is set in the common case.
 */
static inline bool hrtimer_base_is_online(struct hrtimer_cpu_base *base)
{
	if (IS_ENABLED(CONFIG_HOTPLUG_CPU))
		return likely(base->online);

	return true;
}

/*
 * Functions and macros which are different for UP/SMP systems are kept in a
 * single place
@@ -183,27 +194,54 @@ struct hrtimer_clock_base *lock_hrtimer_base(const struct hrtimer *timer,
}

/*
 * We do not migrate the timer when it is expiring before the next
 * event on the target cpu. When high resolution is enabled, we cannot
 * reprogram the target cpu hardware and we would cause it to fire
 * late. To keep it simple, we handle the high resolution enabled and
 * disabled case similar.
 * Check if the elected target is suitable considering its next
 * event and the hotplug state of the current CPU.
 *
 * If the elected target is remote and its next event is after the timer
 * to queue, then a remote reprogram is necessary. However there is no
 * guarantee the IPI handling the operation would arrive in time to meet
 * the high resolution deadline. In this case the local CPU becomes a
 * preferred target, unless it is offline.
 *
 * High and low resolution modes are handled the same way for simplicity.
 *
 * Called with cpu_base->lock of target cpu held.
 */
static int
hrtimer_check_target(struct hrtimer *timer, struct hrtimer_clock_base *new_base)
static bool hrtimer_suitable_target(struct hrtimer *timer, struct hrtimer_clock_base *new_base,
				    struct hrtimer_cpu_base *new_cpu_base,
				    struct hrtimer_cpu_base *this_cpu_base)
{
	ktime_t expires;

	/*
	 * The local CPU clockevent can be reprogrammed. Also get_target_base()
	 * guarantees it is online.
	 */
	if (new_cpu_base == this_cpu_base)
		return true;

	/*
	 * The offline local CPU can't be the default target if the
	 * next remote target event is after this timer. Keep the
	 * elected new base. An IPI will we issued to reprogram
	 * it as a last resort.
	 */
	if (!hrtimer_base_is_online(this_cpu_base))
		return true;

	expires = ktime_sub(hrtimer_get_expires(timer), new_base->offset);
	return expires < new_base->cpu_base->expires_next;

	return expires >= new_base->cpu_base->expires_next;
}

static inline
struct hrtimer_cpu_base *get_target_base(struct hrtimer_cpu_base *base,
					 int pinned)
static inline struct hrtimer_cpu_base *get_target_base(struct hrtimer_cpu_base *base, int pinned)
{
	if (!hrtimer_base_is_online(base)) {
		int cpu = cpumask_any_and(cpu_online_mask, housekeeping_cpumask(HK_TYPE_TIMER));

		return &per_cpu(hrtimer_bases, cpu);
	}

#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
	if (static_branch_likely(&timers_migration_enabled) && !pinned)
		return &per_cpu(hrtimer_bases, get_nohz_timer_target());
@@ -254,8 +292,8 @@ switch_hrtimer_base(struct hrtimer *timer, struct hrtimer_clock_base *base,
		raw_spin_unlock(&base->cpu_base->lock);
		raw_spin_lock(&new_base->cpu_base->lock);

		if (new_cpu_base != this_cpu_base &&
		    hrtimer_check_target(timer, new_base)) {
		if (!hrtimer_suitable_target(timer, new_base, new_cpu_base,
					     this_cpu_base)) {
			raw_spin_unlock(&new_base->cpu_base->lock);
			raw_spin_lock(&base->cpu_base->lock);
			new_cpu_base = this_cpu_base;
@@ -264,8 +302,7 @@ switch_hrtimer_base(struct hrtimer *timer, struct hrtimer_clock_base *base,
		}
		WRITE_ONCE(timer->base, new_base);
	} else {
		if (new_cpu_base != this_cpu_base &&
		    hrtimer_check_target(timer, new_base)) {
		if (!hrtimer_suitable_target(timer, new_base,  new_cpu_base, this_cpu_base)) {
			new_cpu_base = this_cpu_base;
			goto again;
		}
@@ -730,8 +767,6 @@ static inline int hrtimer_is_hres_enabled(void)
	return hrtimer_hres_enabled;
}

static void retrigger_next_event(void *arg);

/*
 * Switch to high resolution mode
 */
@@ -1218,6 +1253,7 @@ static int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
				    u64 delta_ns, const enum hrtimer_mode mode,
				    struct hrtimer_clock_base *base)
{
	struct hrtimer_cpu_base *this_cpu_base = this_cpu_ptr(&hrtimer_bases);
	struct hrtimer_clock_base *new_base;
	bool force_local, first;

@@ -1229,9 +1265,15 @@ static int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
	 * and enforce reprogramming after it is queued no matter whether
	 * it is the new first expiring timer again or not.
	 */
	force_local = base->cpu_base == this_cpu_ptr(&hrtimer_bases);
	force_local = base->cpu_base == this_cpu_base;
	force_local &= base->cpu_base->next_timer == timer;

	/*
	 * Don't force local queuing if this enqueue happens on a unplugged
	 * CPU after hrtimer_cpu_dying() has been invoked.
	 */
	force_local &= this_cpu_base->online;

	/*
	 * Remove an active timer from the queue. In case it is not queued
	 * on the current CPU, make sure that remove_hrtimer() updates the
@@ -1261,9 +1303,28 @@ static int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
	}

	first = enqueue_hrtimer(timer, new_base, mode);
	if (!force_local)
	if (!force_local) {
		/*
		 * If the current CPU base is online, then the timer is
		 * never queued on a remote CPU if it would be the first
		 * expiring timer there.
		 */
		if (hrtimer_base_is_online(this_cpu_base))
			return first;

		/*
		 * Timer was enqueued remote because the current base is
		 * already offline. If the timer is the first to expire,
		 * kick the remote CPU to reprogram the clock event.
		 */
		if (first) {
			struct hrtimer_cpu_base *new_cpu_base = new_base->cpu_base;

			smp_call_function_single_async(new_cpu_base->cpu, &new_cpu_base->csd);
		}
		return 0;
	}

	/*
	 * Timer was forced to stay on the current CPU to avoid
	 * reprogramming on removal and enqueue. Force reprogram the