Commit eb8c5072 authored by Dmitry Safonov, committed by Jakub Kicinski

jump_label: Prevent key->enabled int overflow



1. With CONFIG_JUMP_LABEL=n, static_key_slow_inc() has no protection
   against key->enabled refcounter overflow.
2. With CONFIG_JUMP_LABEL=y, static_key_slow_inc_cpuslocked() may
   still turn the refcounter negative, as (v + 1) may overflow.
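
For illustration, a minimal userspace sketch of the wraparound in
case 1 (the unsigned cast only makes the two's-complement wrap
explicit; it is effectively what an unchecked increment does):

  #include <limits.h>
  #include <stdio.h>

  int main(void)
  {
          int enabled = INT_MAX;  /* refcounter at its maximum */

          /* one increment too many: wraps around to INT_MIN */
          enabled = (int)((unsigned int)enabled + 1);
          printf("%d\n", enabled);  /* prints -2147483648 */
          return 0;
  }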

key->enabled is indeed a ref-counter, as documented in multiple
places: the top comment in jump_label.h,
Documentation/staging/static-keys.rst, etc.

As -1 is reserved for a static key that's in the process of being
enabled, functions break with a negative key->enabled refcount:
- for CONFIG_JUMP_LABEL=n, the negative return of static_key_count()
  breaks static_key_false() and static_key_true(), as the simplified
  fallbacks below show;
- the refcounter may reach 0 from the negative side after too many
  static_key_slow_inc() calls, leading to use-after-free issues.
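
For reference, the CONFIG_JUMP_LABEL=n fallbacks boil down to roughly
the following (simplified from include/linux/jump_label.h), which is
why a negative, wrapped-around count makes the key look disabled while
users still hold references:

  static inline int static_key_count(struct static_key *key)
  {
          return atomic_read(&key->enabled);
  }

  static __always_inline bool static_key_false(struct static_key *key)
  {
          /* a negative, overflowed count reads as "disabled" here */
          return unlikely(static_key_count(key) > 0);
  }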

These flaws force some users to introduce an additional mutex and guard
the reference counter against overflow themselves; see
bpf_enable_runtime_stats() checking the counter against INT_MAX / 2.
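
That workaround looks roughly like this (paraphrased from
bpf_enable_runtime_stats() in kernel/bpf/syscall.c; unrelated details
trimmed):

  mutex_lock(&bpf_stats_enabled_mutex);
  /* Set a very high limit to avoid overflow */
  if (atomic_read(&bpf_stats_enabled_key.key.enabled) > INT_MAX / 2) {
          mutex_unlock(&bpf_stats_enabled_mutex);
          return -EBUSY;
  }
  ...
  static_key_slow_inc(&bpf_stats_enabled_key.key);
  mutex_unlock(&bpf_stats_enabled_mutex);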

Prevent the reference counter from overflowing by checking that
(v + 1) > 0 before the increment. Change the functions' API to return
whether the increment was successful.
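
A caller can then check the result instead of policing the counter
itself; a minimal sketch (example_key and the -EOVERFLOW error choice
are hypothetical, not part of this patch):

  static struct static_key example_key = STATIC_KEY_INIT_FALSE;

  static int example_enable(void)
  {
          /* fails instead of wrapping the refcounter */
          if (!static_key_slow_inc(&example_key))
                  return -EOVERFLOW;
          return 0;
  }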

Signed-off-by: Dmitry Safonov <dima@arista.com>
Acked-by: Jakub Kicinski <kuba@kernel.org>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parent 19833ae2
include/linux/jump_label.h  +17 −4
@@ -224,9 +224,10 @@ extern bool arch_jump_label_transform_queue(struct jump_entry *entry,
 					    enum jump_label_type type);
 extern void arch_jump_label_transform_apply(void);
 extern int jump_label_text_reserved(void *start, void *end);
-extern void static_key_slow_inc(struct static_key *key);
+extern bool static_key_slow_inc(struct static_key *key);
+extern bool static_key_fast_inc_not_disabled(struct static_key *key);
 extern void static_key_slow_dec(struct static_key *key);
-extern void static_key_slow_inc_cpuslocked(struct static_key *key);
+extern bool static_key_slow_inc_cpuslocked(struct static_key *key);
 extern void static_key_slow_dec_cpuslocked(struct static_key *key);
 extern int static_key_count(struct static_key *key);
 extern void static_key_enable(struct static_key *key);
@@ -278,11 +279,23 @@ static __always_inline bool static_key_true(struct static_key *key)
 	return false;
 }
 
-static inline void static_key_slow_inc(struct static_key *key)
+static inline bool static_key_fast_inc_not_disabled(struct static_key *key)
 {
+	int v;
+
 	STATIC_KEY_CHECK_USE(key);
-	atomic_inc(&key->enabled);
+	/*
+	 * Prevent key->enabled getting negative to follow the same semantics
+	 * as for CONFIG_JUMP_LABEL=y, see kernel/jump_label.c comment.
+	 */
+	v = atomic_read(&key->enabled);
+	do {
+		if (v < 0 || (v + 1) < 0)
+			return false;
+	} while (!likely(atomic_try_cmpxchg(&key->enabled, &v, v + 1)));
+	return true;
 }
+#define static_key_slow_inc(key)	static_key_fast_inc_not_disabled(key)
 
 static inline void static_key_slow_dec(struct static_key *key)
 {
kernel/jump_label.c  +44 −12
@@ -113,9 +113,40 @@ int static_key_count(struct static_key *key)
 }
 EXPORT_SYMBOL_GPL(static_key_count);
 
-void static_key_slow_inc_cpuslocked(struct static_key *key)
+/*
+ * static_key_fast_inc_not_disabled - adds a user for a static key
+ * @key: static key that must be already enabled
+ *
+ * The caller must make sure that the static key can't get disabled while
+ * in this function. It doesn't patch jump labels, only adds a user to
+ * an already enabled static key.
+ *
+ * Returns true if the increment was done. Unlike refcount_t the ref counter
+ * is not saturated, but will fail to increment on overflow.
+ */
+bool static_key_fast_inc_not_disabled(struct static_key *key)
 {
+	int v;
+
 	STATIC_KEY_CHECK_USE(key);
+	/*
+	 * Negative key->enabled has a special meaning: it sends
+	 * static_key_slow_inc() down the slow path, and it is non-zero
+	 * so it counts as "enabled" in jump_label_update().  Note that
+	 * atomic_inc_unless_negative() checks >= 0, so roll our own.
+	 */
+	v = atomic_read(&key->enabled);
+	do {
+		if (v <= 0 || (v + 1) < 0)
+			return false;
+	} while (!likely(atomic_try_cmpxchg(&key->enabled, &v, v + 1)));
+
+	return true;
+}
+EXPORT_SYMBOL_GPL(static_key_fast_inc_not_disabled);
+
+bool static_key_slow_inc_cpuslocked(struct static_key *key)
+{
 	lockdep_assert_cpus_held();
 
 	/*
@@ -124,15 +155,9 @@ void static_key_slow_inc_cpuslocked(struct static_key *key)
 	 * jump_label_update() process.  At the same time, however,
 	 * the jump_label_update() call below wants to see
 	 * static_key_enabled(&key) for jumps to be updated properly.
-	 *
-	 * So give a special meaning to negative key->enabled: it sends
-	 * static_key_slow_inc() down the slow path, and it is non-zero
-	 * so it counts as "enabled" in jump_label_update().  Note that
-	 * atomic_inc_unless_negative() checks >= 0, so roll our own.
 	 */
-	for (int v = atomic_read(&key->enabled); v > 0; )
-		if (likely(atomic_try_cmpxchg(&key->enabled, &v, v + 1)))
-			return;
+	if (static_key_fast_inc_not_disabled(key))
+		return true;
 
 	jump_label_lock();
 	if (atomic_read(&key->enabled) == 0) {
@@ -144,16 +169,23 @@ void static_key_slow_inc_cpuslocked(struct static_key *key)
 		 */
 		atomic_set_release(&key->enabled, 1);
 	} else {
-		atomic_inc(&key->enabled);
+		if (WARN_ON_ONCE(!static_key_fast_inc_not_disabled(key))) {
+			jump_label_unlock();
+			return false;
+		}
 	}
 	jump_label_unlock();
+	return true;
 }
 
-void static_key_slow_inc(struct static_key *key)
+bool static_key_slow_inc(struct static_key *key)
 {
+	bool ret;
+
 	cpus_read_lock();
-	static_key_slow_inc_cpuslocked(key);
+	ret = static_key_slow_inc_cpuslocked(key);
 	cpus_read_unlock();
+	return ret;
 }
 EXPORT_SYMBOL_GPL(static_key_slow_inc);