Commit b6c33652 authored by Alexey Gladkov, committed by Eric W. Biederman

Use atomic_t for ucounts reference counting



The current implementation of the ucounts reference counter requires the
use of spin_lock. We're going to use get_ucounts() in more
performance-critical areas, such as the handling of RLIMIT_SIGPENDING.

Now the spin_lock is needed only when the hashtable is changed.
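
A minimal userspace sketch of the resulting scheme (hypothetical names;
C11 atomics stand in for the kernel's atomic_t, and the hashtable is
elided), showing that get/put touch only the counter while the lock
stays confined to the final unhash-and-free:

#include <stdatomic.h>
#include <stdlib.h>

struct obj {
	atomic_int count;		/* stands in for ucounts->count */
};

static void obj_put(struct obj *o)
{
	/* last reference dropped: this is where the kernel would take
	   ucounts_lock, unhash the entry, and free it */
	if (atomic_fetch_sub(&o->count, 1) == 1)
		free(o);
}

static struct obj *obj_get(struct obj *o)
{
	if (!o)
		return NULL;
	/* models atomic_add_negative(1, &count): wrapping add, then
	   test the sign of the new value */
	if ((int)((unsigned)atomic_fetch_add(&o->count, 1) + 1u) < 0) {
		obj_put(o);	/* undo the reference we just took */
		o = NULL;
	}
	return o;
}

int main(void)
{
	struct obj *o = malloc(sizeof(*o));
	if (!o)
		return 1;
	atomic_init(&o->count, 1);	/* as ATOMIC_INIT(1) in the patch */
	o = obj_get(o);			/* count == 2 */
	obj_put(o);			/* count == 1 */
	obj_put(o);			/* count == 0: freed */
	return 0;
}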

v10:
* Always try to put ucounts in case we cannot increase ucounts->count.
  This covers the case when all consumers return ucounts at once: the
  failed get still took a reference, so it must be dropped, and that
  put may be the one that frees the entry.

v9:
* Use a negative value to check whether ucounts->count is close to
  overflow.
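
For illustration only (not part of the patch): the sign check works
because kernel atomics wrap around in two's complement, so the first
increment past INT_MAX produces a negative value:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int32_t count = INT32_MAX;
	/* model the wrapping increment atomic_t performs */
	int32_t after = (int32_t)((uint32_t)count + 1u);

	/* prints: 2147483647 + 1 wraps to -2147483648 */
	printf("%d + 1 wraps to %d\n", count, after);
	return 0;
}

Every value below INT_MAX stays non-negative after the increment, so
the common path is a single atomic add with no lock taken.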

Signed-off-by: Alexey Gladkov <legion@kernel.org>
Link: https://lkml.kernel.org/r/94d1dbecab060a6b116b0a2d1accd8ca1bbb4f5f.1619094428.git.legion@kernel.org


Signed-off-by: Eric W. Biederman <ebiederm@xmission.com>
parent 905ae01c
include/linux/user_namespace.h  +2 −2
@@ -95,7 +95,7 @@ struct ucounts {
 	struct hlist_node node;
 	struct user_namespace *ns;
 	kuid_t uid;
-	int count;
+	atomic_t count;
 	atomic_long_t ucount[UCOUNT_COUNTS];
 };
 
@@ -107,7 +107,7 @@ void retire_userns_sysctls(struct user_namespace *ns);
 struct ucounts *inc_ucount(struct user_namespace *ns, kuid_t uid, enum ucount_type type);
 void dec_ucount(struct ucounts *ucounts, enum ucount_type type);
 struct ucounts *alloc_ucounts(struct user_namespace *ns, kuid_t uid);
-struct ucounts *get_ucounts(struct ucounts *ucounts);
+struct ucounts * __must_check get_ucounts(struct ucounts *ucounts);
 void put_ucounts(struct ucounts *ucounts);
 
 #ifdef CONFIG_USER_NS
kernel/ucount.c  +19 −34
@@ -11,7 +11,7 @@
 struct ucounts init_ucounts = {
 	.ns    = &init_user_ns,
 	.uid   = GLOBAL_ROOT_UID,
-	.count = 1,
+	.count = ATOMIC_INIT(1),
 };
 
 #define UCOUNTS_HASHTABLE_BITS 10
@@ -139,6 +139,15 @@ static void hlist_add_ucounts(struct ucounts *ucounts)
 	spin_unlock_irq(&ucounts_lock);
 }
 
+struct ucounts *get_ucounts(struct ucounts *ucounts)
+{
+	if (ucounts && atomic_add_negative(1, &ucounts->count)) {
+		put_ucounts(ucounts);
+		ucounts = NULL;
+	}
+	return ucounts;
+}
+
 struct ucounts *alloc_ucounts(struct user_namespace *ns, kuid_t uid)
 {
 	struct hlist_head *hashent = ucounts_hashentry(ns, uid);
@@ -155,7 +164,7 @@ struct ucounts *alloc_ucounts(struct user_namespace *ns, kuid_t uid)
 
 		new->ns = ns;
 		new->uid = uid;
-		new->count = 0;
+		atomic_set(&new->count, 1);
 
 		spin_lock_irq(&ucounts_lock);
 		ucounts = find_ucounts(ns, uid, hashent);
@@ -163,33 +172,12 @@ struct ucounts *alloc_ucounts(struct user_namespace *ns, kuid_t uid)
 			kfree(new);
 		} else {
 			hlist_add_head(&new->node, hashent);
-			ucounts = new;
+			spin_unlock_irq(&ucounts_lock);
+			return new;
 		}
 	}
-	if (ucounts->count == INT_MAX)
-		ucounts = NULL;
-	else
-		ucounts->count += 1;
 	spin_unlock_irq(&ucounts_lock);
+	ucounts = get_ucounts(ucounts);
 	return ucounts;
 }
 
-struct ucounts *get_ucounts(struct ucounts *ucounts)
-{
-	unsigned long flags;
-
-	if (!ucounts)
-		return NULL;
-
-	spin_lock_irqsave(&ucounts_lock, flags);
-	if (ucounts->count == INT_MAX) {
-		WARN_ONCE(1, "ucounts: counter has reached its maximum value");
-		ucounts = NULL;
-	} else {
-		ucounts->count += 1;
-	}
-	spin_unlock_irqrestore(&ucounts_lock, flags);
-
-	return ucounts;
-}
-
@@ -197,16 +185,13 @@ void put_ucounts(struct ucounts *ucounts)
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&ucounts_lock, flags);
-	ucounts->count -= 1;
-	if (!ucounts->count)
+	if (atomic_dec_and_test(&ucounts->count)) {
+		spin_lock_irqsave(&ucounts_lock, flags);
 		hlist_del_init(&ucounts->node);
-	else
-		ucounts = NULL;
-	spin_unlock_irqrestore(&ucounts_lock, flags);
-
-	kfree(ucounts);
+		spin_unlock_irqrestore(&ucounts_lock, flags);
+		kfree(ucounts);
+	}
 }
 
 static inline bool atomic_long_inc_below(atomic_long_t *v, int u)
 {
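
With get_ucounts() able to fail and marked __must_check, a caller is
expected to handle NULL and to pair every successful get with a put.
A hypothetical caller sketch (tsk_ucounts and the error code are
illustrative, not from this patch):

	struct ucounts *ucounts;

	ucounts = get_ucounts(tsk_ucounts);	/* NULL near overflow */
	if (!ucounts)
		return -EAGAIN;			/* hypothetical error path */

	/* ... use ucounts without taking ucounts_lock ... */

	put_ucounts(ucounts);			/* drop the reference */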