Commit e5d96a31 authored by Liu Shixin
Browse files

mm/dynamic_hugetlb: replace spin_lock with mutex_lock and fix kabi broken

hulk inclusion
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I6MH03


CVE: NA

--------------------------------

When memory is fragmented, update_reserve_pages() may call migrate_pages()
to collect continuous memory. This function can sleep, so we should use
mutex lock instead of spin lock. Use KABI_EXTEND to fix kabi broken.

Fixes: 0c06a1c0 ("mm/dynamic_hugetlb: add interface to configure the count of hugepages")
Signed-off-by: Liu Shixin <liushixin2@huawei.com>
parent 2430060b
Loading
Loading
Loading
Loading
+13 −1
Original line number Diff line number Diff line
@@ -66,7 +66,7 @@ enum huge_pages_pool_type {
struct dhugetlb_pool {
	int nid;
	spinlock_t lock;
	spinlock_t reserved_lock;
	KABI_DEPRECATE(spinlock_t, reserved_lock)
	atomic_t refcnt;
	unsigned long normal_pages_disabled;

@@ -74,6 +74,18 @@ struct dhugetlb_pool {

	unsigned long total_huge_pages;
	struct huge_pages_pool hpages_pool[HUGE_PAGES_POOL_MAX];

	/* The dhugetlb_pool structures is only used by core kernel, it is
	 * also accessed only the memory cgroup and hugetlb core code and
	 * so changes made to dhugetlb_pool structure should not affect
	 * third-party kernel modules.
	 */
	KABI_EXTEND(struct mutex reserved_lock)

	/*
	 * The percpu_pool[] should only be used by dynamic hugetlb core.
	 * External kernel modules should not used it.
	 */
	struct percpu_pages_pool percpu_pool[0];
};

+3 −3
Original line number Diff line number Diff line
@@ -887,7 +887,7 @@ static int hugetlb_pool_create(struct mem_cgroup *memcg, unsigned long nid)
		return -ENOMEM;

	spin_lock_init(&hpool->lock);
	spin_lock_init(&hpool->reserved_lock);
	mutex_init(&hpool->reserved_lock);
	hpool->nid = nid;
	atomic_set(&hpool->refcnt, 1);

@@ -1000,7 +1000,7 @@ static ssize_t update_reserved_pages(struct mem_cgroup *memcg, char *buf, int hp
	if (!get_hpool_unless_zero(hpool))
		return -EINVAL;

	spin_lock(&hpool->reserved_lock);
	mutex_lock(&hpool->reserved_lock);
	spin_lock(&hpool->lock);
	hpages_pool = &hpool->hpages_pool[hpages_pool_idx];
	if (nr_pages > hpages_pool->nr_huge_pages) {
@@ -1036,7 +1036,7 @@ static ssize_t update_reserved_pages(struct mem_cgroup *memcg, char *buf, int hp
		hpages_pool->free_normal_pages += delta;
	}
	spin_unlock(&hpool->lock);
	spin_unlock(&hpool->reserved_lock);
	mutex_unlock(&hpool->reserved_lock);
	put_hpool(hpool);
	return 0;
}