Commit 8deff3a6 authored by Chen Jun's avatar Chen Jun Committed by Zhang Zekun
Browse files

mm/sharepool: Add mg_sp_alloc_nodemask



hulk inclusion
category: feature
bugzilla: N/A

--------------------------------

Support allocating memory from a set of nodes.

mg_sp_alloc allows allocating memory from only one node.
If that node does not have enough memory, the caller has to
retry with the next node, which incurs significant overhead.

To improve performance, add a new interface that can allocate
memory from a set of candidate nodes in a single call.

Signed-off-by: default avatarChen Jun <chenjun102@huawei.com>
parent 598a1d66
Loading
Loading
Loading
Loading
+15 −0
Original line number Diff line number Diff line
@@ -629,6 +629,9 @@ int huge_add_to_page_cache(struct page *page, struct address_space *mapping,

const struct hstate *hugetlb_get_hstate(void);
struct page *hugetlb_alloc_hugepage(int nid, int flag);
struct page *hugetlb_alloc_hugepage_vma(struct vm_area_struct *vma,
		unsigned long address, int flag);

int hugetlb_insert_hugepage_pte(struct mm_struct *mm, unsigned long addr,
				pgprot_t prot, struct page *hpage);
int hugetlb_insert_hugepage_pte_by_pa(struct mm_struct *mm,
@@ -645,6 +648,12 @@ static inline struct page *hugetlb_alloc_hugepage(int nid, int flag)
	return  NULL;
}

/*
 * Stub for configurations where this hugepage interface is compiled out
 * (the guarding #ifdef is outside this hunk — presumably !CONFIG_HUGETLBFS;
 * confirm against the full header).  Allocation is simply unavailable.
 */
static inline struct page *hugetlb_alloc_hugepage_vma(struct vm_area_struct *vma,
		unsigned long address, int flag)
{
	return NULL;
}

static inline int hugetlb_insert_hugepage_pte(struct mm_struct *mm,
		unsigned long addr, pgprot_t prot, struct page *hpage)
{
@@ -1091,6 +1100,12 @@ static inline struct page *hugetlb_alloc_hugepage(int nid, int flag)
	return  NULL;
}

/*
 * Second stub of hugetlb_alloc_hugepage_vma() for another disabled-config
 * branch of this header (the guarding #if is outside this hunk; confirm
 * which option it covers).  Always fails the allocation.
 */
static inline struct page *hugetlb_alloc_hugepage_vma(struct vm_area_struct *vma,
		unsigned long address, int flag)
{
	return  NULL;
}

static inline int hugetlb_insert_hugepage_pte(struct mm_struct *mm,
		unsigned long addr, pgprot_t prot, struct page *hpage)
{
+4 −6
Original line number Diff line number Diff line
@@ -12,6 +12,8 @@
#include <linux/jump_label.h>
#include <linux/kabi.h>

#include <linux/share_pool_interface.h>

#define SP_HUGEPAGE		(1 << 0)
#define SP_HUGEPAGE_ONLY	(1 << 1)
#define SP_DVPP			(1 << 2)
@@ -256,6 +258,8 @@ extern int proc_sp_group_state(struct seq_file *m, struct pid_namespace *ns,
			struct pid *pid, struct task_struct *task);

extern void *mg_sp_alloc(unsigned long size, unsigned long sp_flags, int spg_id);
extern void *mg_sp_alloc_nodemask(unsigned long size, unsigned long sp_flags, int spg_id,
		nodemask_t nodemask);
extern int mg_sp_free(unsigned long addr, int id);

extern void *mg_sp_make_share_k2u(unsigned long kva, unsigned long size,
@@ -286,7 +290,6 @@ vm_fault_t sharepool_no_page(struct mm_struct *mm,
			     unsigned long address, pte_t *ptep, unsigned int flags);
extern bool sp_check_addr(unsigned long addr);
extern bool sp_check_mmap_addr(unsigned long addr, unsigned long flags);
extern int sp_node_id(struct vm_area_struct *vma);

static inline bool sp_is_enabled(void)
{
@@ -452,11 +455,6 @@ static inline bool is_vmalloc_sharepool(unsigned long vm_flags)
	return NULL;
}

static inline int sp_node_id(struct vm_area_struct *vma)
{
	return numa_node_id();
}

static inline bool sp_check_addr(unsigned long addr)
{
	return false;
+19 −0
Original line number Diff line number Diff line
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Minimal share-pool interface header, split out of share_pool.h so that
 * core mm code (e.g. mempolicy.c, which includes it to call sp_node_id())
 * does not have to pull in the full share-pool definitions.
 */
#ifndef LINUX_SHARE_POOL_INTERFACE_H
#define LINUX_SHARE_POOL_INTERFACE_H

#include <linux/mman.h>
#include <linux/mm_types.h>
#include <linux/numa.h>
#include <linux/kabi.h>

#ifdef CONFIG_ASCEND_SHARE_POOL
/* NUMA node hint for @vma; implemented by the share-pool code. */
extern int sp_node_id(struct vm_area_struct *vma);
#else
/* Share pool disabled: fall back to the current CPU's NUMA node. */
static inline int sp_node_id(struct vm_area_struct *vma)
{
	return numa_node_id();
}
#endif /* CONFIG_ASCEND_SHARE_POOL */

#endif /* LINUX_SHARE_POOL_INTERFACE_H */
+26 −4
Original line number Diff line number Diff line
@@ -6312,7 +6312,7 @@ static struct page *hugetlb_alloc_hugepage_normal(struct hstate *h,
/*
 * Allocate hugepage without reserve
 */
struct page *hugetlb_alloc_hugepage(int nid, int flag)
struct page *hugetlb_alloc_hugepage_nodemask(int nid, int flag, nodemask_t *nodemask)
{
	struct hstate *h = &default_hstate;
	gfp_t gfp_mask = htlb_alloc_mask(h);
@@ -6327,7 +6327,6 @@ struct page *hugetlb_alloc_hugepage(int nid, int flag)
	if (flag & ~HUGETLB_ALLOC_MASK)
		return NULL;

	gfp_mask |= __GFP_THISNODE;
	if (enable_charge_mighp)
		gfp_mask |= __GFP_ACCOUNT;

@@ -6337,12 +6336,22 @@ struct page *hugetlb_alloc_hugepage(int nid, int flag)
	if (flag & HUGETLB_ALLOC_NORMAL)
		page = hugetlb_alloc_hugepage_normal(h, gfp_mask, nid);
	else if (flag & HUGETLB_ALLOC_BUDDY)
		page = alloc_migrate_huge_page(h, gfp_mask, nid, NULL);
		page = alloc_migrate_huge_page(h, gfp_mask, nid, nodemask);
	else
		page = alloc_huge_page_nodemask(h, nid, NULL, gfp_mask);
		page = alloc_huge_page_nodemask(h, nid, nodemask, gfp_mask);

	return page;
}

struct page *hugetlb_alloc_hugepage(int nid, int flag)
{
	nodemask_t nodemask;

	nodes_clear(nodemask);
	node_set(nid, nodemask);

	return hugetlb_alloc_hugepage_nodemask(nid, flag, &nodemask);
}
EXPORT_SYMBOL_GPL(hugetlb_alloc_hugepage);

static pte_t *hugetlb_huge_pte_alloc(struct mm_struct *mm, unsigned long addr,
@@ -6364,6 +6373,19 @@ static pte_t *hugetlb_huge_pte_alloc(struct mm_struct *mm, unsigned long addr,
	return ptep;
}

/*
 * Allocate a hugepage for @vma at @address, honouring the task/vma NUMA
 * memory policy (preferred node + allowed nodemask) as resolved by
 * huge_node().
 *
 * huge_node() takes a reference on the vma's mempolicy (via
 * get_vma_policy()), and @nodemask may point into that policy, so the
 * reference must be dropped with mpol_cond_put() only after the
 * allocation has consumed the nodemask — the original code leaked it.
 * See upstream alloc_huge_page_vma() for the same pattern.
 */
struct page *hugetlb_alloc_hugepage_vma(struct vm_area_struct *vma, unsigned long address, int flag)
{
	int nid;
	struct page *page;
	struct hstate *h = hstate_vma(vma);
	struct mempolicy *mpol;
	nodemask_t *nodemask;
	gfp_t gfp_mask;

	gfp_mask = htlb_alloc_mask(h);
	nid = huge_node(vma, address, gfp_mask, &mpol, &nodemask);
	page = hugetlb_alloc_hugepage_nodemask(nid, flag, nodemask);
	mpol_cond_put(mpol);

	return page;
}

static int __hugetlb_insert_hugepage(struct mm_struct *mm, unsigned long addr,
				     pgprot_t prot, unsigned long pfn)
{
+3 −1
Original line number Diff line number Diff line
@@ -103,6 +103,8 @@
#include <linux/printk.h>
#include <linux/swapops.h>

#include <linux/share_pool_interface.h>

#include <asm/tlbflush.h>
#include <linux/uaccess.h>

@@ -2198,7 +2200,7 @@ int huge_node(struct vm_area_struct *vma, unsigned long addr, gfp_t gfp_flags,
		nid = interleave_nid(*mpol, vma, addr,
					huge_page_shift(hstate_vma(vma)));
	} else {
		nid = policy_node(gfp_flags, *mpol, numa_node_id());
		nid = policy_node(gfp_flags, *mpol, sp_node_id(vma));
		if ((*mpol)->mode == MPOL_BIND || mode == MPOL_PREFERRED_MANY)
			*nodemask = &(*mpol)->v.nodes;
	}
Loading