Unverified Commit 567ef11f authored by openeuler-ci-bot's avatar openeuler-ci-bot Committed by Gitee
Browse files

!3461 Revert "mm/mempolicy: don't handle MPOL_LOCAL like a fake MPOL_PREFERRED policy"

Merge Pull Request from: @ci-robot 
 
PR sync from: Wupeng Ma <mawupeng1@huawei.com>
https://mailweb.openeuler.org/hyperkitty/list/kernel@openeuler.org/message/GFPHJBL2H4UE4JSHE2G6DGCQ6FYBSGQS/ 
From: Ma Wupeng <mawupeng1@huawei.com>

Revert to bring MPOL_LOCAL back.

changelog since v2:
 - introduce mpol_rebind_preferred

changelog since v1:
 - bugfix in __mpol_equal

Ma Wupeng (1):
  Revert "mm/mempolicy: don't handle MPOL_LOCAL like a fake
    MPOL_PREFERRED policy"


-- 
2.25.1
 
https://gitee.com/openeuler/kernel/issues/I8PGB8 
 
Link: https://gitee.com/openeuler/kernel/pulls/3461

 

Reviewed-by: default avatarKefeng Wang <wangkefeng.wang@huawei.com>
Signed-off-by: default avatarJialin Zhang <zhangjialin11@huawei.com>
parents e2b3e0a4 4a7002a0
Loading
Loading
Loading
Loading
+1 −0
Original line number Diff line number Diff line
@@ -59,6 +59,7 @@ enum {
 * are never OR'ed into the mode in mempolicy API arguments.
 */
#define MPOL_F_SHARED  (1 << 0)	/* identify shared policies */
#define MPOL_F_LOCAL   (1 << 1)	/* preferred local allocation */
#define MPOL_F_MOF	(1 << 3) /* this policy wants migrate on fault */
#define MPOL_F_MORON	(1 << 4) /* Migrate On protnone Reference On Node */

+86 −56
Original line number Diff line number Diff line
@@ -128,7 +128,8 @@ enum zone_type policy_zone = 0;
/*
 * NOTE(review): extraction dropped this diff hunk's +/- markers, so the
 * removed initializer (.mode = MPOL_LOCAL) and the added pair
 * (.mode = MPOL_PREFERRED with .flags = MPOL_F_LOCAL) appear together;
 * per the revert described in the commit message above, the resulting
 * code should carry only the MPOL_PREFERRED + MPOL_F_LOCAL form.
 * Consult the original patch before relying on this span.
 */
static struct mempolicy_wrapper default_policy = {
	.policy = {
		.refcnt = ATOMIC_INIT(1), /* never free it */
		.mode = MPOL_LOCAL,
		.mode = MPOL_PREFERRED,
		.flags = MPOL_F_LOCAL,
	}
};

@@ -243,8 +244,11 @@ static int mpol_new_interleave(struct mempolicy *pol, const nodemask_t *nodes)

/*
 * Set up an MPOL_PREFERRED policy from the caller-supplied nodemask.
 *
 * NOTE(review): this span interleaves the pre- and post-revert versions
 * of a diff hunk (the +/- markers were lost in extraction): the bare
 * nodes_empty() check at the top belongs to the removed version, while
 * the !nodes / local-allocation branch chain below belongs to the added
 * (reverted-to) version — consult the original patch before trusting
 * this exact body.
 */
static int mpol_new_preferred(struct mempolicy *pol, const nodemask_t *nodes)
{
	if (nodes_empty(*nodes))
		return -EINVAL;
	if (!nodes)
		pol->flags |= MPOL_F_LOCAL;	/* local allocation */
	else if (nodes_empty(*nodes))
		return -EINVAL;			/*  no allowed nodes */
	else
		pol->v.preferred_node = first_node(*nodes);
	return 0;
}
@@ -268,7 +272,8 @@ static int mpol_new_bind(struct mempolicy *pol, const nodemask_t *nodes)
/*
 * mpol_set_nodemask is called after mpol_new() to set up the nodemask, if
 * any, for the new policy.  mpol_new() has already validated the nodes
 * parameter with respect to the policy mode and flags.
 * parameter with respect to the policy mode and flags.  But, we need to
 * handle an empty nodemask with MPOL_PREFERRED here.
 *
 * Must be called holding task's alloc_lock to protect task's mems_allowed
 * and mempolicy.  May also be called holding the mmap_lock for write.
@@ -278,14 +283,9 @@ static int mpol_set_nodemask(struct mempolicy *pol,
{
	int ret;

	/*
	 * Default (pol==NULL) resp. local memory policies are not a
	 * subject of any remapping. They also do not need any special
	 * constructor.
	 */
	if (!pol || pol->mode == MPOL_LOCAL)
	/* if mode is MPOL_DEFAULT, pol is NULL. This is right. */
	if (pol == NULL)
		return 0;

	/* Check N_MEMORY */
	nodes_and(nsc->mask1,
		  cpuset_current_mems_allowed, node_states[N_MEMORY]);
@@ -294,7 +294,9 @@ static int mpol_set_nodemask(struct mempolicy *pol,
	nodes_or(nsc->mask1, cdmmask, nsc->mask1);
#endif
	VM_BUG_ON(!nodes);

	if (pol->mode == MPOL_PREFERRED && nodes_empty(*nodes))
		nodes = NULL;	/* explicit local allocation */
	else {
		if (pol->flags & MPOL_F_RELATIVE_NODES)
			mpol_relative_nodemask(&nsc->mask2, nodes, &nsc->mask1);
		else
@@ -303,9 +305,14 @@ static int mpol_set_nodemask(struct mempolicy *pol,
		if (mpol_store_user_nodemask(pol))
			pol->w.user_nodemask = *nodes;
		else
		pol->w.cpuset_mems_allowed = cpuset_current_mems_allowed;
			pol->w.cpuset_mems_allowed =
						cpuset_current_mems_allowed;
	}

	if (nodes)
		ret = mpol_ops[pol->mode].create(pol, &nsc->mask2);
	else
		ret = mpol_ops[pol->mode].create(pol, NULL);
	return ret;
}

@@ -339,14 +346,13 @@ static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags,
			if (((flags & MPOL_F_STATIC_NODES) ||
			     (flags & MPOL_F_RELATIVE_NODES)))
				return ERR_PTR(-EINVAL);

			mode = MPOL_LOCAL;
		}
	} else if (mode == MPOL_LOCAL) {
		if (!nodes_empty(*nodes) ||
		    (flags & MPOL_F_STATIC_NODES) ||
		    (flags & MPOL_F_RELATIVE_NODES))
			return ERR_PTR(-EINVAL);
		mode = MPOL_PREFERRED;
	} else if (nodes_empty(*nodes))
		return ERR_PTR(-EINVAL);
	policy = kmem_cache_alloc(policy_cache, GFP_KERNEL);
@@ -395,6 +401,30 @@ static void mpol_rebind_nodemask(struct mempolicy *pol, const nodemask_t *nodes)

/*
 * Rebind an MPOL_PREFERRED policy to a new set of allowed nodes
 * (e.g. after the task's cpuset mems_allowed changes).
 *
 * MPOL_F_STATIC_NODES: keep the user's originally requested node if it
 * is still in the new allowed set; otherwise fall back to local
 * allocation by setting MPOL_F_LOCAL.
 * MPOL_F_RELATIVE_NODES: remap the user's mask relative to the new
 * allowed set and prefer its first node.
 * Otherwise, unless the policy is already local (MPOL_F_LOCAL), remap
 * the preferred node from the previously recorded allowed mask into the
 * new one and remember the new mask for future rebinds.
 */
static void mpol_rebind_preferred(struct mempolicy *pol,
						const nodemask_t *nodes)
{
	nodemask_t tmp;

	if (pol->flags & MPOL_F_STATIC_NODES) {
		int node = first_node(pol->w.user_nodemask);

		if (node_isset(node, *nodes)) {
			pol->v.preferred_node = node;
			pol->flags &= ~MPOL_F_LOCAL;
		} else
			pol->flags |= MPOL_F_LOCAL;
	} else if (pol->flags & MPOL_F_RELATIVE_NODES) {
		mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
		pol->v.preferred_node = first_node(tmp);
	} else if (!(pol->flags & MPOL_F_LOCAL)) {
		pol->v.preferred_node = node_remap(pol->v.preferred_node,
						   pol->w.cpuset_mems_allowed,
						   *nodes);
		pol->w.cpuset_mems_allowed = *nodes;
	}
}

/*
 * Rebind hook for MPOL_PREFERRED_MANY: simply record the new
 * cpuset-allowed node mask; no node remapping is performed here.
 */
static void mpol_rebind_preferred_many(struct mempolicy *pol,
						const nodemask_t *nodes)
{
	pol->w.cpuset_mems_allowed = *nodes;
}
@@ -410,7 +440,7 @@ static void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *newmask)
{
	if (!pol || pol->mode == MPOL_LOCAL)
		return;
	if (!mpol_store_user_nodemask(pol) &&
	if (!mpol_store_user_nodemask(pol) && !(pol->flags & MPOL_F_LOCAL) &&
	    nodes_equal(pol->w.cpuset_mems_allowed, *newmask))
		return;

@@ -461,12 +491,9 @@ static const struct mempolicy_operations mpol_ops[MPOL_MAX] = {
		.create = mpol_new_bind,
		.rebind = mpol_rebind_nodemask,
	},
	[MPOL_LOCAL] = {
		.rebind = mpol_rebind_default,
	},
	[MPOL_PREFERRED_MANY] = {
		.create = mpol_new_preferred_many,
		.rebind = mpol_rebind_preferred,
		.rebind = mpol_rebind_preferred_many,
	},
};

@@ -950,12 +977,10 @@ static void get_policy_nodemask(struct mempolicy *p, nodemask_t *nodes)
	case MPOL_PREFERRED_MANY:
		*nodes = p->v.nodes;
		break;
	case MPOL_LOCAL:
		/* return empty node mask for local allocation */
		break;

	case MPOL_PREFERRED:
		if (!(p->flags & MPOL_F_LOCAL))
			node_set(p->v.preferred_node, *nodes);
		/* else return empty node mask for local allocation */
		break;
	default:
		BUG();
@@ -2035,7 +2060,7 @@ static int policy_node(gfp_t gfp, struct mempolicy *policy, int nd)
	struct mempolicy_wrapper *warpper;

	warpper = container_of(policy, struct mempolicy_wrapper, policy);
	if (policy->mode == MPOL_PREFERRED) {
	if (policy->mode == MPOL_PREFERRED && !(policy->flags & MPOL_F_LOCAL)) {
		nd = policy->v.preferred_node;
	} else {
		/*
@@ -2086,11 +2111,14 @@ unsigned int mempolicy_slab_node(void)
		return node;

	policy = current->mempolicy;
	if (!policy)
	if (!policy || policy->flags & MPOL_F_LOCAL)
		return node;

	switch (policy->mode) {
	case MPOL_PREFERRED:
		/*
		 * handled MPOL_F_LOCAL above
		 */
		return policy->v.preferred_node;

	case MPOL_INTERLEAVE:
@@ -2112,8 +2140,6 @@ unsigned int mempolicy_slab_node(void)
							&policy->v.nodes);
		return z->zone ? zone_to_nid(z->zone) : node;
	}
	case MPOL_LOCAL:
		return node;

	default:
		BUG();
@@ -2237,6 +2263,9 @@ bool init_nodemask_of_mempolicy(nodemask_t *mask)
	mempolicy = current->mempolicy;
	switch (mempolicy->mode) {
	case MPOL_PREFERRED:
		if (mempolicy->flags & MPOL_F_LOCAL)
			nid = numa_node_id();
		else
			nid = mempolicy->v.preferred_node;
		init_nodemask_of_node(mask, nid);
		break;
@@ -2247,11 +2276,6 @@ bool init_nodemask_of_mempolicy(nodemask_t *mask)
		*mask =  mempolicy->v.nodes;
		break;

	case MPOL_LOCAL:
		nid = numa_node_id();
		init_nodemask_of_node(mask, nid);
		break;

	default:
		BUG();
	}
@@ -2407,7 +2431,7 @@ struct page *alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
		 * If the policy is interleave or does not allow the current
		 * node in its nodemask, we allocate the standard way.
		 */
		if (pol->mode == MPOL_PREFERRED)
		if (pol->mode == MPOL_PREFERRED && !(pol->flags & MPOL_F_LOCAL))
			hpage_node = pol->v.preferred_node;

		nmask = policy_nodemask(gfp, pol);
@@ -2557,13 +2581,14 @@ bool __mpol_equal(struct mempolicy *a, struct mempolicy *b)

	switch (a->mode) {
	case MPOL_BIND:
	case MPOL_PREFERRED_MANY:
	case MPOL_INTERLEAVE:
		return !!nodes_equal(a->v.nodes, b->v.nodes);
	case MPOL_PREFERRED:
	case MPOL_PREFERRED_MANY:
		return a->v.preferred_node == b->v.preferred_node;
	case MPOL_LOCAL:
		/* a's ->flags is the same as b's */
		if (a->flags & MPOL_F_LOCAL)
			return true;
		return a->v.preferred_node == b->v.preferred_node;
	default:
		BUG();
		return false;
@@ -2701,11 +2726,10 @@ int mpol_misplaced(struct page *page, struct vm_area_struct *vma, unsigned long
		break;

	case MPOL_PREFERRED:
		polnid = pol->v.preferred_node;
		break;

	case MPOL_LOCAL:
		if (pol->flags & MPOL_F_LOCAL)
			polnid = numa_node_id();
		else
			polnid = pol->v.preferred_node;
		break;

	case MPOL_BIND:
@@ -3071,6 +3095,9 @@ void numa_default_policy(void)
 * Parse and format mempolicy from/to strings
 */

/*
 * "local" is implemented internally by MPOL_PREFERRED with MPOL_F_LOCAL flag.
 */
static const char * const policy_modes[] =
{
	[MPOL_DEFAULT]    = "default",
@@ -3149,6 +3176,7 @@ int mpol_parse_str(char *str, struct mempolicy **mpol)
		 */
		if (nodelist)
			goto out;
		mode = MPOL_PREFERRED;
		break;
	case MPOL_DEFAULT:
		/*
@@ -3193,7 +3221,7 @@ int mpol_parse_str(char *str, struct mempolicy **mpol)
	else if (nodelist)
		new->v.preferred_node = first_node(nodes);
	else
		new->mode = MPOL_LOCAL;
		new->flags |= MPOL_F_LOCAL;

	/*
	 * Save nodes for contextualization: this will be used to "clone"
@@ -3240,9 +3268,11 @@ void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol)

	switch (mode) {
	case MPOL_DEFAULT:
	case MPOL_LOCAL:
		break;
	case MPOL_PREFERRED:
		if (flags & MPOL_F_LOCAL)
			mode = MPOL_LOCAL;
		else
			node_set(pol->v.preferred_node, nodes);
		break;
	case MPOL_PREFERRED_MANY: