Commit 54ab4ff4 authored by Peter Zijlstra, committed by Ingo Molnar

sched: Move sched domain storage into the topology list

In order to remove the last dependency on the static domain levels,
move the sd_data storage into the topology structure.
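
In concrete terms, the fixed SD_LV_MAX-sized array of sd_data slots in
struct s_data goes away, and each topology level carries its own per-cpu
storage instead. Condensed from the diff below, the two structures change
like this:

	/* Before: one sd_data slot per static domain level. */
	struct s_data {
		struct sched_domain ** __percpu sd;
		struct sd_data		sdd[SD_LV_MAX];
		struct root_domain	*rd;
	};

	/* After: the per-level storage lives in the topology level itself,
	 * and sched_domain_init_f takes the level instead of s_data. */
	struct sched_domain_topology_level {
		sched_domain_init_f init;
		sched_domain_mask_f mask;
		struct sd_data      data;
	};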

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Nick Piggin <npiggin@kernel.dk>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Link: http://lkml.kernel.org/r/20110407122942.924926412@chello.nl
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent d069b916
+77 −52
@@ -6837,7 +6837,6 @@ struct sd_data {
 
 struct s_data {
 	struct sched_domain ** __percpu sd;
-	struct sd_data 		sdd[SD_LV_MAX];
 	struct root_domain	*rd;
 };
 
@@ -6848,12 +6847,15 @@ enum s_alloc {
 	sa_none,
 };
 
-typedef struct sched_domain *(*sched_domain_init_f)(struct s_data *d, int cpu);
+struct sched_domain_topology_level;
+
+typedef struct sched_domain *(*sched_domain_init_f)(struct sched_domain_topology_level *tl, int cpu);
 typedef const struct cpumask *(*sched_domain_mask_f)(int cpu);
 
 struct sched_domain_topology_level {
 	sched_domain_init_f init;
 	sched_domain_mask_f mask;
+	struct sd_data      data;
 };
 
 /*
@@ -6959,13 +6961,14 @@ static void init_sched_groups_power(int cpu, struct sched_domain *sd)
 #endif
 
 #define SD_INIT_FUNC(type)						\
-static noinline struct sched_domain *sd_init_##type(struct s_data *d, int cpu) \
+static noinline struct sched_domain *					\
+sd_init_##type(struct sched_domain_topology_level *tl, int cpu)	\
 {									\
-	struct sched_domain *sd = *per_cpu_ptr(d->sdd[SD_LV_##type].sd, cpu);  \
+	struct sched_domain *sd = *per_cpu_ptr(tl->data.sd, cpu);	\
 	*sd = SD_##type##_INIT;						\
 	sd->level = SD_LV_##type;					\
 	SD_INIT_NAME(sd, type);						\
-	sd->private = &d->sdd[SD_LV_##type];				\
+	sd->private = &tl->data;					\
 	return sd;							\
 }
 
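For reference, the rewritten macro expands mechanically; for example,
SD_INIT_FUNC(CPU) now produces an initializer that pulls its per-cpu
storage out of the topology level it is handed:

	static noinline struct sched_domain *
	sd_init_CPU(struct sched_domain_topology_level *tl, int cpu)
	{
		struct sched_domain *sd = *per_cpu_ptr(tl->data.sd, cpu);
		*sd = SD_CPU_INIT;
		sd->level = SD_LV_CPU;
		SD_INIT_NAME(sd, CPU);
		sd->private = &tl->data;
		return sd;
	}
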
@@ -7019,11 +7022,12 @@ static void set_domain_attribute(struct sched_domain *sd,
 	}
 }
 
+static void __sdt_free(const struct cpumask *cpu_map);
+static int __sdt_alloc(const struct cpumask *cpu_map);
+
 static void __free_domain_allocs(struct s_data *d, enum s_alloc what,
 				 const struct cpumask *cpu_map)
 {
-	int i, j;
-
 	switch (what) {
 	case sa_rootdomain:
 		if (!atomic_read(&d->rd->refcount))
@@ -7031,14 +7035,7 @@ static void __free_domain_allocs(struct s_data *d, enum s_alloc what,
 	case sa_sd:
 		free_percpu(d->sd); /* fall through */
 	case sa_sd_storage:
-		for (i = 0; i < SD_LV_MAX; i++) {
-			for_each_cpu(j, cpu_map) {
-				kfree(*per_cpu_ptr(d->sdd[i].sd, j));
-				kfree(*per_cpu_ptr(d->sdd[i].sg, j));
-			}
-			free_percpu(d->sdd[i].sd);
-			free_percpu(d->sdd[i].sg);
-		} /* fall through */
+		__sdt_free(cpu_map); /* fall through */
 	case sa_none:
 		break;
 	}
@@ -7047,38 +7044,10 @@ static void __free_domain_allocs(struct s_data *d, enum s_alloc what,
 static enum s_alloc __visit_domain_allocation_hell(struct s_data *d,
 						   const struct cpumask *cpu_map)
 {
-	int i, j;
-
 	memset(d, 0, sizeof(*d));
 
-	for (i = 0; i < SD_LV_MAX; i++) {
-		d->sdd[i].sd = alloc_percpu(struct sched_domain *);
-		if (!d->sdd[i].sd)
-			return sa_sd_storage;
-
-		d->sdd[i].sg = alloc_percpu(struct sched_group *);
-		if (!d->sdd[i].sg)
-			return sa_sd_storage;
-
-		for_each_cpu(j, cpu_map) {
-			struct sched_domain *sd;
-			struct sched_group *sg;
-
-			sd = kzalloc_node(sizeof(struct sched_domain) + cpumask_size(),
-					GFP_KERNEL, cpu_to_node(j));
-			if (!sd)
-				return sa_sd_storage;
-
-			*per_cpu_ptr(d->sdd[i].sd, j) = sd;
-
-			sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(),
-					GFP_KERNEL, cpu_to_node(j));
-			if (!sg)
-				return sa_sd_storage;
-
-			*per_cpu_ptr(d->sdd[i].sg, j) = sg;
-		}
-	}
+	if (__sdt_alloc(cpu_map))
+		return sa_sd_storage;
 	d->sd = alloc_percpu(struct sched_domain *);
 	if (!d->sd)
 		return sa_sd_storage;
@@ -7137,12 +7106,68 @@ static struct sched_domain_topology_level default_topology[] = {
 
 static struct sched_domain_topology_level *sched_domain_topology = default_topology;
 
+static int __sdt_alloc(const struct cpumask *cpu_map)
+{
+	struct sched_domain_topology_level *tl;
+	int j;
+
+	for (tl = sched_domain_topology; tl->init; tl++) {
+		struct sd_data *sdd = &tl->data;
+
+		sdd->sd = alloc_percpu(struct sched_domain *);
+		if (!sdd->sd)
+			return -ENOMEM;
+
+		sdd->sg = alloc_percpu(struct sched_group *);
+		if (!sdd->sg)
+			return -ENOMEM;
+
+		for_each_cpu(j, cpu_map) {
+			struct sched_domain *sd;
+			struct sched_group *sg;
+
+			sd = kzalloc_node(sizeof(struct sched_domain) + cpumask_size(),
+					GFP_KERNEL, cpu_to_node(j));
+			if (!sd)
+				return -ENOMEM;
+
+			*per_cpu_ptr(sdd->sd, j) = sd;
+
+			sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(),
+					GFP_KERNEL, cpu_to_node(j));
+			if (!sg)
+				return -ENOMEM;
+
+			*per_cpu_ptr(sdd->sg, j) = sg;
+		}
+	}
+
+	return 0;
+}
+
+static void __sdt_free(const struct cpumask *cpu_map)
+{
+	struct sched_domain_topology_level *tl;
+	int j;
+
+	for (tl = sched_domain_topology; tl->init; tl++) {
+		struct sd_data *sdd = &tl->data;
+
+		for_each_cpu(j, cpu_map) {
+			kfree(*per_cpu_ptr(sdd->sd, j));
+			kfree(*per_cpu_ptr(sdd->sg, j));
+		}
+		free_percpu(sdd->sd);
+		free_percpu(sdd->sg);
+	}
+}
+
 struct sched_domain *build_sched_domain(struct sched_domain_topology_level *tl,
 		struct s_data *d, const struct cpumask *cpu_map,
 		struct sched_domain_attr *attr, struct sched_domain *child,
 		int cpu)
 {
-	struct sched_domain *sd = tl->init(d, cpu);
+	struct sched_domain *sd = tl->init(tl, cpu);
 	if (!sd)
 		return child;
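
Since tl->init() now receives the topology level itself, each sd_init_*()
finds its per-cpu storage through tl->data rather than indexing s_data by
static domain level. A minimal sketch of how a caller walks the topology
table and threads the child domain through, mirroring the iteration
__sdt_alloc() uses above (the surrounding variables d, cpu_map, attr and
cpu are assumed to be in scope; they are not part of this hunk):

	struct sched_domain_topology_level *tl;
	struct sched_domain *sd = NULL;

	/* The table ends with a sentinel entry whose ->init is NULL. */
	for (tl = sched_domain_topology; tl->init; tl++)
		sd = build_sched_domain(tl, &d, cpu_map, attr, sd, cpu);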