Commit 13c8da5d authored by Thomas Gleixner

Merge branch 'sched/core' into core/mm

Pull the migrate disable mechanics, which are a prerequisite for preemptible
kmap_local().
parents a0e16997 74d862b6
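For context, migrate_disable()/migrate_enable() (declared in include/linux/preempt.h by this series) pin a task to its current CPU while leaving preemption enabled. A minimal sketch of the intended usage pattern follows; example_percpu_section() is a hypothetical name, not code from this merge:

#include <linux/preempt.h>

static void example_percpu_section(void)
{
	/* Preemption stays possible, but migration to another CPU does not. */
	migrate_disable();

	/*
	 * Per-CPU state touched here is guaranteed to stay on this CPU,
	 * which is what a preemptible kmap_local() mapping relies on.
	 */

	migrate_enable();
}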
+11 −15
@@ -65,21 +65,17 @@ of the SMP domain will span the entire machine, with each group having the
 cpumask of a node. Or, you could do multi-level NUMA or Opteron, for example,
 might have just one domain covering its one NUMA level.
 
-The implementor should read comments in include/linux/sched.h:
-struct sched_domain fields, SD_FLAG_*, SD_*_INIT to get an idea of
-the specifics and what to tune.
+The implementor should read comments in include/linux/sched/sd_flags.h:
+SD_* to get an idea of the specifics and what to tune for the SD flags
+of a sched_domain.
 
-Architectures may retain the regular override the default SD_*_INIT flags
-while using the generic domain builder in kernel/sched/core.c if they wish to
-retain the traditional SMT->SMP->NUMA topology (or some subset of that). This
-can be done by #define'ing ARCH_HASH_SCHED_TUNE.
-
-Alternatively, the architecture may completely override the generic domain
-builder by #define'ing ARCH_HASH_SCHED_DOMAIN, and exporting your
-arch_init_sched_domains function. This function will attach domains to all
-CPUs using cpu_attach_domain.
+Architectures may override the generic domain builder and the default SD flags
+for a given topology level by creating a sched_domain_topology_level array and
+calling set_sched_topology() with this array as the parameter.
 
 The sched-domains debugging infrastructure can be enabled by enabling
-CONFIG_SCHED_DEBUG. This enables an error checking parse of the sched domains
-which should catch most possible errors (described above). It also prints out
-the domain structure in a visual format.
+CONFIG_SCHED_DEBUG and adding 'sched_debug' to your cmdline. If you forgot to
+tweak your cmdline, you can also flip the /sys/kernel/debug/sched_debug
+knob. This enables an error checking parse of the sched domains which should
+catch most possible errors (described above). It also prints out the domain
+structure in a visual format.
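To make the new documentation paragraph concrete, here is a minimal sketch of such an override, modeled on the scheduler's own default_topology[] in kernel/sched/topology.c. arch_core_mask(), arch_core_flags() and arch_setup_sched_topology() are hypothetical names; sched_domain_topology_level, SD_INIT_NAME(), cpu_cpu_mask() and set_sched_topology() are the generic interfaces the text refers to:

#include <linux/init.h>
#include <linux/sched/topology.h>
#include <linux/topology.h>

/* CPUs in the same package form one MC-level domain. */
static const struct cpumask *arch_core_mask(int cpu)
{
	return topology_core_cpumask(cpu);
}

/* Default SD flags for the MC level: these cores share cache resources. */
static int arch_core_flags(void)
{
	return SD_SHARE_PKG_RESOURCES;
}

static struct sched_domain_topology_level arch_topology[] = {
	{ arch_core_mask, arch_core_flags, SD_INIT_NAME(MC) },
	{ cpu_cpu_mask, SD_INIT_NAME(DIE) },
	{ NULL, },
};

void __init arch_setup_sched_topology(void)
{
	/* Replace the default topology levels with the array above. */
	set_sched_topology(arch_topology);
}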
+10 −0
@@ -213,6 +213,7 @@ static DEFINE_STATIC_KEY_FALSE(amu_fie_key);
 
 static int __init init_amu_fie(void)
 {
+	bool invariance_status = topology_scale_freq_invariant();
 	cpumask_var_t valid_cpus;
 	bool have_policy = false;
 	int ret = 0;
@@ -255,6 +256,15 @@ static int __init init_amu_fie(void)
 	if (!topology_scale_freq_invariant())
 		static_branch_disable(&amu_fie_key);
 
+	/*
+	 * Task scheduler behavior depends on frequency invariance support,
+	 * either cpufreq or counter driven. If the support status changes as
+	 * a result of counter initialisation and use, retrigger the build of
+	 * scheduling domains to ensure the information is propagated properly.
+	 */
+	if (invariance_status != topology_scale_freq_invariant())
+		rebuild_sched_domains_energy();
+
 free_valid_mask:
 	free_cpumask_var(valid_cpus);
 
+2 −2
@@ -382,9 +382,9 @@ static inline void task_context_switch_counts(struct seq_file *m,
 static void task_cpus_allowed(struct seq_file *m, struct task_struct *task)
 {
 	seq_printf(m, "Cpus_allowed:\t%*pb\n",
-		   cpumask_pr_args(task->cpus_ptr));
+		   cpumask_pr_args(&task->cpus_mask));
 	seq_printf(m, "Cpus_allowed_list:\t%*pbl\n",
-		   cpumask_pr_args(task->cpus_ptr));
+		   cpumask_pr_args(&task->cpus_mask));
 }
 
 static inline void task_core_dumping(struct seq_file *m, struct mm_struct *mm)
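The two-line switch above matters because of migrate disable: while a task runs in a migrate-disabled section, p->cpus_ptr can temporarily point at a single-CPU mask, whereas p->cpus_mask keeps the user-visible affinity, so proc now prints the latter. A hypothetical userspace sketch (not part of this commit) that reads the resulting fields:

#include <stdio.h>
#include <string.h>

int main(void)
{
	char line[256];
	FILE *f = fopen("/proc/self/status", "r");

	if (!f)
		return 1;
	/* Print the Cpus_allowed and Cpus_allowed_list lines emitted above. */
	while (fgets(line, sizeof(line), f))
		if (!strncmp(line, "Cpus_allowed", 12))
			fputs(line, stdout);
	fclose(f);
	return 0;
}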
+1 −0
@@ -152,6 +152,7 @@ enum cpuhp_state {
 	CPUHP_AP_ONLINE,
 	CPUHP_TEARDOWN_CPU,
 	CPUHP_AP_ONLINE_IDLE,
+	CPUHP_AP_SCHED_WAIT_EMPTY,
 	CPUHP_AP_SMPBOOT_THREADS,
 	CPUHP_AP_X86_VDSO_VMA_ONLINE,
 	CPUHP_AP_IRQ_AFFINITY_ONLINE,
+6 −0
@@ -199,6 +199,11 @@ static inline int cpumask_any_and_distribute(const struct cpumask *src1p,
 	return cpumask_next_and(-1, src1p, src2p);
 }
 
+static inline int cpumask_any_distribute(const struct cpumask *srcp)
+{
+	return cpumask_first(srcp);
+}
+
 #define for_each_cpu(cpu, mask)			\
 	for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask)
 #define for_each_cpu_not(cpu, mask)		\
@@ -252,6 +257,7 @@ int cpumask_any_but(const struct cpumask *mask, unsigned int cpu);
 unsigned int cpumask_local_spread(unsigned int i, int node);
 int cpumask_any_and_distribute(const struct cpumask *src1p,
 			       const struct cpumask *src2p);
+int cpumask_any_distribute(const struct cpumask *srcp);
 
 /**
  * for_each_cpu - iterate over every cpu in a mask
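A hedged usage sketch for the helper declared above; pick_target_cpu() is a hypothetical caller. The out-of-line SMP implementation is expected to spread successive picks across the mask, while the UP stub earlier in this diff simply returns the first (only) CPU:

#include <linux/cpumask.h>
#include <linux/errno.h>

static int pick_target_cpu(const struct cpumask *allowed)
{
	int cpu = cpumask_any_distribute(allowed);

	/* An empty mask yields a CPU number >= nr_cpu_ids. */
	return cpu < nr_cpu_ids ? cpu : -ENODEV;
}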