Commit 0b30191b authored by Michael Ellerman's avatar Michael Ellerman
Browse files

Merge branch 'topic/irqs-off-activate-mm' into next

Merge Nick's series to add ARCH_WANT_IRQS_OFF_ACTIVATE_MM.
parents d208e13c a665eec0
Loading
Loading
Loading
Loading
+7 −0
Original line number | Diff line number | Diff line
@@ -414,6 +414,13 @@ config MMU_GATHER_NO_GATHER
	bool
	depends on MMU_GATHER_TABLE_FREE

# Added by this commit. Transitional option: an arch selects it to declare
# that it calls activate_mm() with interrupts disabled (powerpc does so in
# this same series) — see the help text below.
config ARCH_WANT_IRQS_OFF_ACTIVATE_MM
	bool
	help
	  Temporary select until all architectures can be converted to have
	  irqs disabled over activate_mm. Architectures that do IPI based TLB
	  shootdowns should enable this.

config ARCH_HAVE_NMI_SAFE_CMPXCHG
	bool

+1 −0
Original line number | Diff line number | Diff line
@@ -151,6 +151,7 @@ config PPC
	select ARCH_USE_QUEUED_RWLOCKS		if PPC_QUEUED_SPINLOCKS
	select ARCH_USE_QUEUED_SPINLOCKS	if PPC_QUEUED_SPINLOCKS
	select ARCH_WANT_IPC_PARSE_VERSION
	select ARCH_WANT_IRQS_OFF_ACTIVATE_MM
	select ARCH_WEAK_RELEASE_ACQUIRE
	select BINFMT_ELF
	select BUILDTIME_TABLE_SORT
+1 −1
Original line number | Diff line number | Diff line
@@ -244,7 +244,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 */
/*
 * NOTE(review): this span is a rendered diff ("+1 −1" hunk), not plain
 * source — the scraper dropped the +/- markers, so the removed line and
 * its replacement both appear below. Only ONE of the two calls exists in
 * the real file.
 */
static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)
{
	/* old line (removed by this commit): */
	switch_mm(prev, next, current);
	/*
	 * new line (added): with ARCH_WANT_IRQS_OFF_ACTIVATE_MM selected,
	 * activate_mm() runs with interrupts already disabled (per the Kconfig
	 * help text added in this commit), so the _irqs_off variant is used.
	 */
	switch_mm_irqs_off(prev, next, current);
}

/* We don't currently use enter_lazy_tlb() for anything */
+0 −13
Original line number | Diff line number | Diff line
@@ -66,19 +66,6 @@ static inline int mm_is_thread_local(struct mm_struct *mm)
		return false;
	return cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm));
}
/*
 * NOTE(review): this entire function is REMOVED by the commit (the hunk is
 * "+0 −13"). It reset an mm back to thread-local state: account exactly one
 * active CPU, then shrink mm_cpumask to just the current CPU. Its job moves
 * into do_exit_flush_lazy_tlb(), where each CPU clears its own mask bit.
 */
static inline void mm_reset_thread_local(struct mm_struct *mm)
{
	/* No coprocessor contexts may still be referencing this mm. */
	WARN_ON(atomic_read(&mm->context.copros) > 0);
	/*
	 * It's possible for mm_access to take a reference on mm_users to
	 * access the remote mm from another thread, but it's not allowed
	 * to set mm_cpumask, so mm_users may be > 1 here.
	 */
	WARN_ON(current->mm != mm);
	/* Collapse the active-CPU accounting to exactly this CPU. */
	atomic_set(&mm->context.active_cpus, 1);
	cpumask_clear(mm_cpumask(mm));
	cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
}
#else /* CONFIG_PPC_BOOK3S_64 */
static inline int mm_is_thread_local(struct mm_struct *mm)
{
+16 −7
Original line number | Diff line number | Diff line
@@ -645,19 +645,29 @@ static void do_exit_flush_lazy_tlb(void *arg)
	struct mm_struct *mm = arg;
	unsigned long pid = mm->context.id;

	/*
	 * A kthread could have done a mmget_not_zero() after the flushing CPU
	 * checked mm_is_singlethreaded, and be in the process of
	 * kthread_use_mm when interrupted here. In that case, current->mm will
	 * be set to mm, because kthread_use_mm() setting ->mm and switching to
	 * the mm is done with interrupts off.
	 */
	if (current->mm == mm)
		/*
		 * NOTE(review): diff rendering — "return" is the removed line;
		 * "goto out_flush" replaces it so the local CPU still performs
		 * the TLB flush at the bottom. Only one exists in real source.
		 */
		return; /* Local CPU */
		goto out_flush;

	if (current->active_mm == mm) {
		/*
		 * Must be a kernel thread because sender is single-threaded.
		 */
		/* old: fatal BUG_ON; replaced by the non-fatal warning below. */
		BUG_ON(current->mm);
		WARN_ON_ONCE(current->mm != NULL);
		/* Is a kernel thread and is using mm as the lazy tlb */
		/*
		 * NOTE(review): added lines pin init_mm before switching to it
		 * and drop the lazy reference on mm afterwards; the plain
		 * switch_mm() (old) becomes switch_mm_irqs_off() (new) since
		 * this runs in IPI context with interrupts off.
		 */
		mmgrab(&init_mm);
		switch_mm(mm, &init_mm, current);
		current->active_mm = &init_mm;
		switch_mm_irqs_off(mm, &init_mm, current);
		mmdrop(mm);
	}

	/*
	 * Added lines: each CPU now removes itself from the mm's active-CPU
	 * count and cpumask, replacing the sender-side mm_reset_thread_local()
	 * (deleted elsewhere in this commit).
	 */
	atomic_dec(&mm->context.active_cpus);
	cpumask_clear_cpu(smp_processor_id(), mm_cpumask(mm));

out_flush:
	_tlbiel_pid(pid, RIC_FLUSH_ALL);
}

@@ -672,7 +682,6 @@ static void exit_flush_lazy_tlbs(struct mm_struct *mm)
	 */
	smp_call_function_many(mm_cpumask(mm), do_exit_flush_lazy_tlb,
				(void *)mm, 1);
	mm_reset_thread_local(mm);
}

void radix__flush_tlb_mm(struct mm_struct *mm)
Loading