Unverified Commit f6635f87 authored by Gary Guo, committed by Palmer Dabbelt

riscv: move switch_mm to its own file



switch_mm is an expensive operation that has two users.
flush_icache_deferred is only called within switch_mm and can be moved
along with it. The function is expected to become more complicated when
ASID support is added, so clean it up eagerly.

By moving them to a separate file we also remove an excessive
dependency on tlbflush.h and cacheflush.h.

Signed-off-by: Gary Guo <gary@garyguo.net>
Reviewed-by: Anup Patel <anup@brainfault.org>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Palmer Dabbelt <palmer@sifive.com>
parent 58de7754
arch/riscv/include/asm/mmu_context.h
+2 −52
@@ -20,8 +20,6 @@

#include <linux/mm.h>
#include <linux/sched.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>

static inline void enter_lazy_tlb(struct mm_struct *mm,
	struct task_struct *task)
@@ -39,56 +37,8 @@ static inline void destroy_context(struct mm_struct *mm)
{
}

/*
 * When necessary, performs a deferred icache flush for the given MM context,
 * on the local CPU.  RISC-V has no direct mechanism for instruction cache
 * shoot downs, so instead we send an IPI that informs the remote harts they
 * need to flush their local instruction caches.  To avoid pathologically slow
 * behavior in a common case (a bunch of single-hart processes on a many-hart
 * machine, ie 'make -j') we avoid the IPIs for harts that are not currently
 * executing a MM context and instead schedule a deferred local instruction
 * cache flush to be performed before execution resumes on each hart.  This
 * actually performs that local instruction cache flush, which implicitly only
 * refers to the current hart.
 */
static inline void flush_icache_deferred(struct mm_struct *mm)
{
#ifdef CONFIG_SMP
	unsigned int cpu = smp_processor_id();
	cpumask_t *mask = &mm->context.icache_stale_mask;

	if (cpumask_test_cpu(cpu, mask)) {
		cpumask_clear_cpu(cpu, mask);
		/*
		 * Ensure the remote hart's writes are visible to this hart.
		 * This pairs with a barrier in flush_icache_mm.
		 */
		smp_mb();
		local_flush_icache_all();
	}
#endif
}

static inline void switch_mm(struct mm_struct *prev,
	struct mm_struct *next, struct task_struct *task)
{
	if (likely(prev != next)) {
		/*
		 * Mark the current MM context as inactive, and the next as
		 * active.  This is at least used by the icache flushing
		 * routines in order to determine who should
		 */
		unsigned int cpu = smp_processor_id();

		cpumask_clear_cpu(cpu, mm_cpumask(prev));
		cpumask_set_cpu(cpu, mm_cpumask(next));

		csr_write(CSR_SATP, virt_to_pfn(next->pgd) | SATP_MODE);
		local_flush_tlb_all();

		flush_icache_deferred(next);
	}
}
void switch_mm(struct mm_struct *prev, struct mm_struct *next,
	struct task_struct *task);

static inline void activate_mm(struct mm_struct *prev,
			       struct mm_struct *next)
arch/riscv/mm/Makefile
+1 −0
@@ -9,3 +9,4 @@ obj-y += fault.o
obj-y += extable.o
obj-y += ioremap.o
obj-y += cacheflush.o
obj-y += context.o
arch/riscv/mm/context.c
+69 −0
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2012 Regents of the University of California
 * Copyright (C) 2017 SiFive
 */

#include <linux/mm.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>

/*
 * When necessary, performs a deferred icache flush for the given MM context,
 * on the local CPU.  RISC-V has no direct mechanism for instruction cache
 * shoot downs, so instead we send an IPI that informs the remote harts they
 * need to flush their local instruction caches.  To avoid pathologically slow
 * behavior in a common case (a bunch of single-hart processes on a many-hart
 * machine, ie 'make -j') we avoid the IPIs for harts that are not currently
 * executing a MM context and instead schedule a deferred local instruction
 * cache flush to be performed before execution resumes on each hart.  This
 * actually performs that local instruction cache flush, which implicitly only
 * refers to the current hart.
 */
static inline void flush_icache_deferred(struct mm_struct *mm)
{
#ifdef CONFIG_SMP
	unsigned int cpu = smp_processor_id();
	cpumask_t *mask = &mm->context.icache_stale_mask;

	if (cpumask_test_cpu(cpu, mask)) {
		cpumask_clear_cpu(cpu, mask);
		/*
		 * Ensure the remote hart's writes are visible to this hart.
		 * This pairs with a barrier in flush_icache_mm.
		 */
		smp_mb();
		local_flush_icache_all();
	}

#endif
}

void switch_mm(struct mm_struct *prev, struct mm_struct *next,
	struct task_struct *task)
{
	unsigned int cpu;

	if (unlikely(prev == next))
		return;

	/*
	 * Mark the current MM context as inactive, and the next as
	 * active.  This is at least used by the icache flushing
	 * routines in order to determine who should be flushed.
	 */
	cpu = smp_processor_id();

	cpumask_clear_cpu(cpu, mm_cpumask(prev));
	cpumask_set_cpu(cpu, mm_cpumask(next));

	/*
	 * Use the old spbtr name instead of using the current satp
	 * name to support binutils 2.29 which doesn't know about the
	 * privileged ISA 1.10 yet.
	 */
	csr_write(sptbr, virt_to_pfn(next->pgd) | SATP_MODE);
	local_flush_tlb_all();

	flush_icache_deferred(next);
}
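
Editor's note: the barrier comment in flush_icache_deferred() pairs with the producer
side in flush_icache_mm() (arch/riscv/mm/cacheflush.c), which is not touched by this
diff. The following is a simplified, illustrative sketch of that pairing, not the
actual kernel function: names, the remote-fence path, and error handling are abridged,
and flush_icache_mm_sketch() is a hypothetical name used only for this note.

/*
 * Illustrative sketch only, not part of this commit.  Assumes kernel
 * context (<linux/mm.h>, <linux/smp.h>, <asm/cacheflush.h>).
 *
 * The producer marks every hart's instruction cache as potentially stale,
 * flushes the local one immediately, and leaves the remaining harts to a
 * remote fence.i or to the deferred flush performed in switch_mm().
 */
static void flush_icache_mm_sketch(struct mm_struct *mm)
{
	cpumask_t *mask = &mm->context.icache_stale_mask;
	unsigned int cpu = get_cpu();	/* disable preemption, get hart id */

	cpumask_setall(mask);		/* every hart may now see stale code */
	cpumask_clear_cpu(cpu, mask);	/* ...except this one, flushed below */
	local_flush_icache_all();

	/*
	 * Order the icache_stale_mask updates before any later read of the
	 * mask on another hart.  This is the barrier that the smp_mb() in
	 * flush_icache_deferred() pairs with.
	 */
	smp_mb();

	/*
	 * Harts currently executing this mm would be flushed right away via
	 * a remote fence.i (SBI call or IPI); harts not running it pick up
	 * their stale bit in flush_icache_deferred() the next time they
	 * switch to this mm.  That remote path is elided from this sketch.
	 */

	put_cpu();
}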