Commit 629ed3f7 authored by Thomas Gleixner
Browse files

xtensa/mm/highmem: Switch to generic kmap atomic



No reason having the same code in every architecture

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Chris Zankel <chris@zankel.net>
Cc: Max Filippov <jcmvbkbc@gmail.com>
Cc: Arnd Bergmann <arnd@arndb.de>
Link: https://lore.kernel.org/r/20201103095858.311016780@linutronix.de
parent 3293efa9
Loading
Loading
Loading
Loading
+1 −0
Original line number Diff line number Diff line
@@ -666,6 +666,7 @@ endchoice
config HIGHMEM
	bool "High Memory Support"
	depends on MMU
	select KMAP_LOCAL
	help
	  Linux can use the full amount of RAM in the system by
	  default. However, the default MMUv2 setup only maps the
+2 −2
Original line number Diff line number Diff line
@@ -16,7 +16,7 @@
#ifdef CONFIG_HIGHMEM
#include <linux/threads.h>
#include <linux/pgtable.h>
#include <asm/kmap_types.h>
#include <asm/kmap_size.h>
#endif

/*
@@ -39,7 +39,7 @@ enum fixed_addresses {
	/* reserved pte's for temporary kernel mappings */
	FIX_KMAP_BEGIN,
	FIX_KMAP_END = FIX_KMAP_BEGIN +
		(KM_TYPE_NR * NR_CPUS * DCACHE_N_COLORS) - 1,
		(KM_MAX_IDX * NR_CPUS * DCACHE_N_COLORS) - 1,
#endif
	__end_of_fixed_addresses
};
+10 −2
Original line number Diff line number Diff line
@@ -16,7 +16,6 @@
#include <linux/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/fixmap.h>
#include <asm/kmap_types.h>

#define PKMAP_BASE		((FIXADDR_START -			\
				  (LAST_PKMAP + 1) * PAGE_SIZE) & PMD_MASK)
@@ -68,6 +67,15 @@ static inline void flush_cache_kmaps(void)
	flush_cache_all();
}

enum fixed_addresses kmap_local_map_idx(int type, unsigned long pfn);
#define arch_kmap_local_map_idx		kmap_local_map_idx

enum fixed_addresses kmap_local_unmap_idx(int type, unsigned long addr);
#define arch_kmap_local_unmap_idx	kmap_local_unmap_idx

#define arch_kmap_local_post_unmap(vaddr)	\
	local_flush_tlb_kernel_range(vaddr, vaddr + PAGE_SIZE)

void kmap_init(void);

#endif
+5 −41
Original line number Diff line number Diff line
@@ -12,8 +12,6 @@
#include <linux/highmem.h>
#include <asm/tlbflush.h>

static pte_t *kmap_pte;

#if DCACHE_WAY_SIZE > PAGE_SIZE
unsigned int last_pkmap_nr_arr[DCACHE_N_COLORS];
wait_queue_head_t pkmap_map_wait_arr[DCACHE_N_COLORS];
@@ -33,59 +31,25 @@ static inline void kmap_waitqueues_init(void)

static inline enum fixed_addresses kmap_idx(int type, unsigned long color)
{
	return (type + KM_TYPE_NR * smp_processor_id()) * DCACHE_N_COLORS +
	return (type + KM_MAX_IDX * smp_processor_id()) * DCACHE_N_COLORS +
		color;
}

void *kmap_atomic_high_prot(struct page *page, pgprot_t prot)
enum fixed_addresses kmap_local_map_idx(int type, unsigned long pfn)
{
	enum fixed_addresses idx;
	unsigned long vaddr;

	idx = kmap_idx(kmap_atomic_idx_push(),
		       DCACHE_ALIAS(page_to_phys(page)));
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM
	BUG_ON(!pte_none(*(kmap_pte + idx)));
#endif
	set_pte(kmap_pte + idx, mk_pte(page, prot));

	return (void *)vaddr;
	return kmap_idx(type, DCACHE_ALIAS(pfn << PAGE_SHIFT));
}
EXPORT_SYMBOL(kmap_atomic_high_prot);

void kunmap_atomic_high(void *kvaddr)
enum fixed_addresses kmap_local_unmap_idx(int type, unsigned long addr)
{
	if (kvaddr >= (void *)FIXADDR_START &&
	    kvaddr < (void *)FIXADDR_TOP) {
		int idx = kmap_idx(kmap_atomic_idx(),
				   DCACHE_ALIAS((unsigned long)kvaddr));

		/*
		 * Force other mappings to Oops if they'll try to access this
		 * pte without first remap it.  Keeping stale mappings around
		 * is a bad idea also, in case the page changes cacheability
		 * attributes or becomes a protected page in a hypervisor.
		 */
		pte_clear(&init_mm, kvaddr, kmap_pte + idx);
		local_flush_tlb_kernel_range((unsigned long)kvaddr,
					     (unsigned long)kvaddr + PAGE_SIZE);

		kmap_atomic_idx_pop();
	return kmap_idx(type, DCACHE_ALIAS(addr));
}
}
EXPORT_SYMBOL(kunmap_atomic_high);

void __init kmap_init(void)
{
	unsigned long kmap_vstart;

	/* Check if this memory layout is broken because PKMAP overlaps
	 * page table.
	 */
	BUILD_BUG_ON(PKMAP_BASE < TLBTEMP_BASE_1 + TLBTEMP_SIZE);
	/* cache the first kmap pte */
	kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
	kmap_pte = virt_to_kpte(kmap_vstart);
	kmap_waitqueues_init();
}