Commit f23eab0b authored by Kefeng Wang, committed by Will Deacon

arm64: mm: Convert to GENERIC_IOREMAP

Add a hook for arm64's extra checks in ioremap(), convert
ioremap_wc/np/cache to use ioremap_prot() from GENERIC_IOREMAP,
update the copyright, and drop the now-unused includes.

Reviewed-by: Anshuman Khandual <anshuman.khandual@arm.com>
Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Link: https://lore.kernel.org/r/20220607125027.44946-6-wangkefeng.wang@huawei.com
Signed-off-by: Will Deacon <will@kernel.org>
parent 18e780b4
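
For context: with GENERIC_IOREMAP, the architecture only supplies the
ioremap_allowed() hook and a default _PAGE_IOREMAP protection, while the
common code in mm/ioremap.c handles the range sanity checks, page
alignment, vmalloc-area allocation and page-table population. A condensed
sketch of that generic path (paraphrased from mm/ioremap.c as of this
series, trimmed for brevity; not part of this commit):

/*
 * Sketch of the generic ioremap_prot() from mm/ioremap.c, trimmed;
 * see the upstream source for the authoritative version.
 */
void __iomem *ioremap_prot(phys_addr_t addr, size_t size, unsigned long prot)
{
	unsigned long offset, vaddr;
	phys_addr_t last_addr = addr + size - 1;
	struct vm_struct *area;

	/* Disallow wrap-around or zero size. */
	if (!size || last_addr < addr)
		return NULL;

	/* Page-align the mapping, keeping the sub-page offset. */
	offset = addr & (~PAGE_MASK);
	addr -= offset;
	size = PAGE_ALIGN(size + offset);

	/* Arch veto point: arm64 rejects out-of-range and RAM here. */
	if (!ioremap_allowed(addr, size, prot))
		return NULL;

	area = get_vm_area_caller(size, VM_IOREMAP,
				  __builtin_return_address(0));
	if (!area)
		return NULL;
	vaddr = (unsigned long)area->addr;
	area->phys_addr = addr;

	if (ioremap_page_range(vaddr, vaddr + size, addr, __pgprot(prot))) {
		free_vm_area(area);
		return NULL;
	}

	return (void __iomem *)(vaddr + offset);
}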
arch/arm64/Kconfig  +1 −0
@@ -126,6 +126,7 @@ config ARM64
 	select GENERIC_CPU_VULNERABILITIES
 	select GENERIC_EARLY_IOREMAP
 	select GENERIC_IDLE_POLL_SETUP
+	select GENERIC_IOREMAP
 	select GENERIC_IRQ_IPI
 	select GENERIC_IRQ_PROBE
 	select GENERIC_IRQ_SHOW
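
Selecting GENERIC_IOREMAP is what swaps the implementations: in kernels of
this era the common ioremap code is built only when an architecture selects
the symbol, so arm64's own ioremap.c (last hunk below) shrinks to just the
ioremap_allowed() hook and the early-ioremap setup. The build glue, quoted
from the contemporary upstream tree for reference:

# mm/Makefile: selecting GENERIC_IOREMAP pulls in the common
# ioremap_prot()/iounmap() implementation.
obj-$(CONFIG_GENERIC_IOREMAP) += ioremap.o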
arch/arm64/include/asm/io.h  +18 −6
@@ -163,13 +163,16 @@ extern void __memset_io(volatile void __iomem *, int, size_t);
 /*
  * I/O memory mapping functions.
  */
-extern void __iomem *__ioremap(phys_addr_t phys_addr, size_t size, pgprot_t prot);
-extern void iounmap(volatile void __iomem *addr);
-extern void __iomem *ioremap_cache(phys_addr_t phys_addr, size_t size);
+bool ioremap_allowed(phys_addr_t phys_addr, size_t size, unsigned long prot);
+#define ioremap_allowed ioremap_allowed
 
-#define ioremap(addr, size)		__ioremap((addr), (size), __pgprot(PROT_DEVICE_nGnRE))
-#define ioremap_wc(addr, size)		__ioremap((addr), (size), __pgprot(PROT_NORMAL_NC))
-#define ioremap_np(addr, size)		__ioremap((addr), (size), __pgprot(PROT_DEVICE_nGnRnE))
+#define _PAGE_IOREMAP PROT_DEVICE_nGnRE
+
+#define ioremap_wc(addr, size)	\
+	ioremap_prot((addr), (size), PROT_NORMAL_NC)
+#define ioremap_np(addr, size)	\
+	ioremap_prot((addr), (size), PROT_DEVICE_nGnRnE)
 
 /*
  * io{read,write}{16,32,64}be() macros
@@ -184,6 +187,15 @@ extern void __iomem *ioremap_cache(phys_addr_t phys_addr, size_t size);
 
 #include <asm-generic/io.h>
 
+#define ioremap_cache ioremap_cache
+static inline void __iomem *ioremap_cache(phys_addr_t addr, size_t size)
+{
+	if (pfn_is_map_memory(__phys_to_pfn(addr)))
+		return (void __iomem *)__phys_to_virt(addr);
+
+	return ioremap_prot(addr, size, PROT_NORMAL);
+}
+
 /*
  * More restrictive address range checking than the default implementation
  * (PHYS_OFFSET and PHYS_MASK taken into account).
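
Callers are unaffected by the conversion; the macros simply expand to
ioremap_prot() with the matching arm64 memory attributes. A hypothetical
driver fragment (REG_BASE and FB_BASE are illustrative names, not from this
patch):

/*
 * Hypothetical usage; REG_BASE/FB_BASE are made-up addresses.
 * ioremap() itself now comes from asm-generic/io.h and uses
 * _PAGE_IOREMAP (PROT_DEVICE_nGnRE) defined above.
 */
void __iomem *regs = ioremap(REG_BASE, SZ_4K);		/* Device-nGnRE */
void __iomem *fb = ioremap_wc(FB_BASE, SZ_16M);		/* Normal non-cacheable */

if (regs) {
	writel(1, regs + 0x10);
	iounmap(regs);
}
if (fb)
	iounmap(fb);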
arch/arm64/kernel/acpi.c  +1 −1
@@ -351,7 +351,7 @@ void __iomem *acpi_os_ioremap(acpi_physical_address phys, acpi_size size)
 				prot = __acpi_get_writethrough_mem_attribute();
 		}
 	}
-	return __ioremap(phys, size, prot);
+	return ioremap_prot(phys, size, pgprot_val(prot));
 }
 
 /*
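
The pgprot_val() unwrapping is needed because the generic ioremap_prot()
takes the raw protection bits as an unsigned long, whereas acpi_os_ioremap()
computes a typed pgprot_t. On arm64 that type is a one-member struct, as
defined in <asm/pgtable-types.h>:

typedef struct { pteval_t pgprot; } pgprot_t;
#define pgprot_val(x)	((x).pgprot)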
arch/arm64/mm/ioremap.c  +8 −82
 // SPDX-License-Identifier: GPL-2.0-only
-/*
- * Based on arch/arm/mm/ioremap.c
- *
- * (C) Copyright 1995 1996 Linus Torvalds
- * Hacked for ARM by Phil Blundell <philb@gnu.org>
- * Hacked to allow all architectures to build, and various cleanups
- * by Russell King
- * Copyright (C) 2012 ARM Ltd.
- */
 
-#include <linux/export.h>
 #include <linux/mm.h>
-#include <linux/vmalloc.h>
 #include <linux/io.h>
 
-#include <asm/fixmap.h>
-#include <asm/tlbflush.h>
-
-static void __iomem *__ioremap_caller(phys_addr_t phys_addr, size_t size,
-				      pgprot_t prot, void *caller)
+bool ioremap_allowed(phys_addr_t phys_addr, size_t size, unsigned long prot)
 {
-	unsigned long last_addr;
-	unsigned long offset = phys_addr & ~PAGE_MASK;
-	int err;
-	unsigned long addr;
-	struct vm_struct *area;
+	unsigned long last_addr = phys_addr + size - 1;
 
-	/*
-	 * Page align the mapping address and size, taking account of any
-	 * offset.
-	 */
-	phys_addr &= PAGE_MASK;
-	size = PAGE_ALIGN(size + offset);
+	/* Don't allow outside PHYS_MASK */
+	if (last_addr & ~PHYS_MASK)
+		return false;
 
-	/*
-	 * Don't allow wraparound, zero size or outside PHYS_MASK.
-	 */
-	last_addr = phys_addr + size - 1;
-	if (!size || last_addr < phys_addr || (last_addr & ~PHYS_MASK))
-		return NULL;
-
-	/*
-	 * Don't allow RAM to be mapped.
-	 */
+	/* Don't allow RAM to be mapped. */
 	if (WARN_ON(pfn_is_map_memory(__phys_to_pfn(phys_addr))))
-		return NULL;
-
-	area = get_vm_area_caller(size, VM_IOREMAP, caller);
-	if (!area)
-		return NULL;
-	addr = (unsigned long)area->addr;
-	area->phys_addr = phys_addr;
-
-	err = ioremap_page_range(addr, addr + size, phys_addr, prot);
-	if (err) {
-		vunmap((void *)addr);
-		return NULL;
-	}
-
-	return (void __iomem *)(offset + addr);
-}
-
-void __iomem *__ioremap(phys_addr_t phys_addr, size_t size, pgprot_t prot)
-{
-	return __ioremap_caller(phys_addr, size, prot,
-				__builtin_return_address(0));
-}
-EXPORT_SYMBOL(__ioremap);
-
-void iounmap(volatile void __iomem *io_addr)
-{
-	unsigned long addr = (unsigned long)io_addr & PAGE_MASK;
-
-	/*
-	 * We could get an address outside vmalloc range in case
-	 * of ioremap_cache() reusing a RAM mapping.
-	 */
-	if (is_vmalloc_addr((void *)addr))
-		vunmap((void *)addr);
-}
-EXPORT_SYMBOL(iounmap);
-
-void __iomem *ioremap_cache(phys_addr_t phys_addr, size_t size)
-{
-	/* For normal memory we already have a cacheable mapping. */
-	if (pfn_is_map_memory(__phys_to_pfn(phys_addr)))
-		return (void __iomem *)__phys_to_virt(phys_addr);
+		return false;
 
-	return __ioremap_caller(phys_addr, size, __pgprot(PROT_NORMAL),
-				__builtin_return_address(0));
+	return true;
 }
-EXPORT_SYMBOL(ioremap_cache);
 
 /*
  * Must be called after early_fixmap_init