Unverified Commit 01112e5e authored by Palmer Dabbelt's avatar Palmer Dabbelt
Browse files

Merge branch 'riscv-wx-mappings' into for-next

This contains both the short-term fix for the W+X boot mappings and the
larger cleanup.

* riscv-wx-mappings:
  riscv: Map the kernel with correct permissions the first time
  riscv: Introduce set_kernel_memory helper
  riscv: Simplify xip and !xip kernel address conversion macros
  riscv: Remove CONFIG_PHYS_RAM_BASE_FIXED
  riscv: mm: Fix W+X mappings at boot
parents 47513f24 e5c35fa0
Loading
Loading
Loading
Loading
+0 −6
Original line number Diff line number Diff line
@@ -494,13 +494,8 @@ config STACKPROTECTOR_PER_TASK
	def_bool y
	depends on STACKPROTECTOR && CC_HAVE_STACKPROTECTOR_TLS

config PHYS_RAM_BASE_FIXED
	bool "Explicitly specified physical RAM address"
	default n

config PHYS_RAM_BASE
	hex "Platform Physical RAM address"
	depends on PHYS_RAM_BASE_FIXED
	default "0x80000000"
	help
	  This is the physical address of RAM in the system. It has to be
@@ -513,7 +508,6 @@ config XIP_KERNEL
	# This prevents XIP from being enabled by all{yes,mod}config, which
	# fail to build since XIP doesn't support large kernels.
	depends on !COMPILE_TEST
	select PHYS_RAM_BASE_FIXED
	help
	  Execute-In-Place allows the kernel to run from non-volatile storage
	  directly addressable by the CPU, such as NOR flash. This saves RAM
+15 −12
Original line number Diff line number Diff line
@@ -83,55 +83,58 @@ extern unsigned long va_pa_offset;
#ifdef CONFIG_64BIT
extern unsigned long va_kernel_pa_offset;
#endif
#ifdef CONFIG_XIP_KERNEL
extern unsigned long va_kernel_xip_pa_offset;
#endif
extern unsigned long pfn_base;
extern uintptr_t load_sz;
#define ARCH_PFN_OFFSET		(pfn_base)
#else
#define va_pa_offset		0
#ifdef CONFIG_64BIT
#define va_kernel_pa_offset	0
#endif
#define va_kernel_xip_pa_offset 0
#define ARCH_PFN_OFFSET		(PAGE_OFFSET >> PAGE_SHIFT)
#endif /* CONFIG_MMU */

extern unsigned long kernel_virt_addr;

#ifdef CONFIG_64BIT
#define is_kernel_mapping(x)	\
	((x) >= kernel_virt_addr && (x) < (kernel_virt_addr + load_sz))
#define is_linear_mapping(x)	\
	((x) >= PAGE_OFFSET && (x) < kernel_virt_addr)

#define linear_mapping_pa_to_va(x)	((void *)((unsigned long)(x) + va_pa_offset))
#ifdef CONFIG_XIP_KERNEL
#define kernel_mapping_pa_to_va(y)	({						\
	unsigned long _y = y;								\
	(_y >= CONFIG_PHYS_RAM_BASE) ?							\
		(void *)((unsigned long)(_y) + va_kernel_pa_offset + XIP_OFFSET) :	\
		(void *)((unsigned long)(_y) + va_kernel_xip_pa_offset);		\
	})
#else
#define kernel_mapping_pa_to_va(x)	((void *)((unsigned long)(x) + va_kernel_pa_offset))
#endif
#define __pa_to_va_nodebug(x)		linear_mapping_pa_to_va(x)

#define linear_mapping_va_to_pa(x)	((unsigned long)(x) - va_pa_offset)
#ifdef CONFIG_XIP_KERNEL
#define kernel_mapping_va_to_pa(y) ({						\
	unsigned long _y = y;							\
	(_y < kernel_virt_addr + XIP_OFFSET) ?					\
		((unsigned long)(_y) - va_kernel_xip_pa_offset) :		\
		((unsigned long)(_y) - va_kernel_pa_offset - XIP_OFFSET);	\
	})
#else
#define kernel_mapping_va_to_pa(x)	((unsigned long)(x) - va_kernel_pa_offset)
#endif

#define __va_to_pa_nodebug(x)	({						\
	unsigned long _x = x;							\
	(_x < kernel_virt_addr) ?						\
	is_linear_mapping(_x) ?							\
		linear_mapping_va_to_pa(_x) : kernel_mapping_va_to_pa(_x);	\
	})
#else
#define is_kernel_mapping(x)	\
	((x) >= kernel_virt_addr && (x) < (kernel_virt_addr + load_sz))
#define is_linear_mapping(x)	\
	((x) >= PAGE_OFFSET)

#define __pa_to_va_nodebug(x)  ((void *)((unsigned long) (x) + va_pa_offset))
#define __va_to_pa_nodebug(x)  ((unsigned long)(x) - va_pa_offset)
#endif
#endif /* CONFIG_64BIT */

#ifdef CONFIG_DEBUG_VIRTUAL
extern phys_addr_t __virt_to_phys(unsigned long x);
+2 −0
Original line number Diff line number Diff line
@@ -77,6 +77,8 @@

#ifdef CONFIG_XIP_KERNEL
/*
 * Fixed gap used by the XIP address-conversion macros in page.h
 * (presumably the offset between the XIP mapping and the RAM copy
 * of the kernel — confirm against the XIP memory layout docs).
 */
#define XIP_OFFSET		SZ_8M
#else
/* Defined as 0 so common code can use XIP_OFFSET unconditionally. */
#define XIP_OFFSET		0
#endif

#ifndef __ASSEMBLY__
+17 −0
Original line number Diff line number Diff line
@@ -6,6 +6,7 @@
#define __ASM_SECTIONS_H

#include <asm-generic/sections.h>
#include <linux/mm.h>

extern char _start[];
extern char _start_kernel[];
@@ -13,4 +14,20 @@ extern char __init_data_begin[], __init_data_end[];
extern char __init_text_begin[], __init_text_end[];
extern char __alt_start[], __alt_end[];

static inline bool is_va_kernel_text(uintptr_t va)
{
	uintptr_t start = (uintptr_t)_start;
	uintptr_t end = (uintptr_t)__init_data_begin;

	return va >= start && va < end;
}

static inline bool is_va_kernel_lm_alias_text(uintptr_t va)
{
	uintptr_t start = (uintptr_t)lm_alias(_start);
	uintptr_t end = (uintptr_t)lm_alias(__init_data_begin);

	return va >= start && va < end;
}

#endif /* __ASM_SECTIONS_H */
+16 −8
Original line number Diff line number Diff line
@@ -16,20 +16,28 @@ int set_memory_rw(unsigned long addr, int numpages);
int set_memory_x(unsigned long addr, int numpages);
int set_memory_nx(unsigned long addr, int numpages);
int set_memory_rw_nx(unsigned long addr, int numpages);
void protect_kernel_text_data(void);
/*
 * Apply @set_memory to the pages covering [@startp, @endp).
 * The byte length is rounded up to a whole number of pages before
 * being converted to a page count. Returns @set_memory's result.
 */
static __always_inline int set_kernel_memory(char *startp, char *endp,
					     int (*set_memory)(unsigned long start,
							       int num_pages))
{
	unsigned long base = (unsigned long)startp;
	unsigned long len = (unsigned long)endp - base;

	return set_memory(base, PAGE_ALIGN(len) >> PAGE_SHIFT);
}
#else
static inline int set_memory_ro(unsigned long addr, int numpages) { return 0; }
static inline int set_memory_rw(unsigned long addr, int numpages) { return 0; }
static inline int set_memory_x(unsigned long addr, int numpages) { return 0; }
static inline int set_memory_nx(unsigned long addr, int numpages) { return 0; }
static inline void protect_kernel_text_data(void) {}
static inline int set_memory_rw_nx(unsigned long addr, int numpages) { return 0; }
#endif

#if defined(CONFIG_64BIT) && defined(CONFIG_STRICT_KERNEL_RWX)
void __init protect_kernel_linear_mapping_text_rodata(void);
#else
static inline void protect_kernel_linear_mapping_text_rodata(void) {}
/*
 * No-op stub used when !(CONFIG_64BIT && CONFIG_STRICT_KERNEL_RWX):
 * callers may invoke set_kernel_memory() unconditionally; it simply
 * reports success without changing any permissions.
 */
static inline int set_kernel_memory(char *startp, char *endp,
				    int (*set_memory)(unsigned long start,
						      int num_pages))
{
	return 0;
}
#endif

int set_direct_map_invalid_noflush(struct page *page);
Loading