Commit 6498f615 authored by Linus Torvalds

Merge tag 'riscv-for-linus-5.14-rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/riscv/linux

Pull RISC-V fixes from Palmer Dabbelt:

 - properly set the memory size, which fixes 32-bit systems

 - allow initrd to load anywhere in memory, rather than restricting it
   to the first 256MiB

 - fix the 'mem=' parameter on 64-bit systems to properly account for
   the maximum supported memory now that the kernel is outside the
   linear map

 - avoid installing mappings into the last 4KiB of memory, which
   conflicts with error values

 - prevent the stack from being freed while it is being walked

 - a handful of fixes to the new copy to/from user routines

* tag 'riscv-for-linus-5.14-rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/riscv/linux:
  riscv: __asm_copy_to-from_user: Fix: Typos in comments
  riscv: __asm_copy_to-from_user: Remove unnecessary size check
  riscv: __asm_copy_to-from_user: Fix: fail on RV32
  riscv: __asm_copy_to-from_user: Fix: overrun copy
  riscv: stacktrace: pin the task's stack in get_wchan
  riscv: Make sure the kernel mapping does not overlap with IS_ERR_VALUE
  riscv: Make sure the linear mapping does not use the kernel mapping
  riscv: Fix memory_limit for 64-bit kernel
  RISC-V: load initrd wherever it fits into memory
  riscv: Fix 32-bit RISC-V boot failure
parents fc68f42a ea196c54
+2 −2
@@ -27,10 +27,10 @@ int efi_set_mapping_permissions(struct mm_struct *mm, efi_memory_desc_t *md);
 
 #define ARCH_EFI_IRQ_FLAGS_MASK (SR_IE | SR_SPIE)
 
-/* Load initrd at enough distance from DRAM start */
+/* Load initrd anywhere in system RAM */
 static inline unsigned long efi_get_max_initrd_addr(unsigned long image_addr)
 {
-	return image_addr + SZ_256M;
+	return ULONG_MAX;
 }
 
 #define alloc_screen_info(x...)		(&screen_info)
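
The EFI stub uses this hook as the hard upper address bound when it
allocates memory for the initrd, so returning ULONG_MAX lets the initrd
land anywhere in system RAM instead of within 256MiB of the kernel image.
A minimal sketch of the call shape, assuming the libstub interface of this
era (treat the exact efi_load_initrd() signature as an assumption):

	/* sketch: how the generic stub consumes efi_get_max_initrd_addr() */
	status = efi_load_initrd(image, &initrd_addr, &initrd_size,
				 ULONG_MAX,	/* soft limit: none */
				 efi_get_max_initrd_addr(image_addr));
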
+5 −1
@@ -132,8 +132,12 @@ unsigned long get_wchan(struct task_struct *task)
 {
 	unsigned long pc = 0;
 
-	if (likely(task && task != current && !task_is_running(task)))
+	if (likely(task && task != current && !task_is_running(task))) {
+		if (!try_get_task_stack(task))
+			return 0;
 		walk_stackframe(task, NULL, save_wchan, &pc);
+		put_task_stack(task);
+	}
 	return pc;
 }
 
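
Without the pin, the traced task can exit between the task_is_running()
check and the stack walk, and with CONFIG_VMAP_STACK its stack can be
freed at that point, leaving walk_stackframe() reading freed memory. The
general pin-then-walk pattern, as a sketch (not the exact kernel code):

	unsigned long *stack;

	if (!try_get_task_stack(task))	/* task exited; stack already freed */
		return 0;
	stack = task_stack_page(task);	/* safe to read while pinned */
	/* ... walk frames within [stack, stack + THREAD_SIZE) ... */
	put_task_stack(task);		/* drop the reference */
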
+13 −14
@@ -30,23 +30,23 @@ ENTRY(__asm_copy_from_user)
 	 * t0 - end of uncopied dst
 	 */
 	add	t0, a0, a2
-	bgtu	a0, t0, 5f
 
 	/*
 	 * Use byte copy only if too small.
 	 * SZREG holds 4 for RV32 and 8 for RV64
 	 */
-	li	a3, 8*SZREG /* size must be larger than size in word_copy */
+	li	a3, 9*SZREG /* size must be larger than size in word_copy */
 	bltu	a2, a3, .Lbyte_copy_tail
 
 	/*
-	 * Copy first bytes until dst is align to word boundary.
+	 * Copy first bytes until dst is aligned to word boundary.
 	 * a0 - start of dst
 	 * t1 - start of aligned dst
 	 */
 	addi	t1, a0, SZREG-1
 	andi	t1, t1, ~(SZREG-1)
 	/* dst is already aligned, skip */
-	beq	a0, t1, .Lskip_first_bytes
+	beq	a0, t1, .Lskip_align_dst
 1:
 	/* a5 - one byte for copying data */
 	fixup lb      a5, 0(a1), 10f
@@ -55,7 +55,7 @@ ENTRY(__asm_copy_from_user)
 	addi	a0, a0, 1	/* dst */
 	bltu	a0, t1, 1b	/* t1 - start of aligned dst */
 
-.Lskip_first_bytes:
+.Lskip_align_dst:
 	/*
 	 * Now dst is aligned.
 	 * Use shift-copy if src is misaligned.
@@ -72,10 +72,9 @@ ENTRY(__asm_copy_from_user)
 	 *
 	 * a0 - start of aligned dst
 	 * a1 - start of aligned src
-	 * a3 - a1 & mask:(SZREG-1)
 	 * t0 - end of aligned dst
 	 */
-	addi	t0, t0, -(8*SZREG-1) /* not to over run */
+	addi	t0, t0, -(8*SZREG) /* not to over run */
 2:
 	fixup REG_L   a4,        0(a1), 10f
 	fixup REG_L   a5,    SZREG(a1), 10f
@@ -97,7 +96,7 @@ ENTRY(__asm_copy_from_user)
 	addi	a1, a1, 8*SZREG
 	bltu	a0, t0, 2b
 
-	addi	t0, t0, 8*SZREG-1 /* revert to original value */
+	addi	t0, t0, 8*SZREG /* revert to original value */
 	j	.Lbyte_copy_tail
 
 .Lshift_copy:
@@ -107,7 +106,7 @@ ENTRY(__asm_copy_from_user)
 	 * For misaligned copy we still perform aligned word copy, but
 	 * we need to use the value fetched from the previous iteration and
 	 * do some shifts.
-	 * This is safe because reading less than a word size.
+	 * This is safe because reading is less than a word size.
 	 *
 	 * a0 - start of aligned dst
 	 * a1 - start of src
@@ -117,7 +116,7 @@ ENTRY(__asm_copy_from_user)
 	 */
 	/* calculating aligned word boundary for dst */
 	andi	t1, t0, ~(SZREG-1)
-	/* Converting unaligned src to aligned arc */
+	/* Converting unaligned src to aligned src */
 	andi	a1, a1, ~(SZREG-1)
 
 	/*
@@ -125,11 +124,11 @@ ENTRY(__asm_copy_from_user)
 	 * t3 - prev shift
 	 * t4 - current shift
 	 */
-	slli	t3, a3, LGREG
+	slli	t3, a3, 3 /* converting bytes in a3 to bits */
 	li	a5, SZREG*8
 	sub	t4, a5, t3
 
-	/* Load the first word to combine with seceond word */
+	/* Load the first word to combine with second word */
 	fixup REG_L   a5, 0(a1), 10f
 
 3:
@@ -161,7 +160,7 @@ ENTRY(__asm_copy_from_user)
 	 * a1 - start of remaining src
 	 * t0 - end of remaining dst
 	 */
-	bgeu	a0, t0, 5f
+	bgeu	a0, t0, .Lout_copy_user  /* check if end of copy */
 4:
 	fixup lb      a5, 0(a1), 10f
 	addi	a1, a1, 1	/* src */
@@ -169,7 +168,7 @@ ENTRY(__asm_copy_from_user)
 	addi	a0, a0, 1	/* dst */
 	bltu	a0, t0, 4b	/* t0 - end of dst */
 
-5:
+.Lout_copy_user:
 	/* Disable access to user memory */
 	csrc CSR_STATUS, t6
 	li	a0, 0
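
The two constant changes above work together: entering the unrolled copy
now requires 9*SZREG bytes (one SZREG for aligning dst plus 8*SZREG for a
full unrolled iteration), and the loop bound now stops a full 8*SZREG
before the end of dst. A standalone C model of the fixed flow (an
illustration of the loop bounds only, not the kernel routine: no user
fixups, no alignment or shift-copy handling):

	#include <stddef.h>
	#include <string.h>

	#define SZREG sizeof(long)	/* 4 on RV32, 8 on RV64 */

	static void copy_model(unsigned char *dst,
			       const unsigned char *src, size_t n)
	{
		unsigned char *d = dst, *end = dst + n;
		const unsigned char *s = src;

		if (n >= 9 * SZREG) {	/* li a3, 9*SZREG; bltu a2, a3, ... */
			/* Stop 8*SZREG before the end, as in the fixed
			 * "addi t0, t0, -(8*SZREG)": each iteration writes
			 * exactly 8*SZREG bytes, so d < word_end guarantees
			 * d + 8*SZREG <= end (no overrun). */
			unsigned char *word_end = end - 8 * SZREG;

			while (d < word_end) {	/* bltu a0, t0, 2b */
				memcpy(d, s, 8 * SZREG);	/* 8 REG_L/REG_S pairs */
				d += 8 * SZREG;
				s += 8 * SZREG;
			}
		}
		while (d < end)		/* .Lbyte_copy_tail */
			*d++ = *s++;
	}
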
+28 −4
@@ -127,10 +127,17 @@ void __init mem_init(void)
 }
 
 /*
- * The default maximal physical memory size is -PAGE_OFFSET,
- * limit the memory size via mem.
+ * The default maximal physical memory size is -PAGE_OFFSET for 32-bit kernel,
+ * whereas for 64-bit kernel, the end of the virtual address space is occupied
+ * by the modules/BPF/kernel mappings which reduces the available size of the
+ * linear mapping.
+ * Limit the memory size via mem.
  */
+#ifdef CONFIG_64BIT
+static phys_addr_t memory_limit = -PAGE_OFFSET - SZ_4G;
+#else
 static phys_addr_t memory_limit = -PAGE_OFFSET;
+#endif
 
 static int __init early_mem(char *p)
 {
@@ -152,7 +159,7 @@ static void __init setup_bootmem(void)
 {
 	phys_addr_t vmlinux_end = __pa_symbol(&_end);
 	phys_addr_t vmlinux_start = __pa_symbol(&_start);
-	phys_addr_t max_mapped_addr = __pa(~(ulong)0);
+	phys_addr_t __maybe_unused max_mapped_addr;
 	phys_addr_t dram_end;
 
 #ifdef CONFIG_XIP_KERNEL
@@ -175,14 +182,21 @@ static void __init setup_bootmem(void)
 	memblock_reserve(vmlinux_start, vmlinux_end - vmlinux_start);
 
 	dram_end = memblock_end_of_DRAM();
+
+#ifndef CONFIG_64BIT
 	/*
 	 * memblock allocator is not aware of the fact that last 4K bytes of
 	 * the addressable memory can not be mapped because of IS_ERR_VALUE
 	 * macro. Make sure that last 4k bytes are not usable by memblock
-	 * if end of dram is equal to maximum addressable memory.
+	 * if end of dram is equal to maximum addressable memory.  For 64-bit
+	 * kernel, this problem can't happen here as the end of the virtual
+	 * address space is occupied by the kernel mapping then this check must
+	 * be done in create_kernel_page_table.
 	 */
+	max_mapped_addr = __pa(~(ulong)0);
 	if (max_mapped_addr == (dram_end - 1))
 		memblock_set_current_limit(max_mapped_addr - 4096);
+#endif
 
 	min_low_pfn = PFN_UP(memblock_start_of_DRAM());
 	max_low_pfn = max_pfn = PFN_DOWN(dram_end);
@@ -570,6 +584,14 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa)
 	BUG_ON((PAGE_OFFSET % PGDIR_SIZE) != 0);
 	BUG_ON((kernel_map.phys_addr % map_size) != 0);
 
+#ifdef CONFIG_64BIT
+	/*
+	 * The last 4K bytes of the addressable memory can not be mapped because
+	 * of IS_ERR_VALUE macro.
+	 */
+	BUG_ON((kernel_map.virt_addr + kernel_map.size) > ADDRESS_SPACE_END - SZ_4K);
+#endif
+
 	pt_ops.alloc_pte = alloc_pte_early;
 	pt_ops.get_pte_virt = get_pte_virt_early;
 #ifndef __PAGETABLE_PMD_FOLDED
@@ -709,6 +731,8 @@ static void __init setup_vm_final(void)
 		if (start <= __pa(PAGE_OFFSET) &&
 		    __pa(PAGE_OFFSET) < end)
 			start = __pa(PAGE_OFFSET);
+		if (end >= __pa(PAGE_OFFSET) + memory_limit)
+			end = __pa(PAGE_OFFSET) + memory_limit;
 
 		map_size = best_map_size(start, end - start);
 		for (pa = start; pa < end; pa += map_size) {
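
For a 64-bit sv39 kernel the new default can be checked by hand: in
unsigned arithmetic -PAGE_OFFSET is the distance from PAGE_OFFSET to the
top of the address space, and reserving SZ_4G of that for the
kernel/modules/BPF mappings leaves the rest for the linear map. A small
runnable check (the sv39 PAGE_OFFSET value 0xffffffe000000000 is an
assumption based on the layout used by this series):

	#include <stdio.h>

	int main(void)
	{
		unsigned long long page_offset = 0xffffffe000000000ULL;
		unsigned long long sz_4g = 0x100000000ULL;

		/* -PAGE_OFFSET == bytes from PAGE_OFFSET to 2^64 */
		unsigned long long span = 0ULL - page_offset;
		unsigned long long limit = span - sz_4g;

		/* prints: span 128 GiB, default memory_limit 124 GiB */
		printf("span %llu GiB, default memory_limit %llu GiB\n",
		       span >> 30, limit >> 30);
		return 0;
	}
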