Unverified commit 6f4eea90, authored by Vincent Chen, committed by Palmer Dabbelt
Browse files

riscv: Introduce alternative mechanism to apply errata solution



Introduce the "alternative" mechanism from ARM64 and x86 to apply the CPU
vendors' errata solution at runtime. The main purpose of this patch is
to provide a framework. Therefore, the implementation is quite basic for
now so that some scenarios could not use this scheme, such as patching
code to a module, relocating the patching code and heterogeneous CPU
topology.

Users could use the macro ALTERNATIVE to apply an errata to the existing
code flow. In the macro ALTERNATIVE, users need to specify the manufacturer
information (vendorid, archid, and impid) for this errata. Therefore, the kernel
will know this errata is suitable for which CPU core. During the booting
procedure, kernel will select the errata required by the CPU core and then
patch it. It means that the kernel only applies the errata to the specified
CPU core. In this case, different vendors' errata do not affect each other at
runtime. The above patching procedure only occurs during the booting phase,
so we only take the overhead of the "alternative" mechanism once.

This "alternative" mechanism is enabled by default to ensure that all
required errata will be applied. However, users can disable this feature via
the Kconfig option "CONFIG_RISCV_ERRATA_ALTERNATIVE".

Signed-off-by: Vincent Chen <vincent.chen@sifive.com>
Reviewed-by: Anup Patel <anup@brainfault.org>
Signed-off-by: Palmer Dabbelt <palmerdabbelt@google.com>
parent 183787c6
Loading
Loading
Loading
Loading
+1 −0
Original line number Diff line number Diff line
@@ -207,6 +207,7 @@ config LOCKDEP_SUPPORT
	def_bool y

source "arch/riscv/Kconfig.socs"
source "arch/riscv/Kconfig.erratas"

menu "Platform type"

+12 −0
Original line number Diff line number Diff line
# CPU errata handling for RISC-V vendors (arch/riscv/Kconfig.erratas)
menu "CPU errata selection"

config RISCV_ERRATA_ALTERNATIVE
	bool "RISC-V alternative scheme"
	default y
	help
	  This Kconfig allows the kernel to automatically patch the
	  errata required by the execution platform at run time. The
	  code patching is performed once in the boot stages. It means
	  that the overhead from this mechanism is just taken once.

endmenu
+1 −0
Original line number Diff line number Diff line
@@ -87,6 +87,7 @@ KBUILD_IMAGE := $(boot)/Image.gz
head-y := arch/riscv/kernel/head.o

core-y += arch/riscv/
core-$(CONFIG_RISCV_ERRATA_ALTERNATIVE) += arch/riscv/errata/

libs-y += arch/riscv/lib/
libs-$(CONFIG_EFI_STUB) += $(objtree)/drivers/firmware/efi/libstub/lib.a
+1 −0
Original line number Diff line number Diff line
# Runtime "alternative" errata-patching framework (see alternative.c).
obj-y	+= alternative.o
+69 −0
Original line number Diff line number Diff line
// SPDX-License-Identifier: GPL-2.0-only
/*
 * alternative runtime patching
 * inspired by the ARM64 and x86 version
 *
 * Copyright (C) 2021 Sifive.
 */

#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/uaccess.h>
#include <asm/alternative.h>
#include <asm/sections.h>
#include <asm/vendorid_list.h>
#include <asm/sbi.h>
#include <asm/csr.h>

/*
 * Identity of the boot CPU, captured once at boot so that only errata
 * matching this exact vendor/arch/implementation are applied.
 */
static struct cpu_manufacturer_info_t {
	unsigned long vendor_id;	/* mvendorid CSR value */
	unsigned long arch_id;		/* marchid CSR value */
	unsigned long imp_id;		/* mimpid CSR value */
} cpu_mfr_info;

/*
 * Vendor-specific callback that patches all alt_entry records in
 * [begin, end) applicable to the given archid/impid; NULL when no
 * vendor hook is selected, in which case no patching happens.
 */
static void (*vendor_patch_func)(struct alt_entry *begin, struct alt_entry *end,
				 unsigned long archid, unsigned long impid);

/*
 * Populate cpu_mfr_info for the CPU this runs on.  In M-mode the
 * machine-level ID CSRs can be read directly; in S-mode they are not
 * accessible, so the values are obtained via SBI calls instead.
 */
static inline void __init riscv_fill_cpu_mfr_info(void)
{
#ifdef CONFIG_RISCV_M_MODE
	cpu_mfr_info.vendor_id = csr_read(CSR_MVENDORID);
	cpu_mfr_info.arch_id = csr_read(CSR_MARCHID);
	cpu_mfr_info.imp_id = csr_read(CSR_MIMPID);
#else
	cpu_mfr_info.vendor_id = sbi_get_mvendorid();
	cpu_mfr_info.arch_id = sbi_get_marchid();
	cpu_mfr_info.imp_id = sbi_get_mimpid();
#endif
}

/*
 * Read the boot CPU's manufacturer IDs and select the matching
 * vendor patch callback.  The switch currently has no vendor cases
 * (this patch only introduces the framework); future vendor errata
 * add a case keyed on their mvendorid here.
 */
static void __init init_alternative(void)
{
	riscv_fill_cpu_mfr_info();

	switch (cpu_mfr_info.vendor_id) {
	default:
		/* Unknown vendor: leave patching disabled. */
		vendor_patch_func = NULL;
	}
}

/*
 * Entry point for boot-time errata patching.  Runs very early, right
 * after feature detection on the boot CPU, so only a single CPU is
 * live and no cross-CPU synchronization is needed.
 */
void __init apply_boot_alternatives(void)
{
	/* Running this anywhere but the boot CPU would be a bug. */
	WARN_ON(smp_processor_id() != 0);

	/* Identify the CPU and pick the vendor's patch callback. */
	init_alternative();

	/* No callback selected means no errata apply to this core. */
	if (vendor_patch_func)
		vendor_patch_func((struct alt_entry *)__alt_start,
				  (struct alt_entry *)__alt_end,
				  cpu_mfr_info.arch_id,
				  cpu_mfr_info.imp_id);
}
Loading