Commit d438fabc authored by Marco Elver, committed by Linus Torvalds
Browse files

kfence: use pt_regs to generate stack trace on faults

Instead of removing the fault handling portion of the stack trace based on
the fault handler's name, just use struct pt_regs directly.

Change kfence_handle_page_fault() to take a struct pt_regs, and plumb it
through to kfence_report_error() for out-of-bounds, use-after-free, or
invalid access errors, where pt_regs is used to generate the stack trace.

If the kernel is a DEBUG_KERNEL, also show registers for more information.

Link: https://lkml.kernel.org/r/20201105092133.2075331-1-elver@google.com


Signed-off-by: Marco Elver <elver@google.com>
Suggested-by: Mark Rutland <mark.rutland@arm.com>
Acked-by: Mark Rutland <mark.rutland@arm.com>
Cc: Alexander Potapenko <glider@google.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Jann Horn <jannh@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 840b2398
Loading
Loading
Loading
Loading
+0 −2
Original line number Diff line number Diff line
@@ -10,8 +10,6 @@

#include <asm/cacheflush.h>

#define KFENCE_SKIP_ARCH_FAULT_HANDLER "el1_sync"

static inline bool arch_kfence_init_pool(void) { return true; }

static inline bool kfence_protect_page(unsigned long addr, bool protect)
+1 −1
Original line number Diff line number Diff line
@@ -390,7 +390,7 @@ static void __do_kernel_fault(unsigned long addr, unsigned int esr,
	} else if (addr < PAGE_SIZE) {
		msg = "NULL pointer dereference";
	} else {
		if (kfence_handle_page_fault(addr))
		if (kfence_handle_page_fault(addr, regs))
			return;

		msg = "paging request";
+0 −6
Original line number Diff line number Diff line
@@ -16,12 +16,6 @@
#include <asm/set_memory.h>
#include <asm/tlbflush.h>

/*
 * The page fault handler entry function, up to which the stack trace is
 * truncated in reports.
 */
#define KFENCE_SKIP_ARCH_FAULT_HANDLER "asm_exc_page_fault"

/* Force 4K pages for __kfence_pool. */
static inline bool arch_kfence_init_pool(void)
{
+1 −1
Original line number Diff line number Diff line
@@ -682,7 +682,7 @@ page_fault_oops(struct pt_regs *regs, unsigned long error_code,
		efi_crash_gracefully_on_page_fault(address);

	/* Only not-present faults should be handled by KFENCE. */
	if (!(error_code & X86_PF_PROT) && kfence_handle_page_fault(address))
	if (!(error_code & X86_PF_PROT) && kfence_handle_page_fault(address, regs))
		return;

oops:
+3 −2
Original line number Diff line number Diff line
@@ -186,6 +186,7 @@ static __always_inline __must_check bool kfence_free(void *addr)
/**
 * kfence_handle_page_fault() - perform page fault handling for KFENCE pages
 * @addr: faulting address
 * @regs: current struct pt_regs (can be NULL, but shows full stack trace)
 *
 * Return:
 * * false - address outside KFENCE pool,
@@ -196,7 +197,7 @@ static __always_inline __must_check bool kfence_free(void *addr)
 * cases KFENCE prints an error message and marks the offending page as
 * present, so that the kernel can proceed.
 */
bool __must_check kfence_handle_page_fault(unsigned long addr);
bool __must_check kfence_handle_page_fault(unsigned long addr, struct pt_regs *regs);

#else /* CONFIG_KFENCE */

@@ -209,7 +210,7 @@ static inline size_t kfence_ksize(const void *addr) { return 0; }
static inline void *kfence_object_start(const void *addr) { return NULL; }
static inline void __kfence_free(void *addr) { }
static inline bool __must_check kfence_free(void *addr) { return false; }
static inline bool __must_check kfence_handle_page_fault(unsigned long addr) { return false; }
static inline bool __must_check kfence_handle_page_fault(unsigned long addr, struct pt_regs *regs) { return false; }

#endif

Loading