Commit 30306f61 authored by Linus Torvalds
Browse files
Pull hardening fixes from Kees Cook:

 - Correctly handle vm_map areas in hardened usercopy (Matthew Wilcox)

 - Adjust CFI RCU usage to avoid boot splats with cpuidle (Sami Tolvanen)

* tag 'hardening-v5.19-rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux:
  usercopy: Make usercopy resilient against ridiculously large copies
  usercopy: Cast pointer to an integer once
  usercopy: Handle vm_map_ram() areas
  cfi: Fix __cfi_slowpath_diag RCU usage with cpuidle
parents afe9eb14 1dfbe9fc
Loading
Loading
Loading
Loading
+1 −0
Original line number Diff line number Diff line
@@ -215,6 +215,7 @@ extern struct vm_struct *__get_vm_area_caller(unsigned long size,
void free_vm_area(struct vm_struct *area);
extern struct vm_struct *remove_vm_area(const void *addr);
extern struct vm_struct *find_vm_area(const void *addr);
struct vmap_area *find_vmap_area(unsigned long addr);

static inline bool is_vm_area_hugepages(const void *addr)
{
+16 −6
Original line number Diff line number Diff line
@@ -281,6 +281,8 @@ static inline cfi_check_fn find_module_check_fn(unsigned long ptr)
static inline cfi_check_fn find_check_fn(unsigned long ptr)
{
	cfi_check_fn fn = NULL;
	unsigned long flags;
	bool rcu_idle;

	if (is_kernel_text(ptr))
		return __cfi_check;
@@ -290,13 +292,21 @@ static inline cfi_check_fn find_check_fn(unsigned long ptr)
	 * the shadow and __module_address use RCU, so we need to wake it
	 * up if necessary.
	 */
	RCU_NONIDLE({
	rcu_idle = !rcu_is_watching();
	if (rcu_idle) {
		local_irq_save(flags);
		rcu_irq_enter();
	}

	if (IS_ENABLED(CONFIG_CFI_CLANG_SHADOW))
		fn = find_shadow_check_fn(ptr);

	if (!fn)
		fn = find_module_check_fn(ptr);
	});

	if (rcu_idle) {
		rcu_irq_exit();
		local_irq_restore(flags);
	}

	return fn;
}
+12 −14
Original line number Diff line number Diff line
@@ -161,29 +161,27 @@ static inline void check_bogus_address(const unsigned long ptr, unsigned long n,
static inline void check_heap_object(const void *ptr, unsigned long n,
				     bool to_user)
{
	uintptr_t addr = (uintptr_t)ptr;
	unsigned long offset;
	struct folio *folio;

	if (is_kmap_addr(ptr)) {
		unsigned long page_end = (unsigned long)ptr | (PAGE_SIZE - 1);

		if ((unsigned long)ptr + n - 1 > page_end)
			usercopy_abort("kmap", NULL, to_user,
					offset_in_page(ptr), n);
		offset = offset_in_page(ptr);
		if (n > PAGE_SIZE - offset)
			usercopy_abort("kmap", NULL, to_user, offset, n);
		return;
	}

	if (is_vmalloc_addr(ptr)) {
		struct vm_struct *area = find_vm_area(ptr);
		unsigned long offset;
		struct vmap_area *area = find_vmap_area(addr);

		if (!area) {
		if (!area)
			usercopy_abort("vmalloc", "no area", to_user, 0, n);
			return;
		}

		offset = ptr - area->addr;
		if (offset + n > get_vm_area_size(area))
		if (n > area->va_end - addr) {
			offset = addr - area->va_start;
			usercopy_abort("vmalloc", NULL, to_user, offset, n);
		}
		return;
	}

@@ -196,8 +194,8 @@ static inline void check_heap_object(const void *ptr, unsigned long n,
		/* Check slab allocator for flags and size. */
		__check_heap_object(ptr, n, folio_slab(folio), to_user);
	} else if (folio_test_large(folio)) {
		unsigned long offset = ptr - folio_address(folio);
		if (offset + n > folio_size(folio))
		offset = ptr - folio_address(folio);
		if (n > folio_size(folio) - offset)
			usercopy_abort("page alloc", NULL, to_user, offset, n);
	}
}
+1 −1
Original line number Diff line number Diff line
@@ -1798,7 +1798,7 @@ static void free_unmap_vmap_area(struct vmap_area *va)
	free_vmap_area_noflush(va);
}

static struct vmap_area *find_vmap_area(unsigned long addr)
struct vmap_area *find_vmap_area(unsigned long addr)
{
	struct vmap_area *va;