Commit 3d053e80 authored by Vlastimil Babka

Merge branch 'slab/for-6.6/random_kmalloc' into slab/for-next

Merge the new hardening feature to make heap spraying harder, by GONG,
Ruiqi. It creates multiple (16) copies of kmalloc caches, reducing the
chance that an attacker-controllable allocation site lands in the same
slab as e.g. an allocation site with a use-after-free vulnerability. The
copy is selected based on the allocation site address combined with a
per-boot random seed.

In line with SLAB deprecation, this is a SLUB-only feature, incompatible
with SLUB_TINY due to the memory overhead of the extra cache copies.
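
For illustration, a minimal userspace model of the selection scheme described
above (a sketch, not kernel code; the per-boot seed and the two "allocation
site" addresses below are made up). It mirrors the kernel's generic hash_64()
to map each call site to one of the 16 cache copies:

/*
 * Sketch: hash the allocation site address with a per-boot seed and keep
 * the top ilog2(16) = 4 bits, as the kmalloc_type() change below does.
 */
#include <stdio.h>
#include <stdint.h>

#define GOLDEN_RATIO_64		0x61C8864680B583EBull	/* kernel's 64-bit hash constant */
#define CACHE_COPY_BITS		4			/* ilog2(16 copies) */

static unsigned int pick_copy(uint64_t caller, uint64_t seed)
{
	return (unsigned int)(((caller ^ seed) * GOLDEN_RATIO_64) >> (64 - CACHE_COPY_BITS));
}

int main(void)
{
	uint64_t seed   = 0x243f6a8885a308d3ull;	/* hypothetical per-boot seed */
	uint64_t site_a = 0xffffffff81234560ull;	/* hypothetical call site A */
	uint64_t site_b = 0xffffffff81abcde0ull;	/* hypothetical call site B */

	printf("site A -> kmalloc copy %u\n", pick_copy(site_a, seed));
	printf("site B -> kmalloc copy %u\n", pick_copy(site_b, seed));
	return 0;
}

Two unrelated call sites thus usually end up in different caches, which is
what makes spraying from one site into another site's slab less reliable.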
parents 1662b6c2 3c615294
+9 −3
@@ -35,6 +35,12 @@
#define PCPU_BITMAP_BLOCK_BITS		(PCPU_BITMAP_BLOCK_SIZE >>	\
					 PCPU_MIN_ALLOC_SHIFT)

+#ifdef CONFIG_RANDOM_KMALLOC_CACHES
+#define PERCPU_DYNAMIC_SIZE_SHIFT      12
+#else
+#define PERCPU_DYNAMIC_SIZE_SHIFT      10
+#endif

/*
 * Percpu allocator can serve percpu allocations before slab is
 * initialized which allows slab to depend on the percpu allocator.
@@ -42,7 +48,7 @@
 * for this.  Keep PERCPU_DYNAMIC_RESERVE equal to or larger than
 * PERCPU_DYNAMIC_EARLY_SIZE.
 */
-#define PERCPU_DYNAMIC_EARLY_SIZE	(20 << 10)
+#define PERCPU_DYNAMIC_EARLY_SIZE	(20 << PERCPU_DYNAMIC_SIZE_SHIFT)

/*
 * PERCPU_DYNAMIC_RESERVE indicates the amount of free area to piggy
@@ -56,9 +62,9 @@
 * intelligent way to determine this would be nice.
 */
#if BITS_PER_LONG > 32
-#define PERCPU_DYNAMIC_RESERVE		(28 << 10)
+#define PERCPU_DYNAMIC_RESERVE		(28 << PERCPU_DYNAMIC_SIZE_SHIFT)
#else
-#define PERCPU_DYNAMIC_RESERVE		(20 << 10)
+#define PERCPU_DYNAMIC_RESERVE		(20 << PERCPU_DYNAMIC_SIZE_SHIFT)
#endif

extern void *pcpu_base_addr;
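
Context for the shift bump above: with CONFIG_RANDOM_KMALLOC_CACHES the
dynamic percpu reserves are quadrupled (shift 12 instead of 10), presumably
because the extra kmalloc cache copies consume more per-CPU memory early in
boot. A quick arithmetic check (plain userspace C, not kernel code):

#include <stdio.h>

int main(void)
{
	/* before -> after, in KiB */
	printf("PERCPU_DYNAMIC_EARLY_SIZE:    %d -> %d\n",
	       (20 << 10) / 1024, (20 << 12) / 1024);	/* 20 -> 80 */
	printf("PERCPU_DYNAMIC_RESERVE (64b): %d -> %d\n",
	       (28 << 10) / 1024, (28 << 12) / 1024);	/* 28 -> 112 */
	printf("PERCPU_DYNAMIC_RESERVE (32b): %d -> %d\n",
	       (20 << 10) / 1024, (20 << 12) / 1024);	/* 20 -> 80 */
	return 0;
}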
+20 −3
@@ -19,6 +19,7 @@
#include <linux/workqueue.h>
#include <linux/percpu-refcount.h>
#include <linux/cleanup.h>
+#include <linux/hash.h>


/*
@@ -345,6 +346,12 @@ static inline unsigned int arch_slab_minalign(void)
#define SLAB_OBJ_MIN_SIZE      (KMALLOC_MIN_SIZE < 16 ? \
                               (KMALLOC_MIN_SIZE) : 16)

+#ifdef CONFIG_RANDOM_KMALLOC_CACHES
+#define RANDOM_KMALLOC_CACHES_NR	15 // # of cache copies
+#else
+#define RANDOM_KMALLOC_CACHES_NR	0
+#endif

/*
 * Whenever changing this, take care of that kmalloc_type() and
 * create_kmalloc_caches() still work as intended.
@@ -361,6 +368,8 @@ enum kmalloc_cache_type {
#ifndef CONFIG_MEMCG_KMEM
	KMALLOC_CGROUP = KMALLOC_NORMAL,
#endif
+	KMALLOC_RANDOM_START = KMALLOC_NORMAL,
+	KMALLOC_RANDOM_END = KMALLOC_RANDOM_START + RANDOM_KMALLOC_CACHES_NR,
#ifdef CONFIG_SLUB_TINY
	KMALLOC_RECLAIM = KMALLOC_NORMAL,
#else
@@ -386,14 +395,22 @@ kmalloc_caches[NR_KMALLOC_TYPES][KMALLOC_SHIFT_HIGH + 1];
	(IS_ENABLED(CONFIG_ZONE_DMA)   ? __GFP_DMA : 0) |	\
	(IS_ENABLED(CONFIG_MEMCG_KMEM) ? __GFP_ACCOUNT : 0))

-static __always_inline enum kmalloc_cache_type kmalloc_type(gfp_t flags)
+extern unsigned long random_kmalloc_seed;
+
+static __always_inline enum kmalloc_cache_type kmalloc_type(gfp_t flags, unsigned long caller)
{
	/*
	 * The most common case is KMALLOC_NORMAL, so test for it
	 * with a single branch for all the relevant flags.
	 */
	if (likely((flags & KMALLOC_NOT_NORMAL_BITS) == 0))
+#ifdef CONFIG_RANDOM_KMALLOC_CACHES
+		/* RANDOM_KMALLOC_CACHES_NR (=15) copies + the KMALLOC_NORMAL */
+		return KMALLOC_RANDOM_START + hash_64(caller ^ random_kmalloc_seed,
+						      ilog2(RANDOM_KMALLOC_CACHES_NR + 1));
+#else
		return KMALLOC_NORMAL;
+#endif

	/*
	 * At least one of the flags has to be set. Their priorities in
@@ -580,7 +597,7 @@ static __always_inline __alloc_size(1) void *kmalloc(size_t size, gfp_t flags)

		index = kmalloc_index(size);
		return kmalloc_trace(
-				kmalloc_caches[kmalloc_type(flags)][index],
+				kmalloc_caches[kmalloc_type(flags, _RET_IP_)][index],
				flags, size);
	}
	return __kmalloc(size, flags);
@@ -596,7 +613,7 @@ static __always_inline __alloc_size(1) void *kmalloc_node(size_t size, gfp_t fla

		index = kmalloc_index(size);
		return kmalloc_node_trace(
-				kmalloc_caches[kmalloc_type(flags)][index],
+				kmalloc_caches[kmalloc_type(flags, _RET_IP_)][index],
				flags, node, size);
	}
	return __kmalloc_node(size, flags, node);
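
To make the enum change above easier to follow, here is a simplified model
(a sketch; the real enum also carries the KMALLOC_CGROUP and SLUB_TINY
#ifdefs) of how the 16 copies slot into kmalloc_cache_type: KMALLOC_NORMAL
aliases the first copy and the remaining 15 follow it, so the number of
cache types grows by RANDOM_KMALLOC_CACHES_NR when the option is enabled.

#include <stdio.h>

#define RANDOM_KMALLOC_CACHES_NR 15

enum kmalloc_cache_type {
	KMALLOC_NORMAL = 0,
	KMALLOC_RANDOM_START = KMALLOC_NORMAL,
	KMALLOC_RANDOM_END = KMALLOC_RANDOM_START + RANDOM_KMALLOC_CACHES_NR,
	KMALLOC_RECLAIM,	/* simplified; other types omitted */
	KMALLOC_DMA,
	NR_KMALLOC_TYPES
};

int main(void)
{
	printf("normal copies: %d..%d (%d caches)\n",
	       KMALLOC_RANDOM_START, KMALLOC_RANDOM_END,
	       KMALLOC_RANDOM_END - KMALLOC_RANDOM_START + 1);
	printf("kmalloc cache types: %d\n", NR_KMALLOC_TYPES);
	return 0;
}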
+17 −0
@@ -337,6 +337,23 @@ config SLUB_CPU_PARTIAL
	  which requires the taking of locks that may cause latency spikes.
	  Typically one would choose no for a realtime system.

+config RANDOM_KMALLOC_CACHES
+	default n
+	depends on SLUB && !SLUB_TINY
+	bool "Randomize slab caches for normal kmalloc"
+	help
+	  A hardening feature that creates multiple copies of slab caches for
+	  normal kmalloc allocation and makes kmalloc randomly pick one based
+	  on code address, which makes the attackers more difficult to spray
+	  vulnerable memory objects on the heap for the purpose of exploiting
+	  memory vulnerabilities.
+
+	  Currently the number of copies is set to 16, a reasonably large value
+	  that effectively diverges the memory objects allocated for different
+	  subsystems or modules into different caches, at the expense of a
+	  limited degree of memory and CPU overhead that relates to hardware and
+	  system workload.

endmenu # SLAB allocator options

config SHUFFLE_PAGE_ALLOCATOR
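
With the option above enabled, one way to observe the effect on a running
system is to count the extra cache copies in /proc/slabinfo. A small sketch
(the "kmalloc-rnd-" name prefix is an assumption; the exact cache naming
lives in mm/slab_common.c, and reading /proc/slabinfo typically needs root):

#include <stdio.h>
#include <string.h>

int main(void)
{
	FILE *f = fopen("/proc/slabinfo", "r");
	char line[512];
	int count = 0;

	if (!f) {
		perror("fopen /proc/slabinfo");
		return 1;
	}
	while (fgets(line, sizeof(line), f))
		if (!strncmp(line, "kmalloc-rnd-", strlen("kmalloc-rnd-")))
			count++;	/* assumed name prefix of the cache copies */
	fclose(f);
	printf("randomized kmalloc caches found: %d\n", count);
	return 0;
}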
+5 −2
@@ -212,7 +212,9 @@ static void test_cache_destroy(void)

static inline size_t kmalloc_cache_alignment(size_t size)
{
-	return kmalloc_caches[kmalloc_type(GFP_KERNEL)][__kmalloc_index(size, false)]->align;
+	/* just to get ->align so no need to pass in the real caller */
+	enum kmalloc_cache_type type = kmalloc_type(GFP_KERNEL, 0);
+	return kmalloc_caches[type][__kmalloc_index(size, false)]->align;
}

/* Must always inline to match stack trace against caller. */
@@ -282,8 +284,9 @@ static void *test_alloc(struct kunit *test, size_t size, gfp_t gfp, enum allocat

		if (is_kfence_address(alloc)) {
			struct slab *slab = virt_to_slab(alloc);
+			enum kmalloc_cache_type type = kmalloc_type(GFP_KERNEL, _RET_IP_);
			struct kmem_cache *s = test_cache ?:
-					kmalloc_caches[kmalloc_type(GFP_KERNEL)][__kmalloc_index(size, false)];
+					kmalloc_caches[type][__kmalloc_index(size, false)];

			/*
			 * Verify that various helpers return the right values
+1 −1
@@ -1670,7 +1670,7 @@ static size_t calculate_slab_order(struct kmem_cache *cachep,
			if (freelist_size > KMALLOC_MAX_CACHE_SIZE) {
				freelist_cache_size = PAGE_SIZE << get_order(freelist_size);
			} else {
-				freelist_cache = kmalloc_slab(freelist_size, 0u);
+				freelist_cache = kmalloc_slab(freelist_size, 0u, _RET_IP_);
				if (!freelist_cache)
					continue;
				freelist_cache_size = freelist_cache->size;