Commit 3e4519b7 authored by Rebecca Mckeever, committed by Mike Rapoport

memblock tests: add generic NUMA tests for memblock_alloc_try_nid*



Add tests for memblock_alloc_try_nid() and memblock_alloc_try_nid_raw()
where the simulated physical memory is set up with multiple NUMA nodes.
Additionally, two of these tests set nid != NUMA_NO_NODE. All tests are
run for both top-down and bottom-up allocation directions.

The tested scenarios are:

Range unrestricted:
- region cannot be allocated:
      + none of the nodes have enough memory to allocate the region

Range restricted:
- region can be allocated in the specific node requested without dropping
  min_addr:
      + the range fully overlaps with the node, and there are adjacent
        reserved regions
- region cannot be allocated:
      + nid is set to NUMA_NO_NODE and the total range can fit the region,
        but the range is split between two nodes and everything else is
        reserved
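
For reference, the layout these scenarios run against is built by the
test suite's setup_numa_memblock(node_fractions) helper, which carves
the simulated physical memory into NUMA_NODES nodes sized proportionally
to node_fractions[]. A minimal sketch of the idea (the loop body and the
BASIS denominator are assumptions for illustration, not the helper's
actual code):

	void setup_numa_memblock_sketch(const unsigned int node_fracs[])
	{
		/* start of the simulated physical memory */
		phys_addr_t base = 0;
		int i;

		for (i = 0; i < NUMA_NODES; i++) {
			/* node i gets node_fracs[i]/BASIS of MEM_SIZE */
			phys_addr_t size = MEM_SIZE * node_fracs[i] / BASIS;

			memblock_add_node(base, size, i, MEMBLOCK_NONE);
			base += size;
		}
	}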

Acked-by: David Hildenbrand <david@redhat.com>
Signed-off-by: Rebecca Mckeever <remckee0@gmail.com>
Signed-off-by: Mike Rapoport <rppt@linux.ibm.com>
Link: https://lore.kernel.org/r/4b2c7e6e5f3a9837939e99293c77e0e6fc3ae4f9.1663046060.git.remckee0@gmail.com
parent 4b41046e
@@ -2316,6 +2316,173 @@ static int alloc_try_nid_bottom_up_numa_no_overlap_high_check(void)
	return 0;
}

/*
 * A test that tries to allocate a memory region in a specific NUMA node that
 * does not have enough memory to allocate a region of the requested size.
 * Additionally, none of the nodes have enough memory to allocate the region:
 *
 * +-----------------------------------+
 * |                new                |
 * +-----------------------------------+
 *     |-------+-------+-------+-------+-------+-------+-------+-------|
 *     | node0 | node1 | node2 | node3 | node4 | node5 | node6 | node7 |
 *     +-------+-------+-------+-------+-------+-------+-------+-------+
 *
 * Expect no allocation to happen.
 */
static int alloc_try_nid_numa_large_region_generic_check(void)
{
	int nid_req = 3;
	void *allocated_ptr = NULL;
	phys_addr_t size = MEM_SIZE / SZ_2;
	phys_addr_t min_addr;
	phys_addr_t max_addr;

	PREFIX_PUSH();
	setup_numa_memblock(node_fractions);

	min_addr = memblock_start_of_DRAM();
	max_addr = memblock_end_of_DRAM();

	allocated_ptr = run_memblock_alloc_try_nid(size, SMP_CACHE_BYTES,
						   min_addr, max_addr, nid_req);
	ASSERT_EQ(allocated_ptr, NULL);

	test_pass_pop();

	return 0;
}
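
/*
 * Context, not part of this patch: run_memblock_alloc_try_nid() is the
 * suite's wrapper that picks the allocator flavor under test. A minimal
 * sketch of the presumed dispatch, assuming a TEST_F_RAW bit in
 * alloc_nid_test_flags (the flag name and exact body are assumptions):
 */
static inline void *run_memblock_alloc_try_nid_sketch(phys_addr_t size,
						      phys_addr_t align,
						      phys_addr_t min_addr,
						      phys_addr_t max_addr,
						      int nid)
{
	if (alloc_nid_test_flags & TEST_F_RAW)
		return memblock_alloc_try_nid_raw(size, align, min_addr,
						  max_addr, nid);

	return memblock_alloc_try_nid(size, align, min_addr, max_addr, nid);
}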

/*
 * A test that tries to allocate memory within min_addr and max_addr range when
 * there are two reserved regions at the borders. The requested node starts at
 * min_addr and ends at max_addr and is the same size as the region to be
 * allocated:
 *
 *                     min_addr
 *                     |                       max_addr
 *                     |                       |
 *                     v                       v
 *  |      +-----------+-----------------------+-----------------------|
 *  |      |   node5   |       requested       |         node7         |
 *  +------+-----------+-----------------------+-----------------------+
 *                     +                       +
 *  |             +----+-----------------------+----+                  |
 *  |             | r2 |          new          | r1 |                  |
 *  +-------------+----+-----------------------+----+------------------+
 *
 * Expect to merge all of the regions into one. The region counter and total
 * size fields get updated.
 */
static int alloc_try_nid_numa_reserved_full_merge_generic_check(void)
{
	int nid_req = 6;
	int nid_next = nid_req + 1;
	struct memblock_region *new_rgn = &memblock.reserved.regions[0];
	struct memblock_region *req_node = &memblock.memory.regions[nid_req];
	struct memblock_region *next_node = &memblock.memory.regions[nid_next];
	void *allocated_ptr = NULL;
	struct region r1, r2;
	phys_addr_t size = req_node->size;
	phys_addr_t total_size;
	phys_addr_t max_addr;
	phys_addr_t min_addr;

	PREFIX_PUSH();
	setup_numa_memblock(node_fractions);

	r1.base = next_node->base;
	r1.size = SZ_128;

	r2.size = SZ_128;
	r2.base = r1.base - (size + r2.size);

	total_size = r1.size + r2.size + size;
	min_addr = r2.base + r2.size;
	max_addr = r1.base;

	memblock_reserve(r1.base, r1.size);
	memblock_reserve(r2.base, r2.size);

	allocated_ptr = run_memblock_alloc_try_nid(size, SMP_CACHE_BYTES,
						   min_addr, max_addr, nid_req);

	ASSERT_NE(allocated_ptr, NULL);
	assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);

	ASSERT_EQ(new_rgn->size, total_size);
	ASSERT_EQ(new_rgn->base, r2.base);

	ASSERT_LE(new_rgn->base, req_node->base);
	ASSERT_LE(region_end(req_node), region_end(new_rgn));

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, total_size);

	test_pass_pop();

	return 0;
}
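
/*
 * Context, not part of this patch: the allocation above exactly fills
 * the hole [min_addr, max_addr) between r2 and r1, so the three
 * reserved ranges become contiguous and memblock coalesces them into a
 * single region of total_size bytes. region_end() is the suite's helper
 * for a region's exclusive end address; a sketch of the assumed
 * definition:
 */
static inline phys_addr_t region_end_sketch(struct memblock_region *rgn)
{
	return rgn->base + rgn->size;
}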

/*
 * A test that tries to allocate memory within min_addr and max_addr range,
 * where the total range can fit the region, but it is split between two nodes
 * and everything else is reserved. Additionally, nid is set to NUMA_NO_NODE
 * instead of requesting a specific node:
 *
 *                         +-----------+
 *                         |    new    |
 *                         +-----------+
 *  |      +---------------------+-----------|
 *  |      |      prev node      | next node |
 *  +------+---------------------+-----------+
 *                         +           +
 *  |----------------------+           +-----|
 *  |          r1          |           |  r2 |
 *  +----------------------+-----------+-----+
 *                         ^           ^
 *                         |           |
 *                         |           max_addr
 *                         |
 *                         min_addr
 *
 * Expect no allocation to happen.
 */
static int alloc_try_nid_numa_split_all_reserved_generic_check(void)
{
	void *allocated_ptr = NULL;
	struct memblock_region *next_node = &memblock.memory.regions[7];
	struct region r1, r2;
	phys_addr_t size = SZ_256;
	phys_addr_t max_addr;
	phys_addr_t min_addr;

	PREFIX_PUSH();
	setup_numa_memblock(node_fractions);

	r2.base = next_node->base + SZ_128;
	r2.size = memblock_end_of_DRAM() - r2.base;

	r1.size = MEM_SIZE - (r2.size + size);
	r1.base = memblock_start_of_DRAM();

	min_addr = r1.base + r1.size;
	max_addr = r2.base;

	memblock_reserve(r1.base, r1.size);
	memblock_reserve(r2.base, r2.size);

	allocated_ptr = run_memblock_alloc_try_nid(size, SMP_CACHE_BYTES,
						   min_addr, max_addr,
						   NUMA_NO_NODE);

	ASSERT_EQ(allocated_ptr, NULL);

	test_pass_pop();

	return 0;
}
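
/*
 * Context, not part of this patch: by construction the only free gap
 * above is [min_addr, max_addr), which is exactly `size` bytes
 * (r1.size = MEM_SIZE - r2.size - size), but it straddles the boundary
 * between the two highest nodes, leaving only SZ_128 free on each side.
 * Since memblock tracks each node as a separate memory region, neither
 * free chunk can fit the request. Illustrative checks that could be
 * added to the test:
 *
 *	ASSERT_EQ(max_addr - min_addr, size);
 *	ASSERT_LE(min_addr, next_node->base);
 *	ASSERT_LE(next_node->base, max_addr);
 */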

/* Test case wrappers for NUMA tests */
static int alloc_try_nid_numa_simple_check(void)
{
@@ -2427,6 +2594,33 @@ static int alloc_try_nid_numa_no_overlap_high_check(void)
	return 0;
}

static int alloc_try_nid_numa_large_region_check(void)
{
	test_print("\tRunning %s...\n", __func__);
	run_top_down(alloc_try_nid_numa_large_region_generic_check);
	run_bottom_up(alloc_try_nid_numa_large_region_generic_check);

	return 0;
}

static int alloc_try_nid_numa_reserved_full_merge_check(void)
{
	test_print("\tRunning %s...\n", __func__);
	run_top_down(alloc_try_nid_numa_reserved_full_merge_generic_check);
	run_bottom_up(alloc_try_nid_numa_reserved_full_merge_generic_check);

	return 0;
}

static int alloc_try_nid_numa_split_all_reserved_check(void)
{
	test_print("\tRunning %s...\n", __func__);
	run_top_down(alloc_try_nid_numa_split_all_reserved_generic_check);
	run_bottom_up(alloc_try_nid_numa_split_all_reserved_generic_check);

	return 0;
}
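
/*
 * Context, not part of this patch: run_top_down() and run_bottom_up()
 * run a generic check in each allocation direction. A minimal sketch of
 * the assumed wrappers, using the memblock API memblock_set_bottom_up()
 * and the suite's prefix_push()/prefix_pop() test-name helpers:
 */
static inline void run_top_down_sketch(int (*func)(void))
{
	memblock_set_bottom_up(false);
	prefix_push("top-down");
	func();
	prefix_pop();
}

static inline void run_bottom_up_sketch(int (*func)(void))
{
	memblock_set_bottom_up(true);
	prefix_push("bottom-up");
	func();
	prefix_pop();
}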

int __memblock_alloc_nid_numa_checks(void)
{
	test_print("Running %s NUMA tests...\n",
@@ -2443,6 +2637,9 @@ int __memblock_alloc_nid_numa_checks(void)
	alloc_try_nid_numa_no_overlap_split_check();
	alloc_try_nid_numa_no_overlap_low_check();
	alloc_try_nid_numa_no_overlap_high_check();
	alloc_try_nid_numa_large_region_check();
	alloc_try_nid_numa_reserved_full_merge_check();
	alloc_try_nid_numa_split_all_reserved_check();

	return 0;
}