Unverified commit 9773fbed, authored by openeuler-ci-bot and committed via Gitee.
Browse files

!15261 mm: introduce kernel cmdline option "kernel_replication="

parents 2caf0ac9 2f939f96
Loading
Loading
Loading
Loading
+7 −0
Original line number Diff line number Diff line
@@ -2534,6 +2534,13 @@
			some extension. These two features are alternatives.
			Currently, only arm64 is supported.

	kernel_replication=
			[ARM64]
			Format: [on|off]
			If CONFIG_KERNEL_REPLICATION is set, it allows
			enabling/disabling the kernel replication feature
			via cmdline. Default value is off.

	kgdbdbgp=	[KGDB,HW] kgdb over EHCI usb debug port.
			Format: <Controller#>[,poll interval]
			The controller # is the number of the ehci usb debug
+1 −1
Original line number Diff line number Diff line
@@ -1212,7 +1212,7 @@ CONFIG_ARM64_HAFT=y
CONFIG_ARCH_SUPPORTS_PER_VMA_LOCK=y
CONFIG_PER_VMA_LOCK=y
CONFIG_LOCK_MM_AND_FIND_VMA=y
# CONFIG_KERNEL_REPLICATION is not set
CONFIG_KERNEL_REPLICATION=y
CONFIG_IOMMU_MM_DATA=y
# CONFIG_ASCEND_FEATURES is not set
CONFIG_PAGE_CACHE_LIMIT=y
+42 −14
Original line number Diff line number Diff line
@@ -18,6 +18,24 @@

static struct kmem_cache *pgd_cache __ro_after_init;

/*
 * Allocate a user PGD using the original (non-replicated) scheme.
 *
 * When the PGD occupies exactly one page it comes straight from the
 * page allocator; otherwise it is carved out of the dedicated
 * pgd_cache slab.
 */
static pgd_t *pgd_alloc_orig(struct mm_struct *mm)
{
	if (PGD_SIZE != PAGE_SIZE)
		return kmem_cache_alloc(pgd_cache, GFP_PGTABLE_USER);

	return (pgd_t *)__get_free_page(GFP_PGTABLE_USER);
}

/*
 * Release a PGD obtained from pgd_alloc_orig(), returning it to
 * whichever backing store (page allocator or pgd_cache slab) it came
 * from.
 */
static void pgd_free_orig(struct mm_struct *mm, pgd_t *pgd)
{
	if (PGD_SIZE != PAGE_SIZE)
		kmem_cache_free(pgd_cache, pgd);
	else
		free_page((unsigned long)pgd);
}

#ifdef CONFIG_KERNEL_REPLICATION
pgd_t *page_pgd_alloc(struct mm_struct *mm)
{
@@ -37,11 +55,11 @@ pgd_t *page_pgd_alloc(struct mm_struct *mm)

		WARN_ON_ONCE(page_to_nid(page) != nid);

		per_node_pgd(mm, nid) = (pgd_t *)page_address(page);
		*per_node_pgd_ptr(mm, nid) = (pgd_t *)page_address(page);
	}

	for_each_online_node(nid)
		per_node_pgd(mm, nid) = per_node_pgd(mm, numa_get_memory_node(nid));
		*per_node_pgd_ptr(mm, nid) = per_node_pgd(mm, numa_get_memory_node(nid));

	mm->pgd = per_node_pgd(mm, numa_get_memory_node(0));/*!!!*/

@@ -53,7 +71,7 @@ pgd_t *page_pgd_alloc(struct mm_struct *mm)
	return NULL;
}

pgd_t *pgd_alloc(struct mm_struct *mm)
static pgd_t *pgd_alloc_replica(struct mm_struct *mm)
{
	pgd_t **pgd_numa = (pgd_t **)kmalloc(sizeof(pgd_t *) * MAX_NUMNODES, GFP_PGTABLE_KERNEL);

@@ -81,34 +99,44 @@ static void page_pgd_free(struct mm_struct *mm, pgd_t *pgd)
	}

	for_each_online_node(nid)
		per_node_pgd(mm, nid) = NULL;
		*per_node_pgd_ptr(mm, nid) = NULL;

}

void pgd_free(struct mm_struct *mm, pgd_t *pgd)
/*
 * Tear down a replicated PGD set: release the per-node PGD copies via
 * page_pgd_free(), then free the pgd_numa pointer array itself.
 * NOTE(review): the kfree() must stay after page_pgd_free(), which
 * still writes the mm->pgd_numa slots while clearing them — do not
 * reorder these two calls.
 */
static void pgd_free_replica(struct mm_struct *mm, pgd_t *pgd)
{
	page_pgd_free(mm, pgd);

	kfree(mm->pgd_numa);
}

#else /* !CONFIG_KERNEL_REPLICATION */
/*
 * NOTE(review): this span is a diff rendering whose +/- markers were
 * lost in extraction, so removed and added lines are interleaved.  The
 * gfp/__get_free_page/kmem_cache_alloc lines are the OLD implementation
 * being deleted; the mm->pgd_numa/is_text_replicated() lines are the
 * NEW one being added.  The two consecutive return statements are the
 * old and new tails, not dead code — this is not valid C as shown.
 */
pgd_t *pgd_alloc(struct mm_struct *mm)
{
	gfp_t gfp = GFP_PGTABLE_USER;

	if (PGD_SIZE == PAGE_SIZE)
		return (pgd_t *)__get_free_page(gfp);
	mm->pgd_numa = NULL;
	if (is_text_replicated())
		return pgd_alloc_replica(mm);
	else
		return kmem_cache_alloc(pgd_cache, gfp);
		return pgd_alloc_orig(mm);

}

/*
 * NOTE(review): removed and added diff lines are interleaved here as
 * well (markers lost in extraction).  The free_page/kmem_cache_free
 * lines are the OLD body; the is_text_replicated()/pgd_free_replica()/
 * pgd_free_orig() lines are the NEW body.  Not valid C as shown.
 */
void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	if (PGD_SIZE == PAGE_SIZE)
		free_page((unsigned long)pgd);
	if (is_text_replicated())
		pgd_free_replica(mm, pgd);
	else
		kmem_cache_free(pgd_cache, pgd);
		pgd_free_orig(mm, pgd);
}

#else /* !CONFIG_KERNEL_REPLICATION */
/*
 * Without CONFIG_KERNEL_REPLICATION there is only one PGD per mm:
 * delegate straight to the original allocation path.
 */
pgd_t *pgd_alloc(struct mm_struct *mm)
{
	return pgd_alloc_orig(mm);
}

/*
 * Non-replicated free path: everything came from pgd_alloc_orig(), so
 * hand the PGD back through the matching original free routine.
 */
void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	pgd_free_orig(mm, pgd);
}
#endif /* CONFIG_KERNEL_REPLICATION */

+25 −3
Original line number Diff line number Diff line
@@ -42,8 +42,31 @@ extern nodemask_t replica_nodes;
	     nid != MAX_NUMNODES;			\
	     nid = next_node(nid, replica_nodes))

#define this_node_pgd(mm) ((mm)->pgd_numa[numa_node_id()])
#define per_node_pgd(mm, nid) ((mm)->pgd_numa[nid])
bool is_text_replicated(void);

/*
 * PGD to use on the current NUMA node: the per-node replica when
 * kernel text replication is active, the common mm->pgd otherwise.
 */
static inline pgd_t *this_node_pgd(struct mm_struct *mm)
{
	return is_text_replicated() ? mm->pgd_numa[numa_node_id()] : mm->pgd;
}

/*
 * PGD for node @nid: the node's replica when replication is active,
 * otherwise the single shared mm->pgd regardless of @nid.
 */
static inline pgd_t *per_node_pgd(struct mm_struct *mm, int nid)
{
	return is_text_replicated() ? mm->pgd_numa[nid] : mm->pgd;
}

/*
 * Writable slot for node @nid's PGD pointer: the pgd_numa[] entry when
 * replication is active, otherwise the address of mm->pgd itself (so
 * stores through the returned pointer update the shared PGD).
 */
static inline pgd_t **per_node_pgd_ptr(struct mm_struct *mm, int nid)
{
	return is_text_replicated() ? &mm->pgd_numa[nid] : &mm->pgd;
}

static inline bool numa_addr_has_replica(const void *addr)
{
@@ -56,7 +79,6 @@ void __init numa_replicate_kernel_text(void);
void numa_replicate_kernel_rodata(void);
void numa_replication_fini(void);

bool is_text_replicated(void);
propagation_level_t get_propagation_level(void);
void numa_setup_pgd(void);
void __init_or_module *numa_get_replica(void *vaddr, int nid);
+8 −3
Original line number Diff line number Diff line
@@ -1217,6 +1217,9 @@ static void module_replicate_sections(struct module *mod)
{
	int i;

	/* Replication can be disabled (e.g. via kernel_replication=off);
	 * then there are no per-node copies to propagate sections to. */
	if (!is_text_replicated())
		return;

	/* Copy each replicable module section to every replica node. */
	for (i = 0; i < ARRAY_SIZE(sections_to_replicate); i++)
		module_replicate(mod->mem[sections_to_replicate[i]].base);
}
@@ -1228,10 +1231,12 @@ static void *module_memory_alloc(unsigned int size, enum mod_mem_type type)
	if (mod_mem_use_vmalloc(type))
		return vzalloc(size);

	if (is_text_replicated()) {
		for (i = 0; i < ARRAY_SIZE(sections_to_replicate); i++) {
			if (type == sections_to_replicate[i])
				return module_alloc_replica(size);
		}
	}
	return module_alloc(size);
}

Loading