Commit ac3b4328 authored by Song Liu, committed by Luis Chamberlain
Browse files

module: replace module_layout with module_memory



module_layout manages different types of memory (text, data, rodata, etc.)
in one allocation, which is problematic for several reasons:

1. It is hard to enable CONFIG_STRICT_MODULE_RWX.
2. It is hard to use huge pages in modules (and not break strict rwx).
3. Many archs use module_layout for arch-specific data, but it is not
   obvious how these data are used (are they RO, RX, or RW?)

Improve the scenario by replacing 2 (or 3) module_layout per module with
up to 7 module_memory per module:

        MOD_TEXT,
        MOD_DATA,
        MOD_RODATA,
        MOD_RO_AFTER_INIT,
        MOD_INIT_TEXT,
        MOD_INIT_DATA,
        MOD_INIT_RODATA,

and allocating them separately. This adds slightly more entries to
mod_tree (from up to 3 entries per module, to up to 7 entries per
module). However, this at most adds a small constant overhead to
__module_address(), which is expected to be fast.

Various archs use module_layout for different data. These data are put
into different module_memory based on their location in module_layout.
IOW, data that used to go with text is allocated with MOD_TEXT;
data that used to go with data is allocated with MOD_DATA, etc.

module_memory simplifies quite a bit of the module code. For example,
ARCH_WANTS_MODULES_DATA_IN_VMALLOC is a lot cleaner, as it just uses a
different allocator for the data. kernel/module/strict_rwx.c is also
much cleaner with module_memory.

Signed-off-by: Song Liu <song@kernel.org>
Cc: Luis Chamberlain <mcgrof@kernel.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Guenter Roeck <linux@roeck-us.net>
Cc: Christophe Leroy <christophe.leroy@csgroup.eu>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Reviewed-by: Luis Chamberlain <mcgrof@kernel.org>
Signed-off-by: Luis Chamberlain <mcgrof@kernel.org>
parent fe15c26e
Loading
Loading
Loading
Loading
+7 −5
Original line number Diff line number Diff line
@@ -369,6 +369,8 @@ void *unwind_add_table(struct module *module, const void *table_start,
		       unsigned long table_size)
{
	struct unwind_table *table;
	struct module_memory *core_text;
	struct module_memory *init_text;

	if (table_size <= 0)
		return NULL;
@@ -377,11 +379,11 @@ void *unwind_add_table(struct module *module, const void *table_start,
	if (!table)
		return NULL;

	init_unwind_table(table, module->name,
			  module->core_layout.base, module->core_layout.size,
			  module->init_layout.base, module->init_layout.size,
			  table_start, table_size,
			  NULL, 0);
	core_text = &module->mem[MOD_TEXT];
	init_text = &module->mem[MOD_INIT_TEXT];

	init_unwind_table(table, module->name, core_text->base, core_text->size,
			  init_text->base, init_text->size, table_start, table_size, NULL, 0);

	init_unwind_hdr(table, unw_hdr_alloc);

+2 −7
Original line number Diff line number Diff line
@@ -28,11 +28,6 @@ static const u32 fixed_plts[] = {
#endif
};

static bool in_init(const struct module *mod, unsigned long loc)
{
	return loc - (u32)mod->init_layout.base < mod->init_layout.size;
}

static void prealloc_fixed(struct mod_plt_sec *pltsec, struct plt_entries *plt)
{
	int i;
@@ -50,8 +45,8 @@ static void prealloc_fixed(struct mod_plt_sec *pltsec, struct plt_entries *plt)

u32 get_module_plt(struct module *mod, unsigned long loc, Elf32_Addr val)
{
	struct mod_plt_sec *pltsec = !in_init(mod, loc) ? &mod->arch.core :
							  &mod->arch.init;
	struct mod_plt_sec *pltsec = !within_module_init(loc, mod) ?
						&mod->arch.core : &mod->arch.init;
	struct plt_entries *plt;
	int idx;

+4 −9
Original line number Diff line number Diff line
@@ -65,17 +65,12 @@ static bool plt_entries_equal(const struct plt_entry *a,
	       (q + aarch64_insn_adrp_get_offset(le32_to_cpu(b->adrp)));
}

static bool in_init(const struct module *mod, void *loc)
{
	return (u64)loc - (u64)mod->init_layout.base < mod->init_layout.size;
}

u64 module_emit_plt_entry(struct module *mod, Elf64_Shdr *sechdrs,
			  void *loc, const Elf64_Rela *rela,
			  Elf64_Sym *sym)
{
	struct mod_plt_sec *pltsec = !in_init(mod, loc) ? &mod->arch.core :
							  &mod->arch.init;
	struct mod_plt_sec *pltsec = !within_module_init((unsigned long)loc, mod) ?
						&mod->arch.core : &mod->arch.init;
	struct plt_entry *plt = (struct plt_entry *)sechdrs[pltsec->plt_shndx].sh_addr;
	int i = pltsec->plt_num_entries;
	int j = i - 1;
@@ -105,8 +100,8 @@ u64 module_emit_plt_entry(struct module *mod, Elf64_Shdr *sechdrs,
u64 module_emit_veneer_for_adrp(struct module *mod, Elf64_Shdr *sechdrs,
				void *loc, u64 val)
{
	struct mod_plt_sec *pltsec = !in_init(mod, loc) ? &mod->arch.core :
							  &mod->arch.init;
	struct mod_plt_sec *pltsec = !within_module_init((unsigned long)loc, mod) ?
						&mod->arch.core : &mod->arch.init;
	struct plt_entry *plt = (struct plt_entry *)sechdrs[pltsec->plt_shndx].sh_addr;
	int i = pltsec->plt_num_entries++;
	u32 br;
+14 −10
Original line number Diff line number Diff line
@@ -485,19 +485,19 @@ module_frob_arch_sections (Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, char *secstrings,
	return 0;
}

static inline int
static inline bool
in_init (const struct module *mod, uint64_t addr)
{
	return addr - (uint64_t) mod->init_layout.base < mod->init_layout.size;
	return within_module_init(addr, mod);
}

static inline int
static inline bool
in_core (const struct module *mod, uint64_t addr)
{
	return addr - (uint64_t) mod->core_layout.base < mod->core_layout.size;
	return within_module_core(addr, mod);
}

static inline int
static inline bool
is_internal (const struct module *mod, uint64_t value)
{
	return in_init(mod, value) || in_core(mod, value);
@@ -677,7 +677,8 @@ do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend,
		break;

	      case RV_BDREL:
		val -= (uint64_t) (in_init(mod, val) ? mod->init_layout.base : mod->core_layout.base);
		val -= (uint64_t) (in_init(mod, val) ? mod->mem[MOD_INIT_TEXT].base :
				   mod->mem[MOD_TEXT].base);
		break;

	      case RV_LTV:
@@ -812,15 +813,18 @@ apply_relocate_add (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symind
		 *     addresses have been selected...
		 */
		uint64_t gp;
		if (mod->core_layout.size > MAX_LTOFF)
		struct module_memory *mod_mem;

		mod_mem = &mod->mem[MOD_DATA];
		if (mod_mem->size > MAX_LTOFF)
			/*
			 * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
			 * at the end of the module.
			 */
			gp = mod->core_layout.size - MAX_LTOFF / 2;
			gp = mod_mem->size - MAX_LTOFF / 2;
		else
			gp = mod->core_layout.size / 2;
		gp = (uint64_t) mod->core_layout.base + ((gp + 7) & -8);
			gp = mod_mem->size / 2;
		gp = (uint64_t) mod_mem->base + ((gp + 7) & -8);
		mod->arch.gp = gp;
		DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
	}
+5 −6
Original line number Diff line number Diff line
@@ -199,18 +199,17 @@ static void layout_sections(struct module *mod, const Elf_Ehdr *hdr,
	for (m = 0; m < ARRAY_SIZE(masks); ++m) {
		for (i = 0; i < hdr->e_shnum; ++i) {
			Elf_Shdr *s = &sechdrs[i];
			struct module_memory *mod_mem;

			mod_mem = &mod->mem[MOD_TEXT];

			if ((s->sh_flags & masks[m][0]) != masks[m][0]
			    || (s->sh_flags & masks[m][1])
			    || s->sh_entsize != ~0UL)
				continue;
			s->sh_entsize =
				get_offset((unsigned long *)&mod->core_layout.size, s);
				get_offset((unsigned long *)&mod_mem->size, s);
		}

		if (m == 0)
			mod->core_layout.text_size = mod->core_layout.size;

	}
}

@@ -641,7 +640,7 @@ static int vpe_elfload(struct vpe *v)
		layout_sections(&mod, hdr, sechdrs, secstrings);
	}

	v->load_addr = alloc_progmem(mod.core_layout.size);
	v->load_addr = alloc_progmem(mod.mem[MOD_TEXT].size);
	if (!v->load_addr)
		return -ENOMEM;

Loading