Commit e56b2b60 authored by Linus Torvalds
Browse files

Merge tag 'x86-urgent-2023-09-10' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 fixes from Ingo Molnar:
 "Fix preemption delays in the SGX code, remove unnecessarily
  UAPI-exported code, fix a ld.lld linker (in)compatibility quirk and
  make the x86 SMP init code a bit more conservative to fix kexec()
  lockups"

* tag 'x86-urgent-2023-09-10' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/sgx: Break up long non-preemptible delays in sgx_vepc_release()
  x86: Remove the arch_calc_vm_prot_bits() macro from the UAPI
  x86/build: Fix linker fill bytes quirk/incompatibility for ld.lld
  x86/smp: Don't send INIT to non-present and non-booted CPUs
parents e79dbf03 3d7d72a3
Loading
Loading
Loading
Loading
+15 −0
Original line number Diff line number Diff line
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_MMAN_H__
#define __ASM_MMAN_H__

#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
/*
 * Map a protection-key value to the corresponding VM_PKEY_BIT* vma
 * flags: each of the low four bits of 'key' selects one VM_PKEY_BITn
 * flag. 'prot' is part of the macro's interface but is not used here.
 * Kernel-internal (non-UAPI) definition; depends on CONFIG_* and
 * VM_PKEY_BIT* which are only visible to kernel code.
 */
#define arch_calc_vm_prot_bits(prot, key) (		\
		((key) & 0x1 ? VM_PKEY_BIT0 : 0) |      \
		((key) & 0x2 ? VM_PKEY_BIT1 : 0) |      \
		((key) & 0x4 ? VM_PKEY_BIT2 : 0) |      \
		((key) & 0x8 ? VM_PKEY_BIT3 : 0))
#endif

#include <uapi/asm/mman.h>

#endif /* __ASM_MMAN_H__ */
+0 −8
Original line number Diff line number Diff line
@@ -5,14 +5,6 @@
#define MAP_32BIT	0x40		/* only give out 32bit addresses */
#define MAP_ABOVE4G	0x80		/* only map above 4GB */

#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
#define arch_calc_vm_prot_bits(prot, key) (		\
		((key) & 0x1 ? VM_PKEY_BIT0 : 0) |      \
		((key) & 0x2 ? VM_PKEY_BIT1 : 0) |      \
		((key) & 0x4 ? VM_PKEY_BIT2 : 0) |      \
		((key) & 0x8 ? VM_PKEY_BIT3 : 0))
#endif

/* Flags for map_shadow_stack(2) */
#define SHADOW_STACK_SET_TOKEN	(1ULL << 0)	/* Set up a restore token in the shadow stack */

+3 −0
Original line number Diff line number Diff line
@@ -204,6 +204,7 @@ static int sgx_vepc_release(struct inode *inode, struct file *file)
			continue;

		xa_erase(&vepc->page_array, index);
		cond_resched();
	}

	/*
@@ -222,6 +223,7 @@ static int sgx_vepc_release(struct inode *inode, struct file *file)
			list_add_tail(&epc_page->list, &secs_pages);

		xa_erase(&vepc->page_array, index);
		cond_resched();
	}

	/*
@@ -243,6 +245,7 @@ static int sgx_vepc_release(struct inode *inode, struct file *file)

		if (sgx_vepc_free_page(epc_page))
			list_add_tail(&epc_page->list, &secs_pages);
		cond_resched();
	}

	if (!list_empty(&secs_pages))
+1 −1
Original line number Diff line number Diff line
@@ -1250,7 +1250,7 @@ bool smp_park_other_cpus_in_init(void)
	if (this_cpu)
		return false;

	for_each_present_cpu(cpu) {
	for_each_cpu_and(cpu, &cpus_booted_once_mask, cpu_present_mask) {
		if (cpu == this_cpu)
			continue;
		apicid = apic->cpu_present_to_apicid(cpu);
+1 −1
Original line number Diff line number Diff line
@@ -156,7 +156,7 @@ SECTIONS
		ALIGN_ENTRY_TEXT_END
		*(.gnu.warning)

	} :text =0xcccc
	} :text = 0xcccccccc

	/* End of text section, which should occupy whole number of pages */
	_etext = .;
Loading