Unverified Commit a5cbe5b7 authored by openeuler-ci-bot, committed by Gitee

!14873 [openEuler-24.03-LTS][linux-6.6.y sync] Backport 6.6.62-6.6.63 LTS Patches

Merge Pull Request from: @wenzhiwei11 
 
git log --oneline v6.6.62..v6.6.63 | wc -l
82
56 + 5 + 21 = 82

Conflicts(5):
NFSD: Async COPY result needs to return a write verifier
mptcp: update local address flags when setting it
mm: avoid unsafe VMA hook invocation when error arises on mmap hook
mm: refactor arch_calc_vm_flag_bits() and arm64 MTE handling
mm: resolve faulty mmap_region() error path behaviour

Merged(21):
fs/9p: fix uninitialized values during inode evict
mptcp: drop lookup_by_id in lookup_addr
mptcp: define more local variables sk
NFSD: Initialize struct nfsd4_copy earlier
NFSD: Limit the number of concurrent async COPY operations
leds: mlxreg: Use devm_mutex_init() for mutex initialization
mptcp: add userspace_pm_lookup_addr_by_id helper
NFSD: Never decrement pending_async_copies on error
media: dvbdev: fix the logic when DVB_DYNAMIC_MINORS is not set
drm/amd/display: Adjust VSDB parser for replay feature
pmdomain: imx93-blk-ctrl: correct remove path
Revert "mmc: dw_mmc: Fix IDMAC operation with pages bigger than 4K"
LoongArch: Fix early_numa_add_cpu() usage for FDT systems
nilfs2: fix null-ptr-deref in block_dirty_buffer tracepoint
nilfs2: fix null-ptr-deref in block_touch_buffer tracepoint
ima: fix buffer overrun in ima_eventdigest_init_common
vp_vdpa: fix id_table array not null terminated error
vdpa: solidrun: Fix UB bug with devres
mm: revert "mm: shmem: fix data-race in shmem_getattr()"
ocfs2: uncache inode which has failed entering the group
drm/rockchip: vop: Fix a dereferenced before check warning
 
 
Link: https://gitee.com/openeuler/kernel/pulls/14873

 

Reviewed-by: Zhang Peng <zhangpeng362@huawei.com>
Signed-off-by: Zhang Peng <zhangpeng362@huawei.com>
parents 874edbaf e66c3184
arch/arm/kernel/head.S +6 −2
@@ -252,11 +252,15 @@ __create_page_tables:
 	 */
 	add	r0, r4, #KERNEL_OFFSET >> (SECTION_SHIFT - PMD_ENTRY_ORDER)
 	ldr	r6, =(_end - 1)
+
+	/* For XIP, kernel_sec_start/kernel_sec_end are currently in RO memory */
+#ifndef CONFIG_XIP_KERNEL
 	adr_l	r5, kernel_sec_start		@ _pa(kernel_sec_start)
 #if defined CONFIG_CPU_ENDIAN_BE8 || defined CONFIG_CPU_ENDIAN_BE32
 	str	r8, [r5, #4]			@ Save physical start of kernel (BE)
 #else
 	str	r8, [r5]			@ Save physical start of kernel (LE)
 #endif
+#endif
 	orr	r3, r8, r7			@ Add the MMU flags
 	add	r6, r4, r6, lsr #(SECTION_SHIFT - PMD_ENTRY_ORDER)
@@ -264,6 +268,7 @@ __create_page_tables:
 	add	r3, r3, #1 << SECTION_SHIFT
 	cmp	r0, r6
 	bls	1b
+#ifndef CONFIG_XIP_KERNEL
 	eor	r3, r3, r7			@ Remove the MMU flags
 	adr_l	r5, kernel_sec_end		@ _pa(kernel_sec_end)
 #if defined CONFIG_CPU_ENDIAN_BE8 || defined CONFIG_CPU_ENDIAN_BE32
@@ -271,8 +276,7 @@ __create_page_tables:
 #else
 	str	r3, [r5]			@ Save physical end of kernel (LE)
 #endif
-
-#ifdef CONFIG_XIP_KERNEL
+#else
 	/*
 	 * Map the kernel image separately as it is not located in RAM.
 	 */
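
A note on the BE8/BE32 branches retained above: kernel_sec_start and kernel_sec_end are 64-bit quantities written one 32-bit word at a time from assembly, so on a big-endian kernel the least-significant word sits at byte offset 4, which is why the BE path stores to [r5, #4]. A minimal user-space sketch of that layout (the load address below is a made-up value; this is an illustration, not kernel code):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	uint64_t kernel_sec_start = 0;
	uint32_t phys = 0x80008000u;		/* hypothetical physical load address */
	unsigned char *p = (unsigned char *)&kernel_sec_start;

	/* Low 32-bit word: offset 0 on little-endian, offset 4 on big-endian. */
#if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	memcpy(p + 4, &phys, sizeof(phys));	/* mirrors str r8, [r5, #4] */
#else
	memcpy(p, &phys, sizeof(phys));		/* mirrors str r8, [r5] */
#endif
	printf("kernel_sec_start = %#llx\n", (unsigned long long)kernel_sec_start);
	return 0;
}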
arch/arm/mm/mmu.c +21 −13
@@ -1402,18 +1402,6 @@ static void __init devicemaps_init(const struct machine_desc *mdesc)
 		create_mapping(&map);
 	}
 
-	/*
-	 * Map the kernel if it is XIP.
-	 * It is always first in the modulearea.
-	 */
-#ifdef CONFIG_XIP_KERNEL
-	map.pfn = __phys_to_pfn(CONFIG_XIP_PHYS_ADDR & SECTION_MASK);
-	map.virtual = MODULES_VADDR;
-	map.length = ((unsigned long)_exiprom - map.virtual + ~SECTION_MASK) & SECTION_MASK;
-	map.type = MT_ROM;
-	create_mapping(&map);
-#endif
-
 	/*
 	 * Map the cache flushing regions.
 	 */
@@ -1603,12 +1591,27 @@ static void __init map_kernel(void)
 	 * This will only persist until we turn on proper memory management later on
 	 * and we remap the whole kernel with page granularity.
 	 */
+#ifdef CONFIG_XIP_KERNEL
+	phys_addr_t kernel_nx_start = kernel_sec_start;
+#else
 	phys_addr_t kernel_x_start = kernel_sec_start;
 	phys_addr_t kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE);
 	phys_addr_t kernel_nx_start = kernel_x_end;
+#endif
 	phys_addr_t kernel_nx_end = kernel_sec_end;
 	struct map_desc map;
 
+	/*
+	 * Map the kernel if it is XIP.
+	 * It is always first in the modulearea.
+	 */
+#ifdef CONFIG_XIP_KERNEL
+	map.pfn = __phys_to_pfn(CONFIG_XIP_PHYS_ADDR & SECTION_MASK);
+	map.virtual = MODULES_VADDR;
+	map.length = ((unsigned long)_exiprom - map.virtual + ~SECTION_MASK) & SECTION_MASK;
+	map.type = MT_ROM;
+	create_mapping(&map);
+#else
 	map.pfn = __phys_to_pfn(kernel_x_start);
 	map.virtual = __phys_to_virt(kernel_x_start);
 	map.length = kernel_x_end - kernel_x_start;
@@ -1618,7 +1621,7 @@ static void __init map_kernel(void)
 	/* If the nx part is small it may end up covered by the tail of the RWX section */
 	if (kernel_x_end == kernel_nx_end)
 		return;
-
+#endif
 	map.pfn = __phys_to_pfn(kernel_nx_start);
 	map.virtual = __phys_to_virt(kernel_nx_start);
 	map.length = kernel_nx_end - kernel_nx_start;
@@ -1763,6 +1766,11 @@ void __init paging_init(const struct machine_desc *mdesc)
 {
 	void *zero_page;
 
+#ifdef CONFIG_XIP_KERNEL
+	/* Store the kernel RW RAM region start/end in these variables */
+	kernel_sec_start = CONFIG_PHYS_OFFSET & SECTION_MASK;
+	kernel_sec_end = round_up(__pa(_end), SECTION_SIZE);
+#endif
 	pr_debug("physical kernel sections: 0x%08llx-0x%08llx\n",
 		 kernel_sec_start, kernel_sec_end);
 
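The map.length expression moved into map_kernel() above rounds the XIP ROM span up to a whole section before mapping it as MT_ROM. A quick stand-alone check of that rounding idiom, (x + ~SECTION_MASK) & SECTION_MASK, assuming ARM's 1 MiB sections and a made-up span (illustration only, not kernel code):

#include <stdio.h>

#define SECTION_SIZE	(1UL << 20)	/* 1 MiB sections, as on ARM */
#define SECTION_MASK	(~(SECTION_SIZE - 1))

int main(void)
{
	/* Hypothetical _exiprom - MODULES_VADDR span. */
	unsigned long len = 0x123456;
	unsigned long rounded = (len + ~SECTION_MASK) & SECTION_MASK;

	/* ~SECTION_MASK == SECTION_SIZE - 1, so this rounds up to 0x200000. */
	printf("%#lx rounds up to %#lx\n", len, rounded);
	return 0;
}
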
arch/loongarch/include/asm/kasan.h +1 −1
@@ -51,7 +51,7 @@
 /* KAsan shadow memory start right after vmalloc. */
 #define KASAN_SHADOW_START		round_up(KFENCE_AREA_END, PGDIR_SIZE)
 #define KASAN_SHADOW_SIZE		(XKVRANGE_VC_SHADOW_END - XKPRANGE_CC_KASAN_OFFSET)
-#define KASAN_SHADOW_END		round_up(KASAN_SHADOW_START + KASAN_SHADOW_SIZE, PGDIR_SIZE)
+#define KASAN_SHADOW_END		(round_up(KASAN_SHADOW_START + KASAN_SHADOW_SIZE, PGDIR_SIZE) - 1)
 
 #define XKPRANGE_CC_SHADOW_OFFSET	(KASAN_SHADOW_START + XKPRANGE_CC_KASAN_OFFSET)
 #define XKPRANGE_UC_SHADOW_OFFSET	(KASAN_SHADOW_START + XKPRANGE_UC_KASAN_OFFSET)
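
The extra parentheses and the trailing - 1 make KASAN_SHADOW_END inclusive, so the computed end can no longer wrap past UINTPTR_MAX when the shadow region abuts the top of the address space (the failure mode the kasan_init() check below guards against). A user-space sketch of the wraparound, with made-up constants standing in for a cpu_vabits = 39 part where PGDIR_SIZE is 2^39 under CONFIG_4KB_4LEVEL (illustration only):

#include <stdint.h>
#include <stdio.h>

#define PGDIR_SIZE	(1UL << 39)	/* assumed CONFIG_4KB_4LEVEL value */
/* Same rounding the kernel's round_up() performs for a power-of-two y. */
#define round_up(x, y)	((((x) - 1) | ((y) - 1)) + 1)

int main(void)
{
	/* Hypothetical shadow placed in the last PGDIR-aligned slot. */
	uintptr_t shadow_start = 0xffffff8000000000UL;
	uintptr_t shadow_size  = PGDIR_SIZE;	/* pushes the end to 2^64 */

	/* Old macro: exclusive end wraps to 0 and looks like a user address. */
	uintptr_t end_excl = round_up(shadow_start + shadow_size, PGDIR_SIZE);
	/* New macro: inclusive end stays representable as UINTPTR_MAX. */
	uintptr_t end_incl = end_excl - 1;

	printf("exclusive end: %#lx\n", (unsigned long)end_excl);	/* 0 */
	printf("inclusive end: %#lx\n", (unsigned long)end_incl);	/* UINTPTR_MAX */
	return 0;
}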
arch/loongarch/mm/kasan_init.c +36 −5
@@ -13,6 +13,13 @@
 
 static pgd_t kasan_pg_dir[PTRS_PER_PGD] __initdata __aligned(PAGE_SIZE);
 
+#ifdef __PAGETABLE_P4D_FOLDED
+#define __pgd_none(early, pgd) (0)
+#else
+#define __pgd_none(early, pgd) (early ? (pgd_val(pgd) == 0) : \
+(__pa(pgd_val(pgd)) == (unsigned long)__pa(kasan_early_shadow_p4d)))
+#endif
+
 #ifdef __PAGETABLE_PUD_FOLDED
 #define __p4d_none(early, p4d) (0)
 #else
@@ -142,6 +149,19 @@ static pud_t *__init kasan_pud_offset(p4d_t *p4dp, unsigned long addr, int node,
 	return pud_offset(p4dp, addr);
 }
 
+static p4d_t *__init kasan_p4d_offset(pgd_t *pgdp, unsigned long addr, int node, bool early)
+{
+	if (__pgd_none(early, pgdp_get(pgdp))) {
+		phys_addr_t p4d_phys = early ?
+			__pa_symbol(kasan_early_shadow_p4d) : kasan_alloc_zeroed_page(node);
+		if (!early)
+			memcpy(__va(p4d_phys), kasan_early_shadow_p4d, sizeof(kasan_early_shadow_p4d));
+		pgd_populate(&init_mm, pgdp, (p4d_t *)__va(p4d_phys));
+	}
+
+	return p4d_offset(pgdp, addr);
+}
+
 static void __init kasan_pte_populate(pmd_t *pmdp, unsigned long addr,
 				      unsigned long end, int node, bool early)
 {
@@ -178,19 +198,19 @@ static void __init kasan_pud_populate(p4d_t *p4dp, unsigned long addr,
 	do {
 		next = pud_addr_end(addr, end);
 		kasan_pmd_populate(pudp, addr, next, node, early);
-	} while (pudp++, addr = next, addr != end);
+	} while (pudp++, addr = next, addr != end && __pud_none(early, READ_ONCE(*pudp)));
 }
 
 static void __init kasan_p4d_populate(pgd_t *pgdp, unsigned long addr,
 					    unsigned long end, int node, bool early)
 {
 	unsigned long next;
-	p4d_t *p4dp = p4d_offset(pgdp, addr);
+	p4d_t *p4dp = kasan_p4d_offset(pgdp, addr, node, early);
 
 	do {
 		next = p4d_addr_end(addr, end);
 		kasan_pud_populate(p4dp, addr, next, node, early);
-	} while (p4dp++, addr = next, addr != end);
+	} while (p4dp++, addr = next, addr != end && __p4d_none(early, READ_ONCE(*p4dp)));
 }
 
 static void __init kasan_pgd_populate(unsigned long addr, unsigned long end,
@@ -218,7 +238,7 @@ static void __init kasan_map_populate(unsigned long start, unsigned long end,
 asmlinkage void __init kasan_early_init(void)
 {
 	BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_START, PGDIR_SIZE));
-	BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_END, PGDIR_SIZE));
+	BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_END + 1, PGDIR_SIZE));
 }
 
 static inline void kasan_set_pgd(pgd_t *pgdp, pgd_t pgdval)
@@ -233,7 +253,7 @@ static void __init clear_pgds(unsigned long start, unsigned long end)
 	 * swapper_pg_dir. pgd_clear() can't be used
 	 * here because it's nop on 2,3-level pagetable setups
 	 */
-	for (; start < end; start += PGDIR_SIZE)
+	for (; start < end; start = pgd_addr_end(start, end))
 		kasan_set_pgd((pgd_t *)pgd_offset_k(start), __pgd(0));
 }
 
@@ -242,6 +262,17 @@ void __init kasan_init(void)
 	u64 i;
 	phys_addr_t pa_start, pa_end;
 
+	/*
+	 * If PGDIR_SIZE is too large for cpu_vabits, KASAN_SHADOW_END will
+	 * overflow UINTPTR_MAX and then looks like a user space address.
+	 * For example, PGDIR_SIZE of CONFIG_4KB_4LEVEL is 2^39, which is too
+	 * large for Loongson-2K series whose cpu_vabits = 39.
+	 */
+	if (KASAN_SHADOW_END < vm_map_base) {
+		pr_warn("PGDIR_SIZE too large for cpu_vabits, KernelAddressSanitizer disabled.\n");
+		return;
+	}
+
 	/*
 	 * PGD was populated as invalid_pmd_table or invalid_pud_table
 	 * in pagetable_init() which depends on how many levels of page
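
Two of the hunks above are overflow hardening: kasan_early_init() now checks alignment of KASAN_SHADOW_END + 1 because the end is inclusive, and clear_pgds() steps with pgd_addr_end() so the walk cannot wrap at the top of the address space the way start += PGDIR_SIZE could. A stand-alone sketch of the clamping idiom, mirroring the kernel's pgd_addr_end() pattern with illustrative constants (not the kernel implementation itself):

#include <stdint.h>
#include <stdio.h>

#define PGDIR_SIZE	(1UL << 39)
#define PGDIR_MASK	(~(PGDIR_SIZE - 1))

/* Step to the next PGD boundary, clamping to `end` if the add wrapped. */
static uintptr_t pgd_addr_end(uintptr_t addr, uintptr_t end)
{
	uintptr_t boundary = (addr + PGDIR_SIZE) & PGDIR_MASK;

	return (boundary - 1 < end - 1) ? boundary : end;
}

int main(void)
{
	/* Hypothetical range reaching the very top of the address space. */
	uintptr_t start = 0xffffff0000000000UL, end = UINTPTR_MAX;
	int steps = 0;

	for (; start < end; start = pgd_addr_end(start, end))
		steps++;	/* `start += PGDIR_SIZE` would wrap to 0 here */

	printf("visited %d pgd entries\n", steps);	/* terminates after 2 */
	return 0;
}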
arch/x86/kvm/lapic.c +18 −11
@@ -2603,19 +2603,26 @@ void kvm_apic_update_apicv(struct kvm_vcpu *vcpu)
 {
 	struct kvm_lapic *apic = vcpu->arch.apic;
 
-	if (apic->apicv_active) {
-		/* irr_pending is always true when apicv is activated. */
-		apic->irr_pending = true;
+	/*
+	 * When APICv is enabled, KVM must always search the IRR for a pending
+	 * IRQ, as other vCPUs and devices can set IRR bits even if the vCPU
+	 * isn't running.  If APICv is disabled, KVM _should_ search the IRR
+	 * for a pending IRQ.  But KVM currently doesn't ensure *all* hardware,
+	 * e.g. CPUs and IOMMUs, has seen the change in state, i.e. searching
+	 * the IRR at this time could race with IRQ delivery from hardware that
+	 * still sees APICv as being enabled.
+	 *
+	 * FIXME: Ensure other vCPUs and devices observe the change in APICv
+	 *        state prior to updating KVM's metadata caches, so that KVM
+	 *        can safely search the IRR and set irr_pending accordingly.
+	 */
+	apic->irr_pending = true;
+
+	if (apic->apicv_active)
 		apic->isr_count = 1;
-	} else {
-		/*
-		 * Don't clear irr_pending, searching the IRR can race with
-		 * updates from the CPU as APICv is still active from hardware's
-		 * perspective.  The flag will be cleared as appropriate when
-		 * KVM injects the interrupt.
-		 */
+	else
 		apic->isr_count = count_vectors(apic->regs + APIC_ISR);
-	}
 
 	apic->highest_isr_cache = -1;
 }
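
For context on the isr_count arithmetic kept above: count_vectors() tallies the set bits across the 256-entry APIC ISR, which hardware exposes as eight 32-bit registers spaced 0x10 bytes apart. A rough user-space approximation of that walk (the layout constants here are assumptions for illustration, not the kernel's definitions):

#include <stdint.h>
#include <stdio.h>

#define MAX_APIC_VECTOR		256
#define APIC_VECTORS_PER_REG	32
/* Assumed layout: each 32-vector chunk in its own register, 0x10 apart. */
#define REG_POS(v)		(((v) >> 5) << 4)

static int count_vectors(const void *bitmap)
{
	int vec, count = 0;

	for (vec = 0; vec < MAX_APIC_VECTOR; vec += APIC_VECTORS_PER_REG) {
		uint32_t reg = *(const uint32_t *)((const char *)bitmap + REG_POS(vec));

		count += __builtin_popcount(reg);	/* hweight32() analogue */
	}

	return count;
}

int main(void)
{
	unsigned char isr[0x80] = { 0 };

	isr[0x10] = 0x81;	/* two in-service vectors in the second register */
	printf("isr_count = %d\n", count_vectors(isr));	/* 2 */
	return 0;
}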
