Unverified Commit 95b70300 authored by openeuler-ci-bot, committed by Gitee
Browse files

!372 Backport 5.10.141 LTS

Merge Pull Request from: @zhangjialin11 
 
Backport 5.10.141 LTS patches from upstream.

Conflicts:

Already merged(12):
895428ee124a mm: Force TLB flush for PFNMAP mappings before unlink_file_vma()
38267d266336 Bluetooth: L2CAP: Fix build errors in some archs
bacb37bdc2a2 media: pvrusb2: fix memory leak in pvr_probe
6204bf78b2a9 bpf: Don't redirect packets with invalid pkt_len
98f401d36396 mm/rmap: Fix anon_vma->degree ambiguity leading to double-reuse
744b0d308070 kprobes: don't call disarm_kprobe() for disabled kprobes
28d8d2737e82 io_uring: disable polling pollfree files
cb41f22df3ec xfs: remove infinite loop when reserving free block pool
72a259bdd50d xfs: always succeed at setting the reserve pool size
f168801da95f xfs: fix overfilling of reserve pool
d34798d846d7 xfs: fix soft lockup via spinning in filestream ag selection loop
64f6da455b66 xfs: revert "xfs: actually bump warning counts when we send warnings"

Total patches: 37 - 12 = 25 
 
Link: https://gitee.com/openeuler/kernel/pulls/372

 

Reviewed-by: Zheng Zengkai <zhengzengkai@huawei.com>
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com>
parents 4c0bc9e5 4d958c70
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -437,7 +437,7 @@ __init int hypfs_diag_init(void)
	int rc;

	if (diag204_probe()) {
		pr_err("The hardware system does not support hypfs\n");
		pr_info("The hardware system does not support hypfs\n");
		return -ENODATA;
	}

+1 −1
Original line number Diff line number Diff line
@@ -496,9 +496,9 @@ static int __init hypfs_init(void)
	hypfs_vm_exit();
fail_hypfs_diag_exit:
	hypfs_diag_exit();
	pr_err("Initialization of hypfs failed with rc=%i\n", rc);
fail_dbfs_exit:
	hypfs_dbfs_exit();
	pr_err("Initialization of hypfs failed with rc=%i\n", rc);
	return rc;
}
device_initcall(hypfs_init)
+3 −1
Original line number Diff line number Diff line
@@ -429,7 +429,9 @@ static inline vm_fault_t do_exception(struct pt_regs *regs, int access)
	flags = FAULT_FLAG_DEFAULT;
	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;
	if (access == VM_WRITE || (trans_exc_code & store_indication) == 0x400)
	if ((trans_exc_code & store_indication) == 0x400)
		access = VM_WRITE;
	if (access == VM_WRITE)
		flags |= FAULT_FLAG_WRITE;
	mmap_read_lock(mm);

+51 −41
Original line number Diff line number Diff line
@@ -35,34 +35,57 @@
#define RSB_CLEAR_LOOPS		32	/* To forcibly overwrite all entries */

/*
 * Common helper for __FILL_RETURN_BUFFER and __FILL_ONE_RETURN.
 */
#define __FILL_RETURN_SLOT			\
	ANNOTATE_INTRA_FUNCTION_CALL;		\
	call	772f;				\
	int3;					\
772:

/*
 * Stuff the entire RSB.
 *
 * Google experimented with loop-unrolling and this turned out to be
 * the optimal version — two calls, each with their own speculation
 * trap should their return address end up getting used, in a loop.
 */
#define __FILL_RETURN_BUFFER(reg, nr, sp)	\
#ifdef CONFIG_X86_64
#define __FILL_RETURN_BUFFER(reg, nr)			\
	mov	$(nr/2), reg;				\
771:							\
	ANNOTATE_INTRA_FUNCTION_CALL;		\
	call	772f;				\
773:	/* speculation trap */			\
	UNWIND_HINT_EMPTY;			\
	pause;					\
	lfence;					\
	jmp	773b;				\
772:						\
	ANNOTATE_INTRA_FUNCTION_CALL;		\
	call	774f;				\
775:	/* speculation trap */			\
	UNWIND_HINT_EMPTY;			\
	pause;					\
	lfence;					\
	jmp	775b;				\
774:						\
	add	$(BITS_PER_LONG/8) * 2, sp;	\
	__FILL_RETURN_SLOT				\
	__FILL_RETURN_SLOT				\
	add	$(BITS_PER_LONG/8) * 2, %_ASM_SP;	\
	dec	reg;					\
	jnz	771b;					\
	/* barrier for jnz misprediction */		\
	lfence;
#else
/*
 * i386 doesn't unconditionally have LFENCE, as such it can't
 * do a loop.
 */
#define __FILL_RETURN_BUFFER(reg, nr)			\
	.rept nr;					\
	__FILL_RETURN_SLOT;				\
	.endr;						\
	add	$(BITS_PER_LONG/8) * nr, %_ASM_SP;
#endif

/*
 * Stuff a single RSB slot.
 *
 * To mitigate Post-Barrier RSB speculation, one CALL instruction must be
 * forced to retire before letting a RET instruction execute.
 *
 * On PBRSB-vulnerable CPUs, it is not safe for a RET to be executed
 * before this point.
 */
#define __FILL_ONE_RETURN				\
	__FILL_RETURN_SLOT				\
	add	$(BITS_PER_LONG/8), %_ASM_SP;		\
	lfence;

#ifdef __ASSEMBLY__

@@ -120,28 +143,15 @@
#endif
.endm

.macro ISSUE_UNBALANCED_RET_GUARD
	ANNOTATE_INTRA_FUNCTION_CALL
	call .Lunbalanced_ret_guard_\@
	int3
.Lunbalanced_ret_guard_\@:
	add $(BITS_PER_LONG/8), %_ASM_SP
	lfence
.endm

 /*
  * A simpler FILL_RETURN_BUFFER macro. Don't make people use the CPP
  * monstrosity above, manually.
  */
.macro FILL_RETURN_BUFFER reg:req nr:req ftr:req ftr2
.ifb \ftr2
	ALTERNATIVE "jmp .Lskip_rsb_\@", "", \ftr
.else
	ALTERNATIVE_2 "jmp .Lskip_rsb_\@", "", \ftr, "jmp .Lunbalanced_\@", \ftr2
.endif
	__FILL_RETURN_BUFFER(\reg,\nr,%_ASM_SP)
.Lunbalanced_\@:
	ISSUE_UNBALANCED_RET_GUARD
.macro FILL_RETURN_BUFFER reg:req nr:req ftr:req ftr2=ALT_NOT(X86_FEATURE_ALWAYS)
	ALTERNATIVE_2 "jmp .Lskip_rsb_\@", \
		__stringify(__FILL_RETURN_BUFFER(\reg,\nr)), \ftr, \
		__stringify(__FILL_ONE_RETURN), \ftr2

.Lskip_rsb_\@:
.endm

+17 −1
Original line number Diff line number Diff line
@@ -327,7 +327,23 @@ static struct miscdevice udmabuf_misc = {

/*
 * Module init: register the udmabuf misc device and give it a 64-bit
 * DMA mask so importers can map udmabuf-backed buffers for DMA.
 *
 * Returns 0 on success or a negative errno on failure.
 */
static int __init udmabuf_dev_init(void)
{
	int ret;

	ret = misc_register(&udmabuf_misc);
	if (ret < 0) {
		pr_err("Could not initialize udmabuf device\n");
		return ret;
	}

	/*
	 * misc_register() created this_device; set its DMA mask before the
	 * device can be used. On failure, unwind the registration above so
	 * the function leaves no partial state behind.
	 */
	ret = dma_coerce_mask_and_coherent(udmabuf_misc.this_device,
					   DMA_BIT_MASK(64));
	if (ret < 0) {
		pr_err("Could not setup DMA mask for udmabuf device\n");
		misc_deregister(&udmabuf_misc);
		return ret;
	}

	return 0;
}

static void __exit udmabuf_dev_exit(void)
Loading