Unverified Commit 1bf2d33f authored by openeuler-ci-bot, committed by Gitee

!15783 [openEuler-24.03-LTS][linux-6.6.y sync] Backport 6.6.80-6.6.81 LTS Patches

Merge Pull Request from: @koishimind 
 
git log --oneline v6.6.80..v6.6.81 | wc -l
147
71 + 11 + 52 + 13 + 1

check-kabi(11):
RDMA/mlx5: Handle errors returned from mlx5r_ib_rate
RDMA/mlx5: Fix AH static rate parsing
IB/core: Add support for XDR link speed
ipvlan: ensure network headers are in skb linear part
ipvlan: Prepare ipvlan_process_v4_outbound() to future .flowi4_tos conversion.
ipv4: Convert ip_route_input() to dscp_t.
ipv4: Convert icmp_route_lookup() to dscp_t.
ipvlan: Unmask upper DSCP bits in ipvlan_process_v4_outbound()
ipv4: icmp: Unmask upper DSCP bits in icmp_route_lookup()
ipv4: icmp: Pass full DS field to ip_route_input()
net/ipv4: add tracepoint for icmp_send

conflicts(52):
net: ipv6: fix dst ref loop on input in rpl lwt
net: ipv6: rpl_iptunnel: mitigate 2-realloc issue
net: ipv6: seg6_iptunnel: mitigate 2-realloc issue
tcp: Defer ts_recent changes until req is owned
net: ipv6: fix dst ref loop on input in seg6 lwt
net: cadence: macb: Synchronize stats calculations
x86/microcode/AMD: Load only SHA256-checksummed patches
x86/microcode/AMD: Add get_patch_level()
x86/microcode/AMD: Get rid of the _load_microcode_amd() forward declaration
x86/microcode/AMD: Merge early_apply_microcode() into its single callsite
x86/microcode/AMD: Have __apply_microcode_amd() return bool
x86/microcode/AMD: Make __verify_patch_size() return bool
x86/microcode/AMD: Return bool from find_blobs_in_containers()
x86/microcode/AMD: Flush patch buffer mapping after application
x86/microcode/AMD: Split load_microcode_amd()
x86/microcode/AMD: Pay attention to the stepping dynamically
x86/microcode/AMD: Use the family,model,stepping encoded in the patch ID
x86/microcode/intel: Set new revision only after a successful update
x86/microcode: Rework early revisions reporting
x86/microcode: Prepare for minimal revision check
x86/microcode: Handle "offline" CPUs correctly
x86/apic: Provide apic_force_nmi_on_cpu()
x86/microcode: Protect against instrumentation
x86/microcode: Rendezvous and load in NMI
x86/microcode: Replace the all-in-one rendevous handler
x86/microcode: Provide new control functions
x86/microcode: Add per CPU control field
x86/microcode: Add per CPU result state
x86/microcode: Sanitize __wait_for_cpus()
x86/microcode: Clarify the late load logic
x86/microcode: Handle "nosmt" correctly
x86/microcode: Clean up mc_cpu_down_prep()
x86/microcode: Get rid of the schedule work indirection
x86/microcode: Mop up early loading leftovers
x86/microcode/amd: Use cached microcode for AP load
x86/microcode/amd: Cache builtin/initrd microcode early
x86/microcode/amd: Cache builtin microcode too
x86/microcode/amd: Use correct per CPU ucode_cpu_info
x86/microcode: Remove pointless apply() invocation
x86/microcode/intel: Rework intel_find_matching_signature()
x86/microcode/intel: Reuse intel_cpu_collect_info()
x86/microcode/intel: Rework intel_cpu_collect_info()
x86/microcode/intel: Unify microcode apply() functions
x86/microcode/intel: Switch to kvmalloc()
x86/microcode/intel: Save the microcode only after a successful late-load
x86/microcode/intel: Simplify early loading
x86/microcode/intel: Cleanup code further
x86/microcode/intel: Simplify and rename generic_load_microcode()
x86/microcode/intel: Simplify scan_microcode()
x86/microcode/intel: Rip out mixed stepping support for Intel CPUs
x86/microcode/32: Move early loading after paging enable
i2c: ls2x: Fix frequency division register access

merged(13):
rtla/timerlat_top: Set OSNOISE_WORKLOAD for kernel threads
rtla/timerlat_hist: Set OSNOISE_WORKLOAD for kernel threads
Revert "rtla/timerlat_hist: Set OSNOISE_WORKLOAD for kernel threads"
Revert "rtla/timerlat_top: Set OSNOISE_WORKLOAD for kernel threads"
perf/core: Add RCU read lock protection to perf_iterate_ctx()
ALSA: hda/realtek: Fix microphone regression on ASUS N705UD
ftrace: Avoid potential division by zero in function_stat_show()
tracing: Fix bad hist from corrupting named_triggers list
uprobes: Reject the shared zeropage in uprobe_write_opcode()
perf/core: Order the PMU list to fix warning about unordered pmu_ctx_list
ALSA: hda/realtek: Fix wrong mic setup for ASUS VivoBook 15
ALSA: hda/realtek: Add quirks for ASUS ROG 2023 models
ovl: fix UAF in ovl_dentry_update_reval by moving dput() in ovl_link_up

check-depends(1):
firmware: cs_dsp: Ensure cs_dsp_load[_coeff]() returns 0 on success

 
 
Link: https://gitee.com/openeuler/kernel/pulls/15783

 

Reviewed-by: Zhang Peng <zhangpeng362@huawei.com>
Signed-off-by: Zhang Peng <zhangpeng362@huawei.com>
parents 264cea76 07276d66
+5 −0
@@ -367,6 +367,11 @@
 	status = "okay";
 };
 
+&uart5 {
+	/delete-property/ dmas;
+	/delete-property/ dma-names;
+};
+
 /* Mule UCAN */
 &usb_host0_ehci {
 	status = "okay";
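
(Note, not part of the patch itself: /delete-property/ removes a property that the node inherited from an included .dtsi, so the added uart5 stanza drops its dmas/dma-names entries and the serial driver presumably falls back to PIO.)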
+1 −1
@@ -93,7 +93,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 		_ASM_EXTABLE_UACCESS_ERR(1b, 3b, %[r])	\
 		_ASM_EXTABLE_UACCESS_ERR(2b, 3b, %[r])	\
 	: [r] "+r" (ret), [v] "=&r" (val), [u] "+m" (*uaddr), [t] "=&r" (tmp)
-	: [ov] "Jr" (oldval), [nv] "Jr" (newval)
+	: [ov] "Jr" ((long)(int)oldval), [nv] "Jr" (newval)
 	: "memory");
 	__disable_user_access();

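Side note on the cast above (an illustration, not part of the backport): oldval is a 32-bit value, and on a 64-bit target a plain register operand is zero-extended, while the lr.w result it is compared against is sign-extended; (long)(int)oldval sign-extends the compare operand so the two representations match. A standalone sketch of the difference:

/* Hypothetical demonstration, not kernel code: zero- vs sign-extension
 * of a 32-bit value with the high bit set diverge on a 64-bit machine. */
#include <stdio.h>

int main(void)
{
	unsigned int oldval = 0x80000000u;  /* high bit set */
	long zext = (long)oldval;           /* zero-extended: 0x0000000080000000 */
	long sext = (long)(int)oldval;      /* sign-extended: 0xffffffff80000000 */

	printf("zext=%lx sext=%lx equal=%d\n", zext, sext, zext == sext);
	return 0;
}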
+6 −2
@@ -236,8 +236,9 @@ struct kvm_vcpu_arch {
 	/* Cache pages needed to program page tables with spinlock held */
 	struct kvm_mmu_memory_cache mmu_page_cache;
 
-	/* VCPU power-off state */
-	bool power_off;
+	/* VCPU power state */
+	struct kvm_mp_state mp_state;
+	spinlock_t mp_state_lock;
 
 	/* Don't run the VCPU (blocked) */
 	bool pause;
@@ -351,7 +352,10 @@ int kvm_riscv_vcpu_unset_interrupt(struct kvm_vcpu *vcpu, unsigned int irq);
 void kvm_riscv_vcpu_flush_interrupts(struct kvm_vcpu *vcpu);
 void kvm_riscv_vcpu_sync_interrupts(struct kvm_vcpu *vcpu);
 bool kvm_riscv_vcpu_has_interrupts(struct kvm_vcpu *vcpu, u64 mask);
+void __kvm_riscv_vcpu_power_off(struct kvm_vcpu *vcpu);
 void kvm_riscv_vcpu_power_off(struct kvm_vcpu *vcpu);
+void __kvm_riscv_vcpu_power_on(struct kvm_vcpu *vcpu);
 void kvm_riscv_vcpu_power_on(struct kvm_vcpu *vcpu);
+bool kvm_riscv_vcpu_stopped(struct kvm_vcpu *vcpu);
 
 #endif /* __RISCV_KVM_HOST_H__ */
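
The header hunk above follows a common kernel naming idiom: the double-underscore declarations are the "caller already holds mp_state_lock" variants, while the unprefixed functions take the lock themselves (the next diff below shows both). A minimal userspace sketch of the same idiom, using hypothetical names and a pthread mutex in place of the kernel spinlock:

/* Sketch of the __helper/wrapper locking idiom; the double-underscore
 * names mirror the kernel convention (they are reserved in strict ISO C). */
#include <pthread.h>

static pthread_mutex_t state_lock = PTHREAD_MUTEX_INITIALIZER;
static int state;

/* Caller must already hold state_lock. */
static void __set_state(int new_state)
{
	state = new_state;
}

/* Public variant: acquires the lock, then delegates to the helper. */
void set_state(int new_state)
{
	pthread_mutex_lock(&state_lock);
	__set_state(new_state);
	pthread_mutex_unlock(&state_lock);
}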
+0 −6
@@ -211,12 +211,6 @@ static size_t get_rt_frame_size(bool cal_all)
 		if (cal_all || riscv_v_vstate_query(task_pt_regs(current)))
 			total_context_size += riscv_v_sc_size;
 	}
-	/*
-	 * Preserved a __riscv_ctx_hdr for END signal context header if an
-	 * extension uses __riscv_extra_ext_header
-	 */
-	if (total_context_size)
-		total_context_size += sizeof(struct __riscv_ctx_hdr);
 
 	frame_size += total_context_size;

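(Context inferred from the hunk rather than stated in it: the deleted block reserved one extra struct __riscv_ctx_hdr as an END marker whenever any extension context was present; dropping it suggests that header is already accounted for elsewhere in the frame layout, and that the signal frame size was previously over-reserved.)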
+35 −13
@@ -100,6 +100,8 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
 	struct kvm_cpu_context *cntx;
 	struct kvm_vcpu_csr *reset_csr = &vcpu->arch.guest_reset_csr;
 
+	spin_lock_init(&vcpu->arch.mp_state_lock);
+
 	/* Mark this VCPU never ran */
 	vcpu->arch.ran_atleast_once = false;
 	vcpu->arch.mmu_page_cache.gfp_zero = __GFP_ZERO;
@@ -193,7 +195,7 @@ void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu)
 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
 {
 	return (kvm_riscv_vcpu_has_interrupts(vcpu, -1UL) &&
-		!vcpu->arch.power_off && !vcpu->arch.pause);
+		!kvm_riscv_vcpu_stopped(vcpu) && !vcpu->arch.pause);
 }
 
 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
@@ -421,26 +423,42 @@ bool kvm_riscv_vcpu_has_interrupts(struct kvm_vcpu *vcpu, u64 mask)
 	return kvm_riscv_vcpu_aia_has_interrupts(vcpu, mask);
 }
 
-void kvm_riscv_vcpu_power_off(struct kvm_vcpu *vcpu)
+void __kvm_riscv_vcpu_power_off(struct kvm_vcpu *vcpu)
 {
-	vcpu->arch.power_off = true;
+	WRITE_ONCE(vcpu->arch.mp_state.mp_state, KVM_MP_STATE_STOPPED);
 	kvm_make_request(KVM_REQ_SLEEP, vcpu);
 	kvm_vcpu_kick(vcpu);
 }
 
-void kvm_riscv_vcpu_power_on(struct kvm_vcpu *vcpu)
+void kvm_riscv_vcpu_power_off(struct kvm_vcpu *vcpu)
 {
-	vcpu->arch.power_off = false;
+	spin_lock(&vcpu->arch.mp_state_lock);
+	__kvm_riscv_vcpu_power_off(vcpu);
+	spin_unlock(&vcpu->arch.mp_state_lock);
+}
+
+void __kvm_riscv_vcpu_power_on(struct kvm_vcpu *vcpu)
+{
+	WRITE_ONCE(vcpu->arch.mp_state.mp_state, KVM_MP_STATE_RUNNABLE);
 	kvm_vcpu_wake_up(vcpu);
 }
 
+void kvm_riscv_vcpu_power_on(struct kvm_vcpu *vcpu)
+{
+	spin_lock(&vcpu->arch.mp_state_lock);
+	__kvm_riscv_vcpu_power_on(vcpu);
+	spin_unlock(&vcpu->arch.mp_state_lock);
+}
+
+bool kvm_riscv_vcpu_stopped(struct kvm_vcpu *vcpu)
+{
+	return READ_ONCE(vcpu->arch.mp_state.mp_state) == KVM_MP_STATE_STOPPED;
+}
+
 int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
 				    struct kvm_mp_state *mp_state)
 {
-	if (vcpu->arch.power_off)
-		mp_state->mp_state = KVM_MP_STATE_STOPPED;
-	else
-		mp_state->mp_state = KVM_MP_STATE_RUNNABLE;
+	*mp_state = READ_ONCE(vcpu->arch.mp_state);
 
 	return 0;
 }
@@ -450,17 +468,21 @@ int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
 {
 	int ret = 0;
 
+	spin_lock(&vcpu->arch.mp_state_lock);
+
 	switch (mp_state->mp_state) {
 	case KVM_MP_STATE_RUNNABLE:
-		vcpu->arch.power_off = false;
+		WRITE_ONCE(vcpu->arch.mp_state, *mp_state);
 		break;
 	case KVM_MP_STATE_STOPPED:
-		kvm_riscv_vcpu_power_off(vcpu);
+		__kvm_riscv_vcpu_power_off(vcpu);
 		break;
 	default:
 		ret = -EINVAL;
 	}
 
+	spin_unlock(&vcpu->arch.mp_state_lock);
+
 	return ret;
 }
@@ -561,11 +583,11 @@ static void kvm_riscv_check_vcpu_requests(struct kvm_vcpu *vcpu)
 		if (kvm_check_request(KVM_REQ_SLEEP, vcpu)) {
 			kvm_vcpu_srcu_read_unlock(vcpu);
 			rcuwait_wait_event(wait,
-				(!vcpu->arch.power_off) && (!vcpu->arch.pause),
+				(!kvm_riscv_vcpu_stopped(vcpu)) && (!vcpu->arch.pause),
 				TASK_INTERRUPTIBLE);
 			kvm_vcpu_srcu_read_lock(vcpu);
 
-			if (vcpu->arch.power_off || vcpu->arch.pause) {
+			if (kvm_riscv_vcpu_stopped(vcpu) || vcpu->arch.pause) {
 				/*
 				 * Awaken to handle a signal, request to
 				 * sleep again later.