Unverified Commit f622579d authored by openeuler-ci-bot, committed by Gitee
Browse files

!13831 fix CVE-2024-50192

Merge Pull Request from: @ci-robot 
 
PR sync from: Zheng Qixing <zhengqixing@huawei.com>
https://mailweb.openeuler.org/hyperkitty/list/kernel@openeuler.org/message/J7THVCYUJUVGL5QFCGYR3CZPOHQIAOBT/ 
fix CVE-2024-50192.

Marc Zyngier (2):
  irqchip/gic-v4: Don't allow a VMOVP on a dying VPE
  irqchip/gic-v4: Correctly deal with set_affinity on lazily-mapped VPEs


-- 
2.39.2
 
https://gitee.com/src-openeuler/kernel/issues/IB2YWE 
 
Link: https://gitee.com/openeuler/kernel/pulls/13831

 

Reviewed-by: Zhang Jianhua <chris.zjh@huawei.com>
Signed-off-by: Zhang Peng <zhangpeng362@huawei.com>
parents 3152ab40 3424b485
Loading
Loading
Loading
Loading
+22 −6
Original line number Diff line number Diff line
@@ -1107,8 +1107,8 @@ static struct its_vpe *its_build_vmapp_cmd(struct its_node *its,
	its_encode_valid(cmd, desc->its_vmapp_cmd.valid);

	if (!desc->its_vmapp_cmd.valid) {
		if (is_v4_1(its)) {
		alloc = !atomic_dec_return(&desc->its_vmapp_cmd.vpe->vmapp_count);
		if (is_v4_1(its)) {
			its_encode_alloc(cmd, alloc);
			/*
			 * Unmapping a VPE is self-synchronizing on GICv4.1,
@@ -1129,13 +1129,13 @@ static struct its_vpe *its_build_vmapp_cmd(struct its_node *its,
	its_encode_vpt_addr(cmd, vpt_addr);
	its_encode_vpt_size(cmd, LPI_NRBITS - 1);

	alloc = !atomic_fetch_inc(&desc->its_vmapp_cmd.vpe->vmapp_count);

	if (!is_v4_1(its))
		goto out;

	vconf_addr = virt_to_phys(page_address(desc->its_vmapp_cmd.vpe->its_vm->vprop_page));

	alloc = !atomic_fetch_inc(&desc->its_vmapp_cmd.vpe->vmapp_count);

	its_encode_alloc(cmd, alloc);

	/*
@@ -4252,6 +4252,23 @@ static int its_vpe_set_affinity(struct irq_data *d,
	unsigned long flags;
	int from, cpu;

	/*
	 * Check if we're racing against a VPE being destroyed, for
	 * which we don't want to allow a VMOVP.
	 */
	if (!atomic_read(&vpe->vmapp_count)) {
		if (gic_requires_eager_mapping())
			return -EINVAL;

		/*
		 * If we lazily map the VPEs, this isn't an error and
		 * we can exit cleanly.
		 */
		cpu = cpumask_first(mask_val);
		irq_data_update_effective_affinity(d, cpumask_of(cpu));
		return IRQ_SET_MASK_OK_DONE;
	}

	/*
	 * Changing affinity is mega expensive, so let's be as lazy as
	 * we can and only do it if we really have to. Also, if mapped
@@ -4957,9 +4974,8 @@ static int its_vpe_init(struct its_vpe *vpe)
	raw_spin_lock_init(&vpe->vpe_lock);
	vpe->vpe_id = vpe_id;
	vpe->vpt_page = vpt_page;
	if (gic_rdists->has_rvpeid)
	atomic_set(&vpe->vmapp_count, 0);
	else
	if (!gic_rdists->has_rvpeid)
		vpe->vpe_proxy_event = -1;

	return 0;
+3 −1
Original line number Diff line number Diff line
@@ -72,10 +72,12 @@ struct its_vpe {
#else
			}                       sgi_config[16];
#endif
			atomic_t vmapp_count;
		};
	};

	/* Track the VPE being mapped */
	atomic_t vmapp_count;

	/*
	 * Ensures mutual exclusion between affinity setting of the
	 * vPE and vLPI operations using vpe->col_idx.