Unverified commit b72e8e32 authored by openeuler-ci-bot, committed by Gitee
Browse files

!13829 fix CVE-2024-50192

Merge Pull Request from: @ci-robot 
 
PR sync from: Zheng Qixing <zhengqixing@huawei.com>
https://mailweb.openeuler.org/hyperkitty/list/kernel@openeuler.org/message/OQBJDDUOPQ6Z63PKXVANNRQ2AVF424TK/ 
Fix CVE-2024-50192.

Marc Zyngier (2):
  irqchip/gic-v4: Don't allow a VMOVP on a dying VPE
  irqchip/gic-v4: Correctly deal with set_affinity on lazily-mapped VPEs


-- 
2.39.2
 
https://gitee.com/src-openeuler/kernel/issues/IB2YWE 
 
Link: https://gitee.com/openeuler/kernel/pulls/13829

 

Reviewed-by: Zhang Jianhua <chris.zjh@huawei.com>
Reviewed-by: Li Nan <linan122@huawei.com>
Signed-off-by: Li Nan <linan122@huawei.com>
parents c9a2e835 91fc0c13
Loading
Loading
Loading
Loading
+22 −6
Original line number Diff line number Diff line
@@ -1083,8 +1083,8 @@ static struct its_vpe *its_build_vmapp_cmd(struct its_node *its,
	its_encode_valid(cmd, desc->its_vmapp_cmd.valid);

	if (!desc->its_vmapp_cmd.valid) {
		if (is_v4_1(its)) {
		alloc = !atomic_dec_return(&desc->its_vmapp_cmd.vpe->vmapp_count);
		if (is_v4_1(its)) {
			its_encode_alloc(cmd, alloc);
		}

@@ -1098,13 +1098,13 @@ static struct its_vpe *its_build_vmapp_cmd(struct its_node *its,
	its_encode_vpt_addr(cmd, vpt_addr);
	its_encode_vpt_size(cmd, LPI_NRBITS - 1);

	alloc = !atomic_fetch_inc(&desc->its_vmapp_cmd.vpe->vmapp_count);

	if (!is_v4_1(its))
		goto out;

	vconf_addr = virt_to_phys(page_address(desc->its_vmapp_cmd.vpe->its_vm->vprop_page));

	alloc = !atomic_fetch_inc(&desc->its_vmapp_cmd.vpe->vmapp_count);

	its_encode_alloc(cmd, alloc);

	/*
@@ -4467,6 +4467,23 @@ static int its_vpe_set_affinity(struct irq_data *d,
	unsigned long flags;
	int from, cpu;

	/*
	 * Check if we're racing against a VPE being destroyed, for
	 * which we don't want to allow a VMOVP.
	 */
	if (!atomic_read(&vpe->vmapp_count)) {
		if (gic_requires_eager_mapping())
			return -EINVAL;

		/*
		 * If we lazily map the VPEs, this isn't an error and
		 * we can exit cleanly.
		 */
		cpu = cpumask_first(mask_val);
		irq_data_update_effective_affinity(d, cpumask_of(cpu));
		return IRQ_SET_MASK_OK_DONE;
	}

	/*
	 * Changing affinity is mega expensive, so let's be as lazy as
	 * we can and only do it if we really have to. Also, if mapped
@@ -5128,9 +5145,8 @@ static int its_vpe_init(struct its_vpe *vpe)
	raw_spin_lock_init(&vpe->vpe_lock);
	vpe->vpe_id = vpe_id;
	vpe->vpt_page = vpt_page;
	if (gic_rdists->has_rvpeid)
	atomic_set(&vpe->vmapp_count, 0);
	else
	if (!gic_rdists->has_rvpeid)
		vpe->vpe_proxy_event = -1;

	return 0;
+3 −1
Original line number Diff line number Diff line
@@ -59,10 +59,12 @@ struct its_vpe {
				bool	group;
			}			sgi_config[32];
			int nr_irqs;
			atomic_t vmapp_count;
		};
	};

	/* Track the VPE being mapped */
	atomic_t vmapp_count;

	/*
	 * Ensures mutual exclusion between affinity setting of the
	 * vPE and vLPI operations using vpe->col_idx.