Unverified Commit 03068159 authored by openeuler-ci-bot's avatar openeuler-ci-bot Committed by Gitee
Browse files

!5911 CVE-2023-52484

Merge Pull Request from: @ci-robot 
 
PR sync from: Guo Mengqi <guomengqi3@huawei.com>
https://mailweb.openeuler.org/hyperkitty/list/kernel@openeuler.org/message/TBAWQUCXBET53PA36MQKZ7T3IKCXODG7/ 
CVE-2023-52484

Nicolin Chen (2):
  iommu/arm-smmu-v3: Fix size calculation in
    arm_smmu_mm_invalidate_range()
  iommu/arm-smmu-v3: Fix soft lockup triggered by
    arm_smmu_mm_invalidate_range


-- 
2.17.1
 
https://gitee.com/src-openeuler/kernel/issues/I94OZS 
 
Link: https://gitee.com/openeuler/kernel/pulls/5911

 

Reviewed-by: default avatarWeilong Chen <chenweilong@huawei.com>
Signed-off-by: default avatarJialin Zhang <zhangjialin11@huawei.com>
parents 445f88b6 860efaa2
Loading
Loading
Loading
Loading
+33 −4
Original line number Diff line number Diff line
@@ -181,17 +181,46 @@ static void arm_smmu_free_shared_cd(struct arm_smmu_ctx_desc *cd)
	}
}

/*
 * Cloned from the MAX_TLBI_OPS in arch/arm64/include/asm/tlbflush.h, this
 * is used as a threshold to replace per-page TLBI commands to issue in the
 * command queue with an address-space TLBI command, when SMMU w/o a range
 * invalidation feature handles too many per-page TLBI commands, which will
 * otherwise result in a soft lockup.
 */
#define CMDQ_MAX_TLBI_OPS		(1 << (PAGE_SHIFT - 3))

/*
 * MMU-notifier callback: invalidate the SMMU TLB and ATC entries covering
 * [start, end) for the address space shared with @mm.
 *
 * A size of 0 is used internally as "invalidate the whole ASID": when the
 * SMMU lacks range invalidation (ARM_SMMU_FEAT_RANGE_INV) and the range
 * would require more than CMDQ_MAX_TLBI_OPS per-page commands, a single
 * ASID-wide TLBI is issued instead to avoid soft lockups (CVE-2023-52484).
 */
static void arm_smmu_mm_invalidate_range(struct mmu_notifier *mn,
					 struct mm_struct *mm,
					 unsigned long start, unsigned long end)
{
	struct arm_smmu_mmu_notifier *smmu_mn = mn_to_smmu(mn);
	struct arm_smmu_domain *smmu_domain = smmu_mn->domain;
	size_t size;

	/*
	 * The mm_types defines vm_end as the first byte after the end address,
	 * different from IOMMU subsystem using the last address of an address
	 * range. So do a simple translation here by calculating size correctly.
	 */
	size = end - start;

	/* Too many per-page TLBIs would stall the cmdq: widen to the ASID. */
	if (!(smmu_domain->smmu->features & ARM_SMMU_FEAT_RANGE_INV)) {
		if (size >= CMDQ_MAX_TLBI_OPS * PAGE_SIZE)
			size = 0;
	}

	/* With BTM the CPU broadcasts TLBIs, so the SMMU TLB needs no help. */
	if (!(smmu_domain->smmu->features & ARM_SMMU_FEAT_BTM)) {
		if (!size)
			arm_smmu_tlb_inv_asid(smmu_domain->smmu,
					      smmu_mn->cd->asid);
		else
			arm_smmu_tlb_inv_range_asid(start, size,
						    smmu_mn->cd->asid,
						    PAGE_SIZE, false,
						    smmu_domain);
	}

	/* ATC invalidation is always required; size == 0 means whole PASID. */
	arm_smmu_atc_inv_domain(smmu_domain, mm->pasid, start, size);
	trace_smmu_mm_invalidate(mm->pasid, start, end);
}