Commit d5244993 authored by Rick Edgecombe, committed by Jinjiang Tu
Browse files

dma-direct: Leak pages on dma_set_decrypted() failure

mainline inclusion
from mainline-v6.9-rc1
commit b9fa16949d18e06bdf728a560f5c8af56d2bdcaf
category: bugfix
bugzilla: https://gitee.com/src-openeuler/kernel/issues/I9QGIS
CVE: CVE-2024-35939

Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=b9fa16949d18e06bdf728a560f5c8af56d2bdcaf



--------------------------------

On TDX it is possible for the untrusted host to cause
set_memory_encrypted() or set_memory_decrypted() to fail such that an
error is returned and the resulting memory is shared. Callers need to
take care to handle these errors to avoid returning decrypted (shared)
memory to the page allocator, which could lead to functional or security
issues.

DMA could free decrypted/shared pages if dma_set_decrypted() fails. This
should be a rare case. Just leak the pages in this case instead of
freeing them.

Signed-off-by: Rick Edgecombe <rick.p.edgecombe@intel.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>

Conflicts:
	kernel/dma/direct.c
[Context conflicts.]
Signed-off-by: Jinjiang Tu <tujinjiang@huawei.com>
parent 3b9084bc
1 file changed: +6 −2 (kernel/dma/direct.c)
Original line number Diff line number Diff line
@@ -243,7 +243,7 @@ void *dma_direct_alloc(struct device *dev, size_t size,
		err = set_memory_decrypted((unsigned long)ret,
					   PFN_UP(size));
		if (err)
-			goto out_free_pages;
+			goto out_leak_pages;
	}

	memset(ret, 0, size);
@@ -270,6 +270,8 @@ void *dma_direct_alloc(struct device *dev, size_t size,
out_free_pages:
	__dma_direct_free_pages(dev, page, size);
	return NULL;
+out_leak_pages:
+	return NULL;
}

void dma_direct_free(struct device *dev, size_t size,
@@ -333,7 +335,7 @@ struct page *dma_direct_alloc_pages(struct device *dev, size_t size,
	ret = page_address(page);
	if (force_dma_unencrypted(dev)) {
		if (set_memory_decrypted((unsigned long)ret, PFN_UP(size)))
-			goto out_free_pages;
+			goto out_leak_pages;
	}
	memset(ret, 0, size);
	*dma_handle = phys_to_dma_direct(dev, page_to_phys(page));
@@ -341,6 +343,8 @@ struct page *dma_direct_alloc_pages(struct device *dev, size_t size,
out_free_pages:
	__dma_direct_free_pages(dev, page, size);
	return NULL;
+out_leak_pages:
+	return NULL;
}

void dma_direct_free_pages(struct device *dev, size_t size,