Commit 7981ec65 authored by Alex Sierra, committed by Alex Deucher
Browse files

drm/amdkfd: Maintain svm_bo reference in page->zone_device_data



Each zone-device page holds a reference to the SVM BO that manages its
backing storage. This is necessary to correctly hold on to the BO in
case zone_device pages are shared with a child-process.

Signed-off-by: Alex Sierra <alex.sierra@amd.com>
Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 3bf8282c
Loading
Loading
Loading
Loading
+8 −2
Original line number Diff line number Diff line
@@ -218,7 +218,8 @@ svm_migrate_get_vram_page(struct svm_range *prange, unsigned long pfn)
	struct page *page;

	page = pfn_to_page(pfn);
	/*
	 * NOTE(review): this is a scraped diff hunk whose +/- markers were
	 * lost.  The next line is the pre-patch code (removed by this commit)
	 * and the two lines after it are its replacement: take a reference on
	 * the range's backing BO and stash the BO — not the range — in
	 * zone_device_data, so the BO stays alive for as long as the page does.
	 */
	page->zone_device_data = prange;
	svm_range_bo_ref(prange->svm_bo);
	page->zone_device_data = prange->svm_bo;
	get_page(page);
	lock_page(page);
}
@@ -502,7 +503,12 @@ svm_migrate_ram_to_vram(struct svm_range *prange, uint32_t best_loc,

/*
 * Free callback for a device-private page.  Drops the svm_bo reference
 * that svm_migrate_get_vram_page() stored in page->zone_device_data,
 * so the backing BO can be released once its last page is freed.
 */
static void svm_migrate_page_free(struct page *page)
{
	struct svm_range_bo *svm_bo = page->zone_device_data;

	/* Nothing to release if no BO was attached to this page. */
	if (svm_bo) {
		pr_debug("svm_bo ref left: %d\n", kref_read(&svm_bo->kref));
		svm_range_bo_unref(svm_bo);
	}
}

static int
+1 −9
Original line number Diff line number Diff line
@@ -312,14 +312,6 @@ static bool svm_bo_ref_unless_zero(struct svm_range_bo *svm_bo)
	return true;
}

static struct svm_range_bo *svm_range_bo_ref(struct svm_range_bo *svm_bo)
{
	if (svm_bo)
		kref_get(&svm_bo->kref);

	return svm_bo;
}

static void svm_range_bo_release(struct kref *kref)
{
	struct svm_range_bo *svm_bo;
@@ -358,7 +350,7 @@ static void svm_range_bo_release(struct kref *kref)
	kfree(svm_bo);
}

/*
 * NOTE(review): both signatures below appear because this is a scraped
 * diff with its +/- markers lost — the "static" line is the removed
 * pre-patch declaration and the non-static line is its replacement (the
 * function is now exported, see the header hunk).  The function body
 * continues past the end of this hunk.
 */
static void svm_range_bo_unref(struct svm_range_bo *svm_bo)
void svm_range_bo_unref(struct svm_range_bo *svm_bo)
{
	if (!svm_bo)
		return;
+9 −0
Original line number Diff line number Diff line
@@ -150,6 +150,14 @@ static inline void svm_range_unlock(struct svm_range *prange)
	mutex_unlock(&prange->lock);
}

static inline struct svm_range_bo *svm_range_bo_ref(struct svm_range_bo *svm_bo)
{
	if (svm_bo)
		kref_get(&svm_bo->kref);

	return svm_bo;
}

int svm_range_list_init(struct kfd_process *p);
void svm_range_list_fini(struct kfd_process *p);
int svm_ioctl(struct kfd_process *p, enum kfd_ioctl_svm_op op, uint64_t start,
@@ -186,6 +194,7 @@ svm_range_get_pdd_by_adev(struct svm_range *prange, struct amdgpu_device *adev);
 */
#define KFD_IS_SVM_API_SUPPORTED(dev) ((dev)->pgmap.type != 0)

void svm_range_bo_unref(struct svm_range_bo *svm_bo);
#else

struct kfd_process;