Commit 1f39b1df authored by Thierry Reding
Browse files

drm/tegra: Implement buffer object cache



This cache is used to avoid mapping and unmapping buffer objects
unnecessarily. Mappings are cached per client and stay hot until
the buffer object is destroyed.

Signed-off-by: Thierry Reding <treding@nvidia.com>
parent c6aeaf56
Loading
Loading
Loading
Loading
+11 −3
Original line number Diff line number Diff line
@@ -67,6 +67,7 @@ static struct host1x_bo_mapping *tegra_bo_pin(struct device *dev, struct host1x_
	if (!map)
		return ERR_PTR(-ENOMEM);

	kref_init(&map->ref);
	map->bo = host1x_bo_get(bo);
	map->direction = direction;
	map->dev = dev;
@@ -157,9 +158,6 @@ static struct host1x_bo_mapping *tegra_bo_pin(struct device *dev, struct host1x_

static void tegra_bo_unpin(struct host1x_bo_mapping *map)
{
	if (!map)
		return;

	if (map->attach) {
		dma_buf_unmap_attachment(map->attach, map->sgt, map->direction);
		dma_buf_detach(map->attach->dmabuf, map->attach);
@@ -493,8 +491,18 @@ static struct tegra_bo *tegra_bo_import(struct drm_device *drm,
void tegra_bo_free_object(struct drm_gem_object *gem)
{
	struct tegra_drm *tegra = gem->dev->dev_private;
	struct host1x_bo_mapping *mapping, *tmp;
	struct tegra_bo *bo = to_tegra_bo(gem);

	/* remove all mappings of this buffer object from any caches */
	list_for_each_entry_safe(mapping, tmp, &bo->base.mappings, list) {
		if (mapping->cache)
			host1x_bo_unpin(mapping);
		else
			dev_err(gem->dev->dev, "mapping %p stale for device %s\n", mapping,
				dev_name(mapping->dev));
	}

	if (tegra->domain)
		tegra_bo_iommu_unmap(tegra, bo);

+1 −1
Original line number Diff line number Diff line
@@ -145,7 +145,7 @@ static int tegra_dc_pin(struct tegra_dc *dc, struct tegra_plane_state *state)
		struct tegra_bo *bo = tegra_fb_get_plane(state->base.fb, i);
		struct host1x_bo_mapping *map;

		map = host1x_bo_pin(dc->dev, &bo->base, DMA_TO_DEVICE);
		map = host1x_bo_pin(dc->dev, &bo->base, DMA_TO_DEVICE, &dc->client.cache);
		if (IS_ERR(map)) {
			err = PTR_ERR(map);
			goto unpin;
+1 −0
Original line number Diff line number Diff line
@@ -75,6 +75,7 @@ gather_bo_pin(struct device *dev, struct host1x_bo *bo, enum dma_data_direction
	if (!map)
		return ERR_PTR(-ENOMEM);

	kref_init(&map->ref);
	map->bo = host1x_bo_get(bo);
	map->direction = direction;
	map->dev = dev;
+1 −1
Original line number Diff line number Diff line
@@ -201,7 +201,7 @@ int tegra_drm_ioctl_channel_map(struct drm_device *drm, void *data, struct drm_f
		goto put_gem;
	}

	mapping->map = host1x_bo_pin(context->client->base.dev, mapping->bo, direction);
	mapping->map = host1x_bo_pin(context->client->base.dev, mapping->bo, direction, NULL);
	if (IS_ERR(mapping->map)) {
		err = PTR_ERR(mapping->map);
		goto put_gem;
+78 −0
Original line number Diff line number Diff line
@@ -742,6 +742,7 @@ EXPORT_SYMBOL(host1x_driver_unregister);
 */
void __host1x_client_init(struct host1x_client *client, struct lock_class_key *key)
{
	host1x_bo_cache_init(&client->cache);
	INIT_LIST_HEAD(&client->list);
	__mutex_init(&client->lock, "host1x client lock", key);
	client->usecount = 0;
@@ -830,6 +831,8 @@ int host1x_client_unregister(struct host1x_client *client)

	mutex_unlock(&clients_lock);

	host1x_bo_cache_destroy(&client->cache);

	return 0;
}
EXPORT_SYMBOL(host1x_client_unregister);
@@ -904,3 +907,78 @@ int host1x_client_resume(struct host1x_client *client)
	return err;
}
EXPORT_SYMBOL(host1x_client_resume);

/*
 * host1x_bo_pin() - pin a buffer object for DMA by @dev, with optional caching
 * @dev: device that the mapping is created for
 * @bo: buffer object to pin
 * @dir: DMA data direction for the mapping
 * @cache: per-client mapping cache to consult and populate, or NULL to
 *         create an uncached, single-use mapping
 *
 * Returns a reference-counted mapping on success or an ERR_PTR() from the
 * driver's ->pin() callback on failure. On a cache hit the existing
 * mapping's reference count is bumped instead of creating a new mapping.
 * The cache lock is held across lookup, pin and insertion so that
 * concurrent callers cannot create duplicate cached mappings.
 */
struct host1x_bo_mapping *host1x_bo_pin(struct device *dev, struct host1x_bo *bo,
					enum dma_data_direction dir,
					struct host1x_bo_cache *cache)
{
	struct host1x_bo_mapping *mapping;

	if (cache) {
		mutex_lock(&cache->lock);

		/*
		 * Reuse an existing mapping for the same buffer object and
		 * direction if one is already cached.
		 *
		 * NOTE(review): the lookup does not compare mapping->dev with
		 * @dev — this assumes each cache is private to a single
		 * client/device; confirm against struct host1x_client usage.
		 */
		list_for_each_entry(mapping, &cache->mappings, entry) {
			if (mapping->bo == bo && mapping->direction == dir) {
				kref_get(&mapping->ref);
				goto unlock;
			}
		}
	}

	/* no cached mapping (or no cache): create a fresh one via the driver */
	mapping = bo->ops->pin(dev, bo, dir);
	if (IS_ERR(mapping))
		goto unlock;

	/* track the mapping on the buffer object so it can be torn down on free */
	spin_lock(&mapping->bo->lock);
	list_add_tail(&mapping->list, &bo->mappings);
	spin_unlock(&mapping->bo->lock);

	if (cache) {
		INIT_LIST_HEAD(&mapping->entry);
		mapping->cache = cache;

		list_add_tail(&mapping->entry, &cache->mappings);

		/* bump reference count to track the copy in the cache */
		kref_get(&mapping->ref);
	}

unlock:
	if (cache)
		mutex_unlock(&cache->lock);

	return mapping;
}
EXPORT_SYMBOL(host1x_bo_pin);

/*
 * Release callback passed to kref_put() in host1x_bo_unpin(); runs when the
 * last reference to a mapping is dropped. For cached mappings this is called
 * with the owning cache's lock held (taken in host1x_bo_unpin()), which
 * serializes the list_del() against concurrent cache lookups.
 */
static void __host1x_bo_unpin(struct kref *ref)
{
	struct host1x_bo_mapping *mapping = to_host1x_bo_mapping(ref);

	/*
	 * When the last reference of the mapping goes away, make sure to remove the mapping from
	 * the cache.
	 */
	if (mapping->cache)
		list_del(&mapping->entry);

	/* drop the mapping from the buffer object's list of active mappings */
	spin_lock(&mapping->bo->lock);
	list_del(&mapping->list);
	spin_unlock(&mapping->bo->lock);

	/* hand off to the driver-specific unpin implementation */
	mapping->bo->ops->unpin(mapping);
}

/*
 * host1x_bo_unpin() - drop one reference to a pinned mapping
 * @mapping: mapping obtained from host1x_bo_pin()
 *
 * When the last reference goes away, __host1x_bo_unpin() tears the mapping
 * down. For cached mappings the cache lock is held around the kref_put() so
 * that the final teardown's cache-list removal cannot race with a concurrent
 * lookup in host1x_bo_pin().
 */
void host1x_bo_unpin(struct host1x_bo_mapping *mapping)
{
	struct host1x_bo_cache *cache = mapping->cache;

	if (!cache) {
		/* uncached mapping: no cache lock to take */
		kref_put(&mapping->ref, __host1x_bo_unpin);
		return;
	}

	mutex_lock(&cache->lock);
	kref_put(&mapping->ref, __host1x_bo_unpin);
	mutex_unlock(&cache->lock);
}
EXPORT_SYMBOL(host1x_bo_unpin);
Loading