Commit 8bd2f710 authored by Xuan Zhuo's avatar Xuan Zhuo Committed by Michael S. Tsirkin
Browse files

virtio_ring: introduce dma sync api for virtqueue



These APIs have been introduced:

* virtqueue_dma_need_sync
* virtqueue_dma_sync_single_range_for_cpu
* virtqueue_dma_sync_single_range_for_device

These APIs can be used together with the premapped mechanism to sync the
DMA address.

Signed-off-by: default avatarXuan Zhuo <xuanzhuo@linux.alibaba.com>
Message-Id: <20230810123057.43407-12-xuanzhuo@linux.alibaba.com>
Signed-off-by: default avatarMichael S. Tsirkin <mst@redhat.com>
parent b6253b4e
Loading
Loading
Loading
Loading
+76 −0
Original line number Diff line number Diff line
@@ -3175,4 +3175,80 @@ int virtqueue_dma_mapping_error(struct virtqueue *_vq, dma_addr_t addr)
}
EXPORT_SYMBOL_GPL(virtqueue_dma_mapping_error);

/**
 * virtqueue_dma_need_sync - check whether a DMA address needs to be synced
 * @_vq: the struct virtqueue we're talking about.
 * @addr: DMA address
 *
 * Check if the DMA address mapped by the virtqueue_dma_map_* APIs needs to be
 * synchronized.
 *
 * Returns: true if the address requires explicit sync calls, false otherwise
 * (always false when the virtqueue bypasses the DMA API).
 */
bool virtqueue_dma_need_sync(struct virtqueue *_vq, dma_addr_t addr)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	/* Without the DMA API there is nothing to synchronize. */
	return vq->use_dma_api && dma_need_sync(vring_dma_dev(vq), addr);
}
EXPORT_SYMBOL_GPL(virtqueue_dma_need_sync);

/**
 * virtqueue_dma_sync_single_range_for_cpu - dma sync for cpu
 * @_vq: the struct virtqueue we're talking about.
 * @addr: DMA address
 * @offset: DMA address offset
 * @size: buf size for sync
 * @dir: DMA direction
 *
 * Before calling this function, use virtqueue_dma_need_sync() to confirm that
 * the DMA address really needs to be synchronized
 *
 */
void virtqueue_dma_sync_single_range_for_cpu(struct virtqueue *_vq,
					     dma_addr_t addr,
					     unsigned long offset, size_t size,
					     enum dma_data_direction dir)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	struct device *dev = vring_dma_dev(vq);

	if (!vq->use_dma_api)
		return;

	/* Sync with the caller's direction; hard-coding DMA_BIDIRECTIONAL
	 * here would ignore @dir and over-sync (or mis-sync) the buffer.
	 */
	dma_sync_single_range_for_cpu(dev, addr, offset, size, dir);
}
EXPORT_SYMBOL_GPL(virtqueue_dma_sync_single_range_for_cpu);
EXPORT_SYMBOL_GPL(virtqueue_dma_sync_single_range_for_cpu);

/**
 * virtqueue_dma_sync_single_range_for_device - dma sync for device
 * @_vq: the struct virtqueue we're talking about.
 * @addr: DMA address
 * @offset: DMA address offset
 * @size: buf size for sync
 * @dir: DMA direction
 *
 * Before calling this function, use virtqueue_dma_need_sync() to confirm that
 * the DMA address really needs to be synchronized
 */
void virtqueue_dma_sync_single_range_for_device(struct virtqueue *_vq,
						dma_addr_t addr,
						unsigned long offset, size_t size,
						enum dma_data_direction dir)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	struct device *dev = vring_dma_dev(vq);

	if (!vq->use_dma_api)
		return;

	/* Sync with the caller's direction; the previous hard-coded
	 * DMA_BIDIRECTIONAL silently discarded @dir.
	 */
	dma_sync_single_range_for_device(dev, addr, offset, size, dir);
}
EXPORT_SYMBOL_GPL(virtqueue_dma_sync_single_range_for_device);
EXPORT_SYMBOL_GPL(virtqueue_dma_sync_single_range_for_device);

MODULE_LICENSE("GPL");
+8 −0
Original line number Diff line number Diff line
@@ -220,4 +220,12 @@ void virtqueue_dma_unmap_single_attrs(struct virtqueue *_vq, dma_addr_t addr,
				      size_t size, enum dma_data_direction dir,
				      unsigned long attrs);
int virtqueue_dma_mapping_error(struct virtqueue *_vq, dma_addr_t addr);

bool virtqueue_dma_need_sync(struct virtqueue *_vq, dma_addr_t addr);
void virtqueue_dma_sync_single_range_for_cpu(struct virtqueue *_vq, dma_addr_t addr,
					     unsigned long offset, size_t size,
					     enum dma_data_direction dir);
void virtqueue_dma_sync_single_range_for_device(struct virtqueue *_vq, dma_addr_t addr,
						unsigned long offset, size_t size,
						enum dma_data_direction dir);
#endif /* _LINUX_VIRTIO_H */