Commit 73325f60 authored by Takashi Iwai

ALSA: memalloc: Support for non-coherent page allocation

Following the addition of non-contiguous pages, this patch adds the
new contiguous non-coherent page allocation to the standard memalloc
helper.  Like the non-contig type, the non-coherent type is
directional and requires explicit sync as well.  Hence a driver using
this type of buffer may need to set the SNDRV_PCM_INFO_EXPLICIT_SYNC
flag in the PCM hardware.info field, unless the buffer is set up in
managed mode.
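
For illustration (not part of this patch), a driver could request the
new type in managed mode roughly as sketched below; the function name
my_pcm_preallocate(), the device pointer, and the sizes are all
hypothetical placeholders:

    #include <sound/pcm.h>
    #include <sound/memalloc.h>

    /* Hypothetical sketch: preallocate contiguous non-coherent
     * buffers in managed mode; the PCM core then owns the buffer
     * life cycle and the explicit-sync handling, so the driver
     * doesn't set SNDRV_PCM_INFO_EXPLICIT_SYNC itself.
     */
    static int my_pcm_preallocate(struct snd_pcm *pcm, struct device *dev)
    {
            return snd_pcm_set_managed_buffer_all(pcm,
                                                  SNDRV_DMA_TYPE_NONCOHERENT,
                                                  dev, 64 * 1024, 0);
    }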

Link: https://lore.kernel.org/r/20211017074859.24112-3-tiwai@suse.de


Signed-off-by: Takashi Iwai <tiwai@suse.de>
parent a25684a9
include/sound/memalloc.h  +1 −0
@@ -50,6 +50,7 @@ struct snd_dma_device {
#endif
#define SNDRV_DMA_TYPE_VMALLOC		7	/* vmalloc'ed buffer */
#define SNDRV_DMA_TYPE_NONCONTIG	8	/* non-coherent SG buffer */
#define SNDRV_DMA_TYPE_NONCOHERENT	9	/* non-coherent buffer */

/*
 * info for buffer allocation
sound/core/memalloc.c  +47 −0
@@ -560,6 +560,52 @@ static const struct snd_malloc_ops snd_dma_noncontig_ops = {
	.get_chunk_size = snd_dma_vmalloc_get_chunk_size,
};

/*
 * Non-coherent pages allocator
 */
static void *snd_dma_noncoherent_alloc(struct snd_dma_buffer *dmab, size_t size)
{
	dmab->dev.need_sync = dma_need_sync(dmab->dev.dev, dmab->dev.dir);
	return dma_alloc_noncoherent(dmab->dev.dev, size, &dmab->addr,
				     dmab->dev.dir, DEFAULT_GFP);
}

static void snd_dma_noncoherent_free(struct snd_dma_buffer *dmab)
{
	dma_free_noncoherent(dmab->dev.dev, dmab->bytes, dmab->area,
			     dmab->addr, dmab->dev.dir);
}

static int snd_dma_noncoherent_mmap(struct snd_dma_buffer *dmab,
				    struct vm_area_struct *area)
{
	area->vm_page_prot = vm_get_page_prot(area->vm_flags);
	return dma_mmap_pages(dmab->dev.dev, area,
			      area->vm_end - area->vm_start,
			      virt_to_page(dmab->area));
}

static void snd_dma_noncoherent_sync(struct snd_dma_buffer *dmab,
				     enum snd_dma_sync_mode mode)
{
	if (mode == SNDRV_DMA_SYNC_CPU) {
		if (dmab->dev.dir != DMA_TO_DEVICE)
			dma_sync_single_for_cpu(dmab->dev.dev, dmab->addr,
						dmab->bytes, dmab->dev.dir);
	} else {
		if (dmab->dev.dir != DMA_FROM_DEVICE)
			dma_sync_single_for_device(dmab->dev.dev, dmab->addr,
						   dmab->bytes, dmab->dev.dir);
	}
}

static const struct snd_malloc_ops snd_dma_noncoherent_ops = {
	.alloc = snd_dma_noncoherent_alloc,
	.free = snd_dma_noncoherent_free,
	.mmap = snd_dma_noncoherent_mmap,
	.sync = snd_dma_noncoherent_sync,
};

#endif /* CONFIG_HAS_DMA */

/*
@@ -572,6 +618,7 @@ static const struct snd_malloc_ops *dma_ops[] = {
	[SNDRV_DMA_TYPE_DEV] = &snd_dma_dev_ops,
	[SNDRV_DMA_TYPE_DEV_WC] = &snd_dma_wc_ops,
	[SNDRV_DMA_TYPE_NONCONTIG] = &snd_dma_noncontig_ops,
	[SNDRV_DMA_TYPE_NONCOHERENT] = &snd_dma_noncoherent_ops,
#ifdef CONFIG_GENERIC_ALLOCATOR
	[SNDRV_DMA_TYPE_DEV_IRAM] = &snd_dma_iram_ops,
#endif /* CONFIG_GENERIC_ALLOCATOR */
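
As a usage sketch (also not part of this patch): the
snd_dma_buffer_sync() helper from <sound/memalloc.h> dispatches to
the .sync op above only when dev.need_sync is set, so a caller would
bracket its CPU access roughly like this, where buf, data, and bytes
are placeholders for a SNDRV_DMA_TYPE_NONCOHERENT buffer and the
driver's own copy target:

    /* claim the buffer for the CPU before reading device-written data */
    snd_dma_buffer_sync(buf, SNDRV_DMA_SYNC_CPU);
    memcpy(data, buf->area, bytes);
    /* hand ownership back to the device afterwards */
    snd_dma_buffer_sync(buf, SNDRV_DMA_SYNC_DEVICE);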