ALSA: core: Move mmap handler into memalloc ops
This patch moves the mmap handling code into the common memalloc
handler.  It allows us to reduce the memory-type-specific code in the
PCM core.

Link: https://lore.kernel.org/r/20210609162551.7842-5-tiwai@suse.de
Signed-off-by: Takashi Iwai <tiwai@suse.de>
Takashi Iwai committed Jun 10, 2021
1 parent 37af81c commit a202bd1
Showing 4 changed files with 43 additions and 16 deletions.
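
As an orientation aid for the diff below, here is a minimal sketch (not part of the patch) of the call pattern this commit introduces: a caller such as the PCM mmap path no longer branches on the buffer's memory type but hands the buffer to snd_dma_buffer_mmap(), which looks up the type-specific ops and delegates to its ->mmap callback, returning -ENOENT when the type provides none. The wrapper name example_mmap_data() is invented for illustration; snd_dma_buffer_mmap() and struct snd_dma_buffer come from the patch itself.

/* Illustrative sketch only; assumes <linux/mm.h> and <sound/memalloc.h>. */
static int example_mmap_data(struct snd_dma_buffer *dmab,
			     struct vm_area_struct *area)
{
	/* Delegate to the memory-type specific ->mmap handler, if any. */
	if (!snd_dma_buffer_mmap(dmab, area))
		return 0;	/* mapped directly by the backend */

	/*
	 * A non-zero return (e.g. -ENOENT) means the memory type has no
	 * direct mmap implementation; the caller then falls back to its
	 * own method, as snd_pcm_lib_default_mmap() does with its
	 * page-fault based vm_ops in the pcm_native.c hunk below.
	 */
	return -ENXIO;	/* placeholder fallback for this sketch */
}
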
include/sound/memalloc.h (3 additions, 0 deletions)
@@ -11,6 +11,7 @@
 
 struct device;
 struct page;
+struct vm_area_struct;
 
 /*
  * buffer device info
@@ -69,6 +70,8 @@ int snd_dma_alloc_pages(int type, struct device *dev, size_t size,
 int snd_dma_alloc_pages_fallback(int type, struct device *dev, size_t size,
				  struct snd_dma_buffer *dmab);
 void snd_dma_free_pages(struct snd_dma_buffer *dmab);
+int snd_dma_buffer_mmap(struct snd_dma_buffer *dmab,
+			struct vm_area_struct *area);
 
 dma_addr_t snd_sgbuf_get_addr(struct snd_dma_buffer *dmab, size_t offset);
 struct page *snd_sgbuf_get_page(struct snd_dma_buffer *dmab, size_t offset);
sound/core/memalloc.c (36 additions, 0 deletions)
@@ -127,6 +127,23 @@ void snd_dma_free_pages(struct snd_dma_buffer *dmab)
 }
 EXPORT_SYMBOL(snd_dma_free_pages);
 
+/**
+ * snd_dma_buffer_mmap - perform mmap of the given DMA buffer
+ * @dmab: buffer allocation information
+ * @area: VM area information
+ */
+int snd_dma_buffer_mmap(struct snd_dma_buffer *dmab,
+			struct vm_area_struct *area)
+{
+	const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);
+
+	if (ops && ops->mmap)
+		return ops->mmap(dmab, area);
+	else
+		return -ENOENT;
+}
+EXPORT_SYMBOL(snd_dma_buffer_mmap);
+
 /**
  * snd_sgbuf_get_addr - return the physical address at the corresponding offset
  * @dmab: buffer allocation information
@@ -283,9 +300,20 @@ static void snd_dma_iram_free(struct snd_dma_buffer *dmab)
 	gen_pool_free(pool, (unsigned long)dmab->area, dmab->bytes);
 }
 
+static int snd_dma_iram_mmap(struct snd_dma_buffer *dmab,
+			     struct vm_area_struct *area)
+{
+	area->vm_page_prot = pgprot_writecombine(area->vm_page_prot);
+	return remap_pfn_range(area, area->vm_start,
+			       dmab->addr >> PAGE_SHIFT,
+			       area->vm_end - area->vm_start,
+			       area->vm_page_prot);
+}
+
 static const struct snd_malloc_ops snd_dma_iram_ops = {
 	.alloc = snd_dma_iram_alloc,
 	.free = snd_dma_iram_free,
+	.mmap = snd_dma_iram_mmap,
 };
 #endif /* CONFIG_GENERIC_ALLOCATOR */
 
@@ -320,9 +348,17 @@ static void snd_dma_dev_free(struct snd_dma_buffer *dmab)
 	dma_free_coherent(dmab->dev.dev, dmab->bytes, dmab->area, dmab->addr);
 }
 
+static int snd_dma_dev_mmap(struct snd_dma_buffer *dmab,
+			    struct vm_area_struct *area)
+{
+	return dma_mmap_coherent(dmab->dev.dev, area,
+				 dmab->area, dmab->addr, dmab->bytes);
+}
+
 static const struct snd_malloc_ops snd_dma_dev_ops = {
 	.alloc = snd_dma_dev_alloc,
 	.free = snd_dma_dev_free,
+	.mmap = snd_dma_dev_mmap,
 };
 #endif /* CONFIG_HAS_DMA */
 
sound/core/memalloc_local.h (1 addition, 0 deletions)
@@ -9,6 +9,7 @@ struct snd_malloc_ops {
 	struct page *(*get_page)(struct snd_dma_buffer *dmab, size_t offset);
 	unsigned int (*get_chunk_size)(struct snd_dma_buffer *dmab,
 				       unsigned int ofs, unsigned int size);
+	int (*mmap)(struct snd_dma_buffer *dmab, struct vm_area_struct *area);
 };
 
 #ifdef CONFIG_SND_DMA_SGBUF
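
For illustration, a hypothetical backend sketch (not part of the patch) showing how a memory type now supplies its mmap handler through this ops table, mirroring snd_dma_dev_ops above; the snd_dma_example_* names are invented, and the remaining callbacks are omitted.

/*
 * Hypothetical sketch; assumes "memalloc_local.h", <linux/mm.h> and
 * <linux/dma-mapping.h>.  Only the new ->mmap member is shown.
 */
static int snd_dma_example_mmap(struct snd_dma_buffer *dmab,
				struct vm_area_struct *area)
{
	/* map the coherent buffer into user space, as the dev type does */
	return dma_mmap_coherent(dmab->dev.dev, area,
				 dmab->area, dmab->addr, dmab->bytes);
}

static const struct snd_malloc_ops snd_dma_example_ops = {
	/* .alloc, .free, etc. would be filled in as before (omitted here) */
	.mmap = snd_dma_example_mmap,
};
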
sound/core/pcm_native.c (3 additions, 16 deletions)
@@ -3700,22 +3700,9 @@ int snd_pcm_lib_default_mmap(struct snd_pcm_substream *substream,
 			     struct vm_area_struct *area)
 {
 	area->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
-#ifdef CONFIG_GENERIC_ALLOCATOR
-	if (substream->dma_buffer.dev.type == SNDRV_DMA_TYPE_DEV_IRAM) {
-		area->vm_page_prot = pgprot_writecombine(area->vm_page_prot);
-		return remap_pfn_range(area, area->vm_start,
-				       substream->dma_buffer.addr >> PAGE_SHIFT,
-				       area->vm_end - area->vm_start, area->vm_page_prot);
-	}
-#endif /* CONFIG_GENERIC_ALLOCATOR */
-	if (IS_ENABLED(CONFIG_HAS_DMA) && !substream->ops->page &&
-	    (substream->dma_buffer.dev.type == SNDRV_DMA_TYPE_DEV ||
-	     substream->dma_buffer.dev.type == SNDRV_DMA_TYPE_DEV_UC))
-		return dma_mmap_coherent(substream->dma_buffer.dev.dev,
-					 area,
-					 substream->runtime->dma_area,
-					 substream->runtime->dma_addr,
-					 substream->runtime->dma_bytes);
+	if (!substream->ops->page &&
+	    !snd_dma_buffer_mmap(snd_pcm_get_dma_buf(substream), area))
+		return 0;
 	/* mmap with fault handler */
 	area->vm_ops = &snd_pcm_vm_ops_data_fault;
 	return 0;
