ALSA: memalloc: Drop special handling of GFP for CONTINUOUS allocation

Now that all users of snd_dma_continuous_data() are gone, let's drop
this ugly (and dangerous) hack.

After this commit, SNDRV_DMA_TYPE_CONTINUOUS may take the standard
device pointer instead of the pointer hacked up by the macro above,
and the memalloc core refers to the coherent_dma_mask of the given
device like the other SNDRV_DMA_TYPE variants.  It's still allowed to
pass NULL there, and in that case, the allocation is always performed
in the normal zone.

For SNDRV_DMA_TYPE_VMALLOC, the device pointer is simply ignored.
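
For callers, the switch looks roughly like the sketch below (an
illustration, not part of this diff; card->dev stands in for whatever
struct device a real driver would pass):

	/* before: gfp flags smuggled through the device pointer */
	snd_dma_alloc_pages(SNDRV_DMA_TYPE_CONTINUOUS,
			    snd_dma_continuous_data(GFP_KERNEL),
			    size, dmab);

	/* after: pass a real device, or NULL for the normal zone;
	 * the memalloc core derives the zone from the device's
	 * coherent_dma_mask
	 */
	snd_dma_alloc_pages(SNDRV_DMA_TYPE_CONTINUOUS, card->dev,
			    size, dmab);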

Link: https://lore.kernel.org/r/20220823115740.14123-5-tiwai@suse.de
Signed-off-by: Takashi Iwai <tiwai@suse.de>
Takashi Iwai <tiwai@suse.de>  2022-08-23 13:57:39 +02:00
parent 97557ec97a
commit dd164fbfdc
2 changed files with 48 additions and 68 deletions

--- a/include/sound/memalloc.h
+++ b/include/sound/memalloc.h
@@ -26,9 +26,6 @@ struct snd_dma_device {
 	struct device *dev;		/* generic device */
 };
 
-#define snd_dma_continuous_data(x)	((struct device *)(__force unsigned long)(x))
-
-
 /*
  * buffer types
  */

--- a/sound/core/memalloc.c
+++ b/sound/core/memalloc.c
@@ -18,25 +18,18 @@
 #include <sound/memalloc.h>
 #include "memalloc_local.h"
 
+#define DEFAULT_GFP \
+	(GFP_KERNEL | \
+	 __GFP_COMP |    /* compound page lets parts be mapped */ \
+	 __GFP_NORETRY | /* don't trigger OOM-killer */ \
+	 __GFP_NOWARN)   /* no stack trace print - this call is non-critical */
+
 static const struct snd_malloc_ops *snd_dma_get_ops(struct snd_dma_buffer *dmab);
 
 #ifdef CONFIG_SND_DMA_SGBUF
-static void *do_alloc_fallback_pages(struct device *dev, size_t size,
-				     dma_addr_t *addr, bool wc);
-static void do_free_fallback_pages(void *p, size_t size, bool wc);
 static void *snd_dma_sg_fallback_alloc(struct snd_dma_buffer *dmab, size_t size);
 #endif
 
-/* a cast to gfp flag from the dev pointer; for CONTINUOUS and VMALLOC types */
-static inline gfp_t snd_mem_get_gfp_flags(const struct snd_dma_buffer *dmab,
-					  gfp_t default_gfp)
-{
-	if (!dmab->dev.dev)
-		return default_gfp;
-	else
-		return (__force gfp_t)(unsigned long)dmab->dev.dev;
-}
-
 static void *__snd_dma_alloc_pages(struct snd_dma_buffer *dmab, size_t size)
 {
 	const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);
@@ -284,24 +277,54 @@ EXPORT_SYMBOL(snd_sgbuf_get_chunk_size);
 /*
  * Continuous pages allocator
  */
-static void *do_alloc_pages(size_t size, dma_addr_t *addr, gfp_t gfp)
+static void *do_alloc_pages(struct device *dev, size_t size, dma_addr_t *addr,
+			    bool wc)
 {
-	void *p = alloc_pages_exact(size, gfp);
+	void *p;
+	gfp_t gfp = GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN;
 
-	if (p)
-		*addr = page_to_phys(virt_to_page(p));
+ again:
+	p = alloc_pages_exact(size, gfp);
+	if (!p)
+		return NULL;
+	*addr = page_to_phys(virt_to_page(p));
+	if (!dev)
+		return p;
+	if ((*addr + size - 1) & ~dev->coherent_dma_mask) {
+		if (IS_ENABLED(CONFIG_ZONE_DMA32) && !(gfp & GFP_DMA32)) {
+			gfp |= GFP_DMA32;
+			goto again;
+		}
+		if (IS_ENABLED(CONFIG_ZONE_DMA) && !(gfp & GFP_DMA)) {
+			gfp = (gfp & ~GFP_DMA32) | GFP_DMA;
+			goto again;
+		}
+	}
+
+#ifdef CONFIG_X86
+	if (wc)
+		set_memory_wc((unsigned long)(p), size >> PAGE_SHIFT);
+#endif
 	return p;
 }
 
+static void do_free_pages(void *p, size_t size, bool wc)
+{
+#ifdef CONFIG_X86
+	if (wc)
+		set_memory_wb((unsigned long)(p), size >> PAGE_SHIFT);
+#endif
+	free_pages_exact(p, size);
+}
+
 static void *snd_dma_continuous_alloc(struct snd_dma_buffer *dmab, size_t size)
 {
-	return do_alloc_pages(size, &dmab->addr,
-			      snd_mem_get_gfp_flags(dmab, GFP_KERNEL));
+	return do_alloc_pages(dmab->dev.dev, size, &dmab->addr, false);
 }
 
 static void snd_dma_continuous_free(struct snd_dma_buffer *dmab)
 {
-	free_pages_exact(dmab->area, dmab->bytes);
+	do_free_pages(dmab->area, dmab->bytes, false);
 }
 
 static int snd_dma_continuous_mmap(struct snd_dma_buffer *dmab,
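
The retry logic in do_alloc_pages() above boils down to a single mask
test: if the buffer's last byte is not reachable through the device's
coherent_dma_mask, the allocation is redone from a lower zone
(GFP_DMA32 first, then GFP_DMA). A minimal stand-alone sketch of that
test, using made-up mask and address values:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t mask = 0xffffffffULL;	/* 32-bit capable device */
		uint64_t addr = 0xfffff000ULL;	/* example buffer start */
		uint64_t size = 0x2000;		/* 8 KiB buffer */

		/* same test as the kernel code: does the last byte of
		 * the buffer fall outside the device-reachable range?
		 */
		if ((addr + size - 1) & ~mask)
			printf("unreachable: retry with GFP_DMA32, then GFP_DMA\n");
		else
			printf("buffer fits within the device's DMA mask\n");
		return 0;
	}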
@@ -324,9 +347,7 @@ static const struct snd_malloc_ops snd_dma_continuous_ops = {
  */
 static void *snd_dma_vmalloc_alloc(struct snd_dma_buffer *dmab, size_t size)
 {
-	gfp_t gfp = snd_mem_get_gfp_flags(dmab, GFP_KERNEL | __GFP_HIGHMEM);
-
-	return __vmalloc(size, gfp);
+	return vmalloc(size);
 }
 
 static void snd_dma_vmalloc_free(struct snd_dma_buffer *dmab)
@@ -440,12 +461,6 @@ static const struct snd_malloc_ops snd_dma_iram_ops = {
 };
 #endif /* CONFIG_GENERIC_ALLOCATOR */
 
-#define DEFAULT_GFP \
-	(GFP_KERNEL | \
-	 __GFP_COMP |    /* compound page lets parts be mapped */ \
-	 __GFP_NORETRY | /* don't trigger OOM-killer */ \
-	 __GFP_NOWARN)   /* no stack trace print - this call is non-critical */
-
 /*
  * Coherent device pages allocator
  */
@@ -479,12 +494,12 @@ static const struct snd_malloc_ops snd_dma_dev_ops = {
 #ifdef CONFIG_SND_DMA_SGBUF
 static void *snd_dma_wc_alloc(struct snd_dma_buffer *dmab, size_t size)
 {
-	return do_alloc_fallback_pages(dmab->dev.dev, size, &dmab->addr, true);
+	return do_alloc_pages(dmab->dev.dev, size, &dmab->addr, true);
 }
 
 static void snd_dma_wc_free(struct snd_dma_buffer *dmab)
 {
-	do_free_fallback_pages(dmab->area, dmab->bytes, true);
+	do_free_pages(dmab->area, dmab->bytes, true);
 }
 
 static int snd_dma_wc_mmap(struct snd_dma_buffer *dmab,
@@ -697,37 +712,6 @@ static const struct snd_malloc_ops snd_dma_sg_wc_ops = {
 	.get_chunk_size = snd_dma_noncontig_get_chunk_size,
 };
 
-/* manual page allocations with wc setup */
-static void *do_alloc_fallback_pages(struct device *dev, size_t size,
-				     dma_addr_t *addr, bool wc)
-{
-	gfp_t gfp = GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN;
-	void *p;
-
- again:
-	p = do_alloc_pages(size, addr, gfp);
-	if (!p || (*addr + size - 1) & ~dev->coherent_dma_mask) {
-		if (IS_ENABLED(CONFIG_ZONE_DMA32) && !(gfp & GFP_DMA32)) {
-			gfp |= GFP_DMA32;
-			goto again;
-		}
-		if (IS_ENABLED(CONFIG_ZONE_DMA) && !(gfp & GFP_DMA)) {
-			gfp = (gfp & ~GFP_DMA32) | GFP_DMA;
-			goto again;
-		}
-	}
-	if (p && wc)
-		set_memory_wc((unsigned long)(p), size >> PAGE_SHIFT);
-	return p;
-}
-
-static void do_free_fallback_pages(void *p, size_t size, bool wc)
-{
-	if (wc)
-		set_memory_wb((unsigned long)(p), size >> PAGE_SHIFT);
-	free_pages_exact(p, size);
-}
-
 /* Fallback SG-buffer allocations for x86 */
 struct snd_dma_sg_fallback {
 	size_t count;
@@ -742,7 +726,7 @@ static void __snd_dma_sg_fallback_free(struct snd_dma_buffer *dmab,
 	size_t i;
 
 	for (i = 0; i < sgbuf->count && sgbuf->pages[i]; i++)
-		do_free_fallback_pages(page_address(sgbuf->pages[i]), PAGE_SIZE, wc);
+		do_free_pages(page_address(sgbuf->pages[i]), PAGE_SIZE, wc);
 	kvfree(sgbuf->pages);
 	kvfree(sgbuf->addrs);
 	kfree(sgbuf);
@@ -769,8 +753,7 @@ static void *snd_dma_sg_fallback_alloc(struct snd_dma_buffer *dmab, size_t size)
 		goto error;
 
 	for (i = 0; i < count; sgbuf->count++, i++) {
-		p = do_alloc_fallback_pages(dmab->dev.dev, PAGE_SIZE,
-					    &sgbuf->addrs[i], wc);
+		p = do_alloc_pages(dmab->dev.dev, PAGE_SIZE, &sgbuf->addrs[i], wc);
 		if (!p)
 			goto error;
 		sgbuf->pages[i] = virt_to_page(p);