ALSA: memalloc: Allocate more contiguous pages for fallback case
[ Upstream commit cc26516374 ]

Currently the fallback SG allocation tries to allocate each single page, and this tends to result in the reverse order of memory addresses when a large space is available at boot, as the kernel takes free pages from the top to the bottom of the zone. The end result looks as if the buffer were non-contiguous (although it actually is contiguous). Worse, it leads to an overflow of BDL entries for HD-audio.

To avoid such a problem, this patch modifies the allocation code slightly: it now tries to allocate larger contiguous chunks as much as possible, then falls back to smaller chunks only when an allocation fails -- a similar strategy to the existing snd_dma_alloc_pages_fallback() function.

Along with this trick, drop the unused address array from the snd_dma_sg_fallback object. It was needed in the past when dma_alloc_coherent() was used, but with the standard page allocator it became superfluous and is never referenced.

Fixes: a8d302a0b7 ("ALSA: memalloc: Revive x86-specific WC page allocations again")
Reviewed-by: Kai Vehmanen <kai.vehmanen@linux.intel.com>
Link: https://lore.kernel.org/r/20221114141658.29620-1-tiwai@suse.de
Signed-off-by: Takashi Iwai <tiwai@suse.de>
Signed-off-by: Sasha Levin <sashal@kernel.org>
commit accc7993a7
parent 0a730f51b0
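The halving strategy described in the message above is easiest to see in isolation. Below is a minimal userspace sketch for illustration only, not the kernel code: alloc_chunk() and round_order() are hypothetical stand-ins, where alloc_chunk() plays the role of do_alloc_pages() and simply refuses chunks above a configurable contiguity limit, and round_order() mimics the kernel's PAGE_SIZE << get_order(...) rounding.

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE 4096UL
#define MIN(a, b) ((a) < (b) ? (a) : (b))

/* round up to the next power-of-two multiple of PAGE_SIZE,
 * like PAGE_SIZE << get_order(size) in the kernel */
static size_t round_order(size_t size)
{
	size_t chunk = PAGE_SIZE;

	while (chunk < size)
		chunk <<= 1;
	return chunk;
}

/* stand-in for do_alloc_pages(): pretend that contiguous runs
 * larger than "limit" cannot be found (fragmented memory) */
static void *alloc_chunk(size_t size, size_t limit)
{
	if (size > limit)
		return NULL;
	return malloc(size);
}

int main(void)
{
	size_t size = 96 * 1024;   /* requested buffer: 24 pages of 4 KiB */
	size_t limit = 32 * 1024;  /* largest contiguous run available */
	size_t chunk = size;
	void *p;

	while (size > 0) {
		chunk = MIN(size, chunk);
		p = alloc_chunk(chunk, limit);
		if (!p) {
			if (chunk <= PAGE_SIZE)
				return 1;  /* even a single page failed */
			/* halve the attempt, round to 2^n pages, retry */
			chunk = round_order(chunk >> 1);
			continue;
		}
		printf("allocated chunk of %zu KiB\n", chunk / 1024);
		size -= chunk;
		free(p);
	}
	return 0;
}

With these sample numbers the loop first tries 96 KiB, falls back through 64 KiB, and finally satisfies the request with three 32 KiB chunks, instead of 24 single-page allocations.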
diff --git a/sound/core/memalloc.c b/sound/core/memalloc.c
@@ -720,7 +720,6 @@ static const struct snd_malloc_ops snd_dma_sg_wc_ops = {
 struct snd_dma_sg_fallback {
 	size_t count;
 	struct page **pages;
-	dma_addr_t *addrs;
 };
 
 static void __snd_dma_sg_fallback_free(struct snd_dma_buffer *dmab,
@@ -732,38 +731,49 @@ static void __snd_dma_sg_fallback_free(struct snd_dma_buffer *dmab,
 	for (i = 0; i < sgbuf->count && sgbuf->pages[i]; i++)
 		do_free_pages(page_address(sgbuf->pages[i]), PAGE_SIZE, wc);
 	kvfree(sgbuf->pages);
-	kvfree(sgbuf->addrs);
 	kfree(sgbuf);
 }
 
 static void *snd_dma_sg_fallback_alloc(struct snd_dma_buffer *dmab, size_t size)
 {
 	struct snd_dma_sg_fallback *sgbuf;
-	struct page **pages;
-	size_t i, count;
+	struct page **pagep, *curp;
+	size_t chunk, npages;
+	dma_addr_t addr;
 	void *p;
 	bool wc = dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG_FALLBACK;
 
 	sgbuf = kzalloc(sizeof(*sgbuf), GFP_KERNEL);
 	if (!sgbuf)
 		return NULL;
-	count = PAGE_ALIGN(size) >> PAGE_SHIFT;
-	pages = kvcalloc(count, sizeof(*pages), GFP_KERNEL);
-	if (!pages)
-		goto error;
-	sgbuf->pages = pages;
-	sgbuf->addrs = kvcalloc(count, sizeof(*sgbuf->addrs), GFP_KERNEL);
-	if (!sgbuf->addrs)
+	size = PAGE_ALIGN(size);
+	sgbuf->count = size >> PAGE_SHIFT;
+	sgbuf->pages = kvcalloc(sgbuf->count, sizeof(*sgbuf->pages), GFP_KERNEL);
+	if (!sgbuf->pages)
 		goto error;
 
-	for (i = 0; i < count; sgbuf->count++, i++) {
-		p = do_alloc_pages(dmab->dev.dev, PAGE_SIZE, &sgbuf->addrs[i], wc);
-		if (!p)
-			goto error;
-		sgbuf->pages[i] = virt_to_page(p);
+	pagep = sgbuf->pages;
+	chunk = size;
+	while (size > 0) {
+		chunk = min(size, chunk);
+		p = do_alloc_pages(dmab->dev.dev, chunk, &addr, wc);
+		if (!p) {
+			if (chunk <= PAGE_SIZE)
+				goto error;
+			chunk >>= 1;
+			chunk = PAGE_SIZE << get_order(chunk);
+			continue;
+		}
+
+		size -= chunk;
+		/* fill pages */
+		npages = chunk >> PAGE_SHIFT;
+		curp = virt_to_page(p);
+		while (npages--)
+			*pagep++ = curp++;
 	}
 
-	p = vmap(pages, count, VM_MAP, PAGE_KERNEL);
+	p = vmap(sgbuf->pages, sgbuf->count, VM_MAP, PAGE_KERNEL);
 	if (!p)
 		goto error;
 	dmab->private_data = sgbuf;
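A note on the retry step in the hunk above: get_order() rounds up, so chunk >>= 1 followed by chunk = PAGE_SIZE << get_order(chunk) halves the attempt and then snaps it to the next power-of-two multiple of PAGE_SIZE; with 4 KiB pages, a failed 96 KiB attempt retries at 64 KiB rather than 48 KiB. Because the pages of each chunk are physically consecutive and stored in ascending order in sgbuf->pages, the final vmap() yields one contiguous kernel mapping and consumers see far fewer distinct physical runs, which is what avoids the HD-audio BDL entry overflow described in the commit message.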