Mirror of https://github.com/adulau/aha.git (synced 2024-12-29 12:16:20 +00:00)
dma-coherent: Restore dma_alloc_from_coherent() large alloc fall back policy.
When doing large allocations (larger than the per-device coherent area), the allocation silently falls back on the generic memory allocators, with no regard for the per-device constraints. In the DMA_MEMORY_EXCLUSIVE case, falling back on generic memory is not an option, as it tends not to be addressable by the DMA hardware in question.

This issue showed up with the 8139too breakage on the Dreamcast, where non-addressable buffers were silently allocated due to the size mismatch calculation, when the allocation should simply have errored out upon being unable to satisfy the request under the given device constraints.

This restores the fall-back behaviour to what it was before the oversized-request change caused multiple regressions.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
parent cdf57cab27
commit 0609697eab
1 changed file with 23 additions and 22 deletions
@@ -98,7 +98,7 @@ EXPORT_SYMBOL(dma_mark_declared_memory_occupied);
  * @size: size of requested memory area
  * @dma_handle: This will be filled with the correct dma handle
  * @ret: This pointer will be filled with the virtual address
- * 		to allocated area.
+ *		to allocated area.
  *
  * This function should be only called from per-arch dma_alloc_coherent()
  * to support allocation from per-device coherent memory pools.
@@ -118,31 +118,32 @@ int dma_alloc_from_coherent(struct device *dev, ssize_t size,
 	mem = dev->dma_mem;
 	if (!mem)
 		return 0;
+
+	*ret = NULL;
+
 	if (unlikely(size > (mem->size << PAGE_SHIFT)))
-		return 0;
+		goto err;
 
 	pageno = bitmap_find_free_region(mem->bitmap, mem->size, order);
-	if (pageno >= 0) {
-		/*
-		 * Memory was found in the per-device arena.
-		 */
-		*dma_handle = mem->device_base + (pageno << PAGE_SHIFT);
-		*ret = mem->virt_base + (pageno << PAGE_SHIFT);
-		memset(*ret, 0, size);
-	} else if (mem->flags & DMA_MEMORY_EXCLUSIVE) {
-		/*
-		 * The per-device arena is exhausted and we are not
-		 * permitted to fall back to generic memory.
-		 */
-		*ret = NULL;
-	} else {
-		/*
-		 * The per-device arena is exhausted and we are
-		 * permitted to fall back to generic memory.
-		 */
-		return 0;
-	}
+	if (unlikely(pageno < 0))
+		goto err;
+
+	/*
+	 * Memory was found in the per-device area.
+	 */
+	*dma_handle = mem->device_base + (pageno << PAGE_SHIFT);
+	*ret = mem->virt_base + (pageno << PAGE_SHIFT);
+	memset(*ret, 0, size);
+
 	return 1;
+
+err:
+	/*
+	 * In the case where the allocation can not be satisfied from the
+	 * per-device area, try to fall back to generic memory if the
+	 * constraints allow it.
+	 */
+	return mem->flags & DMA_MEMORY_EXCLUSIVE;
 }
 EXPORT_SYMBOL(dma_alloc_from_coherent);
 
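
For readability, this is how dma_alloc_from_coherent() reads with the patch applied, assembled from the new side of the diff above. The function prologue (the local declarations and the initial !dev check) sits above the hunk and is reconstructed here as an assumption from context; everything else is taken directly from the diff.

int dma_alloc_from_coherent(struct device *dev, ssize_t size,
			    dma_addr_t *dma_handle, void **ret)
{
	struct dma_coherent_mem *mem;	/* assumed: declared above the hunk */
	int order = get_order(size);	/* assumed: declared above the hunk */
	int pageno;			/* assumed: declared above the hunk */

	if (!dev)			/* assumed: guard above the hunk */
		return 0;

	mem = dev->dma_mem;
	if (!mem)
		return 0;

	*ret = NULL;

	/* An oversized request now takes the common error path instead
	 * of unconditionally returning 0 ("fall back to generic memory"). */
	if (unlikely(size > (mem->size << PAGE_SHIFT)))
		goto err;

	pageno = bitmap_find_free_region(mem->bitmap, mem->size, order);
	if (unlikely(pageno < 0))
		goto err;

	/*
	 * Memory was found in the per-device area.
	 */
	*dma_handle = mem->device_base + (pageno << PAGE_SHIFT);
	*ret = mem->virt_base + (pageno << PAGE_SHIFT);
	memset(*ret, 0, size);

	return 1;

err:
	/*
	 * In the case where the allocation can not be satisfied from the
	 * per-device area, try to fall back to generic memory if the
	 * constraints allow it.
	 */
	return mem->flags & DMA_MEMORY_EXCLUSIVE;
}

The key change is the shared err path: when DMA_MEMORY_EXCLUSIVE is set, the function returns nonzero with *ret still NULL, so the caller reports failure instead of falling back on memory the device cannot address.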
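
The return convention this relies on, per the comments in the diff: 0 tells the per-arch dma_alloc_coherent() to continue with the generic allocators, while nonzero means the request was handled by the per-device pool and *ret is the answer (possibly NULL on a hard failure). A minimal caller-side sketch under that convention; this wrapper is a hypothetical illustration, not any particular architecture's code:

/* Hypothetical per-arch dma_alloc_coherent() showing how the return
 * value of dma_alloc_from_coherent() is consumed. */
void *example_dma_alloc_coherent(struct device *dev, size_t size,
				 dma_addr_t *dma_handle, gfp_t gfp)
{
	void *ret;

	/* Nonzero: handled by the per-device coherent area. ret may
	 * still be NULL when the area cannot satisfy the request and
	 * DMA_MEMORY_EXCLUSIVE forbids falling back; the allocation
	 * then fails outright rather than handing the device
	 * non-addressable memory. */
	if (dma_alloc_from_coherent(dev, size, dma_handle, &ret))
		return ret;

	/* Zero: fall back to the generic page allocator. */
	ret = (void *)__get_free_pages(gfp | __GFP_ZERO, get_order(size));
	if (ret)
		*dma_handle = virt_to_phys(ret);
	return ret;
}

In the Dreamcast case from the commit message, the pre-patch code returned 0 for an oversized request even with DMA_MEMORY_EXCLUSIVE set, so a wrapper like this would have taken the fallback path and handed 8139too a buffer outside the device's addressable window.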