[ARM] Convert DMA cache handling to take const void * args
The DMA cache handling functions take virtual addresses, but in the form of unsigned long arguments. This leads to a little confusion about what exactly they take. So, convert them to take const void * instead.

Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
parent 953233dc99
commit 7ae5a761d2

4 changed files with 16 additions and 18 deletions
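To illustrate the interface change described in the commit message: before the patch, callers of the DMA cache functions had to cast kernel virtual addresses to unsigned long; afterwards they pass the pointer directly, and the const qualifier documents that the operation only reads the buffer. The following is a standalone sketch with stand-in functions, not kernel code:

#include <stdio.h>
#include <stddef.h>

/* Stand-in for the old-style interface: addresses passed as unsigned long. */
static void dma_clean_range_old(unsigned long start, unsigned long end)
{
	printf("clean 0x%lx..0x%lx\n", start, end);
}

/* Stand-in for the new-style interface: virtual addresses passed as
 * const void *, which says both "this is a pointer" and "only read it". */
static void dma_clean_range_new(const void *start, const void *end)
{
	printf("clean %p..%p\n", start, end);
}

int main(void)
{
	char buf[64];
	size_t size = sizeof(buf);

	/* Old style forced a cast at every call site. */
	dma_clean_range_old((unsigned long)buf, (unsigned long)buf + size);

	/* New style takes the pointer as-is. */
	dma_clean_range_new(buf, buf + size);
	return 0;
}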
@@ -321,12 +321,12 @@ unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
 		DO_STATS ( device_info->bounce_count++ );
 
 		if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) {
-			unsigned long ptr;
+			void *ptr = buf->ptr;
 
 			dev_dbg(dev,
 				"%s: copy back safe %p to unsafe %p size %d\n",
-				__func__, buf->safe, buf->ptr, size);
-			memcpy(buf->ptr, buf->safe, size);
+				__func__, buf->safe, ptr, size);
+			memcpy(ptr, buf->safe, size);
 
 			/*
 			 * DMA buffers must have the same cache properties
@@ -336,7 +336,6 @@ unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
 			 * bidirectional case because we know the cache
 			 * lines will be coherent with the data written.
 			 */
-			ptr = (unsigned long)buf->ptr;
 			dmac_clean_range(ptr, ptr + size);
 			outer_clean_range(__pa(ptr), __pa(ptr) + size);
 		}
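One detail worth noting in the new code above: expressions such as ptr + size perform arithmetic on a void * pointer. Standard C does not define this, but the kernel is compiled with GCC, whose extension treats void * arithmetic as byte-granular (as if sizeof(void) were 1). A minimal standalone illustration of that idiom:

#include <stdio.h>
#include <stddef.h>

int main(void)
{
	char data[16];
	void *ptr = data;
	size_t size = sizeof(data);

	/* GCC extension: advancing a void * moves by single bytes, so this
	 * computes the same end address as (char *)ptr + size. */
	void *end = ptr + size;

	printf("range covers %td bytes\n", (char *)end - (char *)ptr);
	return 0;
}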
@@ -205,10 +205,10 @@ __dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
 	 * kernel direct-mapped region for device DMA.
 	 */
 	{
-		unsigned long kaddr = (unsigned long)page_address(page);
-		memset(page_address(page), 0, size);
-		dmac_flush_range(kaddr, kaddr + size);
-		outer_flush_range(__pa(kaddr), __pa(kaddr) + size);
+		void *ptr = page_address(page);
+		memset(ptr, 0, size);
+		dmac_flush_range(ptr, ptr + size);
+		outer_flush_range(__pa(ptr), __pa(ptr) + size);
 	}
 
 	/*
@@ -481,10 +481,9 @@ core_initcall(consistent_init);
  * platforms with CONFIG_DMABOUNCE.
  * Use the driver DMA support - see dma-mapping.h (dma_sync_*)
  */
-void consistent_sync(void *vaddr, size_t size, int direction)
+void consistent_sync(const void *start, size_t size, int direction)
 {
-	unsigned long start = (unsigned long)vaddr;
-	unsigned long end = start + size;
+	const void *end = start + size;
 
 	BUG_ON(!virt_addr_valid(start) || !virt_addr_valid(end));
 
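For context, the body of consistent_sync() (not visible in the hunk above) dispatches on the transfer direction and invalidates, cleans, or flushes the given range accordingly. The sketch below is a standalone approximation of that dispatch pattern with stub cache operations and a simplified direction enum; it is not the kernel implementation:

#include <stdio.h>
#include <stddef.h>

/* Simplified stand-ins for the kernel's DMA direction codes. */
enum dma_dir { DMA_BIDIRECTIONAL, DMA_TO_DEVICE, DMA_FROM_DEVICE };

/* Stub cache operations playing the role of dmac_inv_range() and friends. */
static void inv_range(const void *s, const void *e)   { printf("invalidate %p..%p\n", s, e); }
static void clean_range(const void *s, const void *e) { printf("clean      %p..%p\n", s, e); }
static void flush_range(const void *s, const void *e) { printf("flush      %p..%p\n", s, e); }

/* Approximation of the consistent_sync() dispatch: the DMA direction
 * determines which cache maintenance operation covers the range. */
static void sync_range(const void *start, size_t size, enum dma_dir dir)
{
	const void *end = (const char *)start + size;

	switch (dir) {
	case DMA_FROM_DEVICE:	/* device writes memory: discard stale lines */
		inv_range(start, end);
		break;
	case DMA_TO_DEVICE:	/* CPU wrote memory: write dirty lines back */
		clean_range(start, end);
		break;
	case DMA_BIDIRECTIONAL:	/* both: write back and invalidate */
		flush_range(start, end);
		break;
	}
}

int main(void)
{
	char buf[32];
	sync_range(buf, sizeof(buf), DMA_TO_DEVICE);
	return 0;
}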
@@ -185,9 +185,9 @@ struct cpu_cache_fns {
 	void (*coherent_user_range)(unsigned long, unsigned long);
 	void (*flush_kern_dcache_page)(void *);
 
-	void (*dma_inv_range)(unsigned long, unsigned long);
-	void (*dma_clean_range)(unsigned long, unsigned long);
-	void (*dma_flush_range)(unsigned long, unsigned long);
+	void (*dma_inv_range)(const void *, const void *);
+	void (*dma_clean_range)(const void *, const void *);
+	void (*dma_flush_range)(const void *, const void *);
 };
 
 struct outer_cache_fns {
@@ -246,9 +246,9 @@ extern void __cpuc_flush_dcache_page(void *);
 #define dmac_clean_range	__glue(_CACHE,_dma_clean_range)
 #define dmac_flush_range	__glue(_CACHE,_dma_flush_range)
 
-extern void dmac_inv_range(unsigned long, unsigned long);
-extern void dmac_clean_range(unsigned long, unsigned long);
-extern void dmac_flush_range(unsigned long, unsigned long);
+extern void dmac_inv_range(const void *, const void *);
+extern void dmac_clean_range(const void *, const void *);
+extern void dmac_flush_range(const void *, const void *);
 
 #endif
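The __glue(_CACHE,_dma_clean_range) macros in the hunk above paste the configured cache model's name onto the function name at preprocessing time, so on a single-cache kernel dmac_clean_range resolves directly to that model's implementation (multi-cache kernels go through the cpu_cache_fns pointers shown earlier instead); that is why the struct members and the extern prototypes must change together. The following standalone sketch reproduces the token-pasting idea with a hypothetical v6_dma_clean_range implementation; the macro definitions here are simplified stand-ins:

#include <stdio.h>

/* Hypothetical per-cache-model implementation, named in the kernel's
 * <model>_dma_clean_range style. */
static void v6_dma_clean_range(const void *start, const void *end)
{
	printf("v6 clean %p..%p\n", start, end);
}

/* Simplified stand-ins for the kernel's __glue() token-pasting helpers. */
#define ____glue(name, fn)	name##fn
#define __glue(name, fn)	____glue(name, fn)

#define _CACHE v6
#define dmac_clean_range	__glue(_CACHE, _dma_clean_range)

int main(void)
{
	char buf[8];

	/* Expands to v6_dma_clean_range(buf, buf + sizeof(buf)). */
	dmac_clean_range(buf, buf + sizeof(buf));
	return 0;
}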
@@ -17,7 +17,7 @@
  * platforms with CONFIG_DMABOUNCE.
  * Use the driver DMA support - see dma-mapping.h (dma_sync_*)
  */
-extern void consistent_sync(void *kaddr, size_t size, int rw);
+extern void consistent_sync(const void *kaddr, size_t size, int rw);
 
 /*
  * Return whether the given device DMA address mask can be supported