x86, ia64: remove duplicated swiotlb code

This adds swiotlb_map_page and swiotlb_unmap_page to lib/swiotlb.c and
removes IA64's and X86's swiotlb_map_page and swiotlb_unmap_page.

This also removes unnecessary swiotlb_map_single, swiotlb_map_single_attrs,
swiotlb_unmap_single and swiotlb_unmap_single_attrs.

Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Acked-by: Tony Luck <tony.luck@intel.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
This commit is contained in:
FUJITA Tomonori 2009-01-05 23:59:03 +09:00 committed by Ingo Molnar
parent 160c1d8e40
commit f98eee8ea9
4 changed files with 24 additions and 76 deletions

View file

@ -16,22 +16,6 @@ EXPORT_SYMBOL(swiotlb);
/* Set this to 1 if there is a HW IOMMU in the system */ /* Set this to 1 if there is a HW IOMMU in the system */
int iommu_detected __read_mostly; int iommu_detected __read_mostly;
/*
 * dma_map_ops .map_page hook for ia64: translate the page + offset
 * interface into the kernel-virtual-address interface that
 * swiotlb_map_single_attrs() expects.  All arguments, including
 * @attrs, are forwarded unchanged.
 */
static dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
				   unsigned long offset, size_t size,
				   enum dma_data_direction dir,
				   struct dma_attrs *attrs)
{
	void *vaddr = page_address(page) + offset;

	return swiotlb_map_single_attrs(dev, vaddr, size, dir, attrs);
}
/*
 * dma_map_ops .unmap_page hook for ia64: the page-based interface maps
 * straight onto swiotlb_unmap_single_attrs(); every argument, including
 * @attrs, is forwarded unchanged.
 */
static void swiotlb_unmap_page(struct device *dev, dma_addr_t dma_handle,
size_t size, enum dma_data_direction dir,
struct dma_attrs *attrs)
{
swiotlb_unmap_single_attrs(dev, dma_handle, size, dir, attrs);
}
struct dma_map_ops swiotlb_dma_ops = { struct dma_map_ops swiotlb_dma_ops = {
.alloc_coherent = swiotlb_alloc_coherent, .alloc_coherent = swiotlb_alloc_coherent,
.free_coherent = swiotlb_free_coherent, .free_coherent = swiotlb_free_coherent,

View file

@ -38,23 +38,6 @@ int __weak swiotlb_arch_range_needs_mapping(void *ptr, size_t size)
return 0; return 0;
} }
/* these will be moved to lib/swiotlb.c later on */
/*
 * dma_map_ops .map_page hook for x86: convert page + offset to a kernel
 * virtual address and hand off to swiotlb_map_single().
 *
 * NOTE(review): @attrs is accepted but not forwarded — swiotlb_map_single()
 * takes no attrs argument, so DMA attributes are ignored on this path.
 */
static dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
				   unsigned long offset, size_t size,
				   enum dma_data_direction dir,
				   struct dma_attrs *attrs)
{
	void *vaddr = page_address(page) + offset;

	return swiotlb_map_single(dev, vaddr, size, dir);
}
/*
 * dma_map_ops .unmap_page hook for x86: delegate to swiotlb_unmap_single().
 *
 * NOTE(review): @attrs is accepted but ignored — swiotlb_unmap_single()
 * has no attrs parameter on this path.
 */
static void swiotlb_unmap_page(struct device *dev, dma_addr_t dma_handle,
size_t size, enum dma_data_direction dir,
struct dma_attrs *attrs)
{
swiotlb_unmap_single(dev, dma_handle, size, dir);
}
static void *x86_swiotlb_alloc_coherent(struct device *hwdev, size_t size, static void *x86_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
dma_addr_t *dma_handle, gfp_t flags) dma_addr_t *dma_handle, gfp_t flags)
{ {

View file

@ -41,20 +41,13 @@ extern void
swiotlb_free_coherent(struct device *hwdev, size_t size, swiotlb_free_coherent(struct device *hwdev, size_t size,
void *vaddr, dma_addr_t dma_handle); void *vaddr, dma_addr_t dma_handle);
extern dma_addr_t extern dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
swiotlb_map_single(struct device *hwdev, void *ptr, size_t size, int dir); unsigned long offset, size_t size,
enum dma_data_direction dir,
extern void struct dma_attrs *attrs);
swiotlb_unmap_single(struct device *hwdev, dma_addr_t dev_addr, extern void swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
size_t size, int dir); size_t size, enum dma_data_direction dir,
struct dma_attrs *attrs);
extern dma_addr_t
swiotlb_map_single_attrs(struct device *hwdev, void *ptr, size_t size,
int dir, struct dma_attrs *attrs);
extern void
swiotlb_unmap_single_attrs(struct device *hwdev, dma_addr_t dev_addr,
size_t size, int dir, struct dma_attrs *attrs);
extern int extern int
swiotlb_map_sg(struct device *hwdev, struct scatterlist *sg, int nents, swiotlb_map_sg(struct device *hwdev, struct scatterlist *sg, int nents,

View file

@ -636,11 +636,14 @@ swiotlb_full(struct device *dev, size_t size, int dir, int do_panic)
* Once the device is given the dma address, the device owns this memory until * Once the device is given the dma address, the device owns this memory until
* either swiotlb_unmap_single or swiotlb_dma_sync_single is performed. * either swiotlb_unmap_single or swiotlb_dma_sync_single is performed.
*/ */
dma_addr_t dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
swiotlb_map_single_attrs(struct device *hwdev, void *ptr, size_t size, unsigned long offset, size_t size,
int dir, struct dma_attrs *attrs) enum dma_data_direction dir,
struct dma_attrs *attrs)
{ {
dma_addr_t dev_addr = swiotlb_virt_to_bus(hwdev, ptr); phys_addr_t phys = page_to_phys(page) + offset;
void *ptr = page_address(page) + offset;
dma_addr_t dev_addr = swiotlb_phys_to_bus(dev, phys);
void *map; void *map;
BUG_ON(dir == DMA_NONE); BUG_ON(dir == DMA_NONE);
@ -649,37 +652,30 @@ swiotlb_map_single_attrs(struct device *hwdev, void *ptr, size_t size,
* we can safely return the device addr and not worry about bounce * we can safely return the device addr and not worry about bounce
* buffering it. * buffering it.
*/ */
if (!address_needs_mapping(hwdev, dev_addr, size) && if (!address_needs_mapping(dev, dev_addr, size) &&
!range_needs_mapping(ptr, size)) !range_needs_mapping(ptr, size))
return dev_addr; return dev_addr;
/* /*
* Oh well, have to allocate and map a bounce buffer. * Oh well, have to allocate and map a bounce buffer.
*/ */
map = map_single(hwdev, virt_to_phys(ptr), size, dir); map = map_single(dev, phys, size, dir);
if (!map) { if (!map) {
swiotlb_full(hwdev, size, dir, 1); swiotlb_full(dev, size, dir, 1);
map = io_tlb_overflow_buffer; map = io_tlb_overflow_buffer;
} }
dev_addr = swiotlb_virt_to_bus(hwdev, map); dev_addr = swiotlb_virt_to_bus(dev, map);
/* /*
* Ensure that the address returned is DMA'ble * Ensure that the address returned is DMA'ble
*/ */
if (address_needs_mapping(hwdev, dev_addr, size)) if (address_needs_mapping(dev, dev_addr, size))
panic("map_single: bounce buffer is not DMA'ble"); panic("map_single: bounce buffer is not DMA'ble");
return dev_addr; return dev_addr;
} }
EXPORT_SYMBOL(swiotlb_map_single_attrs); EXPORT_SYMBOL_GPL(swiotlb_map_page);
dma_addr_t
swiotlb_map_single(struct device *hwdev, void *ptr, size_t size, int dir)
{
return swiotlb_map_single_attrs(hwdev, ptr, size, dir, NULL);
}
EXPORT_SYMBOL(swiotlb_map_single);
/* /*
* Unmap a single streaming mode DMA translation. The dma_addr and size must * Unmap a single streaming mode DMA translation. The dma_addr and size must
@ -689,9 +685,9 @@ EXPORT_SYMBOL(swiotlb_map_single);
* After this call, reads by the cpu to the buffer are guaranteed to see * After this call, reads by the cpu to the buffer are guaranteed to see
* whatever the device wrote there. * whatever the device wrote there.
*/ */
void void swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
swiotlb_unmap_single_attrs(struct device *hwdev, dma_addr_t dev_addr, size_t size, enum dma_data_direction dir,
size_t size, int dir, struct dma_attrs *attrs) struct dma_attrs *attrs)
{ {
char *dma_addr = swiotlb_bus_to_virt(dev_addr); char *dma_addr = swiotlb_bus_to_virt(dev_addr);
@ -701,15 +697,7 @@ swiotlb_unmap_single_attrs(struct device *hwdev, dma_addr_t dev_addr,
else if (dir == DMA_FROM_DEVICE) else if (dir == DMA_FROM_DEVICE)
dma_mark_clean(dma_addr, size); dma_mark_clean(dma_addr, size);
} }
EXPORT_SYMBOL(swiotlb_unmap_single_attrs); EXPORT_SYMBOL_GPL(swiotlb_unmap_page);
void
swiotlb_unmap_single(struct device *hwdev, dma_addr_t dev_addr, size_t size,
int dir)
{
return swiotlb_unmap_single_attrs(hwdev, dev_addr, size, dir, NULL);
}
EXPORT_SYMBOL(swiotlb_unmap_single);
/* /*
* Make physical memory consistent for a single streaming mode DMA translation * Make physical memory consistent for a single streaming mode DMA translation