IB/core: Use the new verbs DMA mapping functions
Convert code in core/ to use the new DMA mapping functions for kernel
verbs consumers.

Signed-off-by: Ralph Campbell <ralph.campbell@qlogic.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
This commit is contained in:
parent f2cbb660ed
commit 1527106ff8

3 changed files with 52 additions and 54 deletions
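The pattern this commit converts to is easiest to see in isolation. The sketch below is not part of the patch; it shows a hypothetical kernel verbs consumer (the helper name, buffer, and error handling are illustrative) mapping a send buffer through the ib_dma_* wrappers on the struct ib_device, rather than calling dma_map_single() on device->dma_device and saving the address with pci_unmap_addr_set().

/*
 * Illustrative only -- not taken from this commit.  A kernel verbs
 * consumer maps a send buffer via the ib_dma_* wrappers so a device
 * driver can interpose its own DMA mapping implementation.
 */
#include <linux/errno.h>
#include <rdma/ib_verbs.h>

static int example_map_send_buf(struct ib_device *device, void *buf,
                                size_t len, struct ib_sge *sge, u64 *mapping)
{
        u64 addr;

        /* New style: pass the ib_device itself, not device->dma_device. */
        addr = ib_dma_map_single(device, buf, len, DMA_TO_DEVICE);
        if (ib_dma_mapping_error(device, addr))
                return -ENOMEM;

        /* Mappings are stored as plain u64 fields, not pci_unmap_addr(). */
        *mapping = addr;
        sge->addr   = addr;
        sge->length = len;

        /*
         * Teardown mirrors this with:
         * ib_dma_unmap_single(device, *mapping, len, DMA_TO_DEVICE);
         */
        return 0;
}

The wrapper layer exists so that a device whose driver does its own address translation can override the mapping operations instead of always going through the generic DMA API, which is why every consumer call site in the diff below switches from the dma_* to the ib_dma_* form.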
drivers/infiniband/core/mad.c
@@ -998,17 +998,17 @@ int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr)
 
        mad_agent = mad_send_wr->send_buf.mad_agent;
        sge = mad_send_wr->sg_list;
-       sge[0].addr = dma_map_single(mad_agent->device->dma_device,
-                                    mad_send_wr->send_buf.mad,
-                                    sge[0].length,
-                                    DMA_TO_DEVICE);
-       pci_unmap_addr_set(mad_send_wr, header_mapping, sge[0].addr);
+       sge[0].addr = ib_dma_map_single(mad_agent->device,
+                                       mad_send_wr->send_buf.mad,
+                                       sge[0].length,
+                                       DMA_TO_DEVICE);
+       mad_send_wr->header_mapping = sge[0].addr;
 
-       sge[1].addr = dma_map_single(mad_agent->device->dma_device,
-                                    ib_get_payload(mad_send_wr),
-                                    sge[1].length,
-                                    DMA_TO_DEVICE);
-       pci_unmap_addr_set(mad_send_wr, payload_mapping, sge[1].addr);
+       sge[1].addr = ib_dma_map_single(mad_agent->device,
+                                       ib_get_payload(mad_send_wr),
+                                       sge[1].length,
+                                       DMA_TO_DEVICE);
+       mad_send_wr->payload_mapping = sge[1].addr;
 
        spin_lock_irqsave(&qp_info->send_queue.lock, flags);
        if (qp_info->send_queue.count < qp_info->send_queue.max_active) {
@@ -1026,12 +1026,12 @@ int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr)
        }
        spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
        if (ret) {
-               dma_unmap_single(mad_agent->device->dma_device,
-                                pci_unmap_addr(mad_send_wr, header_mapping),
-                                sge[0].length, DMA_TO_DEVICE);
-               dma_unmap_single(mad_agent->device->dma_device,
-                                pci_unmap_addr(mad_send_wr, payload_mapping),
-                                sge[1].length, DMA_TO_DEVICE);
+               ib_dma_unmap_single(mad_agent->device,
+                                   mad_send_wr->header_mapping,
+                                   sge[0].length, DMA_TO_DEVICE);
+               ib_dma_unmap_single(mad_agent->device,
+                                   mad_send_wr->payload_mapping,
+                                   sge[1].length, DMA_TO_DEVICE);
        }
        return ret;
 }
@@ -1850,11 +1850,11 @@ static void ib_mad_recv_done_handler(struct ib_mad_port_private *port_priv,
        mad_priv_hdr = container_of(mad_list, struct ib_mad_private_header,
                                    mad_list);
        recv = container_of(mad_priv_hdr, struct ib_mad_private, header);
-       dma_unmap_single(port_priv->device->dma_device,
-                        pci_unmap_addr(&recv->header, mapping),
-                        sizeof(struct ib_mad_private) -
-                        sizeof(struct ib_mad_private_header),
-                        DMA_FROM_DEVICE);
+       ib_dma_unmap_single(port_priv->device,
+                           recv->header.mapping,
+                           sizeof(struct ib_mad_private) -
+                           sizeof(struct ib_mad_private_header),
+                           DMA_FROM_DEVICE);
 
        /* Setup MAD receive work completion from "normal" work completion */
        recv->header.wc = *wc;
@@ -2080,12 +2080,12 @@ static void ib_mad_send_done_handler(struct ib_mad_port_private *port_priv,
        qp_info = send_queue->qp_info;
 
 retry:
-       dma_unmap_single(mad_send_wr->send_buf.mad_agent->device->dma_device,
-                        pci_unmap_addr(mad_send_wr, header_mapping),
-                        mad_send_wr->sg_list[0].length, DMA_TO_DEVICE);
-       dma_unmap_single(mad_send_wr->send_buf.mad_agent->device->dma_device,
-                        pci_unmap_addr(mad_send_wr, payload_mapping),
-                        mad_send_wr->sg_list[1].length, DMA_TO_DEVICE);
+       ib_dma_unmap_single(mad_send_wr->send_buf.mad_agent->device,
+                           mad_send_wr->header_mapping,
+                           mad_send_wr->sg_list[0].length, DMA_TO_DEVICE);
+       ib_dma_unmap_single(mad_send_wr->send_buf.mad_agent->device,
+                           mad_send_wr->payload_mapping,
+                           mad_send_wr->sg_list[1].length, DMA_TO_DEVICE);
        queued_send_wr = NULL;
        spin_lock_irqsave(&send_queue->lock, flags);
        list_del(&mad_list->list);
@@ -2528,13 +2528,12 @@ static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
                                break;
                        }
                }
-               sg_list.addr = dma_map_single(qp_info->port_priv->
-                                               device->dma_device,
-                                             &mad_priv->grh,
-                                             sizeof *mad_priv -
-                                               sizeof mad_priv->header,
-                                             DMA_FROM_DEVICE);
-               pci_unmap_addr_set(&mad_priv->header, mapping, sg_list.addr);
+               sg_list.addr = ib_dma_map_single(qp_info->port_priv->device,
+                                                &mad_priv->grh,
+                                                sizeof *mad_priv -
+                                                sizeof mad_priv->header,
+                                                DMA_FROM_DEVICE);
+               mad_priv->header.mapping = sg_list.addr;
                recv_wr.wr_id = (unsigned long)&mad_priv->header.mad_list;
                mad_priv->header.mad_list.mad_queue = recv_queue;
 
@@ -2549,12 +2548,11 @@ static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
                        list_del(&mad_priv->header.mad_list.list);
                        recv_queue->count--;
                        spin_unlock_irqrestore(&recv_queue->lock, flags);
-                       dma_unmap_single(qp_info->port_priv->device->dma_device,
-                                        pci_unmap_addr(&mad_priv->header,
-                                                       mapping),
-                                        sizeof *mad_priv -
-                                          sizeof mad_priv->header,
-                                        DMA_FROM_DEVICE);
+                       ib_dma_unmap_single(qp_info->port_priv->device,
+                                           mad_priv->header.mapping,
+                                           sizeof *mad_priv -
+                                           sizeof mad_priv->header,
+                                           DMA_FROM_DEVICE);
                        kmem_cache_free(ib_mad_cache, mad_priv);
                        printk(KERN_ERR PFX "ib_post_recv failed: %d\n", ret);
                        break;
@@ -2586,11 +2584,11 @@ static void cleanup_recv_queue(struct ib_mad_qp_info *qp_info)
                /* Remove from posted receive MAD list */
                list_del(&mad_list->list);
 
-               dma_unmap_single(qp_info->port_priv->device->dma_device,
-                                pci_unmap_addr(&recv->header, mapping),
-                                sizeof(struct ib_mad_private) -
-                                sizeof(struct ib_mad_private_header),
-                                DMA_FROM_DEVICE);
+               ib_dma_unmap_single(qp_info->port_priv->device,
+                                   recv->header.mapping,
+                                   sizeof(struct ib_mad_private) -
+                                   sizeof(struct ib_mad_private_header),
+                                   DMA_FROM_DEVICE);
                kmem_cache_free(ib_mad_cache, recv);
        }
 
drivers/infiniband/core/mad_priv.h
@@ -73,7 +73,7 @@ struct ib_mad_private_header {
        struct ib_mad_list_head mad_list;
        struct ib_mad_recv_wc recv_wc;
        struct ib_wc wc;
-       DECLARE_PCI_UNMAP_ADDR(mapping)
+       u64 mapping;
 } __attribute__ ((packed));
 
 struct ib_mad_private {
@@ -126,8 +126,8 @@ struct ib_mad_send_wr_private {
        struct list_head agent_list;
        struct ib_mad_agent_private *mad_agent_priv;
        struct ib_mad_send_buf send_buf;
-       DECLARE_PCI_UNMAP_ADDR(header_mapping)
-       DECLARE_PCI_UNMAP_ADDR(payload_mapping)
+       u64 header_mapping;
+       u64 payload_mapping;
        struct ib_send_wr send_wr;
        struct ib_sge sg_list[IB_MAD_SEND_REQ_MAX_SG];
        __be64 tid;
drivers/infiniband/core/uverbs_mem.c
@@ -52,8 +52,8 @@ static void __ib_umem_release(struct ib_device *dev, struct ib_umem *umem, int d
        int i;
 
        list_for_each_entry_safe(chunk, tmp, &umem->chunk_list, list) {
-               dma_unmap_sg(dev->dma_device, chunk->page_list,
-                            chunk->nents, DMA_BIDIRECTIONAL);
+               ib_dma_unmap_sg(dev, chunk->page_list,
+                               chunk->nents, DMA_BIDIRECTIONAL);
                for (i = 0; i < chunk->nents; ++i) {
                        if (umem->writable && dirty)
                                set_page_dirty_lock(chunk->page_list[i].page);
@@ -136,10 +136,10 @@ int ib_umem_get(struct ib_device *dev, struct ib_umem *mem,
                        chunk->page_list[i].length = PAGE_SIZE;
                }
 
-               chunk->nmap = dma_map_sg(dev->dma_device,
-                                        &chunk->page_list[0],
-                                        chunk->nents,
-                                        DMA_BIDIRECTIONAL);
+               chunk->nmap = ib_dma_map_sg(dev,
+                                           &chunk->page_list[0],
+                                           chunk->nents,
+                                           DMA_BIDIRECTIONAL);
                if (chunk->nmap <= 0) {
                        for (i = 0; i < chunk->nents; ++i)
                                put_page(chunk->page_list[i].page);
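The scatter/gather hunks above follow the same conversion. A minimal, hypothetical sketch of the new-style pattern (the helper and variable names are placeholders, not from this commit):

/* Illustrative only: map a chunk's scatterlist via the ib_dma_* wrappers. */
#include <linux/errno.h>
#include <linux/scatterlist.h>
#include <rdma/ib_verbs.h>

static int example_map_chunk(struct ib_device *device,
                             struct scatterlist *sg, int nents)
{
        int nmap;

        /*
         * ib_dma_map_sg() takes the ib_device directly (previously
         * dma_map_sg() on dev->dma_device) and returns the number of
         * mapped entries.
         */
        nmap = ib_dma_map_sg(device, sg, nents, DMA_BIDIRECTIONAL);
        if (nmap <= 0)
                return -ENOMEM;

        /* ... post work requests that reference the mapped entries ... */

        /* Release with the matching wrapper when finished. */
        ib_dma_unmap_sg(device, sg, nents, DMA_BIDIRECTIONAL);
        return 0;
}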