IB/srp: Use new verbs IB DMA mapping functions

Convert SRP to use the new verbs DMA mapping functions for kernel
verbs consumers.

Signed-off-by: Ralph Campbell <ralph.campbell@qlogic.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
This commit is contained in:
Ralph Campbell 2006-12-12 14:30:55 -08:00 committed by Roland Dreier
parent 37ccf9df97
commit 85507bcce0
2 changed files with 49 additions and 34 deletions

View file

@@ -122,9 +122,8 @@ static struct srp_iu *srp_alloc_iu(struct srp_host *host, size_t size,
 	if (!iu->buf)
 		goto out_free_iu;

-	iu->dma = dma_map_single(host->dev->dev->dma_device,
-				 iu->buf, size, direction);
-	if (dma_mapping_error(iu->dma))
+	iu->dma = ib_dma_map_single(host->dev->dev, iu->buf, size, direction);
+	if (ib_dma_mapping_error(host->dev->dev, iu->dma))
		goto out_free_buf;

 	iu->size = size;
@@ -145,8 +144,7 @@ static void srp_free_iu(struct srp_host *host, struct srp_iu *iu)
 	if (!iu)
 		return;

-	dma_unmap_single(host->dev->dev->dma_device,
-			 iu->dma, iu->size, iu->direction);
+	ib_dma_unmap_single(host->dev->dev, iu->dma, iu->size, iu->direction);
 	kfree(iu->buf);
 	kfree(iu);
 }
@@ -482,8 +480,8 @@ static void srp_unmap_data(struct scsi_cmnd *scmnd,
 		scat = &req->fake_sg;
 	}

-	dma_unmap_sg(target->srp_host->dev->dev->dma_device, scat, nents,
-		     scmnd->sc_data_direction);
+	ib_dma_unmap_sg(target->srp_host->dev->dev, scat, nents,
+			scmnd->sc_data_direction);
 }

 static void srp_remove_req(struct srp_target_port *target, struct srp_request *req)
@@ -595,23 +593,26 @@ static int srp_map_fmr(struct srp_target_port *target, struct scatterlist *scat,
 	int i, j;
 	int ret;
 	struct srp_device *dev = target->srp_host->dev;
+	struct ib_device *ibdev = dev->dev;

 	if (!dev->fmr_pool)
 		return -ENODEV;

-	if ((sg_dma_address(&scat[0]) & ~dev->fmr_page_mask) &&
+	if ((ib_sg_dma_address(ibdev, &scat[0]) & ~dev->fmr_page_mask) &&
 	    mellanox_workarounds && !memcmp(&target->ioc_guid, mellanox_oui, 3))
 		return -EINVAL;

 	len = page_cnt = 0;
 	for (i = 0; i < sg_cnt; ++i) {
-		if (sg_dma_address(&scat[i]) & ~dev->fmr_page_mask) {
+		unsigned int dma_len = ib_sg_dma_len(ibdev, &scat[i]);
+
+		if (ib_sg_dma_address(ibdev, &scat[i]) & ~dev->fmr_page_mask) {
 			if (i > 0)
 				return -EINVAL;
 			else
 				++page_cnt;
 		}
-		if ((sg_dma_address(&scat[i]) + sg_dma_len(&scat[i])) &
+		if ((ib_sg_dma_address(ibdev, &scat[i]) + dma_len) &
 		    ~dev->fmr_page_mask) {
 			if (i < sg_cnt - 1)
 				return -EINVAL;
@@ -619,7 +620,7 @@ static int srp_map_fmr(struct srp_target_port *target, struct scatterlist *scat,
 				++page_cnt;
 		}

-		len += sg_dma_len(&scat[i]);
+		len += dma_len;
 	}

 	page_cnt += len >> dev->fmr_page_shift;
@@ -631,10 +632,14 @@ static int srp_map_fmr(struct srp_target_port *target, struct scatterlist *scat,
 		return -ENOMEM;

 	page_cnt = 0;
-	for (i = 0; i < sg_cnt; ++i)
-		for (j = 0; j < sg_dma_len(&scat[i]); j += dev->fmr_page_size)
+	for (i = 0; i < sg_cnt; ++i) {
+		unsigned int dma_len = ib_sg_dma_len(ibdev, &scat[i]);
+
+		for (j = 0; j < dma_len; j += dev->fmr_page_size)
 			dma_pages[page_cnt++] =
-				(sg_dma_address(&scat[i]) & dev->fmr_page_mask) + j;
+				(ib_sg_dma_address(ibdev, &scat[i]) &
+				 dev->fmr_page_mask) + j;
+	}

 	req->fmr = ib_fmr_pool_map_phys(dev->fmr_pool,
 					dma_pages, page_cnt, io_addr);
@@ -644,7 +649,8 @@ static int srp_map_fmr(struct srp_target_port *target, struct scatterlist *scat,
 		goto out;
 	}

-	buf->va  = cpu_to_be64(sg_dma_address(&scat[0]) & ~dev->fmr_page_mask);
+	buf->va  = cpu_to_be64(ib_sg_dma_address(ibdev, &scat[0]) &
+			       ~dev->fmr_page_mask);
 	buf->key = cpu_to_be32(req->fmr->fmr->rkey);
 	buf->len = cpu_to_be32(len);
@@ -663,6 +669,8 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target,
 	struct srp_cmd *cmd = req->cmd->buf;
 	int len, nents, count;
 	u8 fmt = SRP_DATA_DESC_DIRECT;
+	struct srp_device *dev;
+	struct ib_device *ibdev;

 	if (!scmnd->request_buffer || scmnd->sc_data_direction == DMA_NONE)
 		return sizeof (struct srp_cmd);
@@ -687,8 +695,10 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target,
 		sg_init_one(scat, scmnd->request_buffer, scmnd->request_bufflen);
 	}

-	count = dma_map_sg(target->srp_host->dev->dev->dma_device,
-			   scat, nents, scmnd->sc_data_direction);
+	dev = target->srp_host->dev;
+	ibdev = dev->dev;
+
+	count = ib_dma_map_sg(ibdev, scat, nents, scmnd->sc_data_direction);

 	fmt = SRP_DATA_DESC_DIRECT;
 	len = sizeof (struct srp_cmd) + sizeof (struct srp_direct_buf);
@@ -702,9 +712,9 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target,
 		 */
 		struct srp_direct_buf *buf = (void *) cmd->add_data;

-		buf->va  = cpu_to_be64(sg_dma_address(scat));
-		buf->key = cpu_to_be32(target->srp_host->dev->mr->rkey);
-		buf->len = cpu_to_be32(sg_dma_len(scat));
+		buf->va  = cpu_to_be64(ib_sg_dma_address(ibdev, scat));
+		buf->key = cpu_to_be32(dev->mr->rkey);
+		buf->len = cpu_to_be32(ib_sg_dma_len(ibdev, scat));
 	} else if (srp_map_fmr(target, scat, count, req,
 			       (void *) cmd->add_data)) {
 		/*
@@ -722,13 +732,14 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target,
 			count * sizeof (struct srp_direct_buf);

 		for (i = 0; i < count; ++i) {
+			unsigned int dma_len = ib_sg_dma_len(ibdev, &scat[i]);
+
 			buf->desc_list[i].va  =
-				cpu_to_be64(sg_dma_address(&scat[i]));
+				cpu_to_be64(ib_sg_dma_address(ibdev, &scat[i]));
 			buf->desc_list[i].key =
-				cpu_to_be32(target->srp_host->dev->mr->rkey);
-			buf->desc_list[i].len =
-				cpu_to_be32(sg_dma_len(&scat[i]));
-			datalen += sg_dma_len(&scat[i]);
+				cpu_to_be32(dev->mr->rkey);
+			buf->desc_list[i].len = cpu_to_be32(dma_len);
+			datalen += dma_len;
 		}

 		if (scmnd->sc_data_direction == DMA_TO_DEVICE)
@@ -808,13 +819,15 @@ static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp)

 static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc)
 {
+	struct ib_device *dev;
 	struct srp_iu *iu;
 	u8 opcode;

 	iu = target->rx_ring[wc->wr_id & ~SRP_OP_RECV];

-	dma_sync_single_for_cpu(target->srp_host->dev->dev->dma_device, iu->dma,
-				target->max_ti_iu_len, DMA_FROM_DEVICE);
+	dev = target->srp_host->dev->dev;
+	ib_dma_sync_single_for_cpu(dev, iu->dma, target->max_ti_iu_len,
+				   DMA_FROM_DEVICE);

 	opcode = *(u8 *) iu->buf;
@@ -850,8 +863,8 @@ static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc)
 		break;
 	}

-	dma_sync_single_for_device(target->srp_host->dev->dev->dma_device, iu->dma,
-				   target->max_ti_iu_len, DMA_FROM_DEVICE);
+	ib_dma_sync_single_for_device(dev, iu->dma, target->max_ti_iu_len,
+				      DMA_FROM_DEVICE);
 }

 static void srp_completion(struct ib_cq *cq, void *target_ptr)
@@ -969,6 +982,7 @@ static int srp_queuecommand(struct scsi_cmnd *scmnd,
 	struct srp_request *req;
 	struct srp_iu *iu;
 	struct srp_cmd *cmd;
+	struct ib_device *dev;
 	int len;

 	if (target->state == SRP_TARGET_CONNECTING)
@@ -985,8 +999,9 @@ static int srp_queuecommand(struct scsi_cmnd *scmnd,
 	if (!iu)
 		goto err;

-	dma_sync_single_for_cpu(target->srp_host->dev->dev->dma_device, iu->dma,
-				srp_max_iu_len, DMA_TO_DEVICE);
+	dev = target->srp_host->dev->dev;
+	ib_dma_sync_single_for_cpu(dev, iu->dma, srp_max_iu_len,
+				   DMA_TO_DEVICE);

 	req = list_entry(target->free_reqs.next, struct srp_request, list);
@@ -1018,8 +1033,8 @@ static int srp_queuecommand(struct scsi_cmnd *scmnd,
 		goto err_unmap;
 	}

-	dma_sync_single_for_device(target->srp_host->dev->dev->dma_device, iu->dma,
-				   srp_max_iu_len, DMA_TO_DEVICE);
+	ib_dma_sync_single_for_device(dev, iu->dma, srp_max_iu_len,
+				      DMA_TO_DEVICE);

 	if (__srp_post_send(target, iu, len)) {
 		printk(KERN_ERR PFX "Send failed\n");

View file

@@ -161,7 +161,7 @@ struct srp_target_port {
 };

 struct srp_iu {
-	dma_addr_t		dma;
+	u64			dma;
 	void		       *buf;
 	size_t			size;
 	enum dma_data_direction	direction;