IB/iser: Rewrite SG handling for RDMA logic

After dma-mapping an SG list provided by the SCSI midlayer, iser has
to make sure the mapped SG is "aligned for RDMA" in the sense that it's
possible to produce one mapping in the HCA IOMMU which represents the
whole SG. Next, the mapped SG is formatted for registration with the HCA.

This patch rewrites the logic that does the above, to make it clearer
and simpler. It also fixes a bug in the aligned-for-RDMA checks, where
only an "end" check was done and the corresponding "start" check was missing.

Signed-off-by: Alexander Nezhinsky <alexandern@voltaire.com>
Signed-off-by: Or Gerlitz <ogerlitz@voltaire.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
This commit is contained in:
Or Gerlitz 2009-11-12 11:32:27 -08:00 committed by Roland Dreier
parent 91d3f9bacd
commit c1ccaf2478

View file

@ -209,6 +209,8 @@ void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
mem_copy->copy_buf = NULL; mem_copy->copy_buf = NULL;
} }
#define IS_4K_ALIGNED(addr) ((((unsigned long)addr) & ~MASK_4K) == 0)
/** /**
* iser_sg_to_page_vec - Translates scatterlist entries to physical addresses * iser_sg_to_page_vec - Translates scatterlist entries to physical addresses
* and returns the length of resulting physical address array (may be less than * and returns the length of resulting physical address array (may be less than
@ -221,62 +223,52 @@ void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
* where --few fragments of the same page-- are present in the SG as * where --few fragments of the same page-- are present in the SG as
* consecutive elements. Also, it handles one entry SG. * consecutive elements. Also, it handles one entry SG.
*/ */
static int iser_sg_to_page_vec(struct iser_data_buf *data, static int iser_sg_to_page_vec(struct iser_data_buf *data,
struct iser_page_vec *page_vec, struct iser_page_vec *page_vec,
struct ib_device *ibdev) struct ib_device *ibdev)
{ {
struct scatterlist *sgl = (struct scatterlist *)data->buf; struct scatterlist *sg, *sgl = (struct scatterlist *)data->buf;
struct scatterlist *sg; u64 start_addr, end_addr, page, chunk_start = 0;
u64 first_addr, last_addr, page;
int end_aligned;
unsigned int cur_page = 0;
unsigned long total_sz = 0; unsigned long total_sz = 0;
int i; unsigned int dma_len;
int i, new_chunk, cur_page, last_ent = data->dma_nents - 1;
/* compute the offset of first element */ /* compute the offset of first element */
page_vec->offset = (u64) sgl[0].offset & ~MASK_4K; page_vec->offset = (u64) sgl[0].offset & ~MASK_4K;
new_chunk = 1;
cur_page = 0;
for_each_sg(sgl, sg, data->dma_nents, i) { for_each_sg(sgl, sg, data->dma_nents, i) {
unsigned int dma_len = ib_sg_dma_len(ibdev, sg); start_addr = ib_sg_dma_address(ibdev, sg);
if (new_chunk)
chunk_start = start_addr;
dma_len = ib_sg_dma_len(ibdev, sg);
end_addr = start_addr + dma_len;
total_sz += dma_len; total_sz += dma_len;
first_addr = ib_sg_dma_address(ibdev, sg); /* collect page fragments until aligned or end of SG list */
last_addr = first_addr + dma_len; if (!IS_4K_ALIGNED(end_addr) && i < last_ent) {
new_chunk = 0;
end_aligned = !(last_addr & ~MASK_4K); continue;
/* continue to collect page fragments till aligned or SG ends */
while (!end_aligned && (i + 1 < data->dma_nents)) {
sg = sg_next(sg);
i++;
dma_len = ib_sg_dma_len(ibdev, sg);
total_sz += dma_len;
last_addr = ib_sg_dma_address(ibdev, sg) + dma_len;
end_aligned = !(last_addr & ~MASK_4K);
} }
new_chunk = 1;
/* handle the 1st page in the 1st DMA element */ /* address of the first page in the contiguous chunk;
if (cur_page == 0) { masking relevant for the very first SG entry,
page = first_addr & MASK_4K; which might be unaligned */
page_vec->pages[cur_page] = page; page = chunk_start & MASK_4K;
cur_page++; do {
page_vec->pages[cur_page++] = page;
page += SIZE_4K; page += SIZE_4K;
} else } while (page < end_addr);
page = first_addr;
for (; page < last_addr; page += SIZE_4K) {
page_vec->pages[cur_page] = page;
cur_page++;
}
} }
page_vec->data_size = total_sz; page_vec->data_size = total_sz;
iser_dbg("page_vec->data_size:%d cur_page %d\n", page_vec->data_size,cur_page); iser_dbg("page_vec->data_size:%d cur_page %d\n", page_vec->data_size,cur_page);
return cur_page; return cur_page;
} }
#define IS_4K_ALIGNED(addr) ((((unsigned long)addr) & ~MASK_4K) == 0)
/** /**
* iser_data_buf_aligned_len - Tries to determine the maximal correctly aligned * iser_data_buf_aligned_len - Tries to determine the maximal correctly aligned
@ -284,42 +276,40 @@ static int iser_sg_to_page_vec(struct iser_data_buf *data,
* the number of entries which are aligned correctly. Supports the case where * the number of entries which are aligned correctly. Supports the case where
* consecutive SG elements are actually fragments of the same physcial page. * consecutive SG elements are actually fragments of the same physcial page.
*/ */
static unsigned int iser_data_buf_aligned_len(struct iser_data_buf *data, static int iser_data_buf_aligned_len(struct iser_data_buf *data,
struct ib_device *ibdev) struct ib_device *ibdev)
{ {
struct scatterlist *sgl, *sg; struct scatterlist *sgl, *sg, *next_sg = NULL;
u64 end_addr, next_addr; u64 start_addr, end_addr;
int i, cnt; int i, ret_len, start_check = 0;
unsigned int ret_len = 0;
if (data->dma_nents == 1)
return 1;
sgl = (struct scatterlist *)data->buf; sgl = (struct scatterlist *)data->buf;
start_addr = ib_sg_dma_address(ibdev, sgl);
cnt = 0;
for_each_sg(sgl, sg, data->dma_nents, i) { for_each_sg(sgl, sg, data->dma_nents, i) {
/* iser_dbg("Checking sg iobuf [%d]: phys=0x%08lX " if (start_check && !IS_4K_ALIGNED(start_addr))
"offset: %ld sz: %ld\n", i, break;
(unsigned long)sg_phys(sg),
(unsigned long)sg->offset, next_sg = sg_next(sg);
(unsigned long)sg->length); */ if (!next_sg)
end_addr = ib_sg_dma_address(ibdev, sg) + break;
ib_sg_dma_len(ibdev, sg);
/* iser_dbg("Checking sg iobuf end address " end_addr = start_addr + ib_sg_dma_len(ibdev, sg);
"0x%08lX\n", end_addr); */ start_addr = ib_sg_dma_address(ibdev, next_sg);
if (i + 1 < data->dma_nents) {
next_addr = ib_sg_dma_address(ibdev, sg_next(sg)); if (end_addr == start_addr) {
/* are i, i+1 fragments of the same page? */ start_check = 0;
if (end_addr == next_addr) { continue;
cnt++; } else
continue; start_check = 1;
} else if (!IS_4K_ALIGNED(end_addr)) {
ret_len = cnt + 1; if (!IS_4K_ALIGNED(end_addr))
break; break;
}
}
cnt++;
} }
if (i == data->dma_nents) ret_len = (next_sg) ? i : i+1;
ret_len = cnt; /* loop ended */
iser_dbg("Found %d aligned entries out of %d in sg:0x%p\n", iser_dbg("Found %d aligned entries out of %d in sg:0x%p\n",
ret_len, data->dma_nents, data); ret_len, data->dma_nents, data);
return ret_len; return ret_len;