async_pq: kill a stray dma_map() call and other cleanups
- update the kernel doc for async_syndrome to indicate what NULL in the
  source list means
- whitespace fixups

Signed-off-by: Dan Williams <dan.j.williams@intel.com>
parent 6629542e79
commit 5676470f06
1 changed file with 8 additions and 7 deletions
@@ -181,10 +181,14 @@ do_sync_gen_syndrome(struct page **blocks, unsigned int offset, int disks,
  * blocks[disks-1] to NULL. When P or Q is omitted 'len' must be <=
  * PAGE_SIZE as a temporary buffer of this size is used in the
  * synchronous path. 'disks' always accounts for both destination
- * buffers.
+ * buffers. If any source buffers (blocks[i] where i < disks - 2) are
+ * set to NULL those buffers will be replaced with the raid6_zero_page
+ * in the synchronous path and omitted in the hardware-asynchronous
+ * path.
  *
  * 'blocks' note: if submit->scribble is NULL then the contents of
- * 'blocks' may be overridden
+ * 'blocks' may be overwritten to perform address conversions
+ * (dma_map_page() or page_address()).
  */
 struct dma_async_tx_descriptor *
 async_gen_syndrome(struct page **blocks, unsigned int offset, int disks,
@@ -283,13 +287,13 @@ async_syndrome_val(struct page **blocks, unsigned int offset, int disks,
 		if (!P(blocks, disks))
 			dma_flags |= DMA_PREP_PQ_DISABLE_P;
 		else
-			pq[0] = dma_map_page(dev, P(blocks,disks),
+			pq[0] = dma_map_page(dev, P(blocks, disks),
 					     offset, len,
 					     DMA_TO_DEVICE);
 		if (!Q(blocks, disks))
 			dma_flags |= DMA_PREP_PQ_DISABLE_Q;
 		else
-			pq[1] = dma_map_page(dev, Q(blocks,disks),
+			pq[1] = dma_map_page(dev, Q(blocks, disks),
 					     offset, len,
 					     DMA_TO_DEVICE);
 
@@ -303,9 +307,6 @@ async_syndrome_val(struct page **blocks, unsigned int offset, int disks,
 			coefs[src_cnt] = raid6_gfexp[i];
 			src_cnt++;
 		}
-		pq[1] = dma_map_page(dev, Q(blocks,disks),
-				     offset, len,
-				     DMA_TO_DEVICE);
 
 		for (;;) {
 			tx = device->device_prep_dma_pq_val(chan, pq, dma_src,
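For context on the kernel-doc change above: below is a minimal, hypothetical userspace sketch (plain C, not the kernel implementation; zero_page, substitute_zero_sources() and compact_sources() are invented names standing in for raid6_zero_page and the real sync/async paths, and a 4 KiB PAGE_SIZE is assumed) of the convention the new wording documents. NULL source buffers are replaced with a shared zero page in the synchronous path and simply omitted in the hardware-asynchronous path.

/*
 * Hypothetical sketch only, not kernel code: illustrates the NULL-source
 * convention documented in the async_gen_syndrome kernel-doc above.
 */
#include <stdio.h>
#include <string.h>

#define PAGE_SIZE 4096
#define NDISKS    6	/* 4 data sources + P + Q; 'disks' counts both destinations */

static unsigned char zero_page[PAGE_SIZE];	/* stand-in for raid6_zero_page */

/* Synchronous-style handling: point every NULL source at the zero page. */
static void substitute_zero_sources(unsigned char **blocks, int disks)
{
	for (int i = 0; i < disks - 2; i++)	/* blocks[disks-2..disks-1] are P and Q */
		if (!blocks[i])
			blocks[i] = zero_page;
}

/* Asynchronous-style handling: build a compact source list that omits NULL entries. */
static int compact_sources(unsigned char **blocks, int disks,
			   unsigned char **src, int max_src)
{
	int src_cnt = 0;

	for (int i = 0; i < disks - 2 && src_cnt < max_src; i++)
		if (blocks[i])
			src[src_cnt++] = blocks[i];
	return src_cnt;
}

int main(void)
{
	static unsigned char d0[PAGE_SIZE], d2[PAGE_SIZE], p[PAGE_SIZE], q[PAGE_SIZE];
	/* blocks[1] and blocks[3] are intentionally NULL, as the updated comment allows. */
	unsigned char *blocks[NDISKS] = { d0, NULL, d2, NULL, p, q };
	unsigned char *src[NDISKS - 2];
	int src_cnt;

	memset(d0, 0xaa, sizeof(d0));
	memset(d2, 0x55, sizeof(d2));

	src_cnt = compact_sources(blocks, NDISKS, src, NDISKS - 2);
	printf("async-style path sees %d real sources\n", src_cnt);

	substitute_zero_sources(blocks, NDISKS);
	printf("sync-style path: blocks[1] now points at the zero page: %s\n",
	       blocks[1] == zero_page ? "yes" : "no");
	return 0;
}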