block: separate out padding from alignment

Block layer alignment was used for two different purposes - memory
alignment and padding.  This causes problems in lower layers because
drivers which only require memory alignment end up with an adjusted
rq->data_len.  Separate out padding such that padding occurs only if
the driver explicitly requests it.
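
As a minimal sketch (not part of this patch; the setup function is
hypothetical and the mask value simply mirrors the libata hunk below),
a driver that still wants padded transfers now has to request it
explicitly, while a driver that only needs memory alignment sets the
alignment alone and no longer sees rq->data_len adjusted:

	/* Hypothetical queue setup after this change. */
	static void example_setup_queue(struct request_queue *q)
	{
		/* memory alignment only governs whether user buffers can
		 * be mapped directly in __blk_rq_map_user() */
		blk_queue_update_dma_alignment(q, ATA_DMA_PAD_SZ - 1);

		/* padding is now a separate, explicit opt-in */
		blk_queue_dma_pad(q, ATA_DMA_PAD_SZ - 1);
	}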

Tomo: restore the code that updates the bio in blk_rq_map_user(),
      introduced by commit 40b01b9bbd, according to the padding
      alignment.

Signed-off-by: Tejun Heo <htejun@gmail.com>
Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
Author: Tejun Heo, 2008-03-04 11:18:17 +01:00 (committed by Jens Axboe)
parent 7a85f8896f
commit e3790c7d42
4 changed files with 34 additions and 8 deletions

diff --git a/block/blk-map.c b/block/blk-map.c

@@ -43,6 +43,7 @@ static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
 			     void __user *ubuf, unsigned int len)
 {
 	unsigned long uaddr;
+	unsigned int alignment;
 	struct bio *bio, *orig_bio;
 	int reading, ret;
@@ -53,8 +54,8 @@ static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
 	 * direct dma. else, set up kernel bounce buffers
 	 */
 	uaddr = (unsigned long) ubuf;
-	if (!(uaddr & queue_dma_alignment(q)) &&
-	    !(len & queue_dma_alignment(q)))
+	alignment = queue_dma_alignment(q) | q->dma_pad_mask;
+	if (!(uaddr & alignment) && !(len & alignment))
 		bio = bio_map_user(q, NULL, uaddr, len, reading);
 	else
 		bio = bio_copy_user(q, uaddr, len, reading);
@@ -141,15 +142,20 @@ int blk_rq_map_user(struct request_queue *q, struct request *rq,
 	/*
 	 * __blk_rq_map_user() copies the buffers if starting address
-	 * or length isn't aligned.  As the copied buffer is always
-	 * page aligned, we know that there's enough room for padding.
-	 * Extend the last bio and update rq->data_len accordingly.
+	 * or length isn't aligned to dma_pad_mask.  As the copied
+	 * buffer is always page aligned, we know that there's enough
+	 * room for padding.  Extend the last bio and update
+	 * rq->data_len accordingly.
 	 *
 	 * On unmap, bio_uncopy_user() will use unmodified
 	 * bio_map_data pointed to by bio->bi_private.
 	 */
-	if (len & queue_dma_alignment(q)) {
-		unsigned int pad_len = (queue_dma_alignment(q) & ~len) + 1;
+	if (len & q->dma_pad_mask) {
+		unsigned int pad_len = (q->dma_pad_mask & ~len) + 1;
+		struct bio *bio = rq->biotail;
+
+		bio->bi_io_vec[bio->bi_vcnt - 1].bv_len += pad_len;
+		bio->bi_size += pad_len;
 
 		rq->extra_len += pad_len;
 	}
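
Side note (illustration only, not part of the patch): the pad_len
expression rounds a misaligned length up to the next multiple of
(q->dma_pad_mask + 1).  A standalone userspace sketch of the same
arithmetic, assuming a mask of ATA_DMA_PAD_SZ - 1 == 3:

	#include <stdio.h>

	int main(void)
	{
		unsigned int mask = 3;		/* e.g. ATA_DMA_PAD_SZ - 1 */
		unsigned int len = 510;		/* misaligned transfer length */

		if (len & mask) {		/* same test as in blk_rq_map_user() */
			unsigned int pad_len = (mask & ~len) + 1;

			/* prints: pad_len=2 padded=512 */
			printf("pad_len=%u padded=%u\n", pad_len, len + pad_len);
		}
		return 0;
	}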

diff --git a/block/blk-settings.c b/block/blk-settings.c

@@ -292,6 +292,23 @@ void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b)
 }
 EXPORT_SYMBOL(blk_queue_stack_limits);
 
+/**
+ * blk_queue_dma_pad - set pad mask
+ * @q:     the request queue for the device
+ * @mask:  pad mask
+ *
+ * Set pad mask.  Direct IO requests are padded to the mask specified.
+ *
+ * Appending pad buffer to a request modifies ->data_len such that it
+ * includes the pad buffer.  The original requested data length can be
+ * obtained using blk_rq_raw_data_len().
+ **/
+void blk_queue_dma_pad(struct request_queue *q, unsigned int mask)
+{
+	q->dma_pad_mask = mask;
+}
+EXPORT_SYMBOL(blk_queue_dma_pad);
+
 /**
  * blk_queue_dma_drain - Set up a drain buffer for excess dma.
  * @q:  the request queue for the device

diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c

@@ -862,9 +862,10 @@ static int ata_scsi_dev_config(struct scsi_device *sdev,
 		struct request_queue *q = sdev->request_queue;
 		void *buf;
 
-		/* set the min alignment */
+		/* set the min alignment and padding */
 		blk_queue_update_dma_alignment(sdev->request_queue,
 					       ATA_DMA_PAD_SZ - 1);
+		blk_queue_dma_pad(sdev->request_queue, ATA_DMA_PAD_SZ - 1);
 
 		/* configure draining */
 		buf = kmalloc(ATAPI_MAX_DRAIN, q->bounce_gfp | GFP_KERNEL);

diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h

@@ -362,6 +362,7 @@ struct request_queue
 	unsigned long		seg_boundary_mask;
 	void			*dma_drain_buffer;
 	unsigned int		dma_drain_size;
+	unsigned int		dma_pad_mask;
 	unsigned int		dma_alignment;
 
 	struct blk_queue_tag	*queue_tags;
@@ -701,6 +702,7 @@ extern void blk_queue_max_hw_segments(struct request_queue *, unsigned short);
 extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
 extern void blk_queue_hardsect_size(struct request_queue *, unsigned short);
 extern void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b);
+extern void blk_queue_dma_pad(struct request_queue *, unsigned int);
 extern int blk_queue_dma_drain(struct request_queue *q,
 			       dma_drain_needed_fn *dma_drain_needed,
 			       void *buf, unsigned int size);