bio: fix bio_copy_kern() handling of bio->bv_len
The commit 68154e90c9 introduced bio_copy_kern() to add bounce support to blk_rq_map_kern().
bio_copy_kern() uses bio->bv_len to copy data back for READ commands after
completion, but that does not work for a request that completed only
partially. SCSI always completes a PC request as a whole, but it seems
that some drivers don't.
This patch fixes bio_copy_kern() to handle that case. As bio_copy_user()
does, bio_copy_kern() now uses struct bio_map_data to store the original
struct bio_vec, so the completion handler copies data back using the
original segment lengths rather than the possibly shrunken bv_len values.
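To make the idea concrete, here is a minimal, self-contained userspace C sketch (not the kernel code; the names seg, map_data and complete_read are hypothetical) of the technique: the original segment lengths are captured up front, and the completion path copies data back using those cached lengths instead of the live bv_len values, which a partial completion may have shrunk.

#include <stdio.h>
#include <string.h>

struct seg {                     /* stands in for struct bio_vec */
        char   *buf;
        size_t  len;             /* may be shrunk by a partial completion */
};

struct map_data {                /* stands in for struct bio_map_data */
        const size_t *orig_len;  /* lengths captured before the I/O started */
        size_t        nr;
};

/* Copy data back to the caller's buffer using the cached lengths,
 * so a partially completed request still advances dst correctly. */
static void complete_read(const struct seg *segs, const struct map_data *bmd,
                          char *dst)
{
        for (size_t i = 0; i < bmd->nr; i++) {
                memcpy(dst, segs[i].buf, bmd->orig_len[i]);
                dst += bmd->orig_len[i];
        }
}

int main(void)
{
        char a[] = "abcd", b[] = "efgh", out[9] = "";
        struct seg segs[2] = { { a, 4 }, { b, 4 } };
        const size_t lens[2] = { 4, 4 };   /* original lengths */
        struct map_data bmd = { lens, 2 };

        segs[1].len = 0;    /* simulate a partial completion shrinking a length */
        complete_read(segs, &bmd, out);
        puts(out);          /* prints "abcdefgh" */
        return 0;
}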
Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Reported-by: Nix <nix@esperi.org.uk>
Tested-by: Nix <nix@esperi.org.uk>
Cc: stable@kernel.org
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
parent 48fd4f93a0
commit 76029ff37f
1 changed file with 28 additions and 10 deletions
fs/bio.c | 38 ++++++++++++++++++++++++++++----------
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -469,20 +469,21 @@ static void bio_free_map_data(struct bio_map_data *bmd)
 	kfree(bmd);
 }
 
-static struct bio_map_data *bio_alloc_map_data(int nr_segs, int iov_count)
+static struct bio_map_data *bio_alloc_map_data(int nr_segs, int iov_count,
+					       gfp_t gfp_mask)
 {
-	struct bio_map_data *bmd = kmalloc(sizeof(*bmd), GFP_KERNEL);
+	struct bio_map_data *bmd = kmalloc(sizeof(*bmd), gfp_mask);
 
 	if (!bmd)
 		return NULL;
 
-	bmd->iovecs = kmalloc(sizeof(struct bio_vec) * nr_segs, GFP_KERNEL);
+	bmd->iovecs = kmalloc(sizeof(struct bio_vec) * nr_segs, gfp_mask);
 	if (!bmd->iovecs) {
 		kfree(bmd);
 		return NULL;
 	}
 
-	bmd->sgvecs = kmalloc(sizeof(struct sg_iovec) * iov_count, GFP_KERNEL);
+	bmd->sgvecs = kmalloc(sizeof(struct sg_iovec) * iov_count, gfp_mask);
 	if (bmd->sgvecs)
 		return bmd;
 
@@ -596,7 +597,7 @@ struct bio *bio_copy_user_iov(struct request_queue *q, struct sg_iovec *iov,
 		len += iov[i].iov_len;
 	}
 
-	bmd = bio_alloc_map_data(nr_pages, iov_count);
+	bmd = bio_alloc_map_data(nr_pages, iov_count, GFP_KERNEL);
 	if (!bmd)
 		return ERR_PTR(-ENOMEM);
 
@@ -942,19 +943,22 @@ static void bio_copy_kern_endio(struct bio *bio, int err)
 {
 	struct bio_vec *bvec;
 	const int read = bio_data_dir(bio) == READ;
-	char *p = bio->bi_private;
+	struct bio_map_data *bmd = bio->bi_private;
 	int i;
+	char *p = bmd->sgvecs[0].iov_base;
 
 	__bio_for_each_segment(bvec, bio, i, 0) {
 		char *addr = page_address(bvec->bv_page);
+		int len = bmd->iovecs[i].bv_len;
 
 		if (read && !err)
-			memcpy(p, addr, bvec->bv_len);
+			memcpy(p, addr, len);
 
 		__free_page(bvec->bv_page);
-		p += bvec->bv_len;
+		p += len;
 	}
 
+	bio_free_map_data(bmd);
 	bio_put(bio);
 }
 
@@ -978,11 +982,21 @@ struct bio *bio_copy_kern(struct request_queue *q, void *data, unsigned int len,
 	const int nr_pages = end - start;
 	struct bio *bio;
 	struct bio_vec *bvec;
+	struct bio_map_data *bmd;
 	int i, ret;
+	struct sg_iovec iov;
+
+	iov.iov_base = data;
+	iov.iov_len = len;
+
+	bmd = bio_alloc_map_data(nr_pages, 1, gfp_mask);
+	if (!bmd)
+		return ERR_PTR(-ENOMEM);
 
+	ret = -ENOMEM;
 	bio = bio_alloc(gfp_mask, nr_pages);
 	if (!bio)
-		return ERR_PTR(-ENOMEM);
+		goto out_bmd;
 
 	while (len) {
 		struct page *page;
@@ -1016,14 +1030,18 @@ struct bio *bio_copy_kern(struct request_queue *q, void *data, unsigned int len,
 		}
 	}
 
-	bio->bi_private = data;
+	bio->bi_private = bmd;
 	bio->bi_end_io = bio_copy_kern_endio;
+
+	bio_set_map_data(bmd, bio, &iov, 1);
 	return bio;
 cleanup:
 	bio_for_each_segment(bvec, bio, i)
 		__free_page(bvec->bv_page);
 
 	bio_put(bio);
+out_bmd:
+	bio_free_map_data(bmd);
 
 	return ERR_PTR(ret);
 }
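A note on the error-path change above (return ERR_PTR(-ENOMEM) becomes goto out_bmd): the following is a small, self-contained C sketch, with hypothetical names (alloc_map_data, copy_kern_sketch), of the allocate-first, unwind-on-failure pattern the patch adopts; once the map data has been allocated, every later failure jumps to a label that frees it.

#include <stdio.h>
#include <stdlib.h>

struct map_data { int nr; };     /* stands in for struct bio_map_data */

static struct map_data *alloc_map_data(int nr)
{
        struct map_data *bmd = malloc(sizeof(*bmd));

        if (bmd)
                bmd->nr = nr;
        return bmd;
}

/* Allocate the map data first; any later failure unwinds through
 * out_bmd so the map data never leaks on an error path. */
static char *copy_kern_sketch(size_t len, struct map_data **bmdp)
{
        struct map_data *bmd;
        char *buf;

        bmd = alloc_map_data(1);
        if (!bmd)
                return NULL;

        buf = malloc(len);
        if (!buf)
                goto out_bmd;    /* mirrors "goto out_bmd" in the patch */

        *bmdp = bmd;             /* success: caller owns both allocations */
        return buf;

out_bmd:
        free(bmd);
        return NULL;
}

int main(void)
{
        struct map_data *bmd = NULL;
        char *buf = copy_kern_sketch(64, &bmd);

        printf("%s\n", buf ? "allocated" : "failed");
        free(buf);
        free(bmd);
        return 0;
}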