bio: add support for inlining a number of bio_vecs inside the bio

When we go and allocate a bio for IO, we actually do two allocations: one
for the bio itself, and one for the bi_io_vec that holds the actual pages
we are interested in. This feature inlines a definable number of io vecs
inside the bio itself, so we eliminate the bio_vec array allocation for
IOs up to a certain size. It defaults to 4 vecs, which is typically 16k
of IO.

Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
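In userspace terms this is the familiar tail-allocation pattern. Below is a
minimal standalone sketch of the idea, using made-up names (struct io,
io_alloc, io_free) rather than the kernel API:

#include <stdlib.h>
#include <string.h>

#define INLINE_VECS 4			/* mirrors BIO_INLINE_VECS */

struct vec { void *page; unsigned int len, offset; };

struct io {
	unsigned short	max_vecs;
	struct vec	*vecs;		/* points at the tail or a heap array */
	struct vec	inline_vecs[];	/* must stay the last member */
};

static struct io *io_alloc(int nr_vecs)
{
	/* a single allocation always includes room for the inline tail */
	struct io *io = malloc(sizeof(*io) + INLINE_VECS * sizeof(struct vec));

	if (!io)
		return NULL;
	if (nr_vecs <= INLINE_VECS) {
		io->vecs = io->inline_vecs;	/* no second allocation */
		io->max_vecs = INLINE_VECS;
		memset(io->vecs, 0, INLINE_VECS * sizeof(struct vec));
	} else {
		io->vecs = calloc(nr_vecs, sizeof(struct vec));
		if (!io->vecs) {
			free(io);
			return NULL;
		}
		io->max_vecs = nr_vecs;
	}
	return io;
}

static void io_free(struct io *io)
{
	if (io->vecs != io->inline_vecs)	/* cf. bio_has_allocated_vec() */
		free(io->vecs);
	free(io);
}

With the default of 4 vecs and typical 4k pages, the inline tail covers
4 * 4096 = 16k of IO, which is where the "typically 16k" figure in the
message comes from.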
This commit is contained in:

parent bb799ca020
commit 392ddc3298

2 changed files with 34 additions and 5 deletions
fs/bio.c (27 changes)

@@ -31,6 +31,12 @@
 
 DEFINE_TRACE(block_split);
 
+/*
+ * Test patch to inline a certain number of bi_io_vec's inside the bio
+ * itself, to shrink a bio data allocation from two mempool calls to one
+ */
+#define BIO_INLINE_VECS		4
+
 static mempool_t *bio_split_pool __read_mostly;
 
 /*
@@ -241,7 +247,7 @@ void bio_free(struct bio *bio, struct bio_set *bs)
 {
	void *p;
 
-	if (bio->bi_io_vec)
+	if (bio_has_allocated_vec(bio))
		bvec_free_bs(bs, bio->bi_io_vec, BIO_POOL_IDX(bio));
 
	if (bio_integrity(bio))
@@ -267,7 +273,8 @@ static void bio_fs_destructor(struct bio *bio)
 
 static void bio_kmalloc_destructor(struct bio *bio)
 {
-	kfree(bio->bi_io_vec);
+	if (bio_has_allocated_vec(bio))
+		kfree(bio->bi_io_vec);
	kfree(bio);
 }
 
@@ -314,7 +321,16 @@ struct bio *bio_alloc_bioset(gfp_t gfp_mask, int nr_iovecs, struct bio_set *bs)
	if (likely(nr_iovecs)) {
		unsigned long uninitialized_var(idx);
 
-		bvl = bvec_alloc_bs(gfp_mask, nr_iovecs, &idx, bs);
+		if (nr_iovecs <= BIO_INLINE_VECS) {
+			idx = 0;
+			bvl = bio->bi_inline_vecs;
+			nr_iovecs = BIO_INLINE_VECS;
+			memset(bvl, 0, BIO_INLINE_VECS * sizeof(*bvl));
+		} else {
+			bvl = bvec_alloc_bs(gfp_mask, nr_iovecs, &idx,
+						bs);
+			nr_iovecs = bvec_nr_vecs(idx);
+		}
		if (unlikely(!bvl)) {
			if (bs)
				mempool_free(bio, bs->bio_pool);
@@ -324,7 +340,7 @@ struct bio *bio_alloc_bioset(gfp_t gfp_mask, int nr_iovecs, struct bio_set *bs)
			goto out;
		}
		bio->bi_flags |= idx << BIO_POOL_OFFSET;
-		bio->bi_max_vecs = bvec_nr_vecs(idx);
+		bio->bi_max_vecs = nr_iovecs;
	}
	bio->bi_io_vec = bvl;
 }
@@ -1525,6 +1541,7 @@ void bioset_free(struct bio_set *bs)
  */
 struct bio_set *bioset_create(unsigned int pool_size, unsigned int front_pad)
 {
+	unsigned int back_pad = BIO_INLINE_VECS * sizeof(struct bio_vec);
	struct bio_set *bs;
 
	bs = kzalloc(sizeof(*bs), GFP_KERNEL);
@@ -1533,7 +1550,7 @@ struct bio_set *bioset_create(unsigned int pool_size, unsigned int front_pad)
 
	bs->front_pad = front_pad;
 
-	bs->bio_slab = bio_find_or_create_slab(front_pad);
+	bs->bio_slab = bio_find_or_create_slab(front_pad + back_pad);
	if (!bs->bio_slab) {
		kfree(bs);
		return NULL;
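The last two fs/bio.c hunks are the other half of the change: the inline
vecs live past the end of struct bio, so every slab object backing a
bio_set has to grow by BIO_INLINE_VECS * sizeof(struct bio_vec). Here is a
standalone sketch of that sizing arithmetic, with toy values standing in
for the kernel types and assuming bio_find_or_create_slab() adds its
argument on top of sizeof(struct bio):

#include <stdio.h>

#define BIO_INLINE_VECS 4

/* same layout as the kernel's bio_vec: page pointer, length, offset */
struct bio_vec { void *bv_page; unsigned int bv_len, bv_offset; };

/* hypothetical helper: how large each slab object must end up */
static size_t slab_object_size(size_t bio_size, unsigned int front_pad)
{
	size_t back_pad = BIO_INLINE_VECS * sizeof(struct bio_vec);

	/* pad in front of the bio + the bio itself + the inline vec tail */
	return front_pad + bio_size + back_pad;
}

int main(void)
{
	/* 4 vecs of 16 bytes each on a 64-bit build: 64 extra bytes */
	printf("back pad = %zu bytes\n",
	       (size_t)BIO_INLINE_VECS * sizeof(struct bio_vec));
	/* 200 is an arbitrary stand-in for sizeof(struct bio) */
	printf("object   = %zu bytes\n", slab_object_size(200, 0));
	return 0;
}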
include/linux/bio.h (12 changes)

@@ -102,6 +102,13 @@ struct bio {
 #endif
 
	bio_destructor_t	*bi_destructor;	/* destructor */
+
+	/*
+	 * We can inline a number of vecs at the end of the bio, to avoid
+	 * double allocations for a small number of bio_vecs. This member
+	 * MUST obviously be kept at the very end of the bio.
+	 */
+	struct bio_vec		bi_inline_vecs[0];
 };
 
 /*
@@ -213,6 +220,11 @@ static inline void *bio_data(struct bio *bio)
	return NULL;
 }
 
+static inline int bio_has_allocated_vec(struct bio *bio)
+{
+	return bio->bi_io_vec && bio->bi_io_vec != bio->bi_inline_vecs;
+}
+
 /*
  * will die
  */
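One detail worth calling out in the header change: bi_inline_vecs[0] is the
zero-length-array idiom (a GNU C extension; C99 spells it bi_inline_vecs[]
as a flexible array member). The member occupies no space in the struct
itself but indexes whatever storage sits immediately behind it. A minimal
userspace demo with made-up names:

#include <stdio.h>
#include <stdlib.h>

struct tailed {
	int	n;
	int	tail[0];	/* zero-length array, must be last */
};

int main(void)
{
	/* the tail contributes nothing to sizeof, so allocate it explicitly */
	struct tailed *t = malloc(sizeof(*t) + 4 * sizeof(int));

	if (!t)
		return 1;
	t->n = 4;
	for (int i = 0; i < t->n; i++)
		t->tail[i] = i * i;	/* lands in the extra space */
	printf("sizeof(struct tailed) = %zu, tail[3] = %d\n",
	       sizeof(struct tailed), t->tail[3]);
	free(t);
	return 0;
}

This is also why the comment insists the member "MUST obviously be kept at
the very end of the bio": anything declared after it would overlap the
inline vec storage.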