block: Skip I/O merges when disabled
The block I/O + elevator + I/O scheduler code spends a lot of time trying to merge I/Os -- rightfully so under "normal" circumstances. However, if one were to know that the incoming I/O stream was /very/ random in nature, the cycles are wasted.

This patch adds a per-request_queue tunable that (when set) disables merge attempts (beyond the simple one-hit cache check), thus freeing up a non-trivial amount of CPU cycles.

Signed-off-by: Alan D. Brunelle <alan.brunelle@hp.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
parent d7e3c3249e
commit ac9fafa124

3 changed files with 31 additions and 0 deletions
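Before the per-file diffs, a brief usage sketch (not part of the commit): the tunable surfaces as a per-device sysfs file, and queue_var_store() treats any non-zero write as "enable". The device name sda below is an assumption for illustration, and writing requires root because the attribute is created with mode S_IRUGO | S_IWUSR.

/* Hypothetical user-space toggle for the new "nomerges" attribute. */
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	const char *path = "/sys/block/sda/queue/nomerges"; /* assumed device */
	char buf[16];
	FILE *f;

	f = fopen(path, "w");		/* root-only: mode is S_IRUGO | S_IWUSR */
	if (!f) {
		perror(path);
		return EXIT_FAILURE;
	}
	fputs("1\n", f);		/* non-zero sets QUEUE_FLAG_NOMERGES */
	fclose(f);

	f = fopen(path, "r");		/* read back via queue_nomerges_show() */
	if (f) {
		if (fgets(buf, sizeof(buf), f))
			printf("nomerges is now: %s", buf);
		fclose(f);
	}
	return EXIT_SUCCESS;
}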
block/blk-sysfs.c

@@ -135,6 +135,25 @@ static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page)
 	return queue_var_show(max_hw_sectors_kb, (page));
 }
 
+static ssize_t queue_nomerges_show(struct request_queue *q, char *page)
+{
+	return queue_var_show(blk_queue_nomerges(q), page);
+}
+
+static ssize_t queue_nomerges_store(struct request_queue *q, const char *page,
+				    size_t count)
+{
+	unsigned long nm;
+	ssize_t ret = queue_var_store(&nm, page, count);
+
+	if (nm)
+		set_bit(QUEUE_FLAG_NOMERGES, &q->queue_flags);
+	else
+		clear_bit(QUEUE_FLAG_NOMERGES, &q->queue_flags);
+
+	return ret;
+}
+
 static struct queue_sysfs_entry queue_requests_entry = {
 	.attr = {.name = "nr_requests", .mode = S_IRUGO | S_IWUSR },

@@ -170,6 +189,12 @@ static struct queue_sysfs_entry queue_hw_sector_size_entry = {
 	.show = queue_hw_sector_size_show,
 };
 
+static struct queue_sysfs_entry queue_nomerges_entry = {
+	.attr = {.name = "nomerges", .mode = S_IRUGO | S_IWUSR },
+	.show = queue_nomerges_show,
+	.store = queue_nomerges_store,
+};
+
 static struct attribute *default_attrs[] = {
 	&queue_requests_entry.attr,
 	&queue_ra_entry.attr,

@@ -177,6 +202,7 @@ static struct attribute *default_attrs[] = {
 	&queue_max_sectors_entry.attr,
 	&queue_iosched_entry.attr,
 	&queue_hw_sector_size_entry.attr,
+	&queue_nomerges_entry.attr,
 	NULL,
 };
block/elevator.c

@@ -488,6 +488,9 @@ int elv_merge(struct request_queue *q, struct request **req, struct bio *bio)
 		}
 	}
 
+	if (blk_queue_nomerges(q))
+		return ELEVATOR_NO_MERGE;
+
 	/*
 	 * See if our hash lookup can find a potential backmerge.
 	 */
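Note where the new test lands in elv_merge(): just after the one-hit cache (q->last_merge) and just before the hash lookup and the scheduler's own merge scan -- exactly the "beyond the simple one-hit cache check" caveat in the commit message. Below is a compilable user-space model of that ordering; every type and helper in it is a simplified stand-in, not a kernel API.

#include <stdbool.h>
#include <stdio.h>

enum { ELEVATOR_NO_MERGE, ELEVATOR_BACK_MERGE };

struct bio           { unsigned long sector; };
struct request       { unsigned long sector, nr_sectors; };
struct request_queue {
	struct request *last_merge;	/* the one-hit merge cache */
	bool nomerges;			/* models QUEUE_FLAG_NOMERGES */
};

/* Stand-in back-merge test: the bio starts where the request ends. */
static bool bio_follows(const struct request *rq, const struct bio *bio)
{
	return rq && bio->sector == rq->sector + rq->nr_sectors;
}

static int elv_merge_model(struct request_queue *q, struct request **req,
			   struct bio *bio)
{
	/* 1. One-hit cache: still consulted even with merging disabled. */
	if (bio_follows(q->last_merge, bio)) {
		*req = q->last_merge;
		return ELEVATOR_BACK_MERGE;
	}

	/* 2. The gate this patch adds: bail out before any costlier scan. */
	if (q->nomerges)
		return ELEVATOR_NO_MERGE;

	/* 3. Hash lookup and elevator-specific merging would follow here. */
	return ELEVATOR_NO_MERGE;
}

int main(void)
{
	struct request last = { .sector = 0, .nr_sectors = 8 };
	struct request_queue q = { .last_merge = &last, .nomerges = true };
	struct bio adjacent = { .sector = 8 }, random_io = { .sector = 64 };
	struct request *rq = NULL;

	printf("adjacent bio: %d (back merge)\n",
	       elv_merge_model(&q, &rq, &adjacent));
	printf("random bio:   %d (no merge)\n",
	       elv_merge_model(&q, &rq, &random_io));
	return 0;
}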
include/linux/blkdev.h

@@ -408,6 +408,7 @@ struct request_queue
 #define QUEUE_FLAG_PLUGGED	7	/* queue is plugged */
 #define QUEUE_FLAG_ELVSWITCH	8	/* don't use elevator, just do FIFO */
 #define QUEUE_FLAG_BIDI		9	/* queue supports bidi requests */
+#define QUEUE_FLAG_NOMERGES	10	/* disable merge attempts */
 
 static inline void queue_flag_set_unlocked(unsigned int flag,
 					   struct request_queue *q)

@@ -476,6 +477,7 @@ enum {
 #define blk_queue_plugged(q)	test_bit(QUEUE_FLAG_PLUGGED, &(q)->queue_flags)
 #define blk_queue_tagged(q)	test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags)
 #define blk_queue_stopped(q)	test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
+#define blk_queue_nomerges(q)	test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
 #define blk_queue_flushing(q)	((q)->ordseq)
 
 #define blk_fs_request(rq)	((rq)->cmd_type == REQ_TYPE_FS)
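The plumbing here is plain bit arithmetic on q->queue_flags: the store handler uses the kernel's atomic set_bit()/clear_bit() on bit 10, and blk_queue_nomerges() is test_bit() on the same bit. A user-space sketch of the idea with ordinary, non-atomic operations, since the atomic bitops themselves are kernel-only:

#include <stdio.h>

#define QUEUE_FLAG_NOMERGES	10	/* same bit number as the patch */

/* Plain stand-ins for the kernel's atomic set_bit/clear_bit/test_bit. */
static void set_bit_model(int nr, unsigned long *word)   { *word |=  1UL << nr; }
static void clear_bit_model(int nr, unsigned long *word) { *word &= ~(1UL << nr); }
static int  test_bit_model(int nr, const unsigned long *word)
{
	return (int)((*word >> nr) & 1UL);
}

int main(void)
{
	unsigned long queue_flags = 0;	/* models q->queue_flags */

	set_bit_model(QUEUE_FLAG_NOMERGES, &queue_flags);	/* echo 1 > nomerges */
	printf("nomerges=%d\n", test_bit_model(QUEUE_FLAG_NOMERGES, &queue_flags));

	clear_bit_model(QUEUE_FLAG_NOMERGES, &queue_flags);	/* echo 0 > nomerges */
	printf("nomerges=%d\n", test_bit_model(QUEUE_FLAG_NOMERGES, &queue_flags));
	return 0;
}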