From 573412b29586e58477adb70e022193a337763319 Mon Sep 17 00:00:00 2001
From: Corrado Zoccolo
Date: Sun, 6 Dec 2009 11:48:52 +0100
Subject: [PATCH 01/10] cfq-iosched: reduce write depth only if sync was delayed

The introduction of the ramp-up formula for async queue depths has
slowed down dirty page reclaim by reducing async write performance.
This patch makes sure the formula kicks in only when a sync request
was recently delayed.

Signed-off-by: Corrado Zoccolo
Signed-off-by: Jens Axboe
---
 block/cfq-iosched.c | 9 +++++----
 1 file changed, 5 insertions(+), 4 deletions(-)

diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index cfb0b2f5f63..5009af490a0 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -283,7 +283,7 @@ struct cfq_data {
 	 */
 	struct cfq_queue oom_cfqq;
 
-	unsigned long last_end_sync_rq;
+	unsigned long last_delayed_sync;
 
 	/* List of cfq groups being managed on this device*/
 	struct hlist_head cfqg_list;
@@ -2264,7 +2264,7 @@ static bool cfq_may_dispatch(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 	 * based on the last sync IO we serviced
 	 */
 	if (!cfq_cfqq_sync(cfqq) && cfqd->cfq_latency) {
-		unsigned long last_sync = jiffies - cfqd->last_end_sync_rq;
+		unsigned long last_sync = jiffies - cfqd->last_delayed_sync;
 		unsigned int depth;
 
 		depth = last_sync / cfqd->cfq_slice[1];
@@ -3273,7 +3273,8 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
 
 	if (sync) {
 		RQ_CIC(rq)->last_end_request = now;
-		cfqd->last_end_sync_rq = now;
+		if (!time_after(rq->start_time + cfqd->cfq_fifo_expire[1], now))
+			cfqd->last_delayed_sync = now;
 	}
 
 	/*
@@ -3711,7 +3712,7 @@ static void *cfq_init_queue(struct request_queue *q)
 	cfqd->cfq_latency = 1;
 	cfqd->cfq_group_isolation = 0;
 	cfqd->hw_tag = -1;
-	cfqd->last_end_sync_rq = jiffies;
+	cfqd->last_delayed_sync = jiffies - HZ;
 	INIT_RCU_HEAD(&cfqd->rcu);
 	return cfqd;
 }

From 8b43aebdaa4fa3348dafd6f2f5f526bd3e8b84ac Mon Sep 17 00:00:00 2001
From: Philipp Reisner
Date: Sun, 6 Dec 2009 23:50:24 +0100
Subject: [PATCH 02/10] drbd: Following the hmac change to SHASH (see linux commit 8bd1209cfff)

Signed-off-by: Philipp Reisner
Signed-off-by: Lars Ellenberg
Signed-off-by: Jens Axboe
---
 drivers/block/drbd/drbd_nl.c | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
index 436a090b532..4e0726aa53b 100644
--- a/drivers/block/drbd/drbd_nl.c
+++ b/drivers/block/drbd/drbd_nl.c
@@ -1271,8 +1271,7 @@ static int drbd_nl_net_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
 		goto fail;
 	}
 
-	if (crypto_tfm_alg_type(crypto_hash_tfm(tfm))
-	    != CRYPTO_ALG_TYPE_HASH) {
+	if (crypto_tfm_alg_type(crypto_hash_tfm(tfm)) != CRYPTO_ALG_TYPE_SHASH) {
 		retcode = ERR_AUTH_ALG_ND;
 		goto fail;
 	}
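To make the behaviour that patch 01 tunes concrete, here is a minimal
userspace sketch of the async depth ramp. It assumes HZ=1000 and the stock
100ms cfq_slice[1]; the function and constant names are illustrative, not
the kernel's:

    #include <stdio.h>

    #define HZ 1000
    static const unsigned int cfq_slice_sync = HZ / 10; /* 100ms: assumed cfq default */

    /* Allowed async dispatch depth, given jiffies elapsed since a sync
     * request was last delayed. Mirrors the last_sync / cfq_slice[1] ramp. */
    static unsigned int async_depth(unsigned long since_delayed_sync,
                                    unsigned int max_dispatch)
    {
            unsigned int depth = since_delayed_sync / cfq_slice_sync;

            if (!depth)
                    depth = 1;      /* always allow minimal progress */
            return depth < max_dispatch ? depth : max_dispatch;
    }

    int main(void)
    {
            unsigned long t;

            for (t = 0; t <= 4 * cfq_slice_sync; t += cfq_slice_sync / 2)
                    printf("%4lu jiffies since delayed sync -> async depth %u\n",
                           t, async_depth(t, 4));
            return 0;
    }

The point of the patch above is that this throttle now only engages after a
sync request has actually been observed to complete late, instead of after
every sync completion.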
From a3b8d92d25212c5b6534ae9b347ed2858de78336 Mon Sep 17 00:00:00 2001
From: Mel Gorman
Date: Mon, 7 Dec 2009 22:10:46 +0100
Subject: [PATCH 03/10] block,xd: Delay allocation of DMA buffers until device is known

Loading the XD module triggers a warning like

  WARNING: at mm/page_alloc.c:1805 __alloc_pages_nodemask+0x127/0x48f()
  Hardware name: System Product Name
  Modules linked in:
  Pid: 1, comm: swapper Not tainted 2.6.32-rc8-git5 #1
  Call Trace:
   [] warn_slowpath_common+0x65/0x95
   [] warn_slowpath_null+0x12/0x15
   [] __alloc_pages_nodemask+0x127/0x48f
   [] ? get_slab+0x8/0x50
   [] alloc_page_interleave+0x2e/0x6e
   [] alloc_pages_current+0x57/0x99
   [] ? xd_init+0x0/0x482
   [] __get_free_pages+0xd/0x1e
   [] xd_init+0x4a/0x482
   [] ? loop_init+0x104/0x16a
   [] ? loop_probe+0x0/0xaf
   [] ? xd_init+0x0/0x482
   [] do_one_initcall+0x51/0x13f
   [] kernel_init+0x10b/0x15f
   [] ? kernel_init+0x0/0x15f
   [] kernel_thread_helper+0x7/0x10
  ---[ end trace 686db6333ade6e7a ]---
  xd: Out of memory.

The warning is raised because alloc_pages() is called with an order
>= MAX_ORDER. The simple reason is that get_order() returns a garbage
value when given a size of 0. The more fundamental reason is that the
XD driver initialisation is broken. It's not clear why this ever worked.

XD allocates a buffer for DMA based on the value of xd_maxsectors. This
value depends on the exact type of controller in use, but it is determined
*after* the attempt has been made to allocate the buffer, i.e. the
requested size of the DMA buffer is always 0.

This patch slightly alters how XD is initialised, allocating the buffer
when and if a device has actually been detected. The error paths are
updated to suit the new logic.

Signed-off-by: Mel Gorman
Signed-off-by: Jens Axboe
---
 drivers/block/xd.c | 30 +++++++++++++++++++-----------
 1 file changed, 19 insertions(+), 11 deletions(-)

diff --git a/drivers/block/xd.c b/drivers/block/xd.c
index 0877d3628fd..d1fd032e751 100644
--- a/drivers/block/xd.c
+++ b/drivers/block/xd.c
@@ -169,13 +169,6 @@ static int __init xd_init(void)
 	init_timer (&xd_watchdog_int);
 	xd_watchdog_int.function = xd_watchdog;
 
-	if (!xd_dma_buffer)
-		xd_dma_buffer = (char *)xd_dma_mem_alloc(xd_maxsectors * 0x200);
-	if (!xd_dma_buffer) {
-		printk(KERN_ERR "xd: Out of memory.\n");
-		return -ENOMEM;
-	}
-
 	err = -EBUSY;
 	if (register_blkdev(XT_DISK_MAJOR, "xd"))
 		goto out1;
@@ -202,6 +195,19 @@ static int __init xd_init(void)
 			xd_drives,xd_drives == 1 ? "" : "s",xd_irq,xd_dma);
 	}
 
+	/*
+	 * With the drive detected, xd_maxsectors should now be known.
+	 * If xd_maxsectors is 0, nothing was detected and we fall through
+	 * to return -ENODEV
+	 */
+	if (!xd_dma_buffer && xd_maxsectors) {
+		xd_dma_buffer = (char *)xd_dma_mem_alloc(xd_maxsectors * 0x200);
+		if (!xd_dma_buffer) {
+			printk(KERN_ERR "xd: Out of memory.\n");
+			goto out3;
+		}
+	}
+
 	err = -ENODEV;
 	if (!xd_drives)
 		goto out3;
@@ -249,15 +255,17 @@ static int __init xd_init(void)
 out4:
 	for (i = 0; i < xd_drives; i++)
 		put_disk(xd_gendisk[i]);
 out3:
-	release_region(xd_iobase,4);
+	if (xd_maxsectors)
+		release_region(xd_iobase,4);
+
+	if (xd_dma_buffer)
+		xd_dma_mem_free((unsigned long)xd_dma_buffer,
+				xd_maxsectors * 0x200);
 out2:
 	blk_cleanup_queue(xd_queue);
 out1a:
 	unregister_blkdev(XT_DISK_MAJOR, "xd");
 out1:
-	if (xd_dma_buffer)
-		xd_dma_mem_free((unsigned long)xd_dma_buffer,
-				xd_maxsectors * 0x200);
 	return err;
 Enomem:
 	err = -ENOMEM;
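The get_order(0) failure mode blamed above can be reproduced in userspace.
The sketch below re-implements the old shift-loop form of the kernel's
get_order(); PAGE_SHIFT=12, MAX_ORDER=11, and the 264-sector controller are
assumptions for illustration only:

    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define MAX_ORDER  11

    /* rough userspace copy of the kernel's historical get_order() loop */
    static int get_order(unsigned long size)
    {
            int order = -1;

            size = (size - 1) >> (PAGE_SHIFT - 1);
            do {
                    size >>= 1;
                    order++;
            } while (size);
            return order;
    }

    int main(void)
    {
            /* xd_maxsectors was still 0 at the old allocation site */
            unsigned long bytes = 0 * 0x200;
            int order = get_order(bytes);

            printf("get_order(%lu) = %d, MAX_ORDER = %d\n", bytes, order, MAX_ORDER);
            if (order >= MAX_ORDER)
                    printf("-> __get_free_pages() would WARN and fail\n");

            /* once a (hypothetical) 264-sector controller is detected,
             * the request becomes sane */
            printf("get_order(264 * 0x200) = %d\n", get_order(264UL * 0x200));
            return 0;
    }

With size 0, (size - 1) underflows to ULONG_MAX and the loop returns an
order far beyond MAX_ORDER, which is exactly the warning in the trace.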
From b9d8f4c73b1af4cfd53f819bf84c2bce31232275 Mon Sep 17 00:00:00 2001
From: Gui Jianfeng
Date: Tue, 8 Dec 2009 08:54:17 +0100
Subject: [PATCH 04/10] cfq: Optimization for close cooperating queue searching

It doesn't make any sense to search for a close cooperating queue if the
current cfqq is the only one in its group.

Signed-off-by: Gui Jianfeng
Signed-off-by: Jens Axboe
---
 block/cfq-iosched.c | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 5009af490a0..b19cd684bf1 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -1749,6 +1749,12 @@ static struct cfq_queue *cfq_close_cooperator(struct cfq_data *cfqd,
 	if (CFQQ_SEEKY(cur_cfqq))
 		return NULL;
 
+	/*
+	 * Don't search priority tree if it's the only queue in the group.
+	 */
+	if (cur_cfqq->cfqg->nr_cfqq == 1)
+		return NULL;
+
 	/*
 	 * We should notice if some of the queues are cooperating, eg
 	 * working closely on the same area of the disk. In that case,

From c244bb50a9baa2ec47a458bbafb36b5e559ed5fa Mon Sep 17 00:00:00 2001
From: Vivek Goyal
Date: Tue, 8 Dec 2009 17:52:57 -0500
Subject: [PATCH 05/10] cfq-iosched: Get rid of cfqq wait_busy_done flag

o Get rid of the wait_busy_done flag. This flag only tells us that we were
  doing wait-busy on a queue and that the queue got a request, so expire it.
  That information can easily be obtained from
  (cfq_cfqq_wait_busy() && queue_is_not_empty). So remove this flag and keep
  the code simple.

Signed-off-by: Vivek Goyal
Signed-off-by: Jens Axboe
---
 block/cfq-iosched.c | 17 ++++++++---------
 1 file changed, 8 insertions(+), 9 deletions(-)

diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index b19cd684bf1..f41fdb5f3e0 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -319,7 +319,6 @@ enum cfqq_state_flags {
 	CFQ_CFQQ_FLAG_coop,		/* cfqq is shared */
 	CFQ_CFQQ_FLAG_deep,		/* sync cfqq experienced large depth */
 	CFQ_CFQQ_FLAG_wait_busy,	/* Waiting for next request */
-	CFQ_CFQQ_FLAG_wait_busy_done,	/* Got new request. Expire the queue */
 };
 
 #define CFQ_CFQQ_FNS(name)						\
@@ -348,7 +347,6 @@ CFQ_CFQQ_FNS(sync);
 CFQ_CFQQ_FNS(coop);
 CFQ_CFQQ_FNS(deep);
 CFQ_CFQQ_FNS(wait_busy);
-CFQ_CFQQ_FNS(wait_busy_done);
 #undef CFQ_CFQQ_FNS
 
 #ifdef CONFIG_DEBUG_CFQ_IOSCHED
@@ -1574,7 +1572,6 @@ __cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 
 	cfq_clear_cfqq_wait_request(cfqq);
 	cfq_clear_cfqq_wait_busy(cfqq);
-	cfq_clear_cfqq_wait_busy_done(cfqq);
 
 	/*
 	 * store what was left of this slice, if the queue idled/timed out
@@ -2134,11 +2131,17 @@ static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
 	if (!cfqd->rq_queued)
 		return NULL;
 
+	/*
+	 * We were waiting for group to get backlogged. Expire the queue
+	 */
+	if (cfq_cfqq_wait_busy(cfqq) && !RB_EMPTY_ROOT(&cfqq->sort_list))
+		goto expire;
+
 	/*
 	 * The active queue has run out of time, expire it and select new.
 	 */
-	if ((cfq_slice_used(cfqq) || cfq_cfqq_wait_busy_done(cfqq))
-	    && !cfq_cfqq_must_dispatch(cfqq))
+	if (cfq_slice_used(cfqq) && !cfq_cfqq_must_dispatch(cfqq))
 		goto expire;
 
 	/*
@@ -3171,10 +3174,6 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 	cfqq->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq);
 
 	if (cfqq == cfqd->active_queue) {
-		if (cfq_cfqq_wait_busy(cfqq)) {
-			cfq_clear_cfqq_wait_busy(cfqq);
-			cfq_mark_cfqq_wait_busy_done(cfqq);
-		}
 		/*
 		 * Remember that we saw a request from this process, but
 		 * don't start queuing just yet. Otherwise we risk seeing lots
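Patch 05's simplification is an instance of a general pattern: a cached flag
whose value is fully derivable from existing state can be replaced by a
predicate. A toy model with made-up names (this is not kernel code) shows
the idea:

    #include <stdbool.h>
    #include <stdio.h>

    /* toy stand-in for a cfq queue; fields are illustrative */
    struct toy_queue {
            bool wait_busy;     /* armed the "wait for next request" window */
            int  queued;        /* requests sitting in the sort tree */
    };

    /* "wait_busy_done" is fully implied by the two fields above, so a
     * derived predicate replaces the extra flag and the code that had to
     * keep it in sync. */
    static bool should_expire_wait_busy(const struct toy_queue *q)
    {
            return q->wait_busy && q->queued > 0;
    }

    int main(void)
    {
            struct toy_queue q = { .wait_busy = true, .queued = 0 };

            printf("idle, still waiting: expire=%d\n", should_expire_wait_busy(&q));
            q.queued = 1;       /* new request arrives while waiting */
            printf("request arrived:     expire=%d\n", should_expire_wait_busy(&q));
            return 0;
    }

Dropping the flag also removes the enqueue-side bookkeeping that previously
had to clear wait_busy and set wait_busy_done in lockstep.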
From 7667aa0630407bc07dc38dcc79d29cc0a65553c1 Mon Sep 17 00:00:00 2001
From: Vivek Goyal
Date: Tue, 8 Dec 2009 17:52:58 -0500
Subject: [PATCH 06/10] cfq-iosched: Take care of corner cases of group losing share due to deletion

If there is a sequential reader running in a group, we wait for the next
request to come in that group after slice expiry, and once a new request
is in, we expire the queue. Otherwise we delete the group from the service
tree and the group loses its fair share.

So far I was marking a queue as wait_busy if it had consumed its slice and
it was the last queue in the group. But this condition did not cover the
following two cases:

1. A request completed and the slice has not expired yet. The next request
   comes in and is dispatched to disk. Now select_queue() hits and the
   slice has expired, so this group will be deleted. Because the request
   is still in the disk, this queue will never get a chance to wait_busy.

2. A request completed and the slice has not expired yet. Before the next
   request comes in (delay due to think time), select_queue() hits and
   expires the queue, and hence the group. This queue never got a chance
   to wait busy.

Gui was hitting boundary condition 1 and not getting fairness numbers
proportional to weight.

This patch adds checks for the above two conditions and improves the
fairness numbers for a sequential workload on rotational media. The check
in select_queue() takes care of case 1 and an additional check in
cfq_should_wait_busy() takes care of case 2.

Reported-by: Gui Jianfeng
Signed-off-by: Vivek Goyal
Signed-off-by: Jens Axboe
---
 block/cfq-iosched.c | 54 ++++++++++++++++++++++++++++++++++++++++-----
 1 file changed, 48 insertions(+), 6 deletions(-)

diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index f41fdb5f3e0..98b15b98b85 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -2141,8 +2141,22 @@ static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
 	/*
 	 * The active queue has run out of time, expire it and select new.
 	 */
-	if (cfq_slice_used(cfqq) && !cfq_cfqq_must_dispatch(cfqq))
-		goto expire;
+	if (cfq_slice_used(cfqq) && !cfq_cfqq_must_dispatch(cfqq)) {
+		/*
+		 * If slice had not expired at the completion of last request
+		 * we might not have turned on wait_busy flag. Don't expire
+		 * the queue yet. Allow the group to get backlogged.
+		 *
+		 * The very fact that we have used the slice, that means we
+		 * have been idling all along on this queue and it should be
+		 * ok to wait for this request to complete.
+		 */
+		if (cfqq->cfqg->nr_cfqq == 1 && cfqq->dispatched
+		    && cfq_should_idle(cfqd, cfqq))
+			goto keep_queue;
+		else
+			goto expire;
+	}
 
 	/*
 	 * The active queue has requests and isn't expired, allow it to
@@ -3256,6 +3270,35 @@ static void cfq_update_hw_tag(struct cfq_data *cfqd)
 		cfqd->hw_tag = 0;
 }
 
+static bool cfq_should_wait_busy(struct cfq_data *cfqd, struct cfq_queue *cfqq)
+{
+	struct cfq_io_context *cic = cfqd->active_cic;
+
+	/* If there are other queues in the group, don't wait */
+	if (cfqq->cfqg->nr_cfqq > 1)
+		return false;
+
+	if (cfq_slice_used(cfqq))
+		return true;
+
+	/* if slice left is less than think time, wait busy */
+	if (cic && sample_valid(cic->ttime_samples)
+	    && (cfqq->slice_end - jiffies < cic->ttime_mean))
+		return true;
+
+	/*
+	 * If think times is less than a jiffy than ttime_mean=0 and above
+	 * will not be true. It might happen that slice has not expired yet
+	 * but will expire soon (4-5 ns) during select_queue(). To cover the
+	 * case where think time is less than a jiffy, mark the queue wait
+	 * busy if only 1 jiffy is left in the slice.
+	 */
+	if (cfqq->slice_end - jiffies == 1)
+		return true;
+
+	return false;
+}
+
 static void cfq_completed_request(struct request_queue *q, struct request *rq)
 {
 	struct cfq_queue *cfqq = RQ_CFQQ(rq);
@@ -3295,11 +3338,10 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
 	}
 
 	/*
-	 * If this queue consumed its slice and this is last queue
-	 * in the group, wait for next request before we expire
-	 * the queue
+	 * Should we wait for next request to come in before we expire
+	 * the queue.
 	 */
-	if (cfq_slice_used(cfqq) && cfqq->cfqg->nr_cfqq == 1) {
+	if (cfq_should_wait_busy(cfqd, cfqq)) {
 		cfqq->slice_end = jiffies + cfqd->cfq_slice_idle;
 		cfq_mark_cfqq_wait_busy(cfqq);
 	}
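The three wait-busy tests that the new cfq_should_wait_busy() combines can
be exercised in isolation. Below is a self-contained sketch with invented
slice and think-time numbers; time_after_eq mirrors the kernel's wrap-safe
jiffies comparison, and the rest is plain arithmetic:

    #include <stdbool.h>
    #include <stdio.h>

    /* userspace stand-in for the kernel's wrap-safe jiffies comparison */
    #define time_after_eq(a, b) ((long)((a) - (b)) >= 0)

    static bool should_wait_busy(unsigned long now, unsigned long slice_end,
                                 bool ttime_valid, unsigned long ttime_mean)
    {
            /* slice fully used: wait for the next request (case 1) */
            if (time_after_eq(now, slice_end))
                    return true;

            /* remaining slice shorter than mean think time (case 2) */
            if (ttime_valid && slice_end - now < ttime_mean)
                    return true;

            /* sub-jiffy think times give ttime_mean == 0; catch the queue
             * just before the slice runs out instead */
            if (slice_end - now == 1)
                    return true;

            return false;
    }

    int main(void)
    {
            printf("slice used:       %d\n", should_wait_busy(100, 100, true, 5));
            printf("thinker too slow: %d\n", should_wait_busy(90, 100, true, 20));
            printf("1 jiffy left:     %d\n", should_wait_busy(99, 100, false, 0));
            printf("plenty of slice:  %d\n", should_wait_busy(50, 100, true, 5));
            return 0;
    }

Only the last call returns false; in every other case the queue is kept
around so the group does not lose its share.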
From edc71131c4dc6cc73e2a24aa0a7a79cfce738f12 Mon Sep 17 00:00:00 2001
From: Corrado Zoccolo
Date: Wed, 9 Dec 2009 20:56:04 +0100
Subject: [PATCH 07/10] cfq-iosched: commenting non-obvious initialization

Added a comment to explain the initialization of last_delayed_sync.

Signed-off-by: Corrado Zoccolo
Acked-by: Jeff Moyer
Signed-off-by: Jens Axboe
---
 block/cfq-iosched.c | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 98b15b98b85..69ecee7f4ad 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -3759,6 +3759,10 @@ static void *cfq_init_queue(struct request_queue *q)
 	cfqd->cfq_latency = 1;
 	cfqd->cfq_group_isolation = 0;
 	cfqd->hw_tag = -1;
+	/*
+	 * we optimistically start assuming sync ops weren't delayed in last
+	 * second, in order to have larger depth for async operations.
+	 */
 	cfqd->last_delayed_sync = jiffies - HZ;
 	INIT_RCU_HEAD(&cfqd->rcu);
 	return cfqd;

From 554554f60ad619e1efab01897208bc320b81d9da Mon Sep 17 00:00:00 2001
From: Gui Jianfeng
Date: Thu, 10 Dec 2009 09:38:39 +0100
Subject: [PATCH 08/10] cfq: Remove wait_request flag when idle timer is being deleted

Remove the wait_request flag when the idle timer is being deleted;
otherwise this path will be hit every time a request is enqueued.

Signed-off-by: Gui Jianfeng
Signed-off-by: Jens Axboe
---
 block/cfq-iosched.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 69ecee7f4ad..96f59ae5b6e 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -3202,6 +3202,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 		if (blk_rq_bytes(rq) > PAGE_CACHE_SIZE ||
 		    cfqd->busy_queues > 1) {
 			del_timer(&cfqd->idle_slice_timer);
+			cfq_clear_cfqq_wait_request(cfqq);
 			__blk_run_queue(cfqd->queue);
 		} else
 			cfq_mark_cfqq_must_dispatch(cfqq);
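Putting patches 01 and 07 together: a sync completion only refreshes
last_delayed_sync when it blew past its fifo deadline, and the jiffies - HZ
initialisation makes the depth ramp start fully open. A sketch of both
pieces, assuming HZ=1000 and a 125ms cfq_fifo_expire[1] default:

    #include <stdio.h>

    #define HZ 1000
    #define time_after(a, b) ((long)((b) - (a)) < 0)

    static const unsigned long fifo_expire_sync = HZ / 8; /* 125ms, assumed */

    /* completion side: a sync request counts as "delayed" only if it
     * finished at or past its fifo deadline (patch 01) */
    static void complete_sync(unsigned long *last_delayed_sync,
                              unsigned long start, unsigned long now)
    {
            if (!time_after(start + fifo_expire_sync, now))
                    *last_delayed_sync = now;
    }

    int main(void)
    {
            unsigned long jiffies = 10000;
            /* patch 07: pretend the last delayed sync was a second ago, so
             * async depth is unthrottled before any real delay is seen */
            unsigned long last_delayed_sync = jiffies - HZ;

            complete_sync(&last_delayed_sync, jiffies - HZ / 20, jiffies);
            printf("after fast sync: lag %lu jiffies\n", jiffies - last_delayed_sync);

            complete_sync(&last_delayed_sync, jiffies - HZ / 4, jiffies);
            printf("after slow sync: lag %lu jiffies\n", jiffies - last_delayed_sync);
            return 0;
    }

The fast completion leaves the lag at HZ (ramp stays open); the slow one
resets it to 0, so async writes are throttled again.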
From 82bbbf28db4beefcd8b897800153e21378270cd1 Mon Sep 17 00:00:00 2001
From: Vivek Goyal
Date: Thu, 10 Dec 2009 19:25:41 +0100
Subject: [PATCH 09/10] Fix a CFQ crash in "for-2.6.33" branch of block tree

I think my previous patch introduced a bug which can lead to CFQ hitting
a BUG_ON(). The offending commit in the for-2.6.33 branch is:

    commit 7667aa0630407bc07dc38dcc79d29cc0a65553c1
    Author: Vivek Goyal
    Date:   Tue Dec 8 17:52:58 2009 -0500

        cfq-iosched: Take care of corner cases of group losing share due to deletion

While doing some stress testing on my box, I encountered the following:

login: [ 3165.148841] BUG: scheduling while atomic: swapper/0/0x10000100
[ 3165.149821] Modules linked in: cfq_iosched dm_multipath qla2xxx igb scsi_transport_fc dm_snapshot [last unloaded: scsi_wait_scan]
[ 3165.149821] Pid: 0, comm: swapper Not tainted 2.6.32-block-for-33-merged-new #3
[ 3165.149821] Call Trace:
[ 3165.149821]  [] __schedule_bug+0x5c/0x60
[ 3165.149821]  [] ? __wake_up+0x44/0x4d
[ 3165.149821]  [] schedule+0xe3/0x7bc
[ 3165.149821]  [] ? cpumask_next+0x1d/0x1f
[ 3165.149821]  [] ? cfq_dispatch_requests+0x6ba/0x93e [cfq_iosched]
[ 3165.149821]  [] __cond_resched+0x2a/0x35
[ 3165.149821]  [] ? cfq_dispatch_requests+0x6ba/0x93e [cfq_iosched]
[ 3165.149821]  [] _cond_resched+0x2c/0x37
[ 3165.149821]  [] is_valid_bugaddr+0x16/0x2f
[ 3165.149821]  [] report_bug+0x18/0xac
[ 3165.149821]  [] die+0x39/0x63
[ 3165.149821]  [] do_trap+0x11a/0x129
[ 3165.149821]  [] do_invalid_op+0x96/0x9f
[ 3165.149821]  [] ? cfq_dispatch_requests+0x6ba/0x93e [cfq_iosched]
[ 3165.149821]  [] ? enqueue_task+0x5c/0x67
[ 3165.149821]  [] ? task_rq_unlock+0x11/0x13
[ 3165.149821]  [] ? try_to_wake_up+0x292/0x2a4
[ 3165.149821]  [] invalid_op+0x15/0x20
[ 3165.149821]  [] ? cfq_dispatch_requests+0x6ba/0x93e [cfq_iosched]
[ 3165.149821]  [] ? virt_to_head_page+0xe/0x2f
[ 3165.149821]  [] blk_peek_request+0x191/0x1a7
[ 3165.149821]  [] ? kobject_get+0x1a/0x21
[ 3165.149821]  [] scsi_request_fn+0x82/0x3df
[ 3165.149821]  [] ? bio_fs_destructor+0x15/0x17
[ 3165.149821]  [] ? virt_to_head_page+0xe/0x2f
[ 3165.149821]  [] __blk_run_queue+0x42/0x71
[ 3165.149821]  [] blk_run_queue+0x26/0x3a
[ 3165.149821]  [] scsi_run_queue+0x2de/0x375
[ 3165.149821]  [] ? put_device+0x17/0x19
[ 3165.149821]  [] scsi_next_command+0x3b/0x4b
[ 3165.149821]  [] scsi_io_completion+0x1c9/0x3f5
[ 3165.149821]  [] scsi_finish_command+0xb5/0xbe

I think I have hit the following BUG_ON() in cfq_dispatch_request():

	BUG_ON(RB_EMPTY_ROOT(&cfqq->sort_list));

Please find attached the patch to fix it. I have done some stress testing
with it and have not seen it happening again.

o We should wait on a queue even after slice expiry only if it is empty.
  If the queue is not empty, then continue to expire it.

o If we decide to keep the queue, then make cfqq = NULL. Otherwise
  select_queue() will return a valid cfqq and cfq_dispatch_request() can
  hit the following BUG_ON():

	BUG_ON(RB_EMPTY_ROOT(&cfqq->sort_list))

Reviewed-by: Jeff Moyer
Signed-off-by: Vivek Goyal
Signed-off-by: Jens Axboe
---
 block/cfq-iosched.c | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 96f59ae5b6e..f3f62394b98 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -2151,10 +2151,11 @@ static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
 		 * have been idling all along on this queue and it should be
 		 * ok to wait for this request to complete.
 		 */
-		if (cfqq->cfqg->nr_cfqq == 1 && cfqq->dispatched
-		    && cfq_should_idle(cfqd, cfqq))
+		if (cfqq->cfqg->nr_cfqq == 1 && RB_EMPTY_ROOT(&cfqq->sort_list)
+		    && cfqq->dispatched && cfq_should_idle(cfqd, cfqq)) {
+			cfqq = NULL;
 			goto keep_queue;
-		else
+		} else
 			goto expire;
 	}

From 66ae291978177d5c012015f12b8fbc76dc7d0965 Mon Sep 17 00:00:00 2001
From: Gui Jianfeng
Date: Tue, 15 Dec 2009 10:08:45 +0100
Subject: [PATCH 10/10] cfq: set workload as expired if it doesn't have any slice left

When a group is resumed, if it doesn't have any workload slice left, we
should set workload_expires as expired. Otherwise, we might erroneously
resume from where we left off in the previous group. Thanks to Corrado
for the idea.

Signed-off-by: Gui Jianfeng
Signed-off-by: Jens Axboe
---
 block/cfq-iosched.c | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index f3f62394b98..e2f80463ed0 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -2113,7 +2113,9 @@ static void cfq_choose_cfqg(struct cfq_data *cfqd)
 		cfqd->workload_expires = jiffies + cfqg->saved_workload_slice;
 		cfqd->serving_type = cfqg->saved_workload;
 		cfqd->serving_prio = cfqg->saved_serving_prio;
-	}
+	} else
+		cfqd->workload_expires = jiffies - 1;
+
 	choose_service_tree(cfqd, cfqg);
 }
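The jiffies - 1 trick in patch 10 simply plants the expiry in the past so
the next comparison sees the workload as already expired. A compact sketch
of that resume decision, with toy types standing in for cfq's:

    #include <stdio.h>

    #define time_before(a, b) ((long)((a) - (b)) < 0)

    struct toy_group {
            long saved_workload_slice;  /* leftover slice, 0 if drained */
    };

    /* on resuming a group: restore the leftover slice, or mark the
     * workload expired so a fresh service tree is chosen */
    static unsigned long resume_expiry(const struct toy_group *g,
                                       unsigned long now)
    {
            if (g->saved_workload_slice)
                    return now + g->saved_workload_slice;
            return now - 1;     /* already in the past => expired */
    }

    int main(void)
    {
            struct toy_group drained = { 0 }, fresh = { 50 };
            unsigned long now = 1000;

            printf("drained group expired: %d\n",
                   time_before(resume_expiry(&drained, now), now));
            printf("fresh group expired:   %d\n",
                   time_before(resume_expiry(&fresh, now), now));
            return 0;
    }

Without the else branch, a drained group kept its stale workload_expires
and CFQ could resume the wrong workload type where the previous group
left off.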