mirror of https://github.com/adulau/aha.git
Merge branch 'for-2.6.33' of git://git.kernel.dk/linux-2.6-block
* 'for-2.6.33' of git://git.kernel.dk/linux-2.6-block:
cfq: set workload as expired if it doesn't have any slice left
Fix a CFQ crash in "for-2.6.33" branch of block tree
cfq: Remove wait_request flag when idle time is being deleted
cfq-iosched: commenting non-obvious initialization
cfq-iosched: Take care of corner cases of group losing share due to deletion
cfq-iosched: Get rid of cfqq wait_busy_done flag
cfq: Optimization for close cooperating queue searching
block,xd: Delay allocation of DMA buffers until device is known
drbd: Following the hmac change to SHASH (see linux commit 8bd1209cff)
cfq-iosched: reduce write depth only if sync was delayed
commit 51b736b851
3 changed files with 95 additions and 32 deletions
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
@@ -283,7 +283,7 @@ struct cfq_data {
	 */
	struct cfq_queue oom_cfqq;

-	unsigned long last_end_sync_rq;
+	unsigned long last_delayed_sync;

	/* List of cfq groups being managed on this device*/
	struct hlist_head cfqg_list;
@@ -319,7 +319,6 @@ enum cfqq_state_flags {
	CFQ_CFQQ_FLAG_coop,		/* cfqq is shared */
	CFQ_CFQQ_FLAG_deep,		/* sync cfqq experienced large depth */
	CFQ_CFQQ_FLAG_wait_busy,	/* Waiting for next request */
-	CFQ_CFQQ_FLAG_wait_busy_done,	/* Got new request. Expire the queue */
 };

 #define CFQ_CFQQ_FNS(name)						\
@@ -348,7 +347,6 @@ CFQ_CFQQ_FNS(sync);
 CFQ_CFQQ_FNS(coop);
 CFQ_CFQQ_FNS(deep);
 CFQ_CFQQ_FNS(wait_busy);
-CFQ_CFQQ_FNS(wait_busy_done);
 #undef CFQ_CFQQ_FNS

 #ifdef CONFIG_DEBUG_CFQ_IOSCHED
@@ -1574,7 +1572,6 @@ __cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,

	cfq_clear_cfqq_wait_request(cfqq);
	cfq_clear_cfqq_wait_busy(cfqq);
-	cfq_clear_cfqq_wait_busy_done(cfqq);

	/*
	 * store what was left of this slice, if the queue idled/timed out
@@ -1749,6 +1746,12 @@ static struct cfq_queue *cfq_close_cooperator(struct cfq_data *cfqd,
	if (CFQQ_SEEKY(cur_cfqq))
		return NULL;

+	/*
+	 * Don't search priority tree if it's the only queue in the group.
+	 */
+	if (cur_cfqq->cfqg->nr_cfqq == 1)
+		return NULL;
+
	/*
	 * We should notice if some of the queues are cooperating, eg
	 * working closely on the same area of the disk. In that case,
@@ -2110,7 +2113,9 @@ static void cfq_choose_cfqg(struct cfq_data *cfqd)
		cfqd->workload_expires = jiffies + cfqg->saved_workload_slice;
		cfqd->serving_type = cfqg->saved_workload;
		cfqd->serving_prio = cfqg->saved_serving_prio;
-	}
+	} else
+		cfqd->workload_expires = jiffies - 1;

	choose_service_tree(cfqd, cfqg);
 }
@@ -2128,12 +2133,33 @@ static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)

	if (!cfqd->rq_queued)
		return NULL;

+	/*
+	 * We were waiting for group to get backlogged. Expire the queue
+	 */
+	if (cfq_cfqq_wait_busy(cfqq) && !RB_EMPTY_ROOT(&cfqq->sort_list))
+		goto expire;
+
	/*
	 * The active queue has run out of time, expire it and select new.
	 */
-	if ((cfq_slice_used(cfqq) || cfq_cfqq_wait_busy_done(cfqq))
-	    && !cfq_cfqq_must_dispatch(cfqq))
-		goto expire;
+	if (cfq_slice_used(cfqq) && !cfq_cfqq_must_dispatch(cfqq)) {
+		/*
+		 * If slice had not expired at the completion of last request
+		 * we might not have turned on wait_busy flag. Don't expire
+		 * the queue yet. Allow the group to get backlogged.
+		 *
+		 * The very fact that we have used the slice, that means we
+		 * have been idling all along on this queue and it should be
+		 * ok to wait for this request to complete.
+		 */
+		if (cfqq->cfqg->nr_cfqq == 1 && RB_EMPTY_ROOT(&cfqq->sort_list)
+		    && cfqq->dispatched && cfq_should_idle(cfqd, cfqq)) {
+			cfqq = NULL;
+			goto keep_queue;
+		} else
+			goto expire;
+	}

	/*
	 * The active queue has requests and isn't expired, allow it to
@@ -2264,7 +2290,7 @@ static bool cfq_may_dispatch(struct cfq_data *cfqd, struct cfq_queue *cfqq)
	 * based on the last sync IO we serviced
	 */
	if (!cfq_cfqq_sync(cfqq) && cfqd->cfq_latency) {
-		unsigned long last_sync = jiffies - cfqd->last_end_sync_rq;
+		unsigned long last_sync = jiffies - cfqd->last_delayed_sync;
		unsigned int depth;

		depth = last_sync / cfqd->cfq_slice[1];
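The rename matters here: the field now records the last time a sync request completed late, not simply the last sync completion, and the throttle divides the time since that event by the sync slice to size the async dispatch depth. A rough standalone sketch of that arithmetic, with hypothetical constants standing in for cfqd->cfq_slice[1] and cfqd->cfq_quantum (not kernel code):

#include <stdio.h>

/* Hypothetical stand-ins; times are in jiffies. */
#define CFQ_SLICE_SYNC	100	/* cfqd->cfq_slice[1] */
#define CFQ_QUANTUM	8	/* cfqd->cfq_quantum */

static unsigned int async_depth(unsigned long now, unsigned long last_delayed_sync)
{
	unsigned long last_sync = now - last_delayed_sync;
	unsigned int depth = last_sync / CFQ_SLICE_SYNC;

	if (!depth)
		depth = 1;	/* always let one async request through */
	return depth < CFQ_QUANTUM ? depth : CFQ_QUANTUM;
}

int main(void)
{
	/* No delayed sync for a full second: async writes get the quantum. */
	printf("depth = %u\n", async_depth(5000, 4000));	/* 10, capped at 8 */
	/* A sync completed late 150 jiffies ago: depth throttled to 1. */
	printf("depth = %u\n", async_depth(5000, 4850));	/* 150 / 100 = 1 */
	return 0;
}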
@@ -3165,10 +3191,6 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
	cfqq->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq);

	if (cfqq == cfqd->active_queue) {
-		if (cfq_cfqq_wait_busy(cfqq)) {
-			cfq_clear_cfqq_wait_busy(cfqq);
-			cfq_mark_cfqq_wait_busy_done(cfqq);
-		}
		/*
		 * Remember that we saw a request from this process, but
		 * don't start queuing just yet. Otherwise we risk seeing lots
@@ -3183,6 +3205,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
		if (blk_rq_bytes(rq) > PAGE_CACHE_SIZE ||
		    cfqd->busy_queues > 1) {
			del_timer(&cfqd->idle_slice_timer);
+			cfq_clear_cfqq_wait_request(cfqq);
			__blk_run_queue(cfqd->queue);
		} else
			cfq_mark_cfqq_must_dispatch(cfqq);
@@ -3251,6 +3274,35 @@ static void cfq_update_hw_tag(struct cfq_data *cfqd)
		cfqd->hw_tag = 0;
 }

+static bool cfq_should_wait_busy(struct cfq_data *cfqd, struct cfq_queue *cfqq)
+{
+	struct cfq_io_context *cic = cfqd->active_cic;
+
+	/* If there are other queues in the group, don't wait */
+	if (cfqq->cfqg->nr_cfqq > 1)
+		return false;
+
+	if (cfq_slice_used(cfqq))
+		return true;
+
+	/* if slice left is less than think time, wait busy */
+	if (cic && sample_valid(cic->ttime_samples)
+	    && (cfqq->slice_end - jiffies < cic->ttime_mean))
+		return true;
+
+	/*
+	 * If think times is less than a jiffy than ttime_mean=0 and above
+	 * will not be true. It might happen that slice has not expired yet
+	 * but will expire soon (4-5 ns) during select_queue(). To cover the
+	 * case where think time is less than a jiffy, mark the queue wait
+	 * busy if only 1 jiffy is left in the slice.
+	 */
+	if (cfqq->slice_end - jiffies == 1)
+		return true;
+
+	return false;
+}
+
 static void cfq_completed_request(struct request_queue *q, struct request *rq)
 {
	struct cfq_queue *cfqq = RQ_CFQQ(rq);
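The new helper bundles three reasons to keep an otherwise-idle queue alive so its group stays backlogged. Restated outside the kernel as a minimal sketch, with flattened hypothetical inputs in place of the cfqq and cic structures:

#include <stdbool.h>
#include <stdio.h>

/*
 * Hypothetical, flattened restatement of cfq_should_wait_busy():
 * nr_cfqq, slice_used, ttime_mean and slice_left stand in for the
 * fields the kernel helper reads; times are in jiffies.
 */
static bool should_wait_busy(int nr_cfqq, bool slice_used,
			     unsigned long ttime_mean, long slice_left)
{
	/* If there are other queues in the group, don't wait. */
	if (nr_cfqq > 1)
		return false;

	if (slice_used)
		return true;

	/* Slice left is less than the process's mean think time. */
	if (ttime_mean && slice_left < (long)ttime_mean)
		return true;

	/* Sub-jiffy think times leave ttime_mean == 0; catch the last jiffy. */
	if (slice_left == 1)
		return true;

	return false;
}

int main(void)
{
	printf("%d\n", should_wait_busy(2, true, 5, 1));	/* 0: group not alone */
	printf("%d\n", should_wait_busy(1, false, 8, 4));	/* 1: think time > slice left */
	printf("%d\n", should_wait_busy(1, false, 0, 1));	/* 1: one jiffy left */
	return 0;
}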
@@ -3273,7 +3325,8 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)

	if (sync) {
		RQ_CIC(rq)->last_end_request = now;
-		cfqd->last_end_sync_rq = now;
+		if (!time_after(rq->start_time + cfqd->cfq_fifo_expire[1], now))
+			cfqd->last_delayed_sync = now;
	}

	/*
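With this change, last_delayed_sync only advances when a sync request completes past its fifo deadline, so one slow sync throttles writers for a while but a healthy sync stream does not. A hedged sketch of the same test, where fifo_expire_sync stands in for cfqd->cfq_fifo_expire[1]:

#include <stdbool.h>
#include <stdio.h>

/* Kernel-style wraparound-safe time comparison (true if a is after b). */
#define time_after(a, b)	((long)((b) - (a)) < 0)

/* A sync request counts as "delayed" if it completed past its deadline. */
static bool sync_was_delayed(unsigned long start_time,
			     unsigned long fifo_expire_sync, unsigned long now)
{
	return !time_after(start_time + fifo_expire_sync, now);
}

int main(void)
{
	printf("%d\n", sync_was_delayed(1000, 125, 1100));	/* 0: in time */
	printf("%d\n", sync_was_delayed(1000, 125, 1200));	/* 1: delayed */
	return 0;
}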
@@ -3289,11 +3342,10 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
	}

	/*
-	 * If this queue consumed its slice and this is last queue
-	 * in the group, wait for next request before we expire
-	 * the queue
+	 * Should we wait for next request to come in before we expire
+	 * the queue.
	 */
-	if (cfq_slice_used(cfqq) && cfqq->cfqg->nr_cfqq == 1) {
+	if (cfq_should_wait_busy(cfqd, cfqq)) {
		cfqq->slice_end = jiffies + cfqd->cfq_slice_idle;
		cfq_mark_cfqq_wait_busy(cfqq);
	}
@@ -3711,7 +3763,11 @@ static void *cfq_init_queue(struct request_queue *q)
	cfqd->cfq_latency = 1;
	cfqd->cfq_group_isolation = 0;
	cfqd->hw_tag = -1;
-	cfqd->last_end_sync_rq = jiffies;
+	/*
+	 * we optimistically start assuming sync ops weren't delayed in last
+	 * second, in order to have larger depth for async operations.
+	 */
+	cfqd->last_delayed_sync = jiffies - HZ;
	INIT_RCU_HEAD(&cfqd->rcu);
	return cfqd;
 }
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
@@ -1271,8 +1271,7 @@ static int drbd_nl_net_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
		goto fail;
	}

-	if (crypto_tfm_alg_type(crypto_hash_tfm(tfm))
-	    != CRYPTO_ALG_TYPE_HASH) {
+	if (crypto_tfm_alg_type(crypto_hash_tfm(tfm)) != CRYPTO_ALG_TYPE_SHASH) {
		retcode = ERR_AUTH_ALG_ND;
		goto fail;
	}
diff --git a/drivers/block/xd.c b/drivers/block/xd.c
@@ -169,13 +169,6 @@ static int __init xd_init(void)

	init_timer (&xd_watchdog_int); xd_watchdog_int.function = xd_watchdog;

-	if (!xd_dma_buffer)
-		xd_dma_buffer = (char *)xd_dma_mem_alloc(xd_maxsectors * 0x200);
-	if (!xd_dma_buffer) {
-		printk(KERN_ERR "xd: Out of memory.\n");
-		return -ENOMEM;
-	}
-
	err = -EBUSY;
	if (register_blkdev(XT_DISK_MAJOR, "xd"))
		goto out1;
@@ -202,6 +195,19 @@ static int __init xd_init(void)
			xd_drives,xd_drives == 1 ? "" : "s",xd_irq,xd_dma);
	}

+	/*
+	 * With the drive detected, xd_maxsectors should now be known.
+	 * If xd_maxsectors is 0, nothing was detected and we fall through
+	 * to return -ENODEV
+	 */
+	if (!xd_dma_buffer && xd_maxsectors) {
+		xd_dma_buffer = (char *)xd_dma_mem_alloc(xd_maxsectors * 0x200);
+		if (!xd_dma_buffer) {
+			printk(KERN_ERR "xd: Out of memory.\n");
+			goto out3;
+		}
+	}
+
	err = -ENODEV;
	if (!xd_drives)
		goto out3;
@@ -249,15 +255,17 @@ out4:
	for (i = 0; i < xd_drives; i++)
		put_disk(xd_gendisk[i]);
 out3:
-	release_region(xd_iobase,4);
+	if (xd_maxsectors)
+		release_region(xd_iobase,4);
+
+	if (xd_dma_buffer)
+		xd_dma_mem_free((unsigned long)xd_dma_buffer,
+				xd_maxsectors * 0x200);
 out2:
	blk_cleanup_queue(xd_queue);
 out1a:
	unregister_blkdev(XT_DISK_MAJOR, "xd");
 out1:
-	if (xd_dma_buffer)
-		xd_dma_mem_free((unsigned long)xd_dma_buffer,
-				xd_maxsectors * 0x200);
	return err;
 Enomem:
	err = -ENOMEM;
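Taken together, the xd hunks move the DMA buffer allocation from module init, where xd_maxsectors is still 0, to after drive detection, and rearrange the error path so the buffer is freed on the same label that undoes probing. The pattern in isolation, as a hedged sketch with hypothetical names (malloc in place of the driver's xd_dma_mem_alloc):

#include <stdio.h>
#include <stdlib.h>

static char *dma_buffer;
static unsigned int max_sectors;	/* 0 until a device was detected */

/* Allocate only once probing has established the real buffer size. */
static int attach_after_probe(void)
{
	if (!dma_buffer && max_sectors) {
		dma_buffer = malloc(max_sectors * 0x200);
		if (!dma_buffer) {
			fprintf(stderr, "out of memory\n");
			return -1;	/* -ENOMEM in the driver */
		}
	}
	return 0;	/* max_sectors == 0 falls through to "no device" */
}

int main(void)
{
	max_sectors = 16;	/* pretend probing found a drive */
	return attach_after_probe() ? EXIT_FAILURE : EXIT_SUCCESS;
}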