Mirror of https://github.com/adulau/aha.git (synced 2024-12-28 03:36:19 +00:00)

Merge branch 'for-linus' into for-2.6.33

This commit is contained in commit c30f33437c.

15 changed files with 228 additions and 201 deletions
@@ -70,7 +70,7 @@ static void drive_stat_acct(struct request *rq, int new_io)
         part_stat_inc(cpu, part, merges[rw]);
     else {
         part_round_stats(cpu, part);
-        part_inc_in_flight(part);
+        part_inc_in_flight(part, rw);
     }
 
     part_stat_unlock();
@@ -1030,9 +1030,9 @@ static void part_round_stats_single(int cpu, struct hd_struct *part,
     if (now == part->stamp)
         return;
 
-    if (part->in_flight) {
+    if (part_in_flight(part)) {
         __part_stat_add(cpu, part, time_in_queue,
-                part->in_flight * (now - part->stamp));
+                part_in_flight(part) * (now - part->stamp));
         __part_stat_add(cpu, part, io_ticks, (now - part->stamp));
     }
     part->stamp = now;
@@ -1739,7 +1739,7 @@ static void blk_account_io_done(struct request *req)
         part_stat_inc(cpu, part, ios[rw]);
         part_stat_add(cpu, part, ticks[rw], duration);
         part_round_stats(cpu, part);
-        part_dec_in_flight(part);
+        part_dec_in_flight(part, rw);
 
         part_stat_unlock();
     }
@@ -2492,14 +2492,6 @@ int kblockd_schedule_work(struct request_queue *q, struct work_struct *work)
 }
 EXPORT_SYMBOL(kblockd_schedule_work);
 
-int kblockd_schedule_delayed_work(struct request_queue *q,
-                  struct delayed_work *work,
-                  unsigned long delay)
-{
-    return queue_delayed_work(kblockd_workqueue, work, delay);
-}
-EXPORT_SYMBOL(kblockd_schedule_delayed_work);
-
 int __init blk_dev_init(void)
 {
     BUILD_BUG_ON(__REQ_NR_BITS > 8 *
@@ -351,7 +351,7 @@ static void blk_account_io_merge(struct request *req)
         part = disk_map_sector_rcu(req->rq_disk, blk_rq_pos(req));
 
         part_round_stats(cpu, part);
-        part_dec_in_flight(part);
+        part_dec_in_flight(part, rq_data_dir(req));
 
         part_stat_unlock();
     }
@@ -242,7 +242,7 @@ EXPORT_SYMBOL(blk_queue_max_hw_sectors);
 /**
  * blk_queue_max_discard_sectors - set max sectors for a single discard
  * @q:  the request queue for the device
- * @max_discard: maximum number of sectors to discard
+ * @max_discard_sectors: maximum number of sectors to discard
  **/
 void blk_queue_max_discard_sectors(struct request_queue *q,
                    unsigned int max_discard_sectors)
@@ -359,7 +359,7 @@ int blk_queue_start_tag(struct request_queue *q, struct request *rq)
             max_depth -= 2;
             if (!max_depth)
                 max_depth = 1;
-            if (q->in_flight[0] > max_depth)
+            if (q->in_flight[BLK_RW_ASYNC] > max_depth)
                 return 1;
         }
 
@@ -150,7 +150,7 @@ struct cfq_data {
      * idle window management
      */
     struct timer_list idle_slice_timer;
-    struct delayed_work unplug_work;
+    struct work_struct unplug_work;
 
     struct cfq_queue *active_queue;
     struct cfq_io_context *active_cic;
@@ -230,7 +230,7 @@ CFQ_CFQQ_FNS(coop);
     blk_add_trace_msg((cfqd)->queue, "cfq " fmt, ##args)
 
 static void cfq_dispatch_insert(struct request_queue *, struct request *);
-static struct cfq_queue *cfq_get_queue(struct cfq_data *, int,
+static struct cfq_queue *cfq_get_queue(struct cfq_data *, bool,
                        struct io_context *, gfp_t);
 static struct cfq_io_context *cfq_cic_lookup(struct cfq_data *,
                        struct io_context *);
@@ -241,40 +241,35 @@ static inline int rq_in_driver(struct cfq_data *cfqd)
 }
 
 static inline struct cfq_queue *cic_to_cfqq(struct cfq_io_context *cic,
-                        int is_sync)
+                        bool is_sync)
 {
-    return cic->cfqq[!!is_sync];
+    return cic->cfqq[is_sync];
 }
 
 static inline void cic_set_cfqq(struct cfq_io_context *cic,
-                struct cfq_queue *cfqq, int is_sync)
+                struct cfq_queue *cfqq, bool is_sync)
 {
-    cic->cfqq[!!is_sync] = cfqq;
+    cic->cfqq[is_sync] = cfqq;
 }
 
 /*
  * We regard a request as SYNC, if it's either a read or has the SYNC bit
  * set (in which case it could also be direct WRITE).
  */
-static inline int cfq_bio_sync(struct bio *bio)
+static inline bool cfq_bio_sync(struct bio *bio)
 {
-    if (bio_data_dir(bio) == READ || bio_rw_flagged(bio, BIO_RW_SYNCIO))
-        return 1;
-
-    return 0;
+    return bio_data_dir(bio) == READ || bio_rw_flagged(bio, BIO_RW_SYNCIO);
 }
 
 /*
  * scheduler run of queue, if there are requests pending and no one in the
  * driver that will restart queueing
  */
-static inline void cfq_schedule_dispatch(struct cfq_data *cfqd,
-                     unsigned long delay)
+static inline void cfq_schedule_dispatch(struct cfq_data *cfqd)
 {
     if (cfqd->busy_queues) {
         cfq_log(cfqd, "schedule dispatch");
-        kblockd_schedule_delayed_work(cfqd->queue, &cfqd->unplug_work,
-                        delay);
+        kblockd_schedule_work(cfqd->queue, &cfqd->unplug_work);
     }
 }
 
@@ -290,7 +285,7 @@ static int cfq_queue_empty(struct request_queue *q)
  * if a queue is marked sync and has sync io queued. A sync queue with async
  * io only, should not get full sync slice length.
  */
-static inline int cfq_prio_slice(struct cfq_data *cfqd, int sync,
+static inline int cfq_prio_slice(struct cfq_data *cfqd, bool sync,
                  unsigned short prio)
 {
     const int base_slice = cfqd->cfq_slice[sync];
@@ -318,7 +313,7 @@ cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
  * isn't valid until the first request from the dispatch is activated
  * and the slice time set.
  */
-static inline int cfq_slice_used(struct cfq_queue *cfqq)
+static inline bool cfq_slice_used(struct cfq_queue *cfqq)
 {
     if (cfq_cfqq_slice_new(cfqq))
         return 0;
@@ -493,7 +488,7 @@ static unsigned long cfq_slice_offset(struct cfq_data *cfqd,
  * we will service the queues.
  */
 static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
-                 int add_front)
+                 bool add_front)
 {
     struct rb_node **p, *parent;
     struct cfq_queue *__cfqq;
@@ -509,11 +504,20 @@ static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
         } else
             rb_key += jiffies;
     } else if (!add_front) {
+        /*
+         * Get our rb key offset. Subtract any residual slice
+         * value carried from last service. A negative resid
+         * count indicates slice overrun, and this should position
+         * the next service time further away in the tree.
+         */
         rb_key = cfq_slice_offset(cfqd, cfqq) + jiffies;
-        rb_key += cfqq->slice_resid;
+        rb_key -= cfqq->slice_resid;
         cfqq->slice_resid = 0;
-    } else
-        rb_key = 0;
+    } else {
+        rb_key = -HZ;
+        __cfqq = cfq_rb_first(&cfqd->service_tree);
+        rb_key += __cfqq ? __cfqq->rb_key : jiffies;
+    }
 
     if (!RB_EMPTY_NODE(&cfqq->rb_node)) {
         /*
@@ -547,7 +551,7 @@ static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
             n = &(*p)->rb_left;
         else if (cfq_class_idle(cfqq) > cfq_class_idle(__cfqq))
             n = &(*p)->rb_right;
-        else if (rb_key < __cfqq->rb_key)
+        else if (time_before(rb_key, __cfqq->rb_key))
             n = &(*p)->rb_left;
         else
             n = &(*p)->rb_right;
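The hunks above decide where a queue is (re)inserted in the service tree: subtracting the residual slice pushes queues that overran their slice further out, and the add_front case now anchors the key just before the current head of the tree rather than at a flat zero. Below is a minimal user-space sketch of that key computation only (plain C; constants and names are invented for illustration, the real code works in jiffies on the cfqq fields shown in the diff):

    #include <stdio.h>

    /* Toy model of the rb_key choice in cfq_service_tree_add(). */
    static long compute_rb_key(long now, long slice_offset, long slice_resid,
                               int add_front, int have_first, long first_key)
    {
        if (add_front) {
            /* Anchor just before the current head of the tree (or "now"). */
            long key = -100;                 /* stands in for -HZ */
            key += have_first ? first_key : now;
            return key;
        }
        /* Normal case: offset from now, pushed out by any slice overrun. */
        return now + slice_offset - slice_resid;
    }

    int main(void)
    {
        printf("normal:  %ld\n", compute_rb_key(1000, 50, 0, 0, 0, 0));
        printf("overrun: %ld\n", compute_rb_key(1000, 50, -20, 0, 0, 0));
        printf("front:   %ld\n", compute_rb_key(1000, 50, 0, 1, 1, 990));
        return 0;
    }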
@@ -827,8 +831,10 @@ cfq_merged_requests(struct request_queue *q, struct request *rq,
      * reposition in fifo if next is older than rq
      */
     if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist) &&
-        time_before(next->start_time, rq->start_time))
+        time_before(rq_fifo_time(next), rq_fifo_time(rq))) {
         list_move(&rq->queuelist, &next->queuelist);
+        rq_set_fifo_time(rq, rq_fifo_time(next));
+    }
 
     cfq_remove_request(next);
 }
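With this change the FIFO deadline travels with the request rather than being recomputed from start_time: cfq_insert_request stamps the request with an absolute expiry (see the later cfq_insert_request hunk), cfq_check_fifo compares that stamp against jiffies, and a merge copies the earlier deadline onto the surviving request. A small stand-alone model of that bookkeeping (plain C; struct and function names here are illustrative only):

    #include <stdio.h>

    struct toy_rq {
        long fifo_time;               /* absolute expiry, like rq_fifo_time() */
    };

    static void toy_merge(struct toy_rq *rq, struct toy_rq *next)
    {
        /* Keep the earlier (older) deadline when rq absorbs next. */
        if (next->fifo_time < rq->fifo_time)
            rq->fifo_time = next->fifo_time;
    }

    static int toy_fifo_expired(const struct toy_rq *rq, long now)
    {
        return now >= rq->fifo_time;
    }

    int main(void)
    {
        struct toy_rq a = { .fifo_time = 200 }, b = { .fifo_time = 150 };

        toy_merge(&a, &b);
        printf("merged deadline=%ld expired@180=%d\n",
               a.fifo_time, toy_fifo_expired(&a, 180));
        return 0;
    }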
@@ -844,7 +850,7 @@ static int cfq_allow_merge(struct request_queue *q, struct request *rq,
      * Disallow merge of a sync bio into an async request.
      */
     if (cfq_bio_sync(bio) && !rq_is_sync(rq))
-        return 0;
+        return false;
 
     /*
      * Lookup the cfqq that this bio will be queued with. Allow
@@ -852,13 +858,10 @@ static int cfq_allow_merge(struct request_queue *q, struct request *rq,
      */
     cic = cfq_cic_lookup(cfqd, current->io_context);
     if (!cic)
-        return 0;
+        return false;
 
     cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio));
-    if (cfqq == RQ_CFQQ(rq))
-        return 1;
-
-    return 0;
+    return cfqq == RQ_CFQQ(rq);
 }
 
 static void __cfq_set_active_queue(struct cfq_data *cfqd,
|
@ -886,7 +889,7 @@ static void __cfq_set_active_queue(struct cfq_data *cfqd,
|
||||||
*/
|
*/
|
||||||
static void
|
static void
|
||||||
__cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
|
__cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
|
||||||
int timed_out)
|
bool timed_out)
|
||||||
{
|
{
|
||||||
cfq_log_cfqq(cfqd, cfqq, "slice expired t=%d", timed_out);
|
cfq_log_cfqq(cfqd, cfqq, "slice expired t=%d", timed_out);
|
||||||
|
|
||||||
|
@ -914,7 +917,7 @@ __cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline void cfq_slice_expired(struct cfq_data *cfqd, int timed_out)
|
static inline void cfq_slice_expired(struct cfq_data *cfqd, bool timed_out)
|
||||||
{
|
{
|
||||||
struct cfq_queue *cfqq = cfqd->active_queue;
|
struct cfq_queue *cfqq = cfqd->active_queue;
|
||||||
|
|
||||||
|
@ -1026,7 +1029,7 @@ static struct cfq_queue *cfqq_close(struct cfq_data *cfqd,
|
||||||
*/
|
*/
|
||||||
static struct cfq_queue *cfq_close_cooperator(struct cfq_data *cfqd,
|
static struct cfq_queue *cfq_close_cooperator(struct cfq_data *cfqd,
|
||||||
struct cfq_queue *cur_cfqq,
|
struct cfq_queue *cur_cfqq,
|
||||||
int probe)
|
bool probe)
|
||||||
{
|
{
|
||||||
struct cfq_queue *cfqq;
|
struct cfq_queue *cfqq;
|
||||||
|
|
||||||
|
@ -1090,6 +1093,15 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd)
|
||||||
if (!cic || !atomic_read(&cic->ioc->nr_tasks))
|
if (!cic || !atomic_read(&cic->ioc->nr_tasks))
|
||||||
return;
|
return;
|
||||||
|
|
||||||
|
/*
|
||||||
|
* If our average think time is larger than the remaining time
|
||||||
|
* slice, then don't idle. This avoids overrunning the allotted
|
||||||
|
* time slice.
|
||||||
|
*/
|
||||||
|
if (sample_valid(cic->ttime_samples) &&
|
||||||
|
(cfqq->slice_end - jiffies < cic->ttime_mean))
|
||||||
|
return;
|
||||||
|
|
||||||
cfq_mark_cfqq_wait_request(cfqq);
|
cfq_mark_cfqq_wait_request(cfqq);
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
@ -1129,9 +1141,7 @@ static void cfq_dispatch_insert(struct request_queue *q, struct request *rq)
|
||||||
*/
|
*/
|
||||||
static struct request *cfq_check_fifo(struct cfq_queue *cfqq)
|
static struct request *cfq_check_fifo(struct cfq_queue *cfqq)
|
||||||
{
|
{
|
||||||
struct cfq_data *cfqd = cfqq->cfqd;
|
struct request *rq = NULL;
|
||||||
struct request *rq;
|
|
||||||
int fifo;
|
|
||||||
|
|
||||||
if (cfq_cfqq_fifo_expire(cfqq))
|
if (cfq_cfqq_fifo_expire(cfqq))
|
||||||
return NULL;
|
return NULL;
|
||||||
|
@ -1141,13 +1151,11 @@ static struct request *cfq_check_fifo(struct cfq_queue *cfqq)
|
||||||
if (list_empty(&cfqq->fifo))
|
if (list_empty(&cfqq->fifo))
|
||||||
return NULL;
|
return NULL;
|
||||||
|
|
||||||
fifo = cfq_cfqq_sync(cfqq);
|
|
||||||
rq = rq_entry_fifo(cfqq->fifo.next);
|
rq = rq_entry_fifo(cfqq->fifo.next);
|
||||||
|
if (time_before(jiffies, rq_fifo_time(rq)))
|
||||||
if (time_before(jiffies, rq->start_time + cfqd->cfq_fifo_expire[fifo]))
|
|
||||||
rq = NULL;
|
rq = NULL;
|
||||||
|
|
||||||
cfq_log_cfqq(cfqd, cfqq, "fifo=%p", rq);
|
cfq_log_cfqq(cfqq->cfqd, cfqq, "fifo=%p", rq);
|
||||||
return rq;
|
return rq;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@@ -1248,67 +1256,21 @@ static int cfq_forced_dispatch(struct cfq_data *cfqd)
     return dispatched;
 }
 
-/*
- * Dispatch a request from cfqq, moving them to the request queue
- * dispatch list.
- */
-static void cfq_dispatch_request(struct cfq_data *cfqd, struct cfq_queue *cfqq)
+static bool cfq_may_dispatch(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 {
-    struct request *rq;
-
-    BUG_ON(RB_EMPTY_ROOT(&cfqq->sort_list));
-
-    /*
-     * follow expired path, else get first next available
-     */
-    rq = cfq_check_fifo(cfqq);
-    if (!rq)
-        rq = cfqq->next_rq;
-
-    /*
-     * insert request into driver dispatch list
-     */
-    cfq_dispatch_insert(cfqd->queue, rq);
-
-    if (!cfqd->active_cic) {
-        struct cfq_io_context *cic = RQ_CIC(rq);
-
-        atomic_long_inc(&cic->ioc->refcount);
-        cfqd->active_cic = cic;
-    }
-}
-
-/*
- * Find the cfqq that we need to service and move a request from that to the
- * dispatch list
- */
-static int cfq_dispatch_requests(struct request_queue *q, int force)
-{
-    struct cfq_data *cfqd = q->elevator->elevator_data;
-    struct cfq_queue *cfqq;
     unsigned int max_dispatch;
 
-    if (!cfqd->busy_queues)
-        return 0;
-
-    if (unlikely(force))
-        return cfq_forced_dispatch(cfqd);
-
-    cfqq = cfq_select_queue(cfqd);
-    if (!cfqq)
-        return 0;
-
     /*
      * Drain async requests before we start sync IO
      */
     if (cfq_cfqq_idle_window(cfqq) && cfqd->rq_in_driver[BLK_RW_ASYNC])
-        return 0;
+        return false;
 
     /*
      * If this is an async queue and we have sync IO in flight, let it wait
      */
     if (cfqd->sync_flight && !cfq_cfqq_sync(cfqq))
-        return 0;
+        return false;
 
     max_dispatch = cfqd->cfq_quantum;
     if (cfq_class_idle(cfqq))
|
||||||
* idle queue must always only have a single IO in flight
|
* idle queue must always only have a single IO in flight
|
||||||
*/
|
*/
|
||||||
if (cfq_class_idle(cfqq))
|
if (cfq_class_idle(cfqq))
|
||||||
return 0;
|
return false;
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* We have other queues, don't allow more IO from this one
|
* We have other queues, don't allow more IO from this one
|
||||||
*/
|
*/
|
||||||
if (cfqd->busy_queues > 1)
|
if (cfqd->busy_queues > 1)
|
||||||
return 0;
|
return false;
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Sole queue user, allow bigger slice
|
* Sole queue user, allow bigger slice
|
||||||
|
@@ -1352,13 +1314,72 @@ static int cfq_dispatch_requests(struct request_queue *q, int force)
         max_dispatch = depth;
     }
 
-    if (cfqq->dispatched >= max_dispatch)
+    /*
+     * If we're below the current max, allow a dispatch
+     */
+    return cfqq->dispatched < max_dispatch;
+}
+
+/*
+ * Dispatch a request from cfqq, moving them to the request queue
+ * dispatch list.
+ */
+static bool cfq_dispatch_request(struct cfq_data *cfqd, struct cfq_queue *cfqq)
+{
+    struct request *rq;
+
+    BUG_ON(RB_EMPTY_ROOT(&cfqq->sort_list));
+
+    if (!cfq_may_dispatch(cfqd, cfqq))
+        return false;
+
+    /*
+     * follow expired path, else get first next available
+     */
+    rq = cfq_check_fifo(cfqq);
+    if (!rq)
+        rq = cfqq->next_rq;
+
+    /*
+     * insert request into driver dispatch list
+     */
+    cfq_dispatch_insert(cfqd->queue, rq);
+
+    if (!cfqd->active_cic) {
+        struct cfq_io_context *cic = RQ_CIC(rq);
+
+        atomic_long_inc(&cic->ioc->refcount);
+        cfqd->active_cic = cic;
+    }
+
+    return true;
+}
+
+/*
+ * Find the cfqq that we need to service and move a request from that to the
+ * dispatch list
+ */
+static int cfq_dispatch_requests(struct request_queue *q, int force)
+{
+    struct cfq_data *cfqd = q->elevator->elevator_data;
+    struct cfq_queue *cfqq;
+
+    if (!cfqd->busy_queues)
+        return 0;
+
+    if (unlikely(force))
+        return cfq_forced_dispatch(cfqd);
+
+    cfqq = cfq_select_queue(cfqd);
+    if (!cfqq)
         return 0;
 
     /*
-     * Dispatch a request from this cfqq
+     * Dispatch a request from this cfqq, if it is allowed
      */
-    cfq_dispatch_request(cfqd, cfqq);
+    if (!cfq_dispatch_request(cfqd, cfqq))
+        return 0;
+
     cfqq->slice_dispatch++;
     cfq_clear_cfqq_must_dispatch(cfqq);
 
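After this reshuffle the dispatch path reads as three layers: cfq_dispatch_requests() picks a queue, cfq_dispatch_request() moves at most one request to the dispatch list, and cfq_may_dispatch() holds the "is a dispatch allowed right now" policy. A compressed, runnable model of just that control flow (plain C; the real policy checks — async drain, sync flight, quantum scaling — are reduced to a single counter here):

    #include <stdbool.h>
    #include <stdio.h>

    /* Skeleton of the refactored dispatch path. */
    static bool may_dispatch(int dispatched, int max_dispatch)
    {
        return dispatched < max_dispatch;
    }

    static bool dispatch_one(int *dispatched, int max_dispatch)
    {
        if (!may_dispatch(*dispatched, max_dispatch))
            return false;
        (*dispatched)++;              /* stands in for cfq_dispatch_insert() */
        return true;
    }

    int main(void)
    {
        int dispatched = 0, quantum = 4, issued = 0;

        while (dispatch_one(&dispatched, quantum))
            issued++;
        printf("issued %d of quantum %d\n", issued, quantum);
        return 0;
    }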
@@ -1399,7 +1420,7 @@ static void cfq_put_queue(struct cfq_queue *cfqq)
 
     if (unlikely(cfqd->active_queue == cfqq)) {
         __cfq_slice_expired(cfqd, cfqq, 0);
-        cfq_schedule_dispatch(cfqd, 0);
+        cfq_schedule_dispatch(cfqd);
     }
 
     kmem_cache_free(cfq_pool, cfqq);
@@ -1494,7 +1515,7 @@ static void cfq_exit_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 {
     if (unlikely(cfqq == cfqd->active_queue)) {
         __cfq_slice_expired(cfqd, cfqq, 0);
-        cfq_schedule_dispatch(cfqd, 0);
+        cfq_schedule_dispatch(cfqd);
     }
 
     cfq_put_queue(cfqq);
@@ -1658,7 +1679,7 @@ static void cfq_ioc_set_ioprio(struct io_context *ioc)
 }
 
 static void cfq_init_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
-              pid_t pid, int is_sync)
+              pid_t pid, bool is_sync)
 {
     RB_CLEAR_NODE(&cfqq->rb_node);
     RB_CLEAR_NODE(&cfqq->p_node);
@@ -1678,7 +1699,7 @@ static void cfq_init_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 }
 
 static struct cfq_queue *
-cfq_find_alloc_queue(struct cfq_data *cfqd, int is_sync,
+cfq_find_alloc_queue(struct cfq_data *cfqd, bool is_sync,
              struct io_context *ioc, gfp_t gfp_mask)
 {
     struct cfq_queue *cfqq, *new_cfqq = NULL;
@@ -1742,7 +1763,7 @@ cfq_async_queue_prio(struct cfq_data *cfqd, int ioprio_class, int ioprio)
 }
 
 static struct cfq_queue *
-cfq_get_queue(struct cfq_data *cfqd, int is_sync, struct io_context *ioc,
+cfq_get_queue(struct cfq_data *cfqd, bool is_sync, struct io_context *ioc,
           gfp_t gfp_mask)
 {
     const int ioprio = task_ioprio(ioc);
@@ -1977,7 +1998,10 @@ cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq,
         (!cfqd->cfq_latency && cfqd->hw_tag && CIC_SEEKY(cic)))
         enable_idle = 0;
     else if (sample_valid(cic->ttime_samples)) {
-        if (cic->ttime_mean > cfqd->cfq_slice_idle)
+        unsigned int slice_idle = cfqd->cfq_slice_idle;
+        if (sample_valid(cic->seek_samples) && CIC_SEEKY(cic))
+            slice_idle = msecs_to_jiffies(CFQ_MIN_TT);
+        if (cic->ttime_mean > slice_idle)
             enable_idle = 0;
         else
             enable_idle = 1;
@@ -1996,7 +2020,7 @@ cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq,
  * Check if new_cfqq should preempt the currently active queue. Return 0 for
  * no or if we aren't sure, a 1 will cause a preempt.
  */
-static int
+static bool
 cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
            struct request *rq)
 {
@@ -2004,48 +2028,48 @@ cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
 
     cfqq = cfqd->active_queue;
     if (!cfqq)
-        return 0;
+        return false;
 
     if (cfq_slice_used(cfqq))
-        return 1;
+        return true;
 
     if (cfq_class_idle(new_cfqq))
-        return 0;
+        return false;
 
     if (cfq_class_idle(cfqq))
-        return 1;
+        return true;
 
     /*
      * if the new request is sync, but the currently running queue is
      * not, let the sync request have priority.
      */
     if (rq_is_sync(rq) && !cfq_cfqq_sync(cfqq))
-        return 1;
+        return true;
 
     /*
      * So both queues are sync. Let the new request get disk time if
      * it's a metadata request and the current queue is doing regular IO.
      */
     if (rq_is_meta(rq) && !cfqq->meta_pending)
-        return 1;
+        return false;
 
     /*
      * Allow an RT request to pre-empt an ongoing non-RT cfqq timeslice.
      */
     if (cfq_class_rt(new_cfqq) && !cfq_class_rt(cfqq))
-        return 1;
+        return true;
 
     if (!cfqd->active_cic || !cfq_cfqq_wait_request(cfqq))
-        return 0;
+        return false;
 
     /*
      * if this request is as-good as one we would expect from the
      * current cfqq, let it preempt
      */
     if (cfq_rq_close(cfqd, rq))
-        return 1;
+        return true;
 
-    return 0;
+    return false;
 }
 
 /*
@@ -2130,6 +2154,7 @@ static void cfq_insert_request(struct request_queue *q, struct request *rq)
 
     cfq_add_rq_rb(rq);
 
+    rq_set_fifo_time(rq, jiffies + cfqd->cfq_fifo_expire[rq_is_sync(rq)]);
     list_add_tail(&rq->queuelist, &cfqq->fifo);
 
     cfq_rq_enqueued(cfqd, cfqq, rq);
@@ -2211,7 +2236,7 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
     }
 
     if (!rq_in_driver(cfqd))
-        cfq_schedule_dispatch(cfqd, 0);
+        cfq_schedule_dispatch(cfqd);
 }
 
 /*
@@ -2309,7 +2334,7 @@ cfq_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask)
     struct cfq_data *cfqd = q->elevator->elevator_data;
     struct cfq_io_context *cic;
     const int rw = rq_data_dir(rq);
-    const int is_sync = rq_is_sync(rq);
+    const bool is_sync = rq_is_sync(rq);
     struct cfq_queue *cfqq;
     unsigned long flags;
 
@@ -2341,7 +2366,7 @@ queue_fail:
     if (cic)
         put_io_context(cic->ioc);
 
-    cfq_schedule_dispatch(cfqd, 0);
+    cfq_schedule_dispatch(cfqd);
     spin_unlock_irqrestore(q->queue_lock, flags);
     cfq_log(cfqd, "set_request fail");
     return 1;
@@ -2350,7 +2375,7 @@ queue_fail:
 static void cfq_kick_queue(struct work_struct *work)
 {
     struct cfq_data *cfqd =
-        container_of(work, struct cfq_data, unplug_work.work);
+        container_of(work, struct cfq_data, unplug_work);
     struct request_queue *q = cfqd->queue;
 
     spin_lock_irq(q->queue_lock);
@@ -2404,7 +2429,7 @@ static void cfq_idle_slice_timer(unsigned long data)
 expire:
     cfq_slice_expired(cfqd, timed_out);
 out_kick:
-    cfq_schedule_dispatch(cfqd, 0);
+    cfq_schedule_dispatch(cfqd);
 out_cont:
     spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
 }
@@ -2412,7 +2437,7 @@ out_cont:
 static void cfq_shutdown_timer_wq(struct cfq_data *cfqd)
 {
     del_timer_sync(&cfqd->idle_slice_timer);
-    cancel_delayed_work_sync(&cfqd->unplug_work);
+    cancel_work_sync(&cfqd->unplug_work);
 }
 
 static void cfq_put_async_queues(struct cfq_data *cfqd)
@@ -2494,7 +2519,7 @@ static void *cfq_init_queue(struct request_queue *q)
     cfqd->idle_slice_timer.function = cfq_idle_slice_timer;
     cfqd->idle_slice_timer.data = (unsigned long) cfqd;
 
-    INIT_DELAYED_WORK(&cfqd->unplug_work, cfq_kick_queue);
+    INIT_WORK(&cfqd->unplug_work, cfq_kick_queue);
 
     cfqd->cfq_quantum = cfq_quantum;
     cfqd->cfq_fifo_expire[0] = cfq_fifo_expire[0];
@@ -1053,9 +1053,7 @@ ssize_t elv_iosched_store(struct request_queue *q, const char *name,
         return count;
 
     strlcpy(elevator_name, name, sizeof(elevator_name));
-    strstrip(elevator_name);
-
-    e = elevator_get(elevator_name);
+    e = elevator_get(strstrip(elevator_name));
     if (!e) {
         printk(KERN_ERR "elevator: type %s not found\n", elevator_name);
         return -EINVAL;
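The point of folding strstrip() into the elevator_get() call is that strstrip() returns a pointer past any leading whitespace; calling it only for its side effect and then passing the original buffer keeps that leading whitespace in the lookup. A user-space illustration of the difference (the trim() helper below is written for the example; it only mimics the relevant behaviour of the kernel's strstrip()):

    #include <ctype.h>
    #include <stdio.h>
    #include <string.h>

    /* Minimal stand-in for strstrip(): trims trailing blanks in place and
     * returns a pointer to the first non-blank character. */
    static char *trim(char *s)
    {
        size_t len = strlen(s);

        while (len && isspace((unsigned char)s[len - 1]))
            s[--len] = '\0';
        while (*s && isspace((unsigned char)*s))
            s++;
        return s;
    }

    int main(void)
    {
        char name[16] = "  cfq\n";
        char *clean;

        trim(name);                     /* old pattern: return value ignored */
        printf("wrong: '%s'\n", name);  /* leading spaces still present */

        clean = trim(name);             /* new pattern: use the return value */
        printf("right: '%s'\n", clean);
        return 0;
    }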
@@ -869,6 +869,7 @@ static DEVICE_ATTR(size, S_IRUGO, part_size_show, NULL);
 static DEVICE_ATTR(alignment_offset, S_IRUGO, disk_alignment_offset_show, NULL);
 static DEVICE_ATTR(capability, S_IRUGO, disk_capability_show, NULL);
 static DEVICE_ATTR(stat, S_IRUGO, part_stat_show, NULL);
+static DEVICE_ATTR(inflight, S_IRUGO, part_inflight_show, NULL);
 #ifdef CONFIG_FAIL_MAKE_REQUEST
 static struct device_attribute dev_attr_fail =
     __ATTR(make-it-fail, S_IRUGO|S_IWUSR, part_fail_show, part_fail_store);
@@ -888,6 +889,7 @@ static struct attribute *disk_attrs[] = {
     &dev_attr_alignment_offset.attr,
     &dev_attr_capability.attr,
     &dev_attr_stat.attr,
+    &dev_attr_inflight.attr,
 #ifdef CONFIG_FAIL_MAKE_REQUEST
     &dev_attr_fail.attr,
 #endif
@@ -1053,7 +1055,7 @@ static int diskstats_show(struct seq_file *seqf, void *v)
                part_stat_read(hd, merges[1]),
                (unsigned long long)part_stat_read(hd, sectors[1]),
                jiffies_to_msecs(part_stat_read(hd, ticks[1])),
-               hd->in_flight,
+               part_in_flight(hd),
                jiffies_to_msecs(part_stat_read(hd, io_ticks)),
                jiffies_to_msecs(part_stat_read(hd, time_in_queue))
                );
@@ -68,6 +68,12 @@ MODULE_SUPPORTED_DEVICE("HP SA5i SA5i+ SA532 SA5300 SA5312 SA641 SA642 SA6400"
 MODULE_VERSION("3.6.20");
 MODULE_LICENSE("GPL");
 
+static int cciss_allow_hpsa;
+module_param(cciss_allow_hpsa, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(cciss_allow_hpsa,
+    "Prevent cciss driver from accessing hardware known to be "
+    " supported by the hpsa driver");
+
 #include "cciss_cmd.h"
 #include "cciss.h"
 #include <linux/cciss_ioctl.h>
@@ -101,8 +107,6 @@ static const struct pci_device_id cciss_pci_device_id[] = {
     {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3249},
     {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324A},
     {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324B},
-    {PCI_VENDOR_ID_HP, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
-        PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},
     {0,}
 };
 
@@ -123,8 +127,6 @@ static struct board_type products[] = {
     {0x409D0E11, "Smart Array 6400 EM", &SA5_access},
     {0x40910E11, "Smart Array 6i", &SA5_access},
     {0x3225103C, "Smart Array P600", &SA5_access},
-    {0x3223103C, "Smart Array P800", &SA5_access},
-    {0x3234103C, "Smart Array P400", &SA5_access},
     {0x3235103C, "Smart Array P400i", &SA5_access},
     {0x3211103C, "Smart Array E200i", &SA5_access},
     {0x3212103C, "Smart Array E200", &SA5_access},
@@ -132,6 +134,10 @@ static struct board_type products[] = {
     {0x3214103C, "Smart Array E200i", &SA5_access},
     {0x3215103C, "Smart Array E200i", &SA5_access},
     {0x3237103C, "Smart Array E500", &SA5_access},
+/* controllers below this line are also supported by the hpsa driver. */
+#define HPSA_BOUNDARY 0x3223103C
+    {0x3223103C, "Smart Array P800", &SA5_access},
+    {0x3234103C, "Smart Array P400", &SA5_access},
     {0x323D103C, "Smart Array P700m", &SA5_access},
     {0x3241103C, "Smart Array P212", &SA5_access},
     {0x3243103C, "Smart Array P410", &SA5_access},
@@ -140,7 +146,6 @@ static struct board_type products[] = {
     {0x3249103C, "Smart Array P812", &SA5_access},
     {0x324A103C, "Smart Array P712m", &SA5_access},
     {0x324B103C, "Smart Array P711m", &SA5_access},
-    {0xFFFF103C, "Unknown Smart Array", &SA5_access},
 };
 
 /* How long to wait (in milliseconds) for board to go into simple mode */
@@ -3754,7 +3759,27 @@ static int __devinit cciss_pci_init(ctlr_info_t *c, struct pci_dev *pdev)
     __u64 cfg_offset;
     __u32 cfg_base_addr;
     __u64 cfg_base_addr_index;
-    int i, err;
+    int i, prod_index, err;
 
+    subsystem_vendor_id = pdev->subsystem_vendor;
+    subsystem_device_id = pdev->subsystem_device;
+    board_id = (((__u32) (subsystem_device_id << 16) & 0xffff0000) |
+            subsystem_vendor_id);
+
+    for (i = 0; i < ARRAY_SIZE(products); i++) {
+        /* Stand aside for hpsa driver on request */
+        if (cciss_allow_hpsa && products[i].board_id == HPSA_BOUNDARY)
+            return -ENODEV;
+        if (board_id == products[i].board_id)
+            break;
+    }
+    prod_index = i;
+    if (prod_index == ARRAY_SIZE(products)) {
+        dev_warn(&pdev->dev,
+            "unrecognized board ID: 0x%08lx, ignoring.\n",
+            (unsigned long) board_id);
+        return -ENODEV;
+    }
+
     /* check to see if controller has been disabled */
     /* BEFORE trying to enable it */
@@ -3778,11 +3803,6 @@ static int __devinit cciss_pci_init(ctlr_info_t *c, struct pci_dev *pdev)
         return err;
     }
 
-    subsystem_vendor_id = pdev->subsystem_vendor;
-    subsystem_device_id = pdev->subsystem_device;
-    board_id = (((__u32) (subsystem_device_id << 16) & 0xffff0000) |
-            subsystem_vendor_id);
-
 #ifdef CCISS_DEBUG
     printk("command = %x\n", command);
     printk("irq = %x\n", pdev->irq);
@@ -3868,14 +3888,9 @@ static int __devinit cciss_pci_init(ctlr_info_t *c, struct pci_dev *pdev)
      * leave a little room for ioctl calls.
      */
     c->max_commands = readl(&(c->cfgtable->CmdsOutMax));
-    for (i = 0; i < ARRAY_SIZE(products); i++) {
-        if (board_id == products[i].board_id) {
-            c->product_name = products[i].product_name;
-            c->access = *(products[i].access);
-            c->nr_cmds = c->max_commands - 4;
-            break;
-        }
-    }
+    c->product_name = products[prod_index].product_name;
+    c->access = *(products[prod_index].access);
+    c->nr_cmds = c->max_commands - 4;
     if ((readb(&c->cfgtable->Signature[0]) != 'C') ||
         (readb(&c->cfgtable->Signature[1]) != 'I') ||
         (readb(&c->cfgtable->Signature[2]) != 'S') ||
@@ -3884,27 +3899,6 @@ static int __devinit cciss_pci_init(ctlr_info_t *c, struct pci_dev *pdev)
         err = -ENODEV;
         goto err_out_free_res;
     }
-    /* We didn't find the controller in our list. We know the
-     * signature is valid. If it's an HP device let's try to
-     * bind to the device and fire it up. Otherwise we bail.
-     */
-    if (i == ARRAY_SIZE(products)) {
-        if (subsystem_vendor_id == PCI_VENDOR_ID_HP) {
-            c->product_name = products[i-1].product_name;
-            c->access = *(products[i-1].access);
-            c->nr_cmds = c->max_commands - 4;
-            printk(KERN_WARNING "cciss: This is an unknown "
-                "Smart Array controller.\n"
-                "cciss: Please update to the latest driver "
-                "available from www.hp.com.\n");
-        } else {
-            printk(KERN_WARNING "cciss: Sorry, I don't know how"
-                " to access the Smart Array controller %08lx\n"
-                , (unsigned long)board_id);
-            err = -ENODEV;
-            goto err_out_free_res;
-        }
-    }
 #ifdef CONFIG_X86
     {
         /* Need to enable prefetch in the SCSI core for 6400 in x86 */
@@ -4254,7 +4248,7 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
     mutex_init(&hba[i]->busy_shutting_down);
 
     if (cciss_pci_init(hba[i], pdev) != 0)
-        goto clean0;
+        goto clean_no_release_regions;
 
     sprintf(hba[i]->devname, "cciss%d", i);
     hba[i]->ctlr = i;
@@ -4391,13 +4385,14 @@ clean2:
 clean1:
     cciss_destroy_hba_sysfs_entry(hba[i]);
 clean0:
+    pci_release_regions(pdev);
+clean_no_release_regions:
     hba[i]->busy_initializing = 0;
 
     /*
      * Deliberately omit pci_disable_device(): it does something nasty to
      * Smart Array controllers that pci_enable_device does not undo
      */
-    pci_release_regions(pdev);
     pci_set_drvdata(pdev, NULL);
     free_hba(i);
     return -1;
@@ -130,7 +130,7 @@ struct mapped_device {
     /*
     * A list of ios that arrived while we were suspended.
     */
-    atomic_t pending;
+    atomic_t pending[2];
     wait_queue_head_t wait;
     struct work_struct work;
     struct bio_list deferred;
@@ -453,13 +453,14 @@ static void start_io_acct(struct dm_io *io)
 {
     struct mapped_device *md = io->md;
     int cpu;
+    int rw = bio_data_dir(io->bio);
 
     io->start_time = jiffies;
 
     cpu = part_stat_lock();
     part_round_stats(cpu, &dm_disk(md)->part0);
     part_stat_unlock();
-    dm_disk(md)->part0.in_flight = atomic_inc_return(&md->pending);
+    dm_disk(md)->part0.in_flight[rw] = atomic_inc_return(&md->pending[rw]);
 }
 
 static void end_io_acct(struct dm_io *io)
@@ -479,8 +480,9 @@ static void end_io_acct(struct dm_io *io)
      * After this is decremented the bio must not be touched if it is
      * a barrier.
      */
-    dm_disk(md)->part0.in_flight = pending =
-        atomic_dec_return(&md->pending);
+    dm_disk(md)->part0.in_flight[rw] = pending =
+        atomic_dec_return(&md->pending[rw]);
+    pending += atomic_read(&md->pending[rw^0x1]);
 
     /* nudge anyone waiting on suspend queue */
     if (!pending)
@@ -1785,7 +1787,8 @@ static struct mapped_device *alloc_dev(int minor)
     if (!md->disk)
         goto bad_disk;
 
-    atomic_set(&md->pending, 0);
+    atomic_set(&md->pending[0], 0);
+    atomic_set(&md->pending[1], 0);
     init_waitqueue_head(&md->wait);
     INIT_WORK(&md->work, dm_wq_work);
     init_waitqueue_head(&md->eventq);
@@ -2088,7 +2091,8 @@ static int dm_wait_for_completion(struct mapped_device *md, int interruptible)
                 break;
             }
             spin_unlock_irqrestore(q->queue_lock, flags);
-        } else if (!atomic_read(&md->pending))
+        } else if (!atomic_read(&md->pending[0]) &&
+                    !atomic_read(&md->pending[1]))
             break;
 
         if (interruptible == TASK_INTERRUPTIBLE &&
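device-mapper now keeps one pending counter per direction: the disk's in_flight[rw] mirrors the per-direction count, while the suspend path has to wait for the sum of both directions to drain. A small stand-alone model of that accounting (plain C with plain ints instead of atomics; names are illustrative only):

    #include <stdio.h>

    static int pending[2];               /* [0] = read, [1] = write */

    static void start_io(int rw) { pending[rw]++; }
    static void end_io(int rw)   { pending[rw]--; }

    static int total_pending(void)
    {
        /* What the suspend/wait path cares about: both directions drained. */
        return pending[0] + pending[1];
    }

    int main(void)
    {
        start_io(0);
        start_io(1);
        end_io(0);
        printf("still in flight: %d\n", total_pending());
        return 0;
    }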
@@ -248,11 +248,19 @@ ssize_t part_stat_show(struct device *dev,
         part_stat_read(p, merges[WRITE]),
         (unsigned long long)part_stat_read(p, sectors[WRITE]),
         jiffies_to_msecs(part_stat_read(p, ticks[WRITE])),
-        p->in_flight,
+        part_in_flight(p),
         jiffies_to_msecs(part_stat_read(p, io_ticks)),
         jiffies_to_msecs(part_stat_read(p, time_in_queue)));
 }
 
+ssize_t part_inflight_show(struct device *dev,
+            struct device_attribute *attr, char *buf)
+{
+    struct hd_struct *p = dev_to_part(dev);
+
+    return sprintf(buf, "%8u %8u\n", p->in_flight[0], p->in_flight[1]);
+}
+
 #ifdef CONFIG_FAIL_MAKE_REQUEST
 ssize_t part_fail_show(struct device *dev,
                struct device_attribute *attr, char *buf)
|
||||||
static DEVICE_ATTR(size, S_IRUGO, part_size_show, NULL);
|
static DEVICE_ATTR(size, S_IRUGO, part_size_show, NULL);
|
||||||
static DEVICE_ATTR(alignment_offset, S_IRUGO, part_alignment_offset_show, NULL);
|
static DEVICE_ATTR(alignment_offset, S_IRUGO, part_alignment_offset_show, NULL);
|
||||||
static DEVICE_ATTR(stat, S_IRUGO, part_stat_show, NULL);
|
static DEVICE_ATTR(stat, S_IRUGO, part_stat_show, NULL);
|
||||||
|
static DEVICE_ATTR(inflight, S_IRUGO, part_inflight_show, NULL);
|
||||||
#ifdef CONFIG_FAIL_MAKE_REQUEST
|
#ifdef CONFIG_FAIL_MAKE_REQUEST
|
||||||
static struct device_attribute dev_attr_fail =
|
static struct device_attribute dev_attr_fail =
|
||||||
__ATTR(make-it-fail, S_IRUGO|S_IWUSR, part_fail_show, part_fail_store);
|
__ATTR(make-it-fail, S_IRUGO|S_IWUSR, part_fail_show, part_fail_store);
|
||||||
|
@ -292,6 +301,7 @@ static struct attribute *part_attrs[] = {
|
||||||
&dev_attr_size.attr,
|
&dev_attr_size.attr,
|
||||||
&dev_attr_alignment_offset.attr,
|
&dev_attr_alignment_offset.attr,
|
||||||
&dev_attr_stat.attr,
|
&dev_attr_stat.attr,
|
||||||
|
&dev_attr_inflight.attr,
|
||||||
#ifdef CONFIG_FAIL_MAKE_REQUEST
|
#ifdef CONFIG_FAIL_MAKE_REQUEST
|
||||||
&dev_attr_fail.attr,
|
&dev_attr_fail.attr,
|
||||||
#endif
|
#endif
|
||||||
|
|
|
@@ -1172,11 +1172,7 @@ static inline void put_dev_sector(Sector p)
 }
 
 struct work_struct;
-struct delayed_work;
 int kblockd_schedule_work(struct request_queue *q, struct work_struct *work);
-int kblockd_schedule_delayed_work(struct request_queue *q,
-                  struct delayed_work *work,
-                  unsigned long delay);
 
 #define MODULE_ALIAS_BLOCKDEV(major,minor) \
     MODULE_ALIAS("block-major-" __stringify(major) "-" __stringify(minor))
@@ -98,7 +98,7 @@ struct hd_struct {
     int make_it_fail;
 #endif
     unsigned long stamp;
-    int in_flight;
+    int in_flight[2];
 #ifdef CONFIG_SMP
     struct disk_stats *dkstats;
 #else
@@ -322,18 +322,23 @@ static inline void free_part_stats(struct hd_struct *part)
 #define part_stat_sub(cpu, gendiskp, field, subnd) \
     part_stat_add(cpu, gendiskp, field, -subnd)
 
-static inline void part_inc_in_flight(struct hd_struct *part)
+static inline void part_inc_in_flight(struct hd_struct *part, int rw)
 {
-    part->in_flight++;
+    part->in_flight[rw]++;
     if (part->partno)
-        part_to_disk(part)->part0.in_flight++;
+        part_to_disk(part)->part0.in_flight[rw]++;
 }
 
-static inline void part_dec_in_flight(struct hd_struct *part)
+static inline void part_dec_in_flight(struct hd_struct *part, int rw)
 {
-    part->in_flight--;
+    part->in_flight[rw]--;
     if (part->partno)
-        part_to_disk(part)->part0.in_flight--;
+        part_to_disk(part)->part0.in_flight[rw]--;
+}
+
+static inline int part_in_flight(struct hd_struct *part)
+{
+    return part->in_flight[0] + part->in_flight[1];
 }
 
 /* block/blk-core.c */
@@ -546,6 +551,8 @@ extern ssize_t part_size_show(struct device *dev,
                   struct device_attribute *attr, char *buf);
 extern ssize_t part_stat_show(struct device *dev,
                   struct device_attribute *attr, char *buf);
+extern ssize_t part_inflight_show(struct device *dev,
+                  struct device_attribute *attr, char *buf);
 #ifdef CONFIG_FAIL_MAKE_REQUEST
 extern ssize_t part_fail_show(struct device *dev,
                   struct device_attribute *attr, char *buf);
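The same split runs through the generic disk layer: in_flight becomes a two-element array indexed by direction, the inc/dec helpers take the direction, and readers that still want a single number sum both entries via part_in_flight(). A user-space sketch mirroring those helpers (the partition-to-disk propagation and locking are omitted; names here are only illustrative):

    #include <stdio.h>

    struct part {
        int in_flight[2];                /* [0] = read, [1] = write */
    };

    static void part_inc(struct part *p, int rw) { p->in_flight[rw]++; }
    static void part_dec(struct part *p, int rw) { p->in_flight[rw]--; }

    static int part_total(const struct part *p)
    {
        /* Equivalent of part_in_flight(): sum of both directions. */
        return p->in_flight[0] + p->in_flight[1];
    }

    int main(void)
    {
        struct part p = { { 0, 0 } };

        part_inc(&p, 1);                 /* one write in flight */
        part_inc(&p, 0);                 /* one read in flight */
        part_dec(&p, 0);
        printf("reads=%d writes=%d total=%d\n",
               p.in_flight[0], p.in_flight[1], part_total(&p));
        return 0;
    }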
@@ -6720,9 +6720,6 @@ EXPORT_SYMBOL(yield);
 /*
  * This task is about to go to sleep on IO. Increment rq->nr_iowait so
  * that process accounting knows that this is a task in IO wait state.
- *
- * But don't do that if it is a deliberate, throttling IO wait (this task
- * has set its backing_dev_info: the queue against which it should throttle)
  */
 void __sched io_schedule(void)
 {
@@ -92,7 +92,7 @@ static int bdi_debug_stats_show(struct seq_file *m, void *v)
            "BdiDirtyThresh: %8lu kB\n"
            "DirtyThresh: %8lu kB\n"
            "BackgroundThresh: %8lu kB\n"
-           "WriteBack threads:%8lu\n"
+           "WritebackThreads: %8lu\n"
            "b_dirty: %8lu\n"
            "b_io: %8lu\n"
            "b_more_io: %8lu\n"
@@ -566,7 +566,8 @@ static void balance_dirty_pages(struct address_space *mapping,
         if (pages_written >= write_chunk)
             break;      /* We've done our duty */
 
-        schedule_timeout_interruptible(pause);
+        __set_current_state(TASK_INTERRUPTIBLE);
+        io_schedule_timeout(pause);
 
         /*
          * Increase the delay for each loop, up to our previous