Mirror of https://github.com/adulau/aha.git (synced 2024-12-27 03:06:10 +00:00)
Merge branch 'for-linus' of git://git.kernel.dk/linux-2.6-block
* 'for-linus' of git://git.kernel.dk/linux-2.6-block: (41 commits)
  Revert "Seperate read and write statistics of in_flight requests"
  cfq-iosched: don't delay async queue if it hasn't dispatched at all
  block: Topology ioctls
  cfq-iosched: use assigned slice sync value, not default
  cfq-iosched: rename 'desktop' sysfs entry to 'low_latency'
  cfq-iosched: implement slower async initiate and queue ramp up
  cfq-iosched: delay async IO dispatch, if sync IO was just done
  cfq-iosched: add a knob for desktop interactiveness
  Add a tracepoint for block request remapping
  block: allow large discard requests
  block: use normal I/O path for discard requests
  swapfile: avoid NULL pointer dereference in swapon when s_bdev is NULL
  fs/bio.c: move EXPORT* macros to line after function
  Add missing blk_trace_remove_sysfs to be in pair with blk_trace_init_sysfs
  cciss: fix build when !PROC_FS
  block: Do not clamp max_hw_sectors for stacking devices
  block: Set max_sectors correctly for stacking devices
  cciss: cciss_host_attr_groups should be const
  cciss: Dynamically allocate the drive_info_struct for each logical drive.
  cciss: Add usage_count attribute to each logical drive in /sys
  ...
commit 58e57fbd1c

26 changed files with 1002 additions and 483 deletions
Documentation/ABI/testing/sysfs-bus-pci-devices-cciss

@@ -31,3 +31,31 @@ Date: March 2009
 Kernel Version: 2.6.30
 Contact: iss_storagedev@hp.com
 Description: A symbolic link to /sys/block/cciss!cXdY
+
+Where: /sys/bus/pci/devices/<dev>/ccissX/rescan
+Date: August 2009
+Kernel Version: 2.6.31
+Contact: iss_storagedev@hp.com
+Description: Kicks off a rescan of the controller to discover logical
+             drive topology changes.
+
+Where: /sys/bus/pci/devices/<dev>/ccissX/cXdY/lunid
+Date: August 2009
+Kernel Version: 2.6.31
+Contact: iss_storagedev@hp.com
+Description: Displays the 8-byte LUN ID used to address logical
+             drive Y of controller X.
+
+Where: /sys/bus/pci/devices/<dev>/ccissX/cXdY/raid_level
+Date: August 2009
+Kernel Version: 2.6.31
+Contact: iss_storagedev@hp.com
+Description: Displays the RAID level of logical drive Y of
+             controller X.
+
+Where: /sys/bus/pci/devices/<dev>/ccissX/cXdY/usage_count
+Date: August 2009
+Kernel Version: 2.6.31
+Contact: iss_storagedev@hp.com
+Description: Displays the usage count (number of opens) of logical drive Y
+             of controller X.
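A minimal userspace sketch of reading one of the new attributes; the PCI address and the controller/drive numbers are placeholders, not values from this commit:

/* Read the raid_level attribute of logical drive 0 on controller 0. */
#include <stdio.h>

int main(void)
{
    char buf[64];
    FILE *f = fopen("/sys/bus/pci/devices/0000:06:00.0/cciss0/c0d0/raid_level", "r");

    if (!f) {
        perror("fopen");
        return 1;
    }
    if (fgets(buf, sizeof(buf), f))
        printf("raid_level: %s", buf);
    fclose(f);
    return 0;
}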
block/blk-barrier.c

@@ -350,6 +350,7 @@ static void blkdev_discard_end_io(struct bio *bio, int err)
 
     if (bio->bi_private)
         complete(bio->bi_private);
+    __free_page(bio_page(bio));
 
     bio_put(bio);
 }
@@ -372,30 +373,50 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
     struct request_queue *q = bdev_get_queue(bdev);
     int type = flags & DISCARD_FL_BARRIER ?
         DISCARD_BARRIER : DISCARD_NOBARRIER;
+    struct bio *bio;
+    struct page *page;
     int ret = 0;
 
     if (!q)
         return -ENXIO;
 
-    if (!q->prepare_discard_fn)
+    if (!blk_queue_discard(q))
         return -EOPNOTSUPP;
 
     while (nr_sects && !ret) {
-        struct bio *bio = bio_alloc(gfp_mask, 0);
-        if (!bio)
-            return -ENOMEM;
+        unsigned int sector_size = q->limits.logical_block_size;
+        unsigned int max_discard_sectors =
+            min(q->limits.max_discard_sectors, UINT_MAX >> 9);
 
+        bio = bio_alloc(gfp_mask, 1);
+        if (!bio)
+            goto out;
+        bio->bi_sector = sector;
         bio->bi_end_io = blkdev_discard_end_io;
         bio->bi_bdev = bdev;
         if (flags & DISCARD_FL_WAIT)
             bio->bi_private = &wait;
 
-        bio->bi_sector = sector;
+        /*
+         * Add a zeroed one-sector payload as that's what
+         * our current implementations need. If we'll ever need
+         * more the interface will need revisiting.
+         */
+        page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+        if (!page)
+            goto out_free_bio;
+        if (bio_add_pc_page(q, bio, page, sector_size, 0) < sector_size)
+            goto out_free_page;
 
-        if (nr_sects > queue_max_hw_sectors(q)) {
-            bio->bi_size = queue_max_hw_sectors(q) << 9;
-            nr_sects -= queue_max_hw_sectors(q);
-            sector += queue_max_hw_sectors(q);
+        /*
+         * And override the bio size - the way discard works we
+         * touch many more blocks on disk than the actual payload
+         * length.
+         */
+        if (nr_sects > max_discard_sectors) {
+            bio->bi_size = max_discard_sectors << 9;
+            nr_sects -= max_discard_sectors;
+            sector += max_discard_sectors;
         } else {
             bio->bi_size = nr_sects << 9;
             nr_sects = 0;
@@ -414,5 +435,11 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
         bio_put(bio);
     }
     return ret;
+out_free_page:
+    __free_page(page);
+out_free_bio:
+    bio_put(bio);
+out:
+    return -ENOMEM;
 }
 EXPORT_SYMBOL(blkdev_issue_discard);
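For context, a sketch of how an in-kernel caller might use the reworked helper; the wrapper name and the "ignore EOPNOTSUPP" policy are illustrative assumptions, not part of this commit:

#include <linux/blkdev.h>

/* Discard a sector range, waiting for completion and ordering the
 * request like a barrier; treat missing DISCARD support as a no-op. */
static int discard_extent(struct block_device *bdev, sector_t start,
                          sector_t nr_sects)
{
    int ret = blkdev_issue_discard(bdev, start, nr_sects, GFP_NOFS,
                                   DISCARD_FL_WAIT | DISCARD_FL_BARRIER);

    if (ret == -EOPNOTSUPP)
        ret = 0;
    return ret;
}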
block/blk-core.c

@@ -34,6 +34,7 @@
 #include "blk.h"
 
 EXPORT_TRACEPOINT_SYMBOL_GPL(block_remap);
+EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap);
 EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_complete);
 
 static int __make_request(struct request_queue *q, struct bio *bio);
@@ -69,7 +70,7 @@ static void drive_stat_acct(struct request *rq, int new_io)
         part_stat_inc(cpu, part, merges[rw]);
     else {
         part_round_stats(cpu, part);
-        part_inc_in_flight(part, rw);
+        part_inc_in_flight(part);
     }
 
     part_stat_unlock();
@@ -1031,7 +1032,7 @@ static void part_round_stats_single(int cpu, struct hd_struct *part,
 
     if (part->in_flight) {
         __part_stat_add(cpu, part, time_in_queue,
-                part_in_flight(part) * (now - part->stamp));
+                part->in_flight * (now - part->stamp));
         __part_stat_add(cpu, part, io_ticks, (now - part->stamp));
     }
     part->stamp = now;
@@ -1124,7 +1125,6 @@ void init_request_from_bio(struct request *req, struct bio *bio)
         req->cmd_flags |= REQ_DISCARD;
         if (bio_rw_flagged(bio, BIO_RW_BARRIER))
             req->cmd_flags |= REQ_SOFTBARRIER;
-        req->q->prepare_discard_fn(req->q, req);
     } else if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER)))
         req->cmd_flags |= REQ_HARDBARRIER;
 
@@ -1437,7 +1437,8 @@ static inline void __generic_make_request(struct bio *bio)
         goto end_io;
     }
 
-    if (unlikely(nr_sectors > queue_max_hw_sectors(q))) {
+    if (unlikely(!bio_rw_flagged(bio, BIO_RW_DISCARD) &&
+                 nr_sectors > queue_max_hw_sectors(q))) {
         printk(KERN_ERR "bio too big device %s (%u > %u)\n",
                bdevname(bio->bi_bdev, b),
                bio_sectors(bio),
@@ -1470,7 +1471,7 @@ static inline void __generic_make_request(struct bio *bio)
             goto end_io;
 
         if (bio_rw_flagged(bio, BIO_RW_DISCARD) &&
-            !q->prepare_discard_fn) {
+            !blk_queue_discard(q)) {
             err = -EOPNOTSUPP;
             goto end_io;
         }
@@ -1738,7 +1739,7 @@ static void blk_account_io_done(struct request *req)
         part_stat_inc(cpu, part, ios[rw]);
         part_stat_add(cpu, part, ticks[rw], duration);
         part_round_stats(cpu, part);
-        part_dec_in_flight(part, rw);
+        part_dec_in_flight(part);
 
         part_stat_unlock();
     }
@@ -2491,6 +2492,14 @@ int kblockd_schedule_work(struct request_queue *q, struct work_struct *work)
 }
 EXPORT_SYMBOL(kblockd_schedule_work);
 
+int kblockd_schedule_delayed_work(struct request_queue *q,
+                                  struct delayed_work *work,
+                                  unsigned long delay)
+{
+    return queue_delayed_work(kblockd_workqueue, work, delay);
+}
+EXPORT_SYMBOL(kblockd_schedule_delayed_work);
+
 int __init blk_dev_init(void)
 {
     BUILD_BUG_ON(__REQ_NR_BITS > 8 *
block/blk-merge.c

@@ -351,7 +351,7 @@ static void blk_account_io_merge(struct request *req)
         part = disk_map_sector_rcu(req->rq_disk, blk_rq_pos(req));
 
         part_round_stats(cpu, part);
-        part_dec_in_flight(part, rq_data_dir(req));
+        part_dec_in_flight(part);
 
         part_stat_unlock();
     }
block/blk-settings.c

@@ -33,23 +33,6 @@ void blk_queue_prep_rq(struct request_queue *q, prep_rq_fn *pfn)
 }
 EXPORT_SYMBOL(blk_queue_prep_rq);
 
-/**
- * blk_queue_set_discard - set a discard_sectors function for queue
- * @q:   queue
- * @dfn: prepare_discard function
- *
- * It's possible for a queue to register a discard callback which is used
- * to transform a discard request into the appropriate type for the
- * hardware. If none is registered, then discard requests are failed
- * with %EOPNOTSUPP.
- *
- */
-void blk_queue_set_discard(struct request_queue *q, prepare_discard_fn *dfn)
-{
-    q->prepare_discard_fn = dfn;
-}
-EXPORT_SYMBOL(blk_queue_set_discard);
-
 /**
  * blk_queue_merge_bvec - set a merge_bvec function for queue
  * @q: queue
@@ -111,7 +94,9 @@ void blk_set_default_limits(struct queue_limits *lim)
     lim->max_hw_segments = MAX_HW_SEGMENTS;
     lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
     lim->max_segment_size = MAX_SEGMENT_SIZE;
-    lim->max_sectors = lim->max_hw_sectors = SAFE_MAX_SECTORS;
+    lim->max_sectors = BLK_DEF_MAX_SECTORS;
+    lim->max_hw_sectors = INT_MAX;
+    lim->max_discard_sectors = SAFE_MAX_SECTORS;
     lim->logical_block_size = lim->physical_block_size = lim->io_min = 512;
     lim->bounce_pfn = (unsigned long)(BLK_BOUNCE_ANY >> PAGE_SHIFT);
     lim->alignment_offset = 0;
@@ -164,6 +149,7 @@ void blk_queue_make_request(struct request_queue *q, make_request_fn *mfn)
     q->unplug_timer.data = (unsigned long)q;
 
     blk_set_default_limits(&q->limits);
+    blk_queue_max_sectors(q, SAFE_MAX_SECTORS);
 
     /*
      * If the caller didn't supply a lock, fall back to our embedded
@@ -253,6 +239,18 @@ void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_sectors)
 }
 EXPORT_SYMBOL(blk_queue_max_hw_sectors);
 
+/**
+ * blk_queue_max_discard_sectors - set max sectors for a single discard
+ * @q:  the request queue for the device
+ * @max_discard: maximum number of sectors to discard
+ **/
+void blk_queue_max_discard_sectors(struct request_queue *q,
+                                   unsigned int max_discard_sectors)
+{
+    q->limits.max_discard_sectors = max_discard_sectors;
+}
+EXPORT_SYMBOL(blk_queue_max_discard_sectors);
+
 /**
  * blk_queue_max_phys_segments - set max phys segments for a request for this queue
  * @q:  the request queue for the device
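Driver-side setup under the new scheme then reduces to flagging the queue and bounding discard size; a sketch, where the function name and the 16384-sector cap are invented example values:

#include <linux/blkdev.h>

static void mydrv_setup_discard(struct request_queue *q)
{
    /* Cap a single discard at 8 MiB worth of 512-byte sectors. */
    blk_queue_max_discard_sectors(q, 16384);
    /* Advertise DISCARD support instead of a prepare_discard_fn. */
    queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
}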
block/blk-sysfs.c

@@ -452,6 +452,7 @@ int blk_register_queue(struct gendisk *disk)
     if (ret) {
         kobject_uevent(&q->kobj, KOBJ_REMOVE);
         kobject_del(&q->kobj);
+        blk_trace_remove_sysfs(disk_to_dev(disk));
         return ret;
     }
 
@@ -465,11 +466,11 @@ void blk_unregister_queue(struct gendisk *disk)
     if (WARN_ON(!q))
         return;
 
-    if (q->request_fn) {
+    if (q->request_fn)
         elv_unregister_queue(q);
 
-        kobject_uevent(&q->kobj, KOBJ_REMOVE);
-        kobject_del(&q->kobj);
-        kobject_put(&disk_to_dev(disk)->kobj);
-    }
+    kobject_uevent(&q->kobj, KOBJ_REMOVE);
+    kobject_del(&q->kobj);
+    blk_trace_remove_sysfs(disk_to_dev(disk));
+    kobject_put(&disk_to_dev(disk)->kobj);
 }
block/cfq-iosched.c

@@ -150,7 +150,7 @@ struct cfq_data {
      * idle window management
      */
     struct timer_list idle_slice_timer;
-    struct work_struct unplug_work;
+    struct delayed_work unplug_work;
 
     struct cfq_queue *active_queue;
     struct cfq_io_context *active_cic;
@@ -173,6 +173,7 @@ struct cfq_data {
     unsigned int cfq_slice[2];
     unsigned int cfq_slice_async_rq;
     unsigned int cfq_slice_idle;
+    unsigned int cfq_latency;
 
     struct list_head cic_list;
 
@@ -180,6 +181,8 @@ struct cfq_data {
      * Fallback dummy cfqq for extreme OOM conditions
      */
     struct cfq_queue oom_cfqq;
+
+    unsigned long last_end_sync_rq;
 };
 
 enum cfqq_state_flags {
@@ -265,11 +268,13 @@ static inline int cfq_bio_sync(struct bio *bio)
  * scheduler run of queue, if there are requests pending and no one in the
  * driver that will restart queueing
  */
-static inline void cfq_schedule_dispatch(struct cfq_data *cfqd)
+static inline void cfq_schedule_dispatch(struct cfq_data *cfqd,
+                                         unsigned long delay)
 {
     if (cfqd->busy_queues) {
         cfq_log(cfqd, "schedule dispatch");
-        kblockd_schedule_work(cfqd->queue, &cfqd->unplug_work);
+        kblockd_schedule_delayed_work(cfqd->queue, &cfqd->unplug_work,
+                                      delay);
     }
 }
 
@@ -1326,12 +1331,30 @@ static int cfq_dispatch_requests(struct request_queue *q, int force)
             return 0;
 
         /*
-         * we are the only queue, allow up to 4 times of 'quantum'
+         * Sole queue user, allow bigger slice
         */
-        if (cfqq->dispatched >= 4 * max_dispatch)
-            return 0;
+        max_dispatch *= 4;
    }
 
+    /*
+     * Async queues must wait a bit before being allowed dispatch.
+     * We also ramp up the dispatch depth gradually for async IO,
+     * based on the last sync IO we serviced
+     */
+    if (!cfq_cfqq_sync(cfqq) && cfqd->cfq_latency) {
+        unsigned long last_sync = jiffies - cfqd->last_end_sync_rq;
+        unsigned int depth;
+
+        depth = last_sync / cfqd->cfq_slice[1];
+        if (!depth && !cfqq->dispatched)
+            depth = 1;
+        if (depth < max_dispatch)
+            max_dispatch = depth;
+    }
+
+    if (cfqq->dispatched >= max_dispatch)
+        return 0;
+
     /*
      * Dispatch a request from this cfqq
      */
@@ -1376,7 +1399,7 @@ static void cfq_put_queue(struct cfq_queue *cfqq)
 
     if (unlikely(cfqd->active_queue == cfqq)) {
         __cfq_slice_expired(cfqd, cfqq, 0);
-        cfq_schedule_dispatch(cfqd);
+        cfq_schedule_dispatch(cfqd, 0);
     }
 
     kmem_cache_free(cfq_pool, cfqq);
@@ -1471,7 +1494,7 @@ static void cfq_exit_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 {
     if (unlikely(cfqq == cfqd->active_queue)) {
         __cfq_slice_expired(cfqd, cfqq, 0);
-        cfq_schedule_dispatch(cfqd);
+        cfq_schedule_dispatch(cfqd, 0);
     }
 
     cfq_put_queue(cfqq);
@@ -1951,7 +1974,7 @@ cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq,
     enable_idle = old_idle = cfq_cfqq_idle_window(cfqq);
 
     if (!atomic_read(&cic->ioc->nr_tasks) || !cfqd->cfq_slice_idle ||
-        (cfqd->hw_tag && CIC_SEEKY(cic)))
+        (!cfqd->cfq_latency && cfqd->hw_tag && CIC_SEEKY(cic)))
         enable_idle = 0;
     else if (sample_valid(cic->ttime_samples)) {
         if (cic->ttime_mean > cfqd->cfq_slice_idle)
@@ -2157,8 +2180,10 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
     if (cfq_cfqq_sync(cfqq))
         cfqd->sync_flight--;
 
-    if (sync)
+    if (sync) {
         RQ_CIC(rq)->last_end_request = now;
+        cfqd->last_end_sync_rq = now;
+    }
 
     /*
      * If this is the active queue, check if it needs to be expired,
@@ -2186,7 +2211,7 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
     }
 
     if (!rq_in_driver(cfqd))
-        cfq_schedule_dispatch(cfqd);
+        cfq_schedule_dispatch(cfqd, 0);
 }
 
 /*
@@ -2316,7 +2341,7 @@ queue_fail:
     if (cic)
         put_io_context(cic->ioc);
 
-    cfq_schedule_dispatch(cfqd);
+    cfq_schedule_dispatch(cfqd, 0);
     spin_unlock_irqrestore(q->queue_lock, flags);
     cfq_log(cfqd, "set_request fail");
     return 1;
@@ -2325,7 +2350,7 @@ queue_fail:
 static void cfq_kick_queue(struct work_struct *work)
 {
     struct cfq_data *cfqd =
-        container_of(work, struct cfq_data, unplug_work);
+        container_of(work, struct cfq_data, unplug_work.work);
     struct request_queue *q = cfqd->queue;
 
     spin_lock_irq(q->queue_lock);
@@ -2379,7 +2404,7 @@ static void cfq_idle_slice_timer(unsigned long data)
 expire:
     cfq_slice_expired(cfqd, timed_out);
 out_kick:
-    cfq_schedule_dispatch(cfqd);
+    cfq_schedule_dispatch(cfqd, 0);
 out_cont:
     spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
 }
@@ -2387,7 +2412,7 @@ out_cont:
 static void cfq_shutdown_timer_wq(struct cfq_data *cfqd)
 {
     del_timer_sync(&cfqd->idle_slice_timer);
-    cancel_work_sync(&cfqd->unplug_work);
+    cancel_delayed_work_sync(&cfqd->unplug_work);
 }
 
 static void cfq_put_async_queues(struct cfq_data *cfqd)
@@ -2469,7 +2494,7 @@ static void *cfq_init_queue(struct request_queue *q)
     cfqd->idle_slice_timer.function = cfq_idle_slice_timer;
     cfqd->idle_slice_timer.data = (unsigned long) cfqd;
 
-    INIT_WORK(&cfqd->unplug_work, cfq_kick_queue);
+    INIT_DELAYED_WORK(&cfqd->unplug_work, cfq_kick_queue);
 
     cfqd->cfq_quantum = cfq_quantum;
     cfqd->cfq_fifo_expire[0] = cfq_fifo_expire[0];
@@ -2480,8 +2505,9 @@ static void *cfq_init_queue(struct request_queue *q)
     cfqd->cfq_slice[1] = cfq_slice_sync;
     cfqd->cfq_slice_async_rq = cfq_slice_async_rq;
     cfqd->cfq_slice_idle = cfq_slice_idle;
+    cfqd->cfq_latency = 1;
     cfqd->hw_tag = 1;
-
+    cfqd->last_end_sync_rq = jiffies;
     return cfqd;
 }
 
@@ -2549,6 +2575,7 @@ SHOW_FUNCTION(cfq_slice_idle_show, cfqd->cfq_slice_idle, 1);
 SHOW_FUNCTION(cfq_slice_sync_show, cfqd->cfq_slice[1], 1);
 SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1);
 SHOW_FUNCTION(cfq_slice_async_rq_show, cfqd->cfq_slice_async_rq, 0);
+SHOW_FUNCTION(cfq_low_latency_show, cfqd->cfq_latency, 0);
 #undef SHOW_FUNCTION
 
 #define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV) \
@@ -2580,6 +2607,7 @@ STORE_FUNCTION(cfq_slice_sync_store, &cfqd->cfq_slice[1], 1, UINT_MAX, 1);
 STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1);
 STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1,
         UINT_MAX, 0);
+STORE_FUNCTION(cfq_low_latency_store, &cfqd->cfq_latency, 0, 1, 0);
 #undef STORE_FUNCTION
 
 #define CFQ_ATTR(name) \
@@ -2595,6 +2623,7 @@ static struct elv_fs_entry cfq_attrs[] = {
     CFQ_ATTR(slice_async),
     CFQ_ATTR(slice_async_rq),
     CFQ_ATTR(slice_idle),
+    CFQ_ATTR(low_latency),
     __ATTR_NULL
 };
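A worked example of the async ramp-up arithmetic above, as plain userspace C; HZ=1000 and the 100 ms sync slice mirror common defaults and are assumptions for illustration (in the kernel, a computed depth of 0 still permits one request when nothing is in flight):

#include <stdio.h>

#define HZ 1000                                 /* assume 1 jiffy == 1 ms */
static const unsigned int slice_sync = HZ / 10; /* 100 ms */

int main(void)
{
    unsigned long since_sync[] = { 0, 50, 150, 450 };   /* jiffies */

    for (int i = 0; i < 4; i++) {
        /* depth = time since last sync completion / sync slice */
        unsigned int depth = since_sync[i] / slice_sync;
        printf("%4lu jiffies since sync IO -> async depth %u\n",
               since_sync[i], depth);
    }
    return 0;
}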
block/compat_ioctl.c

@@ -21,6 +21,11 @@ static int compat_put_int(unsigned long arg, int val)
     return put_user(val, (compat_int_t __user *)compat_ptr(arg));
 }
 
+static int compat_put_uint(unsigned long arg, unsigned int val)
+{
+    return put_user(val, (compat_uint_t __user *)compat_ptr(arg));
+}
+
 static int compat_put_long(unsigned long arg, long val)
 {
     return put_user(val, (compat_long_t __user *)compat_ptr(arg));
@@ -734,6 +739,14 @@ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
     switch (cmd) {
     case HDIO_GETGEO:
         return compat_hdio_getgeo(disk, bdev, compat_ptr(arg));
+    case BLKPBSZGET:
+        return compat_put_uint(arg, bdev_physical_block_size(bdev));
+    case BLKIOMIN:
+        return compat_put_uint(arg, bdev_io_min(bdev));
+    case BLKIOOPT:
+        return compat_put_uint(arg, bdev_io_opt(bdev));
+    case BLKALIGNOFF:
+        return compat_put_int(arg, bdev_alignment_offset(bdev));
     case BLKFLSBUF:
     case BLKROSET:
     case BLKDISCARD:
block/genhd.c

@@ -869,7 +869,6 @@ static DEVICE_ATTR(size, S_IRUGO, part_size_show, NULL);
 static DEVICE_ATTR(alignment_offset, S_IRUGO, disk_alignment_offset_show, NULL);
 static DEVICE_ATTR(capability, S_IRUGO, disk_capability_show, NULL);
 static DEVICE_ATTR(stat, S_IRUGO, part_stat_show, NULL);
-static DEVICE_ATTR(inflight, S_IRUGO, part_inflight_show, NULL);
 #ifdef CONFIG_FAIL_MAKE_REQUEST
 static struct device_attribute dev_attr_fail =
     __ATTR(make-it-fail, S_IRUGO|S_IWUSR, part_fail_show, part_fail_store);
@@ -889,7 +888,6 @@ static struct attribute *disk_attrs[] = {
     &dev_attr_alignment_offset.attr,
     &dev_attr_capability.attr,
     &dev_attr_stat.attr,
-    &dev_attr_inflight.attr,
 #ifdef CONFIG_FAIL_MAKE_REQUEST
     &dev_attr_fail.attr,
 #endif
@@ -1055,7 +1053,7 @@ static int diskstats_show(struct seq_file *seqf, void *v)
                part_stat_read(hd, merges[1]),
                (unsigned long long)part_stat_read(hd, sectors[1]),
                jiffies_to_msecs(part_stat_read(hd, ticks[1])),
-               part_in_flight(hd),
+               hd->in_flight,
                jiffies_to_msecs(part_stat_read(hd, io_ticks)),
                jiffies_to_msecs(part_stat_read(hd, time_in_queue))
             );
block/ioctl.c

@@ -138,6 +138,11 @@ static int put_int(unsigned long arg, int val)
     return put_user(val, (int __user *)arg);
 }
 
+static int put_uint(unsigned long arg, unsigned int val)
+{
+    return put_user(val, (unsigned int __user *)arg);
+}
+
 static int put_long(unsigned long arg, long val)
 {
     return put_user(val, (long __user *)arg);
@@ -263,10 +268,18 @@ int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
         return put_long(arg, (bdi->ra_pages * PAGE_CACHE_SIZE) / 512);
     case BLKROGET:
         return put_int(arg, bdev_read_only(bdev) != 0);
-    case BLKBSZGET: /* get the logical block size (cf. BLKSSZGET) */
+    case BLKBSZGET: /* get block device soft block size (cf. BLKSSZGET) */
         return put_int(arg, block_size(bdev));
-    case BLKSSZGET: /* get block device hardware sector size */
+    case BLKSSZGET: /* get block device logical block size */
         return put_int(arg, bdev_logical_block_size(bdev));
+    case BLKPBSZGET: /* get block device physical block size */
+        return put_uint(arg, bdev_physical_block_size(bdev));
+    case BLKIOMIN:
+        return put_uint(arg, bdev_io_min(bdev));
+    case BLKIOOPT:
+        return put_uint(arg, bdev_io_opt(bdev));
+    case BLKALIGNOFF:
+        return put_int(arg, bdev_alignment_offset(bdev));
     case BLKSECTGET:
         return put_ushort(arg, queue_max_sectors(bdev_get_queue(bdev)));
     case BLKRASET:
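A small userspace sketch exercising the new topology ioctls; /dev/sda is a placeholder device node:

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fs.h>

int main(void)
{
    unsigned int pbs, iomin, ioopt;
    int align, fd = open("/dev/sda", O_RDONLY);

    if (fd < 0) {
        perror("open");
        return 1;
    }
    if (!ioctl(fd, BLKPBSZGET, &pbs))
        printf("physical block size: %u\n", pbs);
    if (!ioctl(fd, BLKIOMIN, &iomin))
        printf("minimum I/O size:    %u\n", iomin);
    if (!ioctl(fd, BLKIOOPT, &ioopt))
        printf("optimal I/O size:    %u\n", ioopt);
    if (!ioctl(fd, BLKALIGNOFF, &align))
        printf("alignment offset:    %d\n", align);
    close(fd);
    return 0;
}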
drivers/block/DAC960.c

@@ -38,6 +38,7 @@
 #include <linux/slab.h>
 #include <linux/smp_lock.h>
 #include <linux/proc_fs.h>
+#include <linux/seq_file.h>
 #include <linux/reboot.h>
 #include <linux/spinlock.h>
 #include <linux/timer.h>
@@ -6422,16 +6423,10 @@ static bool DAC960_V2_ExecuteUserCommand(DAC960_Controller_T *Controller,
   return true;
 }
 
-
-/*
-  DAC960_ProcReadStatus implements reading /proc/rd/status.
-*/
-
-static int DAC960_ProcReadStatus(char *Page, char **Start, off_t Offset,
-                                 int Count, int *EOF, void *Data)
+static int dac960_proc_show(struct seq_file *m, void *v)
 {
   unsigned char *StatusMessage = "OK\n";
-  int ControllerNumber, BytesAvailable;
+  int ControllerNumber;
   for (ControllerNumber = 0;
        ControllerNumber < DAC960_ControllerCount;
        ControllerNumber++)
@@ -6444,52 +6439,49 @@ static int DAC960_ProcReadStatus(char *Page, char **Start, off_t Offset,
       break;
     }
   }
-  BytesAvailable = strlen(StatusMessage) - Offset;
-  if (Count >= BytesAvailable)
-    {
-      Count = BytesAvailable;
-      *EOF = true;
-    }
-  if (Count <= 0) return 0;
-  *Start = Page;
-  memcpy(Page, &StatusMessage[Offset], Count);
-  return Count;
+  seq_puts(m, StatusMessage);
+  return 0;
 }
 
-
-/*
-  DAC960_ProcReadInitialStatus implements reading /proc/rd/cN/initial_status.
-*/
-
-static int DAC960_ProcReadInitialStatus(char *Page, char **Start, off_t Offset,
-                                        int Count, int *EOF, void *Data)
+static int dac960_proc_open(struct inode *inode, struct file *file)
 {
-  DAC960_Controller_T *Controller = (DAC960_Controller_T *) Data;
-  int BytesAvailable = Controller->InitialStatusLength - Offset;
-  if (Count >= BytesAvailable)
-    {
-      Count = BytesAvailable;
-      *EOF = true;
-    }
-  if (Count <= 0) return 0;
-  *Start = Page;
-  memcpy(Page, &Controller->CombinedStatusBuffer[Offset], Count);
-  return Count;
+  return single_open(file, dac960_proc_show, NULL);
 }
 
+static const struct file_operations dac960_proc_fops = {
+  .owner   = THIS_MODULE,
+  .open    = dac960_proc_open,
+  .read    = seq_read,
+  .llseek  = seq_lseek,
+  .release = single_release,
+};
 
-/*
-  DAC960_ProcReadCurrentStatus implements reading /proc/rd/cN/current_status.
-*/
-
-static int DAC960_ProcReadCurrentStatus(char *Page, char **Start, off_t Offset,
-                                        int Count, int *EOF, void *Data)
+static int dac960_initial_status_proc_show(struct seq_file *m, void *v)
 {
-  DAC960_Controller_T *Controller = (DAC960_Controller_T *) Data;
+  DAC960_Controller_T *Controller = (DAC960_Controller_T *)m->private;
+  seq_printf(m, "%.*s", Controller->InitialStatusLength, Controller->CombinedStatusBuffer);
+  return 0;
+}
+
+static int dac960_initial_status_proc_open(struct inode *inode, struct file *file)
+{
+  return single_open(file, dac960_initial_status_proc_show, PDE(inode)->data);
+}
+
+static const struct file_operations dac960_initial_status_proc_fops = {
+  .owner   = THIS_MODULE,
+  .open    = dac960_initial_status_proc_open,
+  .read    = seq_read,
+  .llseek  = seq_lseek,
+  .release = single_release,
+};
+
+static int dac960_current_status_proc_show(struct seq_file *m, void *v)
+{
+  DAC960_Controller_T *Controller = (DAC960_Controller_T *) m->private;
   unsigned char *StatusMessage =
     "No Rebuild or Consistency Check in Progress\n";
   int ProgressMessageLength = strlen(StatusMessage);
-  int BytesAvailable;
   if (jiffies != Controller->LastCurrentStatusTime)
     {
       Controller->CurrentStatusLength = 0;
@@ -6513,49 +6505,41 @@ static int DAC960_ProcReadCurrentStatus(char *Page, char **Start, off_t Offset,
     }
       Controller->LastCurrentStatusTime = jiffies;
     }
-  BytesAvailable = Controller->CurrentStatusLength - Offset;
-  if (Count >= BytesAvailable)
-    {
-      Count = BytesAvailable;
-      *EOF = true;
-    }
-  if (Count <= 0) return 0;
-  *Start = Page;
-  memcpy(Page, &Controller->CurrentStatusBuffer[Offset], Count);
-  return Count;
+  seq_printf(m, "%.*s", Controller->CurrentStatusLength, Controller->CurrentStatusBuffer);
+  return 0;
 }
 
-
-/*
-  DAC960_ProcReadUserCommand implements reading /proc/rd/cN/user_command.
-*/
-
-static int DAC960_ProcReadUserCommand(char *Page, char **Start, off_t Offset,
-                                      int Count, int *EOF, void *Data)
+static int dac960_current_status_proc_open(struct inode *inode, struct file *file)
 {
-  DAC960_Controller_T *Controller = (DAC960_Controller_T *) Data;
-  int BytesAvailable = Controller->UserStatusLength - Offset;
-  if (Count >= BytesAvailable)
-    {
-      Count = BytesAvailable;
-      *EOF = true;
-    }
-  if (Count <= 0) return 0;
-  *Start = Page;
-  memcpy(Page, &Controller->UserStatusBuffer[Offset], Count);
-  return Count;
+  return single_open(file, dac960_current_status_proc_show, PDE(inode)->data);
 }
 
+static const struct file_operations dac960_current_status_proc_fops = {
+  .owner   = THIS_MODULE,
+  .open    = dac960_current_status_proc_open,
+  .read    = seq_read,
+  .llseek  = seq_lseek,
+  .release = single_release,
+};
+
 /*
   DAC960_ProcWriteUserCommand implements writing /proc/rd/cN/user_command.
 */
 
-static int DAC960_ProcWriteUserCommand(struct file *file,
+static int dac960_user_command_proc_show(struct seq_file *m, void *v)
+{
+  DAC960_Controller_T *Controller = (DAC960_Controller_T *)m->private;
+
+  seq_printf(m, "%.*s", Controller->UserStatusLength, Controller->UserStatusBuffer);
+  return 0;
+}
+
+static int dac960_user_command_proc_open(struct inode *inode, struct file *file)
+{
+  return single_open(file, dac960_user_command_proc_show, PDE(inode)->data);
+}
+
+static ssize_t dac960_user_command_proc_write(struct file *file,
                                        const char __user *Buffer,
-                                       unsigned long Count, void *Data)
+                                       size_t Count, loff_t *pos)
 {
-  DAC960_Controller_T *Controller = (DAC960_Controller_T *) Data;
+  DAC960_Controller_T *Controller = (DAC960_Controller_T *) PDE(file->f_path.dentry->d_inode)->data;
   unsigned char CommandBuffer[80];
   int Length;
   if (Count > sizeof(CommandBuffer)-1) return -EINVAL;
@@ -6572,6 +6556,14 @@ static ssize_t dac960_user_command_proc_write(struct file *file,
             ? Count : -EBUSY);
 }
 
+static const struct file_operations dac960_user_command_proc_fops = {
+  .owner   = THIS_MODULE,
+  .open    = dac960_user_command_proc_open,
+  .read    = seq_read,
+  .llseek  = seq_lseek,
+  .release = single_release,
+  .write   = dac960_user_command_proc_write,
+};
 
 /*
   DAC960_CreateProcEntries creates the /proc/rd/... entries for the
@@ -6586,23 +6578,17 @@ static void DAC960_CreateProcEntries(DAC960_Controller_T *Controller)
 
   if (DAC960_ProcDirectoryEntry == NULL) {
     DAC960_ProcDirectoryEntry = proc_mkdir("rd", NULL);
-    StatusProcEntry = create_proc_read_entry("status", 0,
+    StatusProcEntry = proc_create("status", 0,
                                        DAC960_ProcDirectoryEntry,
-                                       DAC960_ProcReadStatus, NULL);
+                                       &dac960_proc_fops);
   }
 
  sprintf(Controller->ControllerName, "c%d", Controller->ControllerNumber);
  ControllerProcEntry = proc_mkdir(Controller->ControllerName,
                                   DAC960_ProcDirectoryEntry);
- create_proc_read_entry("initial_status", 0, ControllerProcEntry,
-                        DAC960_ProcReadInitialStatus, Controller);
- create_proc_read_entry("current_status", 0, ControllerProcEntry,
-                        DAC960_ProcReadCurrentStatus, Controller);
- UserCommandProcEntry =
-   create_proc_read_entry("user_command", S_IWUSR | S_IRUSR,
-                          ControllerProcEntry, DAC960_ProcReadUserCommand,
-                          Controller);
- UserCommandProcEntry->write_proc = DAC960_ProcWriteUserCommand;
+ proc_create_data("initial_status", 0, ControllerProcEntry, &dac960_initial_status_proc_fops, Controller);
+ proc_create_data("current_status", 0, ControllerProcEntry, &dac960_current_status_proc_fops, Controller);
+ UserCommandProcEntry = proc_create_data("user_command", S_IWUSR | S_IRUSR, ControllerProcEntry, &dac960_user_command_proc_fops, Controller);
 Controller->ControllerProcEntry = ControllerProcEntry;
 }
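The conversion pattern used throughout this file, reduced to a self-contained module sketch; the entry name "demo" and its one-line output are invented for illustration:

#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

static int demo_proc_show(struct seq_file *m, void *v)
{
    seq_puts(m, "OK\n");    /* seq_file handles offset/EOF bookkeeping */
    return 0;
}

static int demo_proc_open(struct inode *inode, struct file *file)
{
    return single_open(file, demo_proc_show, NULL);
}

static const struct file_operations demo_proc_fops = {
    .owner   = THIS_MODULE,
    .open    = demo_proc_open,
    .read    = seq_read,
    .llseek  = seq_lseek,
    .release = single_release,
};

static int __init demo_init(void)
{
    proc_create("demo", 0, NULL, &demo_proc_fops);
    return 0;
}

static void __exit demo_exit(void)
{
    remove_proc_entry("demo", NULL);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");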
drivers/block/cciss.c: diff suppressed because it is too large.
drivers/block/cciss.h

@@ -2,6 +2,7 @@
 #define CCISS_H
 
 #include <linux/genhd.h>
+#include <linux/mutex.h>
 
 #include "cciss_cmd.h"
 
@@ -29,7 +30,7 @@ struct access_method {
 };
 typedef struct _drive_info_struct
 {
-    __u32 LunID;
+    unsigned char LunID[8];
     int usage_count;
     struct request_queue *queue;
     sector_t nr_blocks;
@@ -51,6 +52,7 @@ typedef struct _drive_info_struct
     char vendor[VENDOR_LEN + 1];  /* SCSI vendor string */
     char model[MODEL_LEN + 1];    /* SCSI model string */
     char rev[REV_LEN + 1];        /* SCSI revision string */
+    char device_initialized;      /* indicates whether dev is initialized */
 } drive_info_struct;
 
 struct ctlr_info
@@ -86,7 +88,7 @@ struct ctlr_info
     BYTE cciss_read_capacity;
 
     // information about each logical volume
-    drive_info_struct drv[CISS_MAX_LUN];
+    drive_info_struct *drv[CISS_MAX_LUN];
 
     struct access_method access;
 
@@ -108,6 +110,8 @@ struct ctlr_info
     int nr_frees;
     int busy_configuring;
    int busy_initializing;
+    int busy_scanning;
+    struct mutex busy_shutting_down;
 
     /* This element holds the zero based queue number of the last
      * queue to be started. It is used for fairness.
@@ -122,8 +126,8 @@ struct ctlr_info
     /* and saved for later processing */
 #endif
     unsigned char alive;
-    struct completion *rescan_wait;
-    struct task_struct *cciss_scan_thread;
+    struct list_head scan_list;
+    struct completion scan_wait;
     struct device dev;
 };
drivers/block/cpqarray.c

@@ -32,6 +32,7 @@
 #include <linux/blkpg.h>
 #include <linux/timer.h>
 #include <linux/proc_fs.h>
+#include <linux/seq_file.h>
 #include <linux/init.h>
 #include <linux/hdreg.h>
 #include <linux/spinlock.h>
@@ -177,7 +178,6 @@ static int cpqarray_register_ctlr(int ctlr, struct pci_dev *pdev);
 
 #ifdef CONFIG_PROC_FS
 static void ida_procinit(int i);
-static int ida_proc_get_info(char *buffer, char **start, off_t offset, int length, int *eof, void *data);
 #else
 static void ida_procinit(int i) {}
 #endif
@@ -206,6 +206,7 @@ static const struct block_device_operations ida_fops = {
 #ifdef CONFIG_PROC_FS
 
 static struct proc_dir_entry *proc_array;
+static const struct file_operations ida_proc_fops;
 
 /*
  * Get us a file in /proc/array that says something about each controller.
@@ -218,19 +219,16 @@ static void __init ida_procinit(int i)
         if (!proc_array) return;
     }
 
-    create_proc_read_entry(hba[i]->devname, 0, proc_array,
-                           ida_proc_get_info, hba[i]);
+    proc_create_data(hba[i]->devname, 0, proc_array, &ida_proc_fops, hba[i]);
 }
 
 /*
  * Report information about this controller.
 */
-static int ida_proc_get_info(char *buffer, char **start, off_t offset, int length, int *eof, void *data)
+static int ida_proc_show(struct seq_file *m, void *v)
 {
-    off_t pos = 0;
-    off_t len = 0;
-    int size, i, ctlr;
-    ctlr_info_t *h = (ctlr_info_t*)data;
+    int i, ctlr;
+    ctlr_info_t *h = (ctlr_info_t*)m->private;
     drv_info_t *drv;
 #ifdef CPQ_PROC_PRINT_QUEUES
     cmdlist_t *c;
@@ -238,7 +236,7 @@ static int ida_proc_get_info(char *buffer, char **start, off_t offset, int length, int *eof, void *data)
 #endif
 
     ctlr = h->ctlr;
-    size = sprintf(buffer, "%s: Compaq %s Controller\n"
+    seq_printf(m, "%s: Compaq %s Controller\n"
         "       Board ID: 0x%08lx\n"
         "       Firmware Revision: %c%c%c%c\n"
         "       Controller Sig: 0x%08lx\n"
@@ -258,55 +256,54 @@ static int ida_proc_get_info(char *buffer, char **start, off_t offset, int length, int *eof, void *data)
         h->log_drives, h->phys_drives,
         h->Qdepth, h->maxQsinceinit);
 
-    pos += size; len += size;
-
-    size = sprintf(buffer+len, "Logical Drive Info:\n");
-    pos += size; len += size;
+    seq_puts(m, "Logical Drive Info:\n");
 
     for(i=0; i<h->log_drives; i++) {
         drv = &h->drv[i];
-        size = sprintf(buffer+len, "ida/c%dd%d: blksz=%d nr_blks=%d\n",
+        seq_printf(m, "ida/c%dd%d: blksz=%d nr_blks=%d\n",
                 ctlr, i, drv->blk_size, drv->nr_blks);
-        pos += size; len += size;
     }
 
 #ifdef CPQ_PROC_PRINT_QUEUES
     spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
-    size = sprintf(buffer+len, "\nCurrent Queues:\n");
-    pos += size; len += size;
+    seq_puts(m, "\nCurrent Queues:\n");
 
     c = h->reqQ;
-    size = sprintf(buffer+len, "reqQ = %p", c); pos += size; len += size;
+    seq_printf(m, "reqQ = %p", c);
     if (c) c=c->next;
     while(c && c != h->reqQ) {
-        size = sprintf(buffer+len, "->%p", c);
-        pos += size; len += size;
+        seq_printf(m, "->%p", c);
         c=c->next;
     }
 
     c = h->cmpQ;
-    size = sprintf(buffer+len, "\ncmpQ = %p", c); pos += size; len += size;
+    seq_printf(m, "\ncmpQ = %p", c);
     if (c) c=c->next;
     while(c && c != h->cmpQ) {
-        size = sprintf(buffer+len, "->%p", c);
-        pos += size; len += size;
+        seq_printf(m, "->%p", c);
         c=c->next;
     }
 
-    size = sprintf(buffer+len, "\n"); pos += size; len += size;
+    seq_putc(m, '\n');
     spin_unlock_irqrestore(IDA_LOCK(h->ctlr), flags);
 #endif
-    size = sprintf(buffer+len, "nr_allocs = %d\nnr_frees = %d\n",
+    seq_printf(m, "nr_allocs = %d\nnr_frees = %d\n",
             h->nr_allocs, h->nr_frees);
-    pos += size; len += size;
-
-    *eof = 1;
-    *start = buffer+offset;
-    len -= offset;
-    if (len>length)
-        len = length;
-    return len;
+    return 0;
+}
+
+static int ida_proc_open(struct inode *inode, struct file *file)
+{
+    return single_open(file, ida_proc_show, PDE(inode)->data);
+}
+
+static const struct file_operations ida_proc_fops = {
+    .owner   = THIS_MODULE,
+    .open    = ida_proc_open,
+    .read    = seq_read,
+    .llseek  = seq_lseek,
+    .release = single_release,
+};
 #endif /* CONFIG_PROC_FS */
 
 module_param_array(eisa, int, NULL, 0);
drivers/md/dm.c

@@ -130,7 +130,7 @@ struct mapped_device {
     /*
      * A list of ios that arrived while we were suspended.
      */
-    atomic_t pending[2];
+    atomic_t pending;
     wait_queue_head_t wait;
     struct work_struct work;
     struct bio_list deferred;
@@ -453,14 +453,13 @@ static void start_io_acct(struct dm_io *io)
 {
     struct mapped_device *md = io->md;
     int cpu;
-    int rw = bio_data_dir(io->bio);
 
     io->start_time = jiffies;
 
     cpu = part_stat_lock();
     part_round_stats(cpu, &dm_disk(md)->part0);
     part_stat_unlock();
-    dm_disk(md)->part0.in_flight[rw] = atomic_inc_return(&md->pending[rw]);
+    dm_disk(md)->part0.in_flight = atomic_inc_return(&md->pending);
 }
 
 static void end_io_acct(struct dm_io *io)
@@ -480,9 +479,8 @@ static void end_io_acct(struct dm_io *io)
      * After this is decremented the bio must not be touched if it is
      * a barrier.
      */
-    dm_disk(md)->part0.in_flight[rw] = pending =
-        atomic_dec_return(&md->pending[rw]);
-    pending += atomic_read(&md->pending[rw^0x1]);
+    dm_disk(md)->part0.in_flight = pending =
+        atomic_dec_return(&md->pending);
 
     /* nudge anyone waiting on suspend queue */
     if (!pending)
@@ -1787,8 +1785,7 @@ static struct mapped_device *alloc_dev(int minor)
     if (!md->disk)
         goto bad_disk;
 
-    atomic_set(&md->pending[0], 0);
-    atomic_set(&md->pending[1], 0);
+    atomic_set(&md->pending, 0);
     init_waitqueue_head(&md->wait);
     INIT_WORK(&md->work, dm_wq_work);
     init_waitqueue_head(&md->eventq);
@@ -2091,8 +2088,7 @@ static int dm_wait_for_completion(struct mapped_device *md, int interruptible)
                 break;
             }
             spin_unlock_irqrestore(q->queue_lock, flags);
-        } else if (!atomic_read(&md->pending[0]) &&
-                   !atomic_read(&md->pending[1]))
+        } else if (!atomic_read(&md->pending))
             break;
 
         if (interruptible == TASK_INTERRUPTIBLE &&
drivers/mtd/mtd_blkdevs.c

@@ -32,14 +32,6 @@ struct mtd_blkcore_priv {
     spinlock_t queue_lock;
 };
 
-static int blktrans_discard_request(struct request_queue *q,
-                                    struct request *req)
-{
-    req->cmd_type = REQ_TYPE_LINUX_BLOCK;
-    req->cmd[0] = REQ_LB_OP_DISCARD;
-    return 0;
-}
-
 static int do_blktrans_request(struct mtd_blktrans_ops *tr,
                                struct mtd_blktrans_dev *dev,
                                struct request *req)
@@ -52,10 +44,6 @@ static int do_blktrans_request(struct mtd_blktrans_ops *tr,
 
     buf = req->buffer;
 
-    if (req->cmd_type == REQ_TYPE_LINUX_BLOCK &&
-        req->cmd[0] == REQ_LB_OP_DISCARD)
-        return tr->discard(dev, block, nsect);
-
     if (!blk_fs_request(req))
         return -EIO;
 
@@ -63,6 +51,9 @@ static int do_blktrans_request(struct mtd_blktrans_ops *tr,
         get_capacity(req->rq_disk))
         return -EIO;
 
+    if (blk_discard_rq(req))
+        return tr->discard(dev, block, nsect);
+
     switch(rq_data_dir(req)) {
     case READ:
         for (; nsect > 0; nsect--, block++, buf += tr->blksize)
@@ -380,8 +371,8 @@ int register_mtd_blktrans(struct mtd_blktrans_ops *tr)
     tr->blkcore_priv->rq->queuedata = tr;
     blk_queue_logical_block_size(tr->blkcore_priv->rq, tr->blksize);
     if (tr->discard)
-        blk_queue_set_discard(tr->blkcore_priv->rq,
-                              blktrans_discard_request);
+        queue_flag_set_unlocked(QUEUE_FLAG_DISCARD,
+                                tr->blkcore_priv->rq);
 
     tr->blkshift = ffs(tr->blksize) - 1;
drivers/staging/dst/dcore.c

@@ -102,7 +102,7 @@ static int dst_request(struct request_queue *q, struct bio *bio)
     struct dst_node *n = q->queuedata;
     int err = -EIO;
 
-    if (bio_empty_barrier(bio) && !q->prepare_discard_fn) {
+    if (bio_empty_barrier(bio) && !blk_queue_discard(q)) {
         /*
          * This is a dirty^Wnice hack, but if we complete this
          * operation with -EOPNOTSUPP like intended, XFS
fs/bio.c (49 changed lines)

@@ -249,6 +249,7 @@ void bio_free(struct bio *bio, struct bio_set *bs)
 
     mempool_free(p, bs->bio_pool);
 }
+EXPORT_SYMBOL(bio_free);
 
 void bio_init(struct bio *bio)
 {
@@ -257,6 +258,7 @@ void bio_init(struct bio *bio)
     bio->bi_comp_cpu = -1;
     atomic_set(&bio->bi_cnt, 1);
 }
+EXPORT_SYMBOL(bio_init);
 
 /**
  * bio_alloc_bioset - allocate a bio for I/O
@@ -311,6 +313,7 @@ err_free:
     mempool_free(p, bs->bio_pool);
     return NULL;
 }
+EXPORT_SYMBOL(bio_alloc_bioset);
 
 static void bio_fs_destructor(struct bio *bio)
 {
@@ -337,6 +340,7 @@ struct bio *bio_alloc(gfp_t gfp_mask, int nr_iovecs)
 
     return bio;
 }
+EXPORT_SYMBOL(bio_alloc);
 
 static void bio_kmalloc_destructor(struct bio *bio)
 {
@@ -380,6 +384,7 @@ struct bio *bio_kmalloc(gfp_t gfp_mask, int nr_iovecs)
 
     return bio;
 }
+EXPORT_SYMBOL(bio_kmalloc);
 
 void zero_fill_bio(struct bio *bio)
 {
@@ -416,6 +421,7 @@ void bio_put(struct bio *bio)
         bio->bi_destructor(bio);
     }
 }
+EXPORT_SYMBOL(bio_put);
 
 inline int bio_phys_segments(struct request_queue *q, struct bio *bio)
 {
@@ -424,6 +430,7 @@ inline int bio_phys_segments(struct request_queue *q, struct bio *bio)
 
     return bio->bi_phys_segments;
 }
+EXPORT_SYMBOL(bio_phys_segments);
 
 /**
  * __bio_clone - clone a bio
@@ -451,6 +458,7 @@ void __bio_clone(struct bio *bio, struct bio *bio_src)
     bio->bi_size = bio_src->bi_size;
     bio->bi_idx = bio_src->bi_idx;
 }
+EXPORT_SYMBOL(__bio_clone);
 
 /**
  * bio_clone - clone a bio
@@ -482,6 +490,7 @@ struct bio *bio_clone(struct bio *bio, gfp_t gfp_mask)
 
     return b;
 }
+EXPORT_SYMBOL(bio_clone);
 
 /**
  * bio_get_nr_vecs - return approx number of vecs
@@ -505,6 +514,7 @@ int bio_get_nr_vecs(struct block_device *bdev)
 
     return nr_pages;
 }
+EXPORT_SYMBOL(bio_get_nr_vecs);
 
 static int __bio_add_page(struct request_queue *q, struct bio *bio, struct page
               *page, unsigned int len, unsigned int offset,
@@ -635,6 +645,7 @@ int bio_add_pc_page(struct request_queue *q, struct bio *bio, struct page *page,
     return __bio_add_page(q, bio, page, len, offset,
                   queue_max_hw_sectors(q));
 }
+EXPORT_SYMBOL(bio_add_pc_page);
 
 /**
  * bio_add_page - attempt to add page to bio
@@ -655,6 +666,7 @@ int bio_add_page(struct bio *bio, struct page *page, unsigned int len,
     struct request_queue *q = bdev_get_queue(bio->bi_bdev);
     return __bio_add_page(q, bio, page, len, offset, queue_max_sectors(q));
 }
+EXPORT_SYMBOL(bio_add_page);
 
 struct bio_map_data {
     struct bio_vec *iovecs;
@@ -776,6 +788,7 @@ int bio_uncopy_user(struct bio *bio)
     bio_put(bio);
     return ret;
 }
+EXPORT_SYMBOL(bio_uncopy_user);
 
 /**
  * bio_copy_user_iov - copy user data to bio
@@ -920,6 +933,7 @@ struct bio *bio_copy_user(struct request_queue *q, struct rq_map_data *map_data,
 
     return bio_copy_user_iov(q, map_data, &iov, 1, write_to_vm, gfp_mask);
 }
+EXPORT_SYMBOL(bio_copy_user);
 
 static struct bio *__bio_map_user_iov(struct request_queue *q,
                       struct block_device *bdev,
@@ -1050,6 +1064,7 @@ struct bio *bio_map_user(struct request_queue *q, struct block_device *bdev,
 
     return bio_map_user_iov(q, bdev, &iov, 1, write_to_vm, gfp_mask);
 }
+EXPORT_SYMBOL(bio_map_user);
 
 /**
  * bio_map_user_iov - map user sg_iovec table into bio
@@ -1117,13 +1132,13 @@ void bio_unmap_user(struct bio *bio)
     __bio_unmap_user(bio);
     bio_put(bio);
 }
+EXPORT_SYMBOL(bio_unmap_user);
 
 static void bio_map_kern_endio(struct bio *bio, int err)
 {
     bio_put(bio);
 }
 
-
 static struct bio *__bio_map_kern(struct request_queue *q, void *data,
                   unsigned int len, gfp_t gfp_mask)
 {
@@ -1189,6 +1204,7 @@ struct bio *bio_map_kern(struct request_queue *q, void *data, unsigned int len,
     bio_put(bio);
     return ERR_PTR(-EINVAL);
 }
+EXPORT_SYMBOL(bio_map_kern);
 
 static void bio_copy_kern_endio(struct bio *bio, int err)
 {
@@ -1250,6 +1266,7 @@ struct bio *bio_copy_kern(struct request_queue *q, void *data, unsigned int len,
 
     return bio;
 }
+EXPORT_SYMBOL(bio_copy_kern);
 
 /*
  * bio_set_pages_dirty() and bio_check_pages_dirty() are support functions
@@ -1400,6 +1417,7 @@ void bio_endio(struct bio *bio, int error)
     if (bio->bi_end_io)
         bio->bi_end_io(bio, error);
 }
+EXPORT_SYMBOL(bio_endio);
 
 void bio_pair_release(struct bio_pair *bp)
 {
@@ -1410,6 +1428,7 @@ void bio_pair_release(struct bio_pair *bp)
         mempool_free(bp, bp->bio2.bi_private);
     }
 }
+EXPORT_SYMBOL(bio_pair_release);
 
 static void bio_pair_end_1(struct bio *bi, int err)
 {
@@ -1477,6 +1496,7 @@ struct bio_pair *bio_split(struct bio *bi, int first_sectors)
 
     return bp;
 }
+EXPORT_SYMBOL(bio_split);
 
 /**
  * bio_sector_offset - Find hardware sector offset in bio
@@ -1547,6 +1567,7 @@ void bioset_free(struct bio_set *bs)
 
     kfree(bs);
 }
+EXPORT_SYMBOL(bioset_free);
 
 /**
  * bioset_create  - Create a bio_set
@@ -1592,6 +1613,7 @@ bad:
     bioset_free(bs);
     return NULL;
 }
+EXPORT_SYMBOL(bioset_create);
 
 static void __init biovec_init_slabs(void)
 {
@@ -1636,29 +1658,4 @@ static int __init init_bio(void)
 
     return 0;
 }
-
 subsys_initcall(init_bio);
-
-EXPORT_SYMBOL(bio_alloc);
-EXPORT_SYMBOL(bio_kmalloc);
-EXPORT_SYMBOL(bio_put);
-EXPORT_SYMBOL(bio_free);
-EXPORT_SYMBOL(bio_endio);
-EXPORT_SYMBOL(bio_init);
-EXPORT_SYMBOL(__bio_clone);
-EXPORT_SYMBOL(bio_clone);
-EXPORT_SYMBOL(bio_phys_segments);
-EXPORT_SYMBOL(bio_add_page);
-EXPORT_SYMBOL(bio_add_pc_page);
-EXPORT_SYMBOL(bio_get_nr_vecs);
-EXPORT_SYMBOL(bio_map_user);
-EXPORT_SYMBOL(bio_unmap_user);
-EXPORT_SYMBOL(bio_map_kern);
-EXPORT_SYMBOL(bio_copy_kern);
-EXPORT_SYMBOL(bio_pair_release);
-EXPORT_SYMBOL(bio_split);
-EXPORT_SYMBOL(bio_copy_user);
-EXPORT_SYMBOL(bio_uncopy_user);
-EXPORT_SYMBOL(bioset_create);
-EXPORT_SYMBOL(bioset_free);
-EXPORT_SYMBOL(bio_alloc_bioset);
fs/partitions/check.c

@@ -248,19 +248,11 @@ ssize_t part_stat_show(struct device *dev,
         part_stat_read(p, merges[WRITE]),
         (unsigned long long)part_stat_read(p, sectors[WRITE]),
         jiffies_to_msecs(part_stat_read(p, ticks[WRITE])),
-        part_in_flight(p),
+        p->in_flight,
         jiffies_to_msecs(part_stat_read(p, io_ticks)),
         jiffies_to_msecs(part_stat_read(p, time_in_queue)));
 }
 
-ssize_t part_inflight_show(struct device *dev,
-            struct device_attribute *attr, char *buf)
-{
-    struct hd_struct *p = dev_to_part(dev);
-
-    return sprintf(buf, "%8u %8u\n", p->in_flight[0], p->in_flight[1]);
-}
-
 #ifdef CONFIG_FAIL_MAKE_REQUEST
 ssize_t part_fail_show(struct device *dev,
                struct device_attribute *attr, char *buf)
@@ -289,7 +281,6 @@ static DEVICE_ATTR(start, S_IRUGO, part_start_show, NULL);
 static DEVICE_ATTR(size, S_IRUGO, part_size_show, NULL);
 static DEVICE_ATTR(alignment_offset, S_IRUGO, part_alignment_offset_show, NULL);
 static DEVICE_ATTR(stat, S_IRUGO, part_stat_show, NULL);
-static DEVICE_ATTR(inflight, S_IRUGO, part_inflight_show, NULL);
 #ifdef CONFIG_FAIL_MAKE_REQUEST
 static struct device_attribute dev_attr_fail =
     __ATTR(make-it-fail, S_IRUGO|S_IWUSR, part_fail_show, part_fail_store);
@@ -301,7 +292,6 @@ static struct attribute *part_attrs[] = {
     &dev_attr_size.attr,
     &dev_attr_alignment_offset.attr,
     &dev_attr_stat.attr,
-    &dev_attr_inflight.attr,
 #ifdef CONFIG_FAIL_MAKE_REQUEST
     &dev_attr_fail.attr,
 #endif
@ -82,7 +82,6 @@ enum rq_cmd_type_bits {
|
|||
enum {
|
||||
REQ_LB_OP_EJECT = 0x40, /* eject request */
|
||||
REQ_LB_OP_FLUSH = 0x41, /* flush request */
|
||||
REQ_LB_OP_DISCARD = 0x42, /* discard sectors */
|
||||
};
|
||||
|
||||
/*
|
||||
|
@ -261,7 +260,6 @@ typedef void (request_fn_proc) (struct request_queue *q);
|
|||
typedef int (make_request_fn) (struct request_queue *q, struct bio *bio);
|
||||
typedef int (prep_rq_fn) (struct request_queue *, struct request *);
|
||||
typedef void (unplug_fn) (struct request_queue *);
|
||||
typedef int (prepare_discard_fn) (struct request_queue *, struct request *);
|
||||
|
||||
struct bio_vec;
|
||||
struct bvec_merge_data {
|
||||
|
@ -313,6 +311,7 @@ struct queue_limits {
|
|||
unsigned int alignment_offset;
|
||||
unsigned int io_min;
|
||||
unsigned int io_opt;
|
||||
unsigned int max_discard_sectors;
|
||||
|
||||
unsigned short logical_block_size;
|
||||
unsigned short max_hw_segments;
|
||||
|
@ -340,7 +339,6 @@ struct request_queue
|
|||
make_request_fn *make_request_fn;
|
||||
prep_rq_fn *prep_rq_fn;
|
||||
unplug_fn *unplug_fn;
|
||||
prepare_discard_fn *prepare_discard_fn;
|
||||
merge_bvec_fn *merge_bvec_fn;
|
||||
prepare_flush_fn *prepare_flush_fn;
|
||||
softirq_done_fn *softirq_done_fn;
|
||||
|
@ -460,6 +458,7 @@ struct request_queue
|
|||
#define QUEUE_FLAG_VIRT QUEUE_FLAG_NONROT /* paravirt device */
|
||||
#define QUEUE_FLAG_IO_STAT 15 /* do IO stats */
|
||||
#define QUEUE_FLAG_CQ 16 /* hardware does queuing */
|
||||
#define QUEUE_FLAG_DISCARD 17 /* supports DISCARD */
|
||||
|
||||
#define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \
|
||||
(1 << QUEUE_FLAG_CLUSTER) | \
|
||||
|
@ -591,6 +590,7 @@ enum {
|
|||
#define blk_queue_flushing(q) ((q)->ordseq)
|
||||
#define blk_queue_stackable(q) \
|
||||
test_bit(QUEUE_FLAG_STACKABLE, &(q)->queue_flags)
|
||||
#define blk_queue_discard(q) test_bit(QUEUE_FLAG_DISCARD, &(q)->queue_flags)
|
||||
|
||||
#define blk_fs_request(rq) ((rq)->cmd_type == REQ_TYPE_FS)
|
||||
#define blk_pc_request(rq) ((rq)->cmd_type == REQ_TYPE_BLOCK_PC)
|
||||
|
@ -929,6 +929,8 @@ extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int);
|
|||
extern void blk_queue_max_phys_segments(struct request_queue *, unsigned short);
|
||||
extern void blk_queue_max_hw_segments(struct request_queue *, unsigned short);
|
||||
extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
|
||||
extern void blk_queue_max_discard_sectors(struct request_queue *q,
|
||||
unsigned int max_discard_sectors);
|
||||
extern void blk_queue_logical_block_size(struct request_queue *, unsigned short);
|
||||
extern void blk_queue_physical_block_size(struct request_queue *, unsigned short);
|
||||
extern void blk_queue_alignment_offset(struct request_queue *q,
|
||||
|
@ -955,7 +957,6 @@ extern void blk_queue_merge_bvec(struct request_queue *, merge_bvec_fn *);
|
|||
extern void blk_queue_dma_alignment(struct request_queue *, int);
|
||||
extern void blk_queue_update_dma_alignment(struct request_queue *, int);
|
||||
extern void blk_queue_softirq_done(struct request_queue *, softirq_done_fn *);
|
||||
extern void blk_queue_set_discard(struct request_queue *, prepare_discard_fn *);
|
||||
extern void blk_queue_rq_timed_out(struct request_queue *, rq_timed_out_fn *);
|
||||
extern void blk_queue_rq_timeout(struct request_queue *, unsigned int);
|
||||
extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);
|
||||
|
@ -1080,25 +1081,37 @@ static inline unsigned int queue_physical_block_size(struct request_queue *q)
|
|||
return q->limits.physical_block_size;
|
||||
}
|
||||
|
||||
static inline int bdev_physical_block_size(struct block_device *bdev)
|
||||
{
|
||||
return queue_physical_block_size(bdev_get_queue(bdev));
|
||||
}
|
||||
|
||||
static inline unsigned int queue_io_min(struct request_queue *q)
|
||||
{
|
||||
return q->limits.io_min;
|
||||
}
|
||||
|
||||
static inline int bdev_io_min(struct block_device *bdev)
|
||||
{
|
||||
return queue_io_min(bdev_get_queue(bdev));
|
||||
}
|
||||
|
||||
static inline unsigned int queue_io_opt(struct request_queue *q)
|
||||
{
|
||||
return q->limits.io_opt;
|
||||
}
|
||||
|
||||
static inline int bdev_io_opt(struct block_device *bdev)
|
||||
{
|
||||
return queue_io_opt(bdev_get_queue(bdev));
|
||||
}
|
||||
|
||||
static inline int queue_alignment_offset(struct request_queue *q)
|
||||
{
|
||||
if (q && q->limits.misaligned)
|
||||
if (q->limits.misaligned)
|
||||
return -1;
|
||||
|
||||
if (q && q->limits.alignment_offset)
|
||||
return q->limits.alignment_offset;
|
||||
|
||||
return 0;
|
||||
return q->limits.alignment_offset;
|
||||
}
|
||||
|
||||
static inline int queue_sector_alignment_offset(struct request_queue *q,
|
||||
|
@ -1108,6 +1121,19 @@ static inline int queue_sector_alignment_offset(struct request_queue *q,
|
|||
& (q->limits.io_min - 1);
|
||||
}
|
||||
|
||||
static inline int bdev_alignment_offset(struct block_device *bdev)
|
||||
{
|
||||
struct request_queue *q = bdev_get_queue(bdev);
|
||||
|
||||
if (q->limits.misaligned)
|
||||
return -1;
|
||||
|
||||
if (bdev != bdev->bd_contains)
|
||||
return bdev->bd_part->alignment_offset;
|
||||
|
||||
return q->limits.alignment_offset;
|
||||
}
|
||||
|
||||
static inline int queue_dma_alignment(struct request_queue *q)
|
||||
{
|
||||
return q ? q->dma_alignment : 511;
|
||||
|
@ -1146,7 +1172,11 @@ static inline void put_dev_sector(Sector p)
|
|||
}
|
||||
|
||||
struct work_struct;
|
||||
struct delayed_work;
|
||||
int kblockd_schedule_work(struct request_queue *q, struct work_struct *work);
|
||||
int kblockd_schedule_delayed_work(struct request_queue *q,
|
||||
struct delayed_work *work,
|
||||
unsigned long delay);
|
||||
|
||||
#define MODULE_ALIAS_BLOCKDEV(major,minor) \
|
||||
MODULE_ALIAS("block-major-" __stringify(major) "-" __stringify(minor))
|
||||
|
|
|
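The new bdev_io_min()/bdev_io_opt()/bdev_physical_block_size() helpers are what feed the topology attributes exported under /sys/block/<disk>/queue/. A minimal user-space sketch of reading them; the disk name "sda" and the bare-bones error handling are illustrative assumptions, not part of this change:

/* read_topology.c - print a disk's I/O topology from sysfs */
#include <stdio.h>

static long read_attr(const char *disk, const char *attr)
{
	char path[256];
	long val = -1;
	FILE *f;

	/* Attribute files under /sys/block/<disk>/queue/ hold one integer. */
	snprintf(path, sizeof(path), "/sys/block/%s/queue/%s", disk, attr);
	f = fopen(path, "r");
	if (f) {
		if (fscanf(f, "%ld", &val) != 1)
			val = -1;
		fclose(f);
	}
	return val;
}

int main(void)
{
	printf("minimum_io_size:     %ld\n", read_attr("sda", "minimum_io_size"));
	printf("optimal_io_size:     %ld\n", read_attr("sda", "optimal_io_size"));
	printf("physical_block_size: %ld\n",
	       read_attr("sda", "physical_block_size"));
	return 0;
}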
diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
--- a/include/linux/blktrace_api.h
+++ b/include/linux/blktrace_api.h
@@ -198,6 +198,7 @@ extern int blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
 			   char __user *arg);
 extern int blk_trace_startstop(struct request_queue *q, int start);
 extern int blk_trace_remove(struct request_queue *q);
+extern void blk_trace_remove_sysfs(struct device *dev);
 extern int blk_trace_init_sysfs(struct device *dev);
 
 extern struct attribute_group blk_trace_attr_group;
@@ -211,6 +212,7 @@ extern struct attribute_group blk_trace_attr_group;
 # define blk_trace_startstop(q, start)		(-ENOTTY)
 # define blk_trace_remove(q)			(-ENOTTY)
 # define blk_add_trace_msg(q, fmt, ...)		do { } while (0)
+# define blk_trace_remove_sysfs(dev)		do { } while (0)
 static inline int blk_trace_init_sysfs(struct device *dev)
 {
 	return 0;
diff --git a/include/linux/fs.h b/include/linux/fs.h
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -300,6 +300,10 @@ struct inodes_stat_t {
 #define BLKTRACESTOP _IO(0x12,117)
 #define BLKTRACETEARDOWN _IO(0x12,118)
 #define BLKDISCARD _IO(0x12,119)
+#define BLKIOMIN _IO(0x12,120)
+#define BLKIOOPT _IO(0x12,121)
+#define BLKALIGNOFF _IO(0x12,122)
+#define BLKPBSZGET _IO(0x12,123)
 
 #define BMAP_IOCTL 1		/* obsolete - kept for compatibility */
 #define FIBMAP	   _IO(0x00,1)	/* bmap access */
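These four ioctls ("block: Topology ioctls" in the merge) expose the same queue-limit fields to user space: BLKIOMIN, BLKIOOPT and BLKPBSZGET return an unsigned int, BLKALIGNOFF an int (-1 means misaligned). A small sketch of querying them, assuming a <linux/fs.h> new enough to define the constants:

/* blk_topo.c - query block device topology via ioctl */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fs.h>

int main(int argc, char **argv)
{
	unsigned int io_min, io_opt, pbsz;
	int align, fd;

	if (argc < 2) {
		fprintf(stderr, "usage: %s /dev/DISK\n", argv[0]);
		return 1;
	}
	fd = open(argv[1], O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (ioctl(fd, BLKIOMIN, &io_min) == 0)
		printf("minimum I/O size:    %u\n", io_min);
	if (ioctl(fd, BLKIOOPT, &io_opt) == 0)
		printf("optimal I/O size:    %u\n", io_opt);
	if (ioctl(fd, BLKALIGNOFF, &align) == 0)
		printf("alignment offset:    %d\n", align);
	if (ioctl(fd, BLKPBSZGET, &pbsz) == 0)
		printf("physical block size: %u\n", pbsz);
	close(fd);
	return 0;
}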
diff --git a/include/linux/genhd.h b/include/linux/genhd.h
--- a/include/linux/genhd.h
+++ b/include/linux/genhd.h
@@ -98,7 +98,7 @@ struct hd_struct {
 	int make_it_fail;
 #endif
 	unsigned long stamp;
-	int in_flight[2];
+	int in_flight;
 #ifdef	CONFIG_SMP
 	struct disk_stats *dkstats;
 #else
@@ -322,23 +322,18 @@ static inline void free_part_stats(struct hd_struct *part)
 #define part_stat_sub(cpu, gendiskp, field, subnd)			\
 	part_stat_add(cpu, gendiskp, field, -subnd)
 
-static inline void part_inc_in_flight(struct hd_struct *part, int rw)
+static inline void part_inc_in_flight(struct hd_struct *part)
 {
-	part->in_flight[rw]++;
+	part->in_flight++;
 	if (part->partno)
-		part_to_disk(part)->part0.in_flight[rw]++;
+		part_to_disk(part)->part0.in_flight++;
 }
 
-static inline void part_dec_in_flight(struct hd_struct *part, int rw)
+static inline void part_dec_in_flight(struct hd_struct *part)
 {
-	part->in_flight[rw]--;
+	part->in_flight--;
 	if (part->partno)
-		part_to_disk(part)->part0.in_flight[rw]--;
-}
-
-static inline int part_in_flight(struct hd_struct *part)
-{
-	return part->in_flight[0] + part->in_flight[1];
+		part_to_disk(part)->part0.in_flight--;
 }
 
 /* block/blk-core.c */
@@ -551,8 +546,6 @@ extern ssize_t part_size_show(struct device *dev,
 			      struct device_attribute *attr, char *buf);
 extern ssize_t part_stat_show(struct device *dev,
 			      struct device_attribute *attr, char *buf);
-extern ssize_t part_inflight_show(struct device *dev,
-			struct device_attribute *attr, char *buf);
 #ifdef CONFIG_FAIL_MAKE_REQUEST
 extern ssize_t part_fail_show(struct device *dev,
 			      struct device_attribute *attr, char *buf);
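This is the revert of "Separate read and write statistics of in_flight requests": the per-direction in_flight[2] counters collapse back into one combined counter. The accounting rule itself is unchanged: a partition's counter is mirrored into part0 of the owning disk. A user-space model of that rule; the *_model names are illustrative stand-ins, not the kernel helpers:

/* in_flight_model.c - model of the reverted single-counter accounting */
#include <assert.h>

struct hd_struct_model {
	int partno;				/* 0 for the whole disk */
	int in_flight;
	struct hd_struct_model *disk_part0;	/* part0 of owning disk */
};

static void part_inc_in_flight_model(struct hd_struct_model *part)
{
	part->in_flight++;
	if (part->partno)			/* mirror into the disk total */
		part->disk_part0->in_flight++;
}

static void part_dec_in_flight_model(struct hd_struct_model *part)
{
	part->in_flight--;
	if (part->partno)
		part->disk_part0->in_flight--;
}

int main(void)
{
	struct hd_struct_model part0 = { .partno = 0 };
	struct hd_struct_model part1 = { .partno = 1, .disk_part0 = &part0 };

	part_inc_in_flight_model(&part1);	/* request issued to partition 1 */
	assert(part1.in_flight == 1 && part0.in_flight == 1);
	part_dec_in_flight_model(&part1);	/* request completed */
	assert(part1.in_flight == 0 && part0.in_flight == 0);
	return 0;
}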
diff --git a/include/trace/events/block.h b/include/trace/events/block.h
--- a/include/trace/events/block.h
+++ b/include/trace/events/block.h
@@ -488,6 +488,39 @@ TRACE_EVENT(block_remap,
 		  (unsigned long long)__entry->old_sector)
 );
 
+TRACE_EVENT(block_rq_remap,
+
+	TP_PROTO(struct request_queue *q, struct request *rq, dev_t dev,
+		 sector_t from),
+
+	TP_ARGS(q, rq, dev, from),
+
+	TP_STRUCT__entry(
+		__field( dev_t,		dev		)
+		__field( sector_t,	sector		)
+		__field( unsigned int,	nr_sector	)
+		__field( dev_t,		old_dev		)
+		__field( sector_t,	old_sector	)
+		__array( char,		rwbs,	6	)
+	),
+
+	TP_fast_assign(
+		__entry->dev		= disk_devt(rq->rq_disk);
+		__entry->sector		= blk_rq_pos(rq);
+		__entry->nr_sector	= blk_rq_sectors(rq);
+		__entry->old_dev	= dev;
+		__entry->old_sector	= from;
+		blk_fill_rwbs_rq(__entry->rwbs, rq);
+	),
+
+	TP_printk("%d,%d %s %llu + %u <- (%d,%d) %llu",
+		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
+		  (unsigned long long)__entry->sector,
+		  __entry->nr_sector,
+		  MAJOR(__entry->old_dev), MINOR(__entry->old_dev),
+		  (unsigned long long)__entry->old_sector)
+);
+
 #endif /* _TRACE_BLOCK_H */
 
 /* This part must be outside protection */
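A hedged sketch of consuming the new block_rq_remap tracepoint from a module. It assumes the 2.6.32-era tracepoint API (probes receive the TP_PROTO arguments directly, no data cookie) and that the tracepoint is exported to modules; treat it as an outline, not a verified build:

/* rq_remap_watch.c - log request remaps via the new tracepoint */
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/tracepoint.h>
#include <trace/events/block.h>

static void probe_rq_remap(struct request_queue *q, struct request *rq,
			   dev_t dev, sector_t from)
{
	/* Same fields the TRACE_EVENT above records. */
	printk(KERN_INFO "rq remap to %u:%u <- %u:%u sector %llu\n",
	       MAJOR(disk_devt(rq->rq_disk)), MINOR(disk_devt(rq->rq_disk)),
	       MAJOR(dev), MINOR(dev), (unsigned long long)from);
}

static int __init rq_remap_watch_init(void)
{
	return register_trace_block_rq_remap(probe_rq_remap);
}

static void __exit rq_remap_watch_exit(void)
{
	unregister_trace_block_rq_remap(probe_rq_remap);
	tracepoint_synchronize_unregister();	/* wait out in-flight probes */
}

module_init(rq_remap_watch_init);
module_exit(rq_remap_watch_exit);
MODULE_LICENSE("GPL");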
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
--- a/kernel/trace/blktrace.c
+++ b/kernel/trace/blktrace.c
@@ -855,6 +855,37 @@ static void blk_add_trace_remap(struct request_queue *q, struct bio *bio,
 			sizeof(r), &r);
 }
 
+/**
+ * blk_add_trace_rq_remap - Add a trace for a request-remap operation
+ * @q:		queue the io is for
+ * @rq:		the source request
+ * @dev:	target device
+ * @from:	source sector
+ *
+ * Description:
+ *     Device mapper remaps request to other devices.
+ *     Add a trace for that action.
+ *
+ **/
+static void blk_add_trace_rq_remap(struct request_queue *q,
+				   struct request *rq, dev_t dev,
+				   sector_t from)
+{
+	struct blk_trace *bt = q->blk_trace;
+	struct blk_io_trace_remap r;
+
+	if (likely(!bt))
+		return;
+
+	r.device_from = cpu_to_be32(dev);
+	r.device_to   = cpu_to_be32(disk_devt(rq->rq_disk));
+	r.sector_from = cpu_to_be64(from);
+
+	__blk_add_trace(bt, blk_rq_pos(rq), blk_rq_bytes(rq),
+			rq_data_dir(rq), BLK_TA_REMAP, !!rq->errors,
+			sizeof(r), &r);
+}
+
 /**
  * blk_add_driver_data - Add binary message with driver-specific data
  * @q:		queue the io is for
@@ -922,10 +953,13 @@ static void blk_register_tracepoints(void)
 	WARN_ON(ret);
 	ret = register_trace_block_remap(blk_add_trace_remap);
 	WARN_ON(ret);
+	ret = register_trace_block_rq_remap(blk_add_trace_rq_remap);
+	WARN_ON(ret);
 }
 
 static void blk_unregister_tracepoints(void)
 {
+	unregister_trace_block_rq_remap(blk_add_trace_rq_remap);
 	unregister_trace_block_remap(blk_add_trace_remap);
 	unregister_trace_block_split(blk_add_trace_split);
 	unregister_trace_block_unplug_io(blk_add_trace_unplug_io);
@@ -1657,6 +1691,11 @@ int blk_trace_init_sysfs(struct device *dev)
 	return sysfs_create_group(&dev->kobj, &blk_trace_attr_group);
 }
 
+void blk_trace_remove_sysfs(struct device *dev)
+{
+	sysfs_remove_group(&dev->kobj, &blk_trace_attr_group);
+}
+
 #endif /* CONFIG_BLK_DEV_IO_TRACE */
 
 #ifdef CONFIG_EVENT_TRACING
diff --git a/mm/swapfile.c b/mm/swapfile.c
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -1974,12 +1974,14 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
 		goto bad_swap;
 	}
 
-	if (blk_queue_nonrot(bdev_get_queue(p->bdev))) {
-		p->flags |= SWP_SOLIDSTATE;
-		p->cluster_next = 1 + (random32() % p->highest_bit);
+	if (p->bdev) {
+		if (blk_queue_nonrot(bdev_get_queue(p->bdev))) {
+			p->flags |= SWP_SOLIDSTATE;
+			p->cluster_next = 1 + (random32() % p->highest_bit);
+		}
+		if (discard_swap(p) == 0)
+			p->flags |= SWP_DISCARDABLE;
 	}
-	if (discard_swap(p) == 0)
-		p->flags |= SWP_DISCARDABLE;
 
 	mutex_lock(&swapon_mutex);
 	spin_lock(&swap_lock);
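The swapon fix guards against a swap area with no backing block device (e.g. a swap file on a network filesystem), where the old code dereferenced p->bdev unconditionally. A user-space model of the guard pattern; all names here are illustrative stand-ins, not the kernel's:

/* swapon_guard_model.c - why the NULL check matters */
#include <stdio.h>
#include <stddef.h>

struct bdev_model { int nonrot; };

#define SWP_SOLIDSTATE_MODEL	0x1
#define SWP_DISCARDABLE_MODEL	0x2

static int swap_setup_flags(const struct bdev_model *bdev, int discard_ok)
{
	int flags = 0;

	if (bdev) {		/* the added guard: no bdev, no dereference */
		if (bdev->nonrot)
			flags |= SWP_SOLIDSTATE_MODEL;
		if (discard_ok)
			flags |= SWP_DISCARDABLE_MODEL;
	}
	return flags;
}

int main(void)
{
	struct bdev_model ssd = { .nonrot = 1 };

	printf("swap file (no bdev): flags=%#x\n", swap_setup_flags(NULL, 1));
	printf("ssd partition:       flags=%#x\n", swap_setup_flags(&ssd, 1));
	return 0;
}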