block: convert to pos and nr_sectors accessors
With recent cleanups, there is no place where a low level driver directly manipulates request fields. This means that the 'hard' request fields always equal the !hard fields. Convert all rq->sectors, nr_sectors and current_nr_sectors references to accessors.

While at it, drop the superfluous blk_rq_pos() < 0 test in swim.c.

[ Impact: use pos and nr_sectors accessors ]

Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: Geert Uytterhoeven <Geert.Uytterhoeven@sonycom.com>
Tested-by: Grant Likely <grant.likely@secretlab.ca>
Acked-by: Grant Likely <grant.likely@secretlab.ca>
Tested-by: Adrian McMenamin <adrian@mcmen.demon.co.uk>
Acked-by: Adrian McMenamin <adrian@mcmen.demon.co.uk>
Acked-by: Mike Miller <mike.miller@hp.com>
Cc: James Bottomley <James.Bottomley@HansenPartnership.com>
Cc: Bartlomiej Zolnierkiewicz <bzolnier@gmail.com>
Cc: Borislav Petkov <petkovbb@googlemail.com>
Cc: Sergei Shtylyov <sshtylyov@ru.mvista.com>
Cc: Eric Moore <Eric.Moore@lsi.com>
Cc: Alan Stern <stern@rowland.harvard.edu>
Cc: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Cc: Pete Zaitcev <zaitcev@redhat.com>
Cc: Stephen Rothwell <sfr@canb.auug.org.au>
Cc: Paul Clements <paul.clements@steeleye.com>
Cc: Tim Waugh <tim@cyberelk.net>
Cc: Jeff Garzik <jgarzik@pobox.com>
Cc: Jeremy Fitzhardinge <jeremy@xensource.com>
Cc: Alex Dubov <oakad@yahoo.com>
Cc: David Woodhouse <dwmw2@infradead.org>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Dario Ballabio <ballabio_dario@emc.com>
Cc: David S. Miller <davem@davemloft.net>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: unsik Kim <donari75@gmail.com>
Cc: Laurent Vivier <Laurent@lvivier.info>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
This commit is contained in:
parent 5b93629b45
commit 83096ebf12
54 changed files with 292 additions and 279 deletions
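For context, the accessors this patch converts to were introduced by the parent commit (5b93629b45). Below is a minimal sketch of that pattern, assuming the pre-2.6.31 struct request layout with its 'hard' bookkeeping fields; the authoritative definitions live in include/linux/blkdev.h, so treat the field names here as recalled rather than quoted:

/*
 * Sketch of the accessor pattern (assumed field names from the
 * 2009-era struct request). Each helper is a trivial inline
 * wrapper, which is why the conversion below is purely mechanical:
 *
 *   rq->sector             -> blk_rq_pos(rq)
 *   rq->nr_sectors         -> blk_rq_sectors(rq)
 *   rq->current_nr_sectors -> blk_rq_cur_sectors(rq)
 */
static inline sector_t blk_rq_pos(const struct request *rq)
{
        return rq->hard_sector;         /* current sector position */
}

static inline unsigned int blk_rq_sectors(const struct request *rq)
{
        return rq->hard_nr_sectors;     /* sectors left in the whole request */
}

static inline unsigned int blk_rq_cur_sectors(const struct request *rq)
{
        return rq->hard_cur_sectors;    /* sectors left in the current segment */
}

One visible side effect throughout the diff: the sector counts now come back as unsigned int rather than the unsigned long of the old fields, which is why several printk format strings change from %lu/%ld to %u.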
@@ -1239,7 +1239,7 @@ static void do_ubd_request(struct request_queue *q)
                 }
 
                 req = dev->request;
-                sector = req->sector;
+                sector = blk_rq_pos(req);
                 while(dev->start_sg < dev->end_sg){
                         struct scatterlist *sg = &dev->sg[dev->start_sg];
 
@@ -306,8 +306,8 @@ as_choose_req(struct as_data *ad, struct request *rq1, struct request *rq2)
         data_dir = rq_is_sync(rq1);
 
         last = ad->last_sector[data_dir];
-        s1 = rq1->sector;
-        s2 = rq2->sector;
+        s1 = blk_rq_pos(rq1);
+        s2 = blk_rq_pos(rq2);
 
         BUG_ON(data_dir != rq_is_sync(rq2));
 
@@ -566,13 +566,15 @@ static void as_update_iohist(struct as_data *ad, struct as_io_context *aic,
                 as_update_thinktime(ad, aic, thinktime);
 
                 /* Calculate read -> read seek distance */
-                if (aic->last_request_pos < rq->sector)
-                        seek_dist = rq->sector - aic->last_request_pos;
+                if (aic->last_request_pos < blk_rq_pos(rq))
+                        seek_dist = blk_rq_pos(rq) -
+                                    aic->last_request_pos;
                 else
-                        seek_dist = aic->last_request_pos - rq->sector;
+                        seek_dist = aic->last_request_pos -
+                                    blk_rq_pos(rq);
                 as_update_seekdist(ad, aic, seek_dist);
         }
-        aic->last_request_pos = rq->sector + rq->nr_sectors;
+        aic->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq);
         set_bit(AS_TASK_IOSTARTED, &aic->state);
         spin_unlock(&aic->lock);
 }
@@ -587,7 +589,7 @@ static int as_close_req(struct as_data *ad, struct as_io_context *aic,
 {
         unsigned long delay;    /* jiffies */
         sector_t last = ad->last_sector[ad->batch_data_dir];
-        sector_t next = rq->sector;
+        sector_t next = blk_rq_pos(rq);
         sector_t delta; /* acceptable close offset (in sectors) */
         sector_t s;
 
@@ -981,7 +983,7 @@ static void as_move_to_dispatch(struct as_data *ad, struct request *rq)
          * This has to be set in order to be correctly updated by
          * as_find_next_rq
          */
-        ad->last_sector[data_dir] = rq->sector + rq->nr_sectors;
+        ad->last_sector[data_dir] = blk_rq_pos(rq) + blk_rq_sectors(rq);
 
         if (data_dir == BLK_RW_SYNC) {
                 struct io_context *ioc = RQ_IOC(rq);
@@ -324,7 +324,7 @@ int blkdev_issue_flush(struct block_device *bdev, sector_t *error_sector)
         /*
          * The driver must store the error location in ->bi_sector, if
          * it supports it. For non-stacked drivers, this should be copied
-         * from rq->sector.
+         * from blk_rq_pos(rq).
          */
         if (error_sector)
                 *error_sector = bio->bi_sector;
@@ -72,7 +72,7 @@ static void drive_stat_acct(struct request *rq, int new_io)
                 return;
 
         cpu = part_stat_lock();
-        part = disk_map_sector_rcu(rq->rq_disk, rq->sector);
+        part = disk_map_sector_rcu(rq->rq_disk, blk_rq_pos(rq));
 
         if (!new_io)
                 part_stat_inc(cpu, part, merges[rw]);
@@ -185,10 +185,9 @@ void blk_dump_rq_flags(struct request *rq, char *msg)
                 rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->cmd_type,
                 rq->cmd_flags);
 
-        printk(KERN_INFO "  sector %llu, nr/cnr %lu/%u\n",
-               (unsigned long long)rq->sector,
-               rq->nr_sectors,
-               rq->current_nr_sectors);
+        printk(KERN_INFO "  sector %llu, nr/cnr %u/%u\n",
+               (unsigned long long)blk_rq_pos(rq),
+               blk_rq_sectors(rq), blk_rq_cur_sectors(rq));
         printk(KERN_INFO "  bio %p, biotail %p, buffer %p, len %u\n",
                rq->bio, rq->biotail,
                rq->buffer, rq->data_len);
@@ -1557,7 +1556,7 @@ EXPORT_SYMBOL(submit_bio);
  */
 int blk_rq_check_limits(struct request_queue *q, struct request *rq)
 {
-        if (rq->nr_sectors > q->max_sectors ||
+        if (blk_rq_sectors(rq) > q->max_sectors ||
             rq->data_len > q->max_hw_sectors << 9) {
                 printk(KERN_ERR "%s: over max size limit.\n", __func__);
                 return -EIO;
@@ -1645,7 +1644,7 @@ static void blk_account_io_completion(struct request *req, unsigned int bytes)
                 int cpu;
 
                 cpu = part_stat_lock();
-                part = disk_map_sector_rcu(req->rq_disk, req->sector);
+                part = disk_map_sector_rcu(req->rq_disk, blk_rq_pos(req));
                 part_stat_add(cpu, part, sectors[rw], bytes >> 9);
                 part_stat_unlock();
         }
@@ -1665,7 +1664,7 @@ static void blk_account_io_done(struct request *req)
                 int cpu;
 
                 cpu = part_stat_lock();
-                part = disk_map_sector_rcu(req->rq_disk, req->sector);
+                part = disk_map_sector_rcu(req->rq_disk, blk_rq_pos(req));
 
                 part_stat_inc(cpu, part, ios[rw]);
                 part_stat_add(cpu, part, ticks[rw], duration);
@@ -1846,7 +1845,7 @@ bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
         if (error && (blk_fs_request(req) && !(req->cmd_flags & REQ_QUIET))) {
                 printk(KERN_ERR "end_request: I/O error, dev %s, sector %llu\n",
                                 req->rq_disk ? req->rq_disk->disk_name : "?",
-                                (unsigned long long)req->sector);
+                                (unsigned long long)blk_rq_pos(req));
         }
 
         blk_account_io_completion(req, nr_bytes);
@@ -259,7 +259,7 @@ int ll_back_merge_fn(struct request_queue *q, struct request *req,
         else
                 max_sectors = q->max_sectors;
 
-        if (req->nr_sectors + bio_sectors(bio) > max_sectors) {
+        if (blk_rq_sectors(req) + bio_sectors(bio) > max_sectors) {
                 req->cmd_flags |= REQ_NOMERGE;
                 if (req == q->last_merge)
                         q->last_merge = NULL;
@@ -284,7 +284,7 @@ int ll_front_merge_fn(struct request_queue *q, struct request *req,
                 max_sectors = q->max_sectors;
 
 
-        if (req->nr_sectors + bio_sectors(bio) > max_sectors) {
+        if (blk_rq_sectors(req) + bio_sectors(bio) > max_sectors) {
                 req->cmd_flags |= REQ_NOMERGE;
                 if (req == q->last_merge)
                         q->last_merge = NULL;
@@ -315,7 +315,7 @@ static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
         /*
          * Will it become too large?
          */
-        if ((req->nr_sectors + next->nr_sectors) > q->max_sectors)
+        if ((blk_rq_sectors(req) + blk_rq_sectors(next)) > q->max_sectors)
                 return 0;
 
         total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;
@@ -345,7 +345,7 @@ static void blk_account_io_merge(struct request *req)
                 int cpu;
 
                 cpu = part_stat_lock();
-                part = disk_map_sector_rcu(req->rq_disk, req->sector);
+                part = disk_map_sector_rcu(req->rq_disk, blk_rq_pos(req));
 
                 part_round_stats(cpu, part);
                 part_dec_in_flight(part);
@@ -366,7 +366,7 @@ static int attempt_merge(struct request_queue *q, struct request *req,
         /*
          * not contiguous
          */
-        if (req->sector + req->nr_sectors != next->sector)
+        if (blk_rq_pos(req) + blk_rq_sectors(req) != blk_rq_pos(next))
                 return 0;
 
         if (rq_data_dir(req) != rq_data_dir(next)
@@ -349,8 +349,8 @@ cfq_choose_req(struct cfq_data *cfqd, struct request *rq1, struct request *rq2)
         else if (rq_is_meta(rq2) && !rq_is_meta(rq1))
                 return rq2;
 
-        s1 = rq1->sector;
-        s2 = rq2->sector;
+        s1 = blk_rq_pos(rq1);
+        s2 = blk_rq_pos(rq2);
 
         last = cfqd->last_position;
 
@@ -949,10 +949,10 @@ static struct cfq_queue *cfq_set_active_queue(struct cfq_data *cfqd,
 static inline sector_t cfq_dist_from_last(struct cfq_data *cfqd,
                                           struct request *rq)
 {
-        if (rq->sector >= cfqd->last_position)
-                return rq->sector - cfqd->last_position;
+        if (blk_rq_pos(rq) >= cfqd->last_position)
+                return blk_rq_pos(rq) - cfqd->last_position;
         else
-                return cfqd->last_position - rq->sector;
+                return cfqd->last_position - blk_rq_pos(rq);
 }
 
 #define CIC_SEEK_THR    8 * 1024
@@ -1918,10 +1918,10 @@ cfq_update_io_seektime(struct cfq_data *cfqd, struct cfq_io_context *cic,
 
         if (!cic->last_request_pos)
                 sdist = 0;
-        else if (cic->last_request_pos < rq->sector)
-                sdist = rq->sector - cic->last_request_pos;
+        else if (cic->last_request_pos < blk_rq_pos(rq))
+                sdist = blk_rq_pos(rq) - cic->last_request_pos;
         else
-                sdist = cic->last_request_pos - rq->sector;
+                sdist = cic->last_request_pos - blk_rq_pos(rq);
 
         /*
          * Don't allow the seek distance to get too large from the
@@ -2071,7 +2071,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
         cfq_update_io_seektime(cfqd, cic, rq);
         cfq_update_idle_window(cfqd, cfqq, cic);
 
-        cic->last_request_pos = rq->sector + rq->nr_sectors;
+        cic->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq);
 
         if (cfqq == cfqd->active_queue) {
                 /*
@@ -138,7 +138,7 @@ deadline_merge(struct request_queue *q, struct request **req, struct bio *bio)
 
                 __rq = elv_rb_find(&dd->sort_list[bio_data_dir(bio)], sector);
                 if (__rq) {
-                        BUG_ON(sector != __rq->sector);
+                        BUG_ON(sector != blk_rq_pos(__rq));
 
                         if (elv_rq_merge_ok(__rq, bio)) {
                                 ret = ELEVATOR_FRONT_MERGE;
@@ -52,7 +52,7 @@ static const int elv_hash_shift = 6;
 #define ELV_HASH_FN(sec)        \
                 (hash_long(ELV_HASH_BLOCK((sec)), elv_hash_shift))
 #define ELV_HASH_ENTRIES        (1 << elv_hash_shift)
-#define rq_hash_key(rq)         ((rq)->sector + (rq)->nr_sectors)
+#define rq_hash_key(rq)         (blk_rq_pos(rq) + blk_rq_sectors(rq))
 
 DEFINE_TRACE(block_rq_insert);
 DEFINE_TRACE(block_rq_issue);
@@ -119,9 +119,9 @@ static inline int elv_try_merge(struct request *__rq, struct bio *bio)
          * we can merge and sequence is ok, check if it's possible
          */
         if (elv_rq_merge_ok(__rq, bio)) {
-                if (__rq->sector + __rq->nr_sectors == bio->bi_sector)
+                if (blk_rq_pos(__rq) + blk_rq_sectors(__rq) == bio->bi_sector)
                         ret = ELEVATOR_BACK_MERGE;
-                else if (__rq->sector - bio_sectors(bio) == bio->bi_sector)
+                else if (blk_rq_pos(__rq) - bio_sectors(bio) == bio->bi_sector)
                         ret = ELEVATOR_FRONT_MERGE;
         }
 
@@ -370,9 +370,9 @@ struct request *elv_rb_add(struct rb_root *root, struct request *rq)
                 parent = *p;
                 __rq = rb_entry(parent, struct request, rb_node);
 
-                if (rq->sector < __rq->sector)
+                if (blk_rq_pos(rq) < blk_rq_pos(__rq))
                         p = &(*p)->rb_left;
-                else if (rq->sector > __rq->sector)
+                else if (blk_rq_pos(rq) > blk_rq_pos(__rq))
                         p = &(*p)->rb_right;
                 else
                         return __rq;
@@ -400,9 +400,9 @@ struct request *elv_rb_find(struct rb_root *root, sector_t sector)
         while (n) {
                 rq = rb_entry(n, struct request, rb_node);
 
-                if (sector < rq->sector)
+                if (sector < blk_rq_pos(rq))
                         n = n->rb_left;
-                else if (sector > rq->sector)
+                else if (sector > blk_rq_pos(rq))
                         n = n->rb_right;
                 else
                         return rq;
@@ -441,14 +441,14 @@ void elv_dispatch_sort(struct request_queue *q, struct request *rq)
                         break;
                 if (pos->cmd_flags & stop_flags)
                         break;
-                if (rq->sector >= boundary) {
-                        if (pos->sector < boundary)
+                if (blk_rq_pos(rq) >= boundary) {
+                        if (blk_rq_pos(pos) < boundary)
                                 continue;
                 } else {
-                        if (pos->sector >= boundary)
+                        if (blk_rq_pos(pos) >= boundary)
                                 break;
                 }
-                if (rq->sector >= pos->sector)
+                if (blk_rq_pos(rq) >= blk_rq_pos(pos))
                         break;
         }
 
@@ -3338,8 +3338,8 @@ static int DAC960_process_queue(DAC960_Controller_T *Controller, struct request_
   }
   Command->Completion = Request->end_io_data;
   Command->LogicalDriveNumber = (long)Request->rq_disk->private_data;
-  Command->BlockNumber = Request->sector;
-  Command->BlockCount = Request->nr_sectors;
+  Command->BlockNumber = blk_rq_pos(Request);
+  Command->BlockCount = blk_rq_sectors(Request);
   Command->Request = Request;
   blkdev_dequeue_request(Request);
   Command->SegmentCount = blk_rq_map_sg(req_q,
@@ -3431,7 +3431,7 @@ static void DAC960_queue_partial_rw(DAC960_Command_T *Command)
    * successfully as possible.
    */
   Command->SegmentCount = 1;
-  Command->BlockNumber = Request->sector;
+  Command->BlockNumber = blk_rq_pos(Request);
   Command->BlockCount = 1;
   DAC960_QueueReadWriteCommand(Command);
   return;
|
@ -1351,13 +1351,13 @@ static void redo_fd_request(void)
|
|||
drive = floppy - unit;
|
||||
|
||||
/* Here someone could investigate to be more efficient */
|
||||
for (cnt = 0; cnt < CURRENT->current_nr_sectors; cnt++) {
|
||||
for (cnt = 0; cnt < blk_rq_cur_sectors(CURRENT); cnt++) {
|
||||
#ifdef DEBUG
|
||||
printk("fd: sector %ld + %d requested for %s\n",
|
||||
CURRENT->sector,cnt,
|
||||
blk_rq_pos(CURRENT), cnt,
|
||||
(rq_data_dir(CURRENT) == READ) ? "read" : "write");
|
||||
#endif
|
||||
block = CURRENT->sector + cnt;
|
||||
block = blk_rq_pos(CURRENT) + cnt;
|
||||
if ((int)block > floppy->blocks) {
|
||||
__blk_end_request_cur(CURRENT, -EIO);
|
||||
goto repeat;
|
||||
|
|
|
@@ -725,7 +725,7 @@ static void do_fd_action( int drive )
         if (IS_BUFFERED( drive, ReqSide, ReqTrack )) {
                 if (ReqCmd == READ) {
                         copy_buffer( SECTOR_BUFFER(ReqSector), ReqData );
-                        if (++ReqCnt < CURRENT->current_nr_sectors) {
+                        if (++ReqCnt < blk_rq_cur_sectors(CURRENT)) {
                                 /* read next sector */
                                 setup_req_params( drive );
                                 goto repeat;
@@ -1130,7 +1130,7 @@ static void fd_rwsec_done1(int status)
                 }
         }
 
-        if (++ReqCnt < CURRENT->current_nr_sectors) {
+        if (++ReqCnt < blk_rq_cur_sectors(CURRENT)) {
                 /* read next sector */
                 setup_req_params( SelectedDrive );
                 do_fd_action( SelectedDrive );
@@ -1394,7 +1394,7 @@ static void redo_fd_request(void)
 
         DPRINT(("redo_fd_request: CURRENT=%p dev=%s CURRENT->sector=%ld\n",
                 CURRENT, CURRENT ? CURRENT->rq_disk->disk_name : "",
-                CURRENT ? CURRENT->sector : 0 ));
+                CURRENT ? blk_rq_pos(CURRENT) : 0 ));
 
         IsFormatting = 0;
 
@@ -1440,7 +1440,7 @@ repeat:
                 UD.autoprobe = 0;
         }
 
-        if (CURRENT->sector + 1 > UDT->blocks) {
+        if (blk_rq_pos(CURRENT) + 1 > UDT->blocks) {
                 __blk_end_request_cur(CURRENT, -EIO);
                 goto repeat;
         }
@@ -1450,7 +1450,7 @@ repeat:
 
         ReqCnt = 0;
         ReqCmd = rq_data_dir(CURRENT);
-        ReqBlock = CURRENT->sector;
+        ReqBlock = blk_rq_pos(CURRENT);
         ReqBuffer = CURRENT->buffer;
         setup_req_params( drive );
         do_fd_action( drive );
@@ -2835,10 +2835,10 @@ static void do_cciss_request(struct request_queue *q)
         c->Request.Timeout = 0; // Don't time out
         c->Request.CDB[0] =
             (rq_data_dir(creq) == READ) ? h->cciss_read : h->cciss_write;
-        start_blk = creq->sector;
+        start_blk = blk_rq_pos(creq);
 #ifdef CCISS_DEBUG
-        printk(KERN_DEBUG "ciss: sector =%d nr_sectors=%d\n", (int)creq->sector,
-               (int)creq->nr_sectors);
+        printk(KERN_DEBUG "ciss: sector =%d nr_sectors=%d\n",
+               (int)blk_rq_pos(creq), (int)blk_rq_sectors(creq));
 #endif                          /* CCISS_DEBUG */
 
         sg_init_table(tmp_sg, MAXSGENTRIES);
@@ -2864,8 +2864,8 @@ static void do_cciss_request(struct request_queue *q)
                 h->maxSG = seg;
 
 #ifdef CCISS_DEBUG
-        printk(KERN_DEBUG "cciss: Submitting %lu sectors in %d segments\n",
-               creq->nr_sectors, seg);
+        printk(KERN_DEBUG "cciss: Submitting %u sectors in %d segments\n",
+               blk_rq_sectors(creq), seg);
 #endif                          /* CCISS_DEBUG */
 
         c->Header.SGList = c->Header.SGTotal = seg;
@@ -2877,8 +2877,8 @@ static void do_cciss_request(struct request_queue *q)
                         c->Request.CDB[4] = (start_blk >> 8) & 0xff;
                         c->Request.CDB[5] = start_blk & 0xff;
                         c->Request.CDB[6] = 0; // (sect >> 24) & 0xff; MSB
-                        c->Request.CDB[7] = (creq->nr_sectors >> 8) & 0xff;
-                        c->Request.CDB[8] = creq->nr_sectors & 0xff;
+                        c->Request.CDB[7] = (blk_rq_sectors(creq) >> 8) & 0xff;
+                        c->Request.CDB[8] = blk_rq_sectors(creq) & 0xff;
                         c->Request.CDB[9] = c->Request.CDB[11] = c->Request.CDB[12] = 0;
                 } else {
                         u32 upper32 = upper_32_bits(start_blk);
@@ -2893,10 +2893,10 @@ static void do_cciss_request(struct request_queue *q)
                         c->Request.CDB[7]= (start_blk >> 16) & 0xff;
                         c->Request.CDB[8]= (start_blk >> 8) & 0xff;
                         c->Request.CDB[9]= start_blk & 0xff;
-                        c->Request.CDB[10]= (creq->nr_sectors >> 24) & 0xff;
-                        c->Request.CDB[11]= (creq->nr_sectors >> 16) & 0xff;
-                        c->Request.CDB[12]= (creq->nr_sectors >> 8) & 0xff;
-                        c->Request.CDB[13]= creq->nr_sectors & 0xff;
+                        c->Request.CDB[10]= (blk_rq_sectors(creq) >> 24) & 0xff;
+                        c->Request.CDB[11]= (blk_rq_sectors(creq) >> 16) & 0xff;
+                        c->Request.CDB[12]= (blk_rq_sectors(creq) >> 8) & 0xff;
+                        c->Request.CDB[13]= blk_rq_sectors(creq) & 0xff;
                         c->Request.CDB[14] = c->Request.CDB[15] = 0;
                 }
         } else if (blk_pc_request(creq)) {
@@ -919,10 +919,11 @@ queue_next:
         c->hdr.size = sizeof(rblk_t) >> 2;
         c->size += sizeof(rblk_t);
 
-        c->req.hdr.blk = creq->sector;
+        c->req.hdr.blk = blk_rq_pos(creq);
         c->rq = creq;
 DBGPX(
-        printk("sector=%d, nr_sectors=%d\n", creq->sector, creq->nr_sectors);
+        printk("sector=%d, nr_sectors=%u\n",
+               blk_rq_pos(creq), blk_rq_sectors(creq));
 );
         sg_init_table(tmp_sg, SG_MAX);
         seg = blk_rq_map_sg(q, creq, tmp_sg);
@@ -940,9 +941,9 @@ DBGPX(
                         tmp_sg[i].offset,
                         tmp_sg[i].length, dir);
         }
-DBGPX(  printk("Submitting %d sectors in %d segments\n", creq->nr_sectors, seg); );
+DBGPX(  printk("Submitting %u sectors in %d segments\n", blk_rq_sectors(creq), seg); );
         c->req.hdr.sg_cnt = seg;
-        c->req.hdr.blk_cnt = creq->nr_sectors;
+        c->req.hdr.blk_cnt = blk_rq_sectors(creq);
         c->req.hdr.cmd = (rq_data_dir(creq) == READ) ? IDA_READ : IDA_WRITE;
         c->type = CMD_RWREQ;
 
@@ -2303,7 +2303,7 @@ static void floppy_end_request(struct request *req, int error)
 
         /* current_count_sectors can be zero if transfer failed */
         if (error)
-                nr_sectors = req->current_nr_sectors;
+                nr_sectors = blk_rq_cur_sectors(req);
         if (__blk_end_request(req, error, nr_sectors << 9))
                 return;
 
@@ -2332,7 +2332,7 @@ static void request_done(int uptodate)
         if (uptodate) {
                 /* maintain values for invalidation on geometry
                  * change */
-                block = current_count_sectors + req->sector;
+                block = current_count_sectors + blk_rq_pos(req);
                 INFBOUND(DRS->maxblock, block);
                 if (block > _floppy->sect)
                         DRS->maxtrack = 1;
@@ -2346,10 +2346,10 @@ static void request_done(int uptodate)
                         /* record write error information */
                         DRWE->write_errors++;
                         if (DRWE->write_errors == 1) {
-                                DRWE->first_error_sector = req->sector;
+                                DRWE->first_error_sector = blk_rq_pos(req);
                                 DRWE->first_error_generation = DRS->generation;
                         }
-                        DRWE->last_error_sector = req->sector;
+                        DRWE->last_error_sector = blk_rq_pos(req);
                         DRWE->last_error_generation = DRS->generation;
                 }
                 spin_lock_irqsave(q->queue_lock, flags);
@@ -2503,24 +2503,24 @@ static void copy_buffer(int ssize, int max_sector, int max_sector_2)
 
         max_sector = transfer_size(ssize,
                                    min(max_sector, max_sector_2),
-                                   current_req->nr_sectors);
+                                   blk_rq_sectors(current_req));
 
         if (current_count_sectors <= 0 && CT(COMMAND) == FD_WRITE &&
-            buffer_max > fsector_t + current_req->nr_sectors)
+            buffer_max > fsector_t + blk_rq_sectors(current_req))
                 current_count_sectors = min_t(int, buffer_max - fsector_t,
-                                              current_req->nr_sectors);
+                                              blk_rq_sectors(current_req));
 
         remaining = current_count_sectors << 9;
 #ifdef FLOPPY_SANITY_CHECK
-        if ((remaining >> 9) > current_req->nr_sectors &&
+        if ((remaining >> 9) > blk_rq_sectors(current_req) &&
             CT(COMMAND) == FD_WRITE) {
                 DPRINT("in copy buffer\n");
                 printk("current_count_sectors=%ld\n", current_count_sectors);
                 printk("remaining=%d\n", remaining >> 9);
-                printk("current_req->nr_sectors=%ld\n",
-                       current_req->nr_sectors);
+                printk("current_req->nr_sectors=%u\n",
+                       blk_rq_sectors(current_req));
                 printk("current_req->current_nr_sectors=%u\n",
-                       current_req->current_nr_sectors);
+                       blk_rq_cur_sectors(current_req));
                 printk("max_sector=%d\n", max_sector);
                 printk("ssize=%d\n", ssize);
         }
@@ -2530,7 +2530,7 @@ static void copy_buffer(int ssize, int max_sector, int max_sector_2)
 
         dma_buffer = floppy_track_buffer + ((fsector_t - buffer_min) << 9);
 
-        size = current_req->current_nr_sectors << 9;
+        size = blk_rq_cur_sectors(current_req) << 9;
 
         rq_for_each_segment(bv, current_req, iter) {
                 if (!remaining)
@@ -2648,10 +2648,10 @@ static int make_raw_rw_request(void)
 
         max_sector = _floppy->sect * _floppy->head;
 
-        TRACK = (int)current_req->sector / max_sector;
-        fsector_t = (int)current_req->sector % max_sector;
+        TRACK = (int)blk_rq_pos(current_req) / max_sector;
+        fsector_t = (int)blk_rq_pos(current_req) % max_sector;
         if (_floppy->track && TRACK >= _floppy->track) {
-                if (current_req->current_nr_sectors & 1) {
+                if (blk_rq_cur_sectors(current_req) & 1) {
                         current_count_sectors = 1;
                         return 1;
                 } else
@@ -2669,7 +2669,7 @@ static int make_raw_rw_request(void)
                 if (fsector_t >= max_sector) {
                         current_count_sectors =
                             min_t(int, _floppy->sect - fsector_t,
-                                  current_req->nr_sectors);
+                                  blk_rq_sectors(current_req));
                         return 1;
                 }
                 SIZECODE = 2;
@@ -2720,7 +2720,7 @@ static int make_raw_rw_request(void)
 
         in_sector_offset = (fsector_t % _floppy->sect) % ssize;
         aligned_sector_t = fsector_t - in_sector_offset;
-        max_size = current_req->nr_sectors;
+        max_size = blk_rq_sectors(current_req);
         if ((raw_cmd->track == buffer_track) &&
             (current_drive == buffer_drive) &&
             (fsector_t >= buffer_min) && (fsector_t < buffer_max)) {
@@ -2729,10 +2729,10 @@ static int make_raw_rw_request(void)
                         copy_buffer(1, max_sector, buffer_max);
                         return 1;
                 }
-        } else if (in_sector_offset || current_req->nr_sectors < ssize) {
+        } else if (in_sector_offset || blk_rq_sectors(current_req) < ssize) {
                 if (CT(COMMAND) == FD_WRITE) {
-                        if (fsector_t + current_req->nr_sectors > ssize &&
-                            fsector_t + current_req->nr_sectors < ssize + ssize)
+                        if (fsector_t + blk_rq_sectors(current_req) > ssize &&
+                            fsector_t + blk_rq_sectors(current_req) < ssize + ssize)
                                 max_size = ssize + ssize;
                         else
                                 max_size = ssize;
@@ -2776,7 +2776,7 @@ static int make_raw_rw_request(void)
                     (indirect * 2 > direct * 3 &&
                      *errors < DP->max_errors.read_track && ((!probing
                        || (DP->read_track & (1 << DRS->probed_format)))))) {
-                        max_size = current_req->nr_sectors;
+                        max_size = blk_rq_sectors(current_req);
                 } else {
                         raw_cmd->kernel_data = current_req->buffer;
                         raw_cmd->length = current_count_sectors << 9;
@@ -2801,7 +2801,7 @@ static int make_raw_rw_request(void)
             fsector_t > buffer_max ||
             fsector_t < buffer_min ||
             ((CT(COMMAND) == FD_READ ||
-              (!in_sector_offset && current_req->nr_sectors >= ssize)) &&
+              (!in_sector_offset && blk_rq_sectors(current_req) >= ssize)) &&
              max_sector > 2 * max_buffer_sectors + buffer_min &&
              max_size + fsector_t > 2 * max_buffer_sectors + buffer_min)
             /* not enough space */
@@ -2879,8 +2879,8 @@ static int make_raw_rw_request(void)
                         printk("write\n");
                         return 0;
                 }
-        } else if (raw_cmd->length > current_req->nr_sectors << 9 ||
-                   current_count_sectors > current_req->nr_sectors) {
+        } else if (raw_cmd->length > blk_rq_sectors(current_req) << 9 ||
+                   current_count_sectors > blk_rq_sectors(current_req)) {
                 DPRINT("buffer overrun in direct transfer\n");
                 return 0;
         } else if (raw_cmd->length < current_count_sectors << 9) {
@@ -2990,8 +2990,9 @@ static void do_fd_request(struct request_queue * q)
         if (usage_count == 0) {
                 printk("warning: usage count=0, current_req=%p exiting\n",
                        current_req);
-                printk("sect=%ld type=%x flags=%x\n", (long)current_req->sector,
-                       current_req->cmd_type, current_req->cmd_flags);
+                printk("sect=%ld type=%x flags=%x\n",
+                       (long)blk_rq_pos(current_req), current_req->cmd_type,
+                       current_req->cmd_flags);
                 return;
         }
         if (test_bit(0, &fdc_busy)) {
@@ -228,7 +228,7 @@ static void dump_status(const char *msg, unsigned int stat)
                 printk(", CHS=%d/%d/%d", (inb(HD_HCYL)<<8) + inb(HD_LCYL),
                         inb(HD_CURRENT) & 0xf, inb(HD_SECTOR));
                 if (CURRENT)
-                        printk(", sector=%ld", CURRENT->sector);
+                        printk(", sector=%ld", blk_rq_pos(CURRENT));
         }
         printk("\n");
 }
@@ -457,9 +457,9 @@ ok_to_read:
         req = CURRENT;
         insw(HD_DATA, req->buffer, 256);
 #ifdef DEBUG
-        printk("%s: read: sector %ld, remaining = %ld, buffer=%p\n",
-               req->rq_disk->disk_name, req->sector + 1, req->nr_sectors - 1,
-               req->buffer+512);
+        printk("%s: read: sector %ld, remaining = %u, buffer=%p\n",
+               req->rq_disk->disk_name, blk_rq_pos(req) + 1,
+               blk_rq_sectors(req) - 1, req->buffer+512);
 #endif
         if (__blk_end_request(req, 0, 512)) {
                 SET_HANDLER(&read_intr);
@@ -485,7 +485,7 @@ static void write_intr(void)
                         continue;
                 if (!OK_STATUS(i))
                         break;
-                if ((req->nr_sectors <= 1) || (i & DRQ_STAT))
+                if ((blk_rq_sectors(req) <= 1) || (i & DRQ_STAT))
                         goto ok_to_write;
         } while (--retries > 0);
         dump_status("write_intr", i);
@@ -589,8 +589,8 @@ repeat:
                 return;
         }
         disk = req->rq_disk->private_data;
-        block = req->sector;
-        nsect = req->nr_sectors;
+        block = blk_rq_pos(req);
+        nsect = blk_rq_sectors(req);
         if (block >= get_capacity(req->rq_disk) ||
             ((block+nsect) > get_capacity(req->rq_disk))) {
                 printk("%s: bad access: block=%d, count=%d\n",
@@ -220,7 +220,8 @@ static void mg_dump_status(const char *msg, unsigned int stat,
         if (host->breq) {
                 req = elv_next_request(host->breq);
                 if (req)
-                        printk(", sector=%u", (u32)req->sector);
+                        printk(", sector=%u",
+                               (unsigned int)blk_rq_pos(req));
         }
 
 }
@@ -493,12 +494,12 @@ static void mg_read(struct request *req)
         u32 j;
         struct mg_host *host = req->rq_disk->private_data;
 
-        if (mg_out(host, req->sector, req->nr_sectors, MG_CMD_RD, NULL) !=
-            MG_ERR_NONE)
+        if (mg_out(host, blk_rq_pos(req), blk_rq_sectors(req),
+                   MG_CMD_RD, NULL) != MG_ERR_NONE)
                 mg_bad_rw_intr(host);
 
         MG_DBG("requested %d sects (from %ld), buffer=0x%p\n",
-               req->nr_sectors, req->sector, req->buffer);
+               blk_rq_sectors(req), blk_rq_pos(req), req->buffer);
 
         do {
                 u16 *buff = (u16 *)req->buffer;
@@ -522,14 +523,14 @@ static void mg_write(struct request *req)
         u32 j;
         struct mg_host *host = req->rq_disk->private_data;
 
-        if (mg_out(host, req->sector, req->nr_sectors, MG_CMD_WR, NULL) !=
-            MG_ERR_NONE) {
+        if (mg_out(host, blk_rq_pos(req), blk_rq_sectors(req),
+                   MG_CMD_WR, NULL) != MG_ERR_NONE) {
                 mg_bad_rw_intr(host);
                 return;
         }
 
         MG_DBG("requested %d sects (from %ld), buffer=0x%p\n",
-               req->nr_sectors, req->sector, req->buffer);
+               blk_rq_sectors(req), blk_rq_pos(req), req->buffer);
 
         do {
                 u16 *buff = (u16 *)req->buffer;
@@ -579,7 +580,7 @@ ok_to_read:
                         (i << 1));
 
         MG_DBG("sector %ld, remaining=%ld, buffer=0x%p\n",
-               req->sector, req->nr_sectors - 1, req->buffer);
+               blk_rq_pos(req), blk_rq_sectors(req) - 1, req->buffer);
 
         /* send read confirm */
         outb(MG_CMD_RD_CONF, (unsigned long)host->dev_base + MG_REG_COMMAND);
@@ -609,7 +610,7 @@ static void mg_write_intr(struct mg_host *host)
                         break;
                 if (!MG_READY_OK(i))
                         break;
-                if ((req->nr_sectors <= 1) || (i & ATA_DRQ))
+                if ((blk_rq_sectors(req) <= 1) || (i & ATA_DRQ))
                         goto ok_to_write;
         } while (0);
         mg_dump_status("mg_write_intr", i, host);
@@ -627,7 +628,7 @@ ok_to_write:
                 buff++;
         }
         MG_DBG("sector %ld, remaining=%ld, buffer=0x%p\n",
-               req->sector, req->nr_sectors, req->buffer);
+               blk_rq_pos(req), blk_rq_sectors(req), req->buffer);
         host->mg_do_intr = mg_write_intr;
         mod_timer(&host->timer, jiffies + 3 * HZ);
 }
@@ -749,9 +750,9 @@ static void mg_request(struct request_queue *q)
 
         del_timer(&host->timer);
 
-        sect_num = req->sector;
+        sect_num = blk_rq_pos(req);
         /* deal whole segments */
-        sect_cnt = req->nr_sectors;
+        sect_cnt = blk_rq_sectors(req);
 
         /* sanity check */
         if (sect_num >= get_capacity(req->rq_disk) ||
@@ -110,7 +110,7 @@ static void nbd_end_request(struct request *req)
                         req, error ? "failed" : "done");
 
         spin_lock_irqsave(q->queue_lock, flags);
-        __blk_end_request(req, error, req->nr_sectors << 9);
+        __blk_end_request(req, error, blk_rq_sectors(req) << 9);
         spin_unlock_irqrestore(q->queue_lock, flags);
 }
 
@@ -231,19 +231,19 @@ static int nbd_send_req(struct nbd_device *lo, struct request *req)
 {
         int result, flags;
         struct nbd_request request;
-        unsigned long size = req->nr_sectors << 9;
+        unsigned long size = blk_rq_sectors(req) << 9;
 
         request.magic = htonl(NBD_REQUEST_MAGIC);
         request.type = htonl(nbd_cmd(req));
-        request.from = cpu_to_be64((u64) req->sector << 9);
+        request.from = cpu_to_be64((u64)blk_rq_pos(req) << 9);
         request.len = htonl(size);
         memcpy(request.handle, &req, sizeof(req));
 
-        dprintk(DBG_TX, "%s: request %p: sending control (%s@%llu,%luB)\n",
+        dprintk(DBG_TX, "%s: request %p: sending control (%s@%llu,%uB)\n",
                         lo->disk->disk_name, req,
                         nbdcmd_to_ascii(nbd_cmd(req)),
-                        (unsigned long long)req->sector << 9,
-                        req->nr_sectors << 9);
+                        (unsigned long long)blk_rq_pos(req) << 9,
+                        blk_rq_sectors(req) << 9);
         result = sock_xmit(lo, 1, &request, sizeof(request),
                         (nbd_cmd(req) == NBD_CMD_WRITE) ? MSG_MORE : 0);
         if (result <= 0) {
@@ -728,8 +728,8 @@ static void do_pcd_request(struct request_queue * q)
                 if (cd != pcd_current)
                         pcd_bufblk = -1;
                 pcd_current = cd;
-                pcd_sector = pcd_req->sector;
-                pcd_count = pcd_req->current_nr_sectors;
+                pcd_sector = blk_rq_pos(pcd_req);
+                pcd_count = blk_rq_cur_sectors(pcd_req);
                 pcd_buf = pcd_req->buffer;
                 pcd_busy = 1;
                 ps_set_intr(do_pcd_read, NULL, 0, nice);
@@ -444,11 +444,11 @@ static enum action do_pd_io_start(void)
 
         pd_cmd = rq_data_dir(pd_req);
         if (pd_cmd == READ || pd_cmd == WRITE) {
-                pd_block = pd_req->sector;
-                pd_count = pd_req->current_nr_sectors;
+                pd_block = blk_rq_pos(pd_req);
+                pd_count = blk_rq_cur_sectors(pd_req);
                 if (pd_block + pd_count > get_capacity(pd_req->rq_disk))
                         return Fail;
-                pd_run = pd_req->nr_sectors;
+                pd_run = blk_rq_sectors(pd_req);
                 pd_buf = pd_req->buffer;
                 pd_retries = 0;
                 if (pd_cmd == READ)
@@ -479,7 +479,7 @@ static int pd_next_buf(void)
                 return 0;
         spin_lock_irqsave(&pd_lock, saved_flags);
         __blk_end_request_cur(pd_req, 0);
-        pd_count = pd_req->current_nr_sectors;
+        pd_count = blk_rq_cur_sectors(pd_req);
         pd_buf = pd_req->buffer;
         spin_unlock_irqrestore(&pd_lock, saved_flags);
         return 0;
@@ -768,9 +768,9 @@ repeat:
                 return;
 
         pf_current = pf_req->rq_disk->private_data;
-        pf_block = pf_req->sector;
-        pf_run = pf_req->nr_sectors;
-        pf_count = pf_req->current_nr_sectors;
+        pf_block = blk_rq_pos(pf_req);
+        pf_run = blk_rq_sectors(pf_req);
+        pf_count = blk_rq_cur_sectors(pf_req);
 
         if (pf_block + pf_count > get_capacity(pf_req->rq_disk)) {
                 pf_end_request(-EIO);
@@ -810,7 +810,7 @@ static int pf_next_buf(void)
                 spin_unlock_irqrestore(&pf_spin_lock, saved_flags);
                 if (!pf_req)
                         return 1;
-                pf_count = pf_req->current_nr_sectors;
+                pf_count = blk_rq_cur_sectors(pf_req);
                 pf_buf = pf_req->buffer;
         }
         return 0;
@@ -134,13 +134,12 @@ static int ps3disk_submit_request_sg(struct ps3_storage_device *dev,
         rq_for_each_segment(bv, req, iter)
                 n++;
         dev_dbg(&dev->sbd.core,
-                "%s:%u: %s req has %u bvecs for %lu sectors %lu hard sectors\n",
-                __func__, __LINE__, op, n, req->nr_sectors,
-                blk_rq_sectors(req));
+                "%s:%u: %s req has %u bvecs for %u sectors\n",
+                __func__, __LINE__, op, n, blk_rq_sectors(req));
 #endif
 
-        start_sector = req->sector * priv->blocking_factor;
-        sectors = req->nr_sectors * priv->blocking_factor;
+        start_sector = blk_rq_pos(req) * priv->blocking_factor;
+        sectors = blk_rq_sectors(req) * priv->blocking_factor;
         dev_dbg(&dev->sbd.core, "%s:%u: %s %llu sectors starting at %llu\n",
                 __func__, __LINE__, op, sectors, start_sector);
 
@@ -416,7 +416,7 @@ static int __send_request(struct request *req)
                 desc->slice = 0;
         }
         desc->status = ~0;
-        desc->offset = (req->sector << 9) / port->vdisk_block_size;
+        desc->offset = (blk_rq_pos(req) << 9) / port->vdisk_block_size;
         desc->size = len;
         desc->ncookies = err;
 
@@ -531,7 +531,7 @@ static void redo_fd_request(struct request_queue *q)
         while ((req = elv_next_request(q))) {
 
                 fs = req->rq_disk->private_data;
-                if (req->sector < 0 || req->sector >= fs->total_secs) {
+                if (blk_rq_pos(req) >= fs->total_secs) {
                         __blk_end_request_cur(req, -EIO);
                         continue;
                 }
@@ -551,8 +551,8 @@ static void redo_fd_request(struct request_queue *q)
                         __blk_end_request_cur(req, -EIO);
                         break;
                 case READ:
-                        if (floppy_read_sectors(fs, req->sector,
-                                                req->current_nr_sectors,
+                        if (floppy_read_sectors(fs, blk_rq_pos(req),
+                                                blk_rq_cur_sectors(req),
                                                 req->buffer)) {
                                 __blk_end_request_cur(req, -EIO);
                                 continue;
@@ -312,14 +312,14 @@ static void start_request(struct floppy_state *fs)
         }
         while (fs->state == idle && (req = elv_next_request(swim3_queue))) {
 #if 0
-                printk("do_fd_req: dev=%s cmd=%d sec=%ld nr_sec=%ld buf=%p\n",
+                printk("do_fd_req: dev=%s cmd=%d sec=%ld nr_sec=%u buf=%p\n",
                        req->rq_disk->disk_name, req->cmd,
-                       (long)req->sector, req->nr_sectors, req->buffer);
-                printk(" errors=%d current_nr_sectors=%ld\n",
-                       req->errors, req->current_nr_sectors);
+                       (long)blk_rq_pos(req), blk_rq_sectors(req), req->buffer);
+                printk(" errors=%d current_nr_sectors=%u\n",
+                       req->errors, blk_rq_cur_sectors(req));
 #endif
 
-                if (req->sector >= fs->total_secs) {
+                if (blk_rq_pos(req) >= fs->total_secs) {
                         __blk_end_request_cur(req, -EIO);
                         continue;
                 }
@@ -337,13 +337,14 @@ static void start_request(struct floppy_state *fs)
                         }
                 }
 
-                /* Do not remove the cast. req->sector is now a sector_t and
-                 * can be 64 bits, but it will never go past 32 bits for this
-                 * driver anyway, so we can safely cast it down and not have
-                 * to do a 64/32 division
+                /* Do not remove the cast. blk_rq_pos(req) is now a
+                 * sector_t and can be 64 bits, but it will never go
+                 * past 32 bits for this driver anyway, so we can
+                 * safely cast it down and not have to do a 64/32
+                 * division
                  */
-                fs->req_cyl = ((long)req->sector) / fs->secpercyl;
-                x = ((long)req->sector) % fs->secpercyl;
+                fs->req_cyl = ((long)blk_rq_pos(req)) / fs->secpercyl;
+                x = ((long)blk_rq_pos(req)) % fs->secpercyl;
                 fs->head = x / fs->secpertrack;
                 fs->req_sector = x % fs->secpertrack + 1;
                 fd_req = req;
@@ -420,7 +421,7 @@ static inline void setup_transfer(struct floppy_state *fs)
         struct dbdma_cmd *cp = fs->dma_cmd;
         struct dbdma_regs __iomem *dr = fs->dma;
 
-        if (fd_req->current_nr_sectors <= 0) {
+        if (blk_rq_cur_sectors(fd_req) <= 0) {
                 printk(KERN_ERR "swim3: transfer 0 sectors?\n");
                 return;
         }
@@ -428,8 +429,8 @@ static inline void setup_transfer(struct floppy_state *fs)
                 n = 1;
         else {
                 n = fs->secpertrack - fs->req_sector + 1;
-                if (n > fd_req->current_nr_sectors)
-                        n = fd_req->current_nr_sectors;
+                if (n > blk_rq_cur_sectors(fd_req))
+                        n = blk_rq_cur_sectors(fd_req);
         }
         fs->scount = n;
         swim3_select(fs, fs->head? READ_DATA_1: READ_DATA_0);
@@ -600,7 +601,8 @@ static void xfer_timeout(unsigned long data)
         out_8(&sw->control_bic, WRITE_SECTORS | DO_ACTION);
         out_8(&sw->select, RELAX);
         printk(KERN_ERR "swim3: timeout %sing sector %ld\n",
-               (rq_data_dir(fd_req)==WRITE? "writ": "read"), (long)fd_req->sector);
+               (rq_data_dir(fd_req)==WRITE? "writ": "read"),
+               (long)blk_rq_pos(fd_req));
         __blk_end_request_cur(fd_req, -EIO);
         fs->state = idle;
         start_request(fs);
@@ -714,7 +716,7 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id)
                 } else {
                         printk("swim3: error %sing block %ld (err=%x)\n",
                                rq_data_dir(fd_req) == WRITE? "writ": "read",
-                               (long)fd_req->sector, err);
+                               (long)blk_rq_pos(fd_req), err);
                         __blk_end_request_cur(fd_req, -EIO);
                         fs->state = idle;
                 }
@@ -903,10 +903,10 @@ queue_one_request:
         msg->sg_count   = n_elem;
         msg->sg_type    = SGT_32BIT;
         msg->handle     = cpu_to_le32(TAG_ENCODE(crq->tag));
-        msg->lba        = cpu_to_le32(rq->sector & 0xffffffff);
-        tmp             = (rq->sector >> 16) >> 16;
+        msg->lba        = cpu_to_le32(blk_rq_pos(rq) & 0xffffffff);
+        tmp             = (blk_rq_pos(rq) >> 16) >> 16;
         msg->lba_high   = cpu_to_le16( (u16) tmp );
-        msg->lba_count  = cpu_to_le16(rq->nr_sectors);
+        msg->lba_count  = cpu_to_le16(blk_rq_sectors(rq));
 
         msg_size = sizeof(struct carm_msg_rw) - sizeof(msg->sg);
         for (i = 0; i < n_elem; i++) {
@@ -726,8 +726,8 @@ static void ub_cmd_build_block(struct ub_dev *sc, struct ub_lun *lun,
          * The call to blk_queue_hardsect_size() guarantees that request
          * is aligned, but it is given in terms of 512 byte units, always.
          */
-        block = rq->sector >> lun->capacity.bshift;
-        nblks = rq->nr_sectors >> lun->capacity.bshift;
+        block = blk_rq_pos(rq) >> lun->capacity.bshift;
+        nblks = blk_rq_sectors(rq) >> lun->capacity.bshift;
 
         cmd->cdb[0] = (cmd->dir == UB_DIR_READ)? READ_10: WRITE_10;
         /* 10-byte uses 4 bytes of LBA: 2147483648KB, 2097152MB, 2048GB */
@@ -739,7 +739,7 @@ static void ub_cmd_build_block(struct ub_dev *sc, struct ub_lun *lun,
         cmd->cdb[8] = nblks;
         cmd->cdb_len = 10;
 
-        cmd->len = rq->nr_sectors * 512;
+        cmd->len = blk_rq_sectors(rq) * 512;
 }
 
 static void ub_cmd_build_packet(struct ub_dev *sc, struct ub_lun *lun,
@@ -252,7 +252,7 @@ static int send_request(struct request *req)
         struct viodasd_device *d;
         unsigned long flags;
 
-        start = (u64)req->sector << 9;
+        start = (u64)blk_rq_pos(req) << 9;
 
         if (rq_data_dir(req) == READ) {
                 direction = DMA_FROM_DEVICE;
@@ -85,7 +85,7 @@ static bool do_req(struct request_queue *q, struct virtio_blk *vblk,
         vbr->req = req;
         if (blk_fs_request(vbr->req)) {
                 vbr->out_hdr.type = 0;
-                vbr->out_hdr.sector = vbr->req->sector;
+                vbr->out_hdr.sector = blk_rq_pos(vbr->req);
                 vbr->out_hdr.ioprio = req_get_ioprio(vbr->req);
         } else if (blk_pc_request(vbr->req)) {
                 vbr->out_hdr.type = VIRTIO_BLK_T_SCSI_CMD;