block: implement blk_rq_pos/[cur_]sectors() and convert obvious ones
Implement accessors - blk_rq_pos(), blk_rq_sectors() and
blk_rq_cur_sectors() which return rq->hard_sector, rq->hard_nr_sectors
and rq->hard_cur_sectors respectively, and convert direct references of
the said fields to the accessors.

This is in preparation for request data length handling cleanup.

Geert  : suggested adding const to struct request * parameter to accessors
Sergei : spotted error in patch description

[ Impact: cleanup ]

Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: Geert Uytterhoeven <Geert.Uytterhoeven@sonycom.com>
Acked-by: Stephen Rothwell <sfr@canb.auug.org.au>
Tested-by: Grant Likely <grant.likely@secretlab.ca>
Acked-by: Grant Likely <grant.likely@secretlab.ca>
Acked-by: Sergei Shtylyov <sshtylyov@ru.mvista.com>
Cc: Bartlomiej Zolnierkiewicz <bzolnier@gmail.com>
Cc: Borislav Petkov <petkovbb@googlemail.com>
Cc: James Bottomley <James.Bottomley@HansenPartnership.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
commit 5b93629b45
parent c3a4d78c58
12 changed files with 42 additions and 25 deletions
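For orientation, a minimal usage sketch of the three accessors this patch introduces (not part of the commit; my_end_io() and its debug message are hypothetical):

#include <linux/blkdev.h>

/* Hypothetical completion hook showing the conversion pattern. */
static void my_end_io(struct request *rq)
{
	sector_t pos = blk_rq_pos(rq);			/* was rq->hard_sector */
	unsigned int nr = blk_rq_sectors(rq);		/* was rq->hard_nr_sectors */
	unsigned int cur = blk_rq_cur_sectors(rq);	/* was rq->hard_cur_sectors */

	pr_debug("rq at sector %llu: %u sectors left, %u in current segment\n",
		 (unsigned long long)pos, nr, cur);
}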
block/blk-barrier.c
@@ -163,7 +163,7 @@ static inline bool start_ordered(struct request_queue *q, struct request **rqp)
 	 * For an empty barrier, there's no actual BAR request, which
 	 * in turn makes POSTFLUSH unnecessary.  Mask them off.
 	 */
-	if (!rq->hard_nr_sectors) {
+	if (!blk_rq_sectors(rq)) {
 		q->ordered &= ~(QUEUE_ORDERED_DO_BAR |
 				QUEUE_ORDERED_DO_POSTFLUSH);
 		/*
block/blk-core.c
@@ -1683,7 +1683,7 @@ static void blk_account_io_done(struct request *req)
 unsigned int blk_rq_bytes(struct request *rq)
 {
 	if (blk_fs_request(rq))
-		return rq->hard_nr_sectors << 9;
+		return blk_rq_sectors(rq) << 9;
 
 	return rq->data_len;
 }
block/cfq-iosched.c
@@ -760,7 +760,7 @@ static void cfq_activate_request(struct request_queue *q, struct request *rq)
 	cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "activate rq, drv=%d",
 						cfqd->rq_in_driver);
 
-	cfqd->last_position = rq->hard_sector + rq->hard_nr_sectors;
+	cfqd->last_position = blk_rq_pos(rq) + blk_rq_sectors(rq);
 }
 
 static void cfq_deactivate_request(struct request_queue *q, struct request *rq)
drivers/block/ps3disk.c
@@ -136,7 +136,7 @@ static int ps3disk_submit_request_sg(struct ps3_storage_device *dev,
 	dev_dbg(&dev->sbd.core,
 		"%s:%u: %s req has %u bvecs for %lu sectors %lu hard sectors\n",
 		__func__, __LINE__, op, n, req->nr_sectors,
-		req->hard_nr_sectors);
+		blk_rq_sectors(req));
 #endif
 
 	start_sector = req->sector * priv->blocking_factor;
drivers/block/viodasd.c
@@ -368,12 +368,12 @@ static void do_viodasd_request(struct request_queue *q)
 		blkdev_dequeue_request(req);
 		/* check that request contains a valid command */
 		if (!blk_fs_request(req)) {
-			viodasd_end_request(req, -EIO, req->hard_nr_sectors);
+			viodasd_end_request(req, -EIO, blk_rq_sectors(req));
 			continue;
 		}
 		/* Try sending the request */
 		if (send_request(req) != 0)
-			viodasd_end_request(req, -EIO, req->hard_nr_sectors);
+			viodasd_end_request(req, -EIO, blk_rq_sectors(req));
 	}
 }
 
@@ -590,7 +590,7 @@ static int viodasd_handle_read_write(struct vioblocklpevent *bevent)
 		err = vio_lookup_rc(viodasd_err_table, bevent->sub_result);
 		printk(VIOD_KERN_WARNING "read/write error %d:0x%04x (%s)\n",
 		       event->xRc, bevent->sub_result, err->msg);
-		num_sect = req->hard_nr_sectors;
+		num_sect = blk_rq_sectors(req);
 	}
 	qlock = req->q->queue_lock;
 	spin_lock_irqsave(qlock, irq_flags);
drivers/block/xsysace.c
@@ -645,8 +645,8 @@ static void ace_fsm_dostate(struct ace_device *ace)
 
 		/* Okay, it's a data request, set it up for transfer */
 		dev_dbg(ace->dev,
-			"request: sec=%llx hcnt=%lx, ccnt=%x, dir=%i\n",
-			(unsigned long long) req->sector, req->hard_nr_sectors,
+			"request: sec=%llx hcnt=%x, ccnt=%x, dir=%i\n",
+			(unsigned long long) req->sector, blk_rq_sectors(req),
 			req->current_nr_sectors, rq_data_dir(req));
 
 		ace->req = req;
@@ -654,7 +654,7 @@ static void ace_fsm_dostate(struct ace_device *ace)
 		ace->data_count = req->current_nr_sectors * ACE_BUF_PER_SECTOR;
 		ace_out32(ace, ACE_MPULBA, req->sector & 0x0FFFFFFF);
 
-		count = req->hard_nr_sectors;
+		count = blk_rq_sectors(req);
 		if (rq_data_dir(req)) {
 			/* Kick off write request */
 			dev_dbg(ace->dev, "write data\n");
@@ -719,8 +719,8 @@ static void ace_fsm_dostate(struct ace_device *ace)
 		/* bio finished; is there another one? */
 		if (__blk_end_request(ace->req, 0,
 				      blk_rq_cur_bytes(ace->req))) {
-			/* dev_dbg(ace->dev, "next block; h=%li c=%i\n",
-			 *      ace->req->hard_nr_sectors,
+			/* dev_dbg(ace->dev, "next block; h=%u c=%u\n",
+			 *      blk_rq_sectors(ace->req),
 			 *      ace->req->current_nr_sectors);
 			 */
 			ace->data_ptr = ace->req->buffer;
drivers/ide/ide-cd.c
@@ -730,7 +730,7 @@ out_end:
 	if (blk_pc_request(rq))
 		nsectors = (rq->data_len + 511) >> 9;
 	else
-		nsectors = rq->hard_nr_sectors;
+		nsectors = blk_rq_sectors(rq);
 
 	if (nsectors == 0)
 		nsectors = 1;
@@ -875,7 +875,7 @@ static ide_startstop_t ide_cd_do_request(ide_drive_t *drive, struct request *rq,
 
 	return ide_issue_pc(drive, &cmd);
 out_end:
-	nsectors = rq->hard_nr_sectors;
+	nsectors = blk_rq_sectors(rq);
 
 	if (nsectors == 0)
 		nsectors = 1;
@@ -1359,8 +1359,8 @@ static int ide_cdrom_probe_capabilities(ide_drive_t *drive)
 static int ide_cdrom_prep_fs(struct request_queue *q, struct request *rq)
 {
 	int hard_sect = queue_hardsect_size(q);
-	long block = (long)rq->hard_sector / (hard_sect >> 9);
-	unsigned long blocks = rq->hard_nr_sectors / (hard_sect >> 9);
+	long block = (long)blk_rq_pos(rq) / (hard_sect >> 9);
+	unsigned long blocks = blk_rq_sectors(rq) / (hard_sect >> 9);
 
 	memset(rq->cmd, 0, BLK_MAX_CDB);
 
drivers/ide/ide-io.c
@@ -118,7 +118,7 @@ unsigned int ide_rq_bytes(struct request *rq)
 	if (blk_pc_request(rq))
 		return rq->data_len;
 	else
-		return rq->hard_cur_sectors << 9;
+		return blk_rq_cur_sectors(rq) << 9;
 }
 EXPORT_SYMBOL_GPL(ide_rq_bytes);
 
@@ -133,7 +133,7 @@ int ide_complete_rq(ide_drive_t *drive, int error, unsigned int nr_bytes)
 	 * and complete the whole request right now
 	 */
 	if (blk_noretry_request(rq) && error <= 0)
-		nr_bytes = rq->hard_nr_sectors << 9;
+		nr_bytes = blk_rq_sectors(rq) << 9;
 
 	rc = ide_end_rq(drive, rq, error, nr_bytes);
 	if (rc == 0)
drivers/message/i2o/i2o_block.c
@@ -427,7 +427,7 @@ static void i2o_block_end_request(struct request *req, int error,
 	unsigned long flags;
 
 	if (blk_end_request(req, error, nr_bytes)) {
-		int leftover = (req->hard_nr_sectors << KERNEL_SECTOR_SHIFT);
+		int leftover = (blk_rq_sectors(req) << KERNEL_SECTOR_SHIFT);
 
 		if (blk_pc_request(req))
 			leftover = req->data_len;
drivers/scsi/scsi_lib.c
@@ -546,7 +546,7 @@ static struct scsi_cmnd *scsi_end_request(struct scsi_cmnd *cmd, int error,
 	 * to queue the remainder of them.
 	 */
 	if (blk_end_request(req, error, bytes)) {
-		int leftover = (req->hard_nr_sectors << 9);
+		int leftover = blk_rq_sectors(req) << 9;
 
 		if (blk_pc_request(req))
 			leftover = req->resid_len;
include/linux/blkdev.h
@@ -832,13 +832,30 @@ static inline void blk_run_address_space(struct address_space *mapping)
 extern void blkdev_dequeue_request(struct request *req);
 
 /*
- * blk_end_request() takes bytes instead of sectors as a complete size.
- * blk_rq_bytes() returns bytes left to complete in the entire request.
- * blk_rq_cur_bytes() returns bytes left to complete in the current segment.
+ * blk_rq_pos()		: the current sector
+ * blk_rq_bytes()	: bytes left in the entire request
+ * blk_rq_cur_bytes()	: bytes left in the current segment
+ * blk_rq_sectors()	: sectors left in the entire request
+ * blk_rq_cur_sectors()	: sectors left in the current segment
  */
+static inline sector_t blk_rq_pos(const struct request *rq)
+{
+	return rq->hard_sector;
+}
+
 extern unsigned int blk_rq_bytes(struct request *rq);
 extern unsigned int blk_rq_cur_bytes(struct request *rq);
 
+static inline unsigned int blk_rq_sectors(const struct request *rq)
+{
+	return rq->hard_nr_sectors;
+}
+
+static inline unsigned int blk_rq_cur_sectors(const struct request *rq)
+{
+	return rq->hard_cur_sectors;
+}
+
 /*
  * Request completion related functions.
  *
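The new sector accessors compose with the byte-based helpers above; a quick sanity sketch (illustrative only; rq_accessors_agree() is hypothetical), based on the blk_rq_bytes() hunk earlier in this commit:

#include <linux/blkdev.h>

/*
 * Illustrative only: for a file-system request blk_rq_bytes() is
 * implemented as blk_rq_sectors() << 9, so the two expressions agree;
 * PC requests report rq->data_len instead.
 */
static inline bool rq_accessors_agree(struct request *rq)
{
	return !blk_fs_request(rq) ||
	       blk_rq_bytes(rq) == (blk_rq_sectors(rq) << 9);
}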
kernel/trace/blktrace.c
@@ -646,7 +646,7 @@ static void blk_add_trace_rq(struct request_queue *q, struct request *rq,
 				rq->cmd_len, rq->cmd);
 	} else {
 		what |= BLK_TC_ACT(BLK_TC_FS);
-		__blk_add_trace(bt, rq->hard_sector, rq->hard_nr_sectors << 9,
+		__blk_add_trace(bt, blk_rq_pos(rq), blk_rq_sectors(rq) << 9,
 				rw, what, rq->errors, 0, NULL);
 	}
 }
@@ -857,7 +857,7 @@ void blk_add_driver_data(struct request_queue *q,
 		__blk_add_trace(bt, 0, rq->data_len, 0, BLK_TA_DRV_DATA,
 				rq->errors, len, data);
 	else
-		__blk_add_trace(bt, rq->hard_sector, rq->hard_nr_sectors << 9,
+		__blk_add_trace(bt, blk_rq_pos(rq), blk_rq_sectors(rq) << 9,
 				0, BLK_TA_DRV_DATA, rq->errors, len, data);
 }
 EXPORT_SYMBOL_GPL(blk_add_driver_data);