[BLOCK] add @uptodate to end_that_request_last() and @error to rq_end_io_fn()
Add an @uptodate argument to end_that_request_last() and an @error argument to rq_end_io_fn(). There is no generic way to pass an error code to the request completion function, which makes generic error handling of non-fs requests difficult (rq->errors is driver-specific and each driver uses it differently). This patch adds @uptodate to end_that_request_last() and @error to rq_end_io_fn().

For fs requests, this doesn't really matter, so just passing the same uptodate argument used in the last call to end_that_request_first() should suffice. IMHO, this can also help the generic command-carrying request Jens is working on.

Signed-off-by: Tejun Heo <htejun@gmail.com>
Signed-off-by: Jens Axboe <axboe@suse.de>
parent 64100099ed
commit 8ffdc6550c

21 changed files with 42 additions and 34 deletions
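Illustrative note (not part of the patch): with the new typedef, a driver's completion hook receives the completion status directly instead of decoding rq->errors. A minimal sketch with a hypothetical driver name; since end_that_request_last() runs with the queue lock held (see the hunks below), the hook may use the lock-held __blk_put_request():

    /* matches the new typedef: void (rq_end_io_fn)(struct request *, int) */
    static void mydrv_end_io(struct request *rq, int error)
    {
            /* @error is 0 on success or a negative errno such as -EIO */
            if (error)
                    printk(KERN_ERR "mydrv: request failed with %d\n", error);
            __blk_put_request(rq->q, rq);
    }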
@@ -498,7 +498,7 @@ struct request *elv_next_request(request_queue_t *q)
 			blkdev_dequeue_request(rq);
 			rq->flags |= REQ_QUIET;
 			end_that_request_chunk(rq, 0, nr_bytes);
-			end_that_request_last(rq);
+			end_that_request_last(rq, 0);
 		} else {
 			printk(KERN_ERR "%s: bad return=%d\n", __FUNCTION__,
 			       ret);
@@ -344,7 +344,7 @@ EXPORT_SYMBOL(blk_queue_issue_flush_fn);
 /*
  * Cache flushing for ordered writes handling
  */
-static void blk_pre_flush_end_io(struct request *flush_rq)
+static void blk_pre_flush_end_io(struct request *flush_rq, int error)
 {
 	struct request *rq = flush_rq->end_io_data;
 	request_queue_t *q = rq->q;
@@ -362,7 +362,7 @@ static void blk_pre_flush_end_io(struct request *flush_rq)
 	}
 }
 
-static void blk_post_flush_end_io(struct request *flush_rq)
+static void blk_post_flush_end_io(struct request *flush_rq, int error)
 {
 	struct request *rq = flush_rq->end_io_data;
 	request_queue_t *q = rq->q;
@@ -2317,7 +2317,7 @@ EXPORT_SYMBOL(blk_rq_map_kern);
  */
 void blk_execute_rq_nowait(request_queue_t *q, struct gendisk *bd_disk,
 			   struct request *rq, int at_head,
-			   void (*done)(struct request *))
+			   rq_end_io_fn *done)
 {
 	int where = at_head ? ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK;
 
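Usage sketch (an assumption about the typical synchronous pattern of this era, with request setup elided): a caller can pass blk_end_sync_rq(), converted in the next hunk, as the done callback and sleep on rq->waiting:

    DECLARE_COMPLETION(wait);

    rq->waiting = &wait;
    blk_execute_rq_nowait(q, bd_disk, rq, 0, blk_end_sync_rq);
    wait_for_completion(&wait);
    rq->waiting = NULL;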
@@ -2521,7 +2521,7 @@ EXPORT_SYMBOL(blk_put_request);
  * blk_end_sync_rq - executes a completion event on a request
  * @rq: request to complete
  */
-void blk_end_sync_rq(struct request *rq)
+void blk_end_sync_rq(struct request *rq, int error)
 {
 	struct completion *waiting = rq->waiting;
 
@@ -3183,9 +3183,17 @@ EXPORT_SYMBOL(end_that_request_chunk);
 /*
  * queue lock must be held
  */
-void end_that_request_last(struct request *req)
+void end_that_request_last(struct request *req, int uptodate)
 {
 	struct gendisk *disk = req->rq_disk;
+	int error;
+
+	/*
+	 * extend uptodate bool to allow < 0 value to be direct io error
+	 */
+	error = 0;
+	if (end_io_error(uptodate))
+		error = !uptodate ? -EIO : uptodate;
 
 	if (unlikely(laptop_mode) && blk_fs_request(req))
 		laptop_io_completion();
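The hunk above is the core of the change: @uptodate keeps its boolean meaning, but a value below zero now carries an errno directly. Restated case by case (assuming end_io_error(rc) tests rc <= 0, per the blkdev.h helper of this period):

    /*
     * uptodate > 0   =>  error = 0          (success)
     * uptodate == 0  =>  error = -EIO       (generic failure)
     * uptodate < 0   =>  error = uptodate   (direct errno, e.g. the
     *                    -EOPNOTSUPP passed by sd_end_flush() below)
     */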
@@ -3200,7 +3208,7 @@ void end_that_request_last(struct request *req)
 		disk->in_flight--;
 	}
 	if (req->end_io)
-		req->end_io(req);
+		req->end_io(req, error);
 	else
 		__blk_put_request(req->q, req);
 }
@@ -3212,7 +3220,7 @@ void end_request(struct request *req, int uptodate)
 	if (!end_that_request_first(req, uptodate, req->hard_cur_sectors)) {
 		add_disk_randomness(req->rq_disk);
 		blkdev_dequeue_request(req);
-		end_that_request_last(req);
+		end_that_request_last(req, uptodate);
 	}
 }
 
@@ -3471,7 +3471,7 @@ static inline boolean DAC960_ProcessCompletedRequest(DAC960_Command_T *Command,
 
   if (!end_that_request_first(Request, UpToDate, Command->BlockCount)) {
 
- 	end_that_request_last(Request);
+ 	end_that_request_last(Request, UpToDate);
 
 	if (Command->Completion) {
 		complete(Command->Completion);
@@ -2310,7 +2310,7 @@ static inline void complete_command( ctlr_info_t *h, CommandList_struct *cmd,
 	printk("Done with %p\n", cmd->rq);
 #endif /* CCISS_DEBUG */
 
-	end_that_request_last(cmd->rq);
+	end_that_request_last(cmd->rq, status ? 1 : -EIO);
 	cmd_free(h,cmd,1);
 }
 
@@ -1036,7 +1036,7 @@ static inline void complete_command(cmdlist_t *cmd, int timeout)
 	complete_buffers(cmd->rq->bio, ok);
 
 	DBGPX(printk("Done with %p\n", cmd->rq););
-	end_that_request_last(cmd->rq);
+	end_that_request_last(cmd->rq, ok ? 1 : -EIO);
 }
 
 /*
@@ -2301,7 +2301,7 @@ static void floppy_end_request(struct request *req, int uptodate)
 	add_disk_randomness(req->rq_disk);
 	floppy_off((long)req->rq_disk->private_data);
 	blkdev_dequeue_request(req);
-	end_that_request_last(req);
+	end_that_request_last(req, uptodate);
 
 	/* We're done with the request */
 	current_req = NULL;
@@ -136,7 +136,7 @@ static void nbd_end_request(struct request *req)
 
 	spin_lock_irqsave(q->queue_lock, flags);
 	if (!end_that_request_first(req, uptodate, req->nr_sectors)) {
-		end_that_request_last(req);
+		end_that_request_last(req, uptodate);
 	}
 	spin_unlock_irqrestore(q->queue_lock, flags);
 }
@@ -770,7 +770,7 @@ static inline void carm_end_request_queued(struct carm_host *host,
 	rc = end_that_request_first(req, uptodate, req->hard_nr_sectors);
 	assert(rc == 0);
 
-	end_that_request_last(req);
+	end_that_request_last(req, uptodate);
 
 	rc = carm_put_request(host, crq);
 	assert(rc == 0);
@@ -951,7 +951,7 @@ static void ub_rw_cmd_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
 static void ub_end_rq(struct request *rq, int uptodate)
 {
 	end_that_request_first(rq, uptodate, rq->hard_nr_sectors);
-	end_that_request_last(rq);
+	end_that_request_last(rq, uptodate);
 }
 
 static int ub_rw_cmd_retry(struct ub_dev *sc, struct ub_lun *lun,
@@ -305,7 +305,7 @@ static void viodasd_end_request(struct request *req, int uptodate,
 	if (end_that_request_first(req, uptodate, num_sectors))
 		return;
 	add_disk_randomness(req->rq_disk);
-	end_that_request_last(req);
+	end_that_request_last(req, uptodate);
 }
 
 /*
@@ -1402,7 +1402,7 @@ static void do_cdu31a_request(request_queue_t * q)
 			if (!end_that_request_first(req, 1, nblock)) {
 				spin_lock_irq(q->queue_lock);
 				blkdev_dequeue_request(req);
-				end_that_request_last(req);
+				end_that_request_last(req, 1);
 				spin_unlock_irq(q->queue_lock);
 			}
 			continue;
@@ -614,7 +614,7 @@ static void cdrom_end_request (ide_drive_t *drive, int uptodate)
 		 */
 		spin_lock_irqsave(&ide_lock, flags);
 		end_that_request_chunk(failed, 0, failed->data_len);
-		end_that_request_last(failed);
+		end_that_request_last(failed, 0);
 		spin_unlock_irqrestore(&ide_lock, flags);
 	}
 
@@ -1735,7 +1735,7 @@ end_request:
 
 	spin_lock_irqsave(&ide_lock, flags);
 	blkdev_dequeue_request(rq);
-	end_that_request_last(rq);
+	end_that_request_last(rq, 1);
 	HWGROUP(drive)->rq = NULL;
 	spin_unlock_irqrestore(&ide_lock, flags);
 	return ide_stopped;
@@ -89,7 +89,7 @@ int __ide_end_request(ide_drive_t *drive, struct request *rq, int uptodate,
 
 		blkdev_dequeue_request(rq);
 		HWGROUP(drive)->rq = NULL;
-		end_that_request_last(rq);
+		end_that_request_last(rq, uptodate);
 		ret = 0;
 	}
 	return ret;
@@ -247,7 +247,7 @@ static void ide_complete_pm_request (ide_drive_t *drive, struct request *rq)
 	}
 	blkdev_dequeue_request(rq);
 	HWGROUP(drive)->rq = NULL;
-	end_that_request_last(rq);
+	end_that_request_last(rq, 1);
 	spin_unlock_irqrestore(&ide_lock, flags);
 }
 
@@ -379,7 +379,7 @@ void ide_end_drive_cmd (ide_drive_t *drive, u8 stat, u8 err)
 	blkdev_dequeue_request(rq);
 	HWGROUP(drive)->rq = NULL;
 	rq->errors = err;
-	end_that_request_last(rq);
+	end_that_request_last(rq, !rq->errors);
 	spin_unlock_irqrestore(&ide_lock, flags);
 }
 
@@ -466,7 +466,7 @@ static void i2o_block_end_request(struct request *req, int uptodate,
 
 	spin_lock_irqsave(q->queue_lock, flags);
 
-	end_that_request_last(req);
+	end_that_request_last(req, uptodate);
 
 	if (likely(dev)) {
 		dev->open_queue_depth--;
@@ -263,7 +263,7 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
 			 */
 			add_disk_randomness(req->rq_disk);
 			blkdev_dequeue_request(req);
-			end_that_request_last(req);
+			end_that_request_last(req, 1);
 		}
 		spin_unlock_irq(&md->lock);
 	} while (ret);
@@ -289,7 +289,7 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
 
 	add_disk_randomness(req->rq_disk);
 	blkdev_dequeue_request(req);
-	end_that_request_last(req);
+	end_that_request_last(req, 0);
 	spin_unlock_irq(&md->lock);
 
 	return 0;
@@ -1035,7 +1035,7 @@ dasd_end_request(struct request *req, int uptodate)
 	if (end_that_request_first(req, uptodate, req->hard_nr_sectors))
 		BUG();
 	add_disk_randomness(req->rq_disk);
-	end_that_request_last(req);
+	end_that_request_last(req, uptodate);
 }
 
 /*
@@ -78,7 +78,7 @@ tapeblock_end_request(struct request *req, int uptodate)
 {
 	if (end_that_request_first(req, uptodate, req->hard_nr_sectors))
 		BUG();
-	end_that_request_last(req);
+	end_that_request_last(req, uptodate);
 }
 
 static void
@@ -1046,7 +1046,7 @@ static int idescsi_eh_reset (struct scsi_cmnd *cmd)
 
 	/* kill current request */
 	blkdev_dequeue_request(req);
-	end_that_request_last(req);
+	end_that_request_last(req, 0);
 	if (req->flags & REQ_SENSE)
 		kfree(scsi->pc->buffer);
 	kfree(scsi->pc);
@@ -1056,7 +1056,7 @@ static int idescsi_eh_reset (struct scsi_cmnd *cmd)
 	/* now nuke the drive queue */
 	while ((req = elv_next_request(drive->queue))) {
 		blkdev_dequeue_request(req);
-		end_that_request_last(req);
+		end_that_request_last(req, 0);
 	}
 
 	HWGROUP(drive)->rq = NULL;
@@ -791,7 +791,7 @@ static struct scsi_cmnd *scsi_end_request(struct scsi_cmnd *cmd, int uptodate,
 		spin_lock_irqsave(q->queue_lock, flags);
 		if (blk_rq_tagged(req))
 			blk_queue_end_tag(q, req);
-		end_that_request_last(req);
+		end_that_request_last(req, uptodate);
 		spin_unlock_irqrestore(q->queue_lock, flags);
 
 		/*
@@ -748,7 +748,7 @@ static void sd_end_flush(request_queue_t *q, struct request *flush_rq)
 		 * force journal abort of barriers
 		 */
 		end_that_request_first(rq, -EOPNOTSUPP, rq->hard_nr_sectors);
-		end_that_request_last(rq);
+		end_that_request_last(rq, -EOPNOTSUPP);
 	}
 }
 
@@ -102,7 +102,7 @@ void copy_io_context(struct io_context **pdst, struct io_context **psrc);
 void swap_io_context(struct io_context **ioc1, struct io_context **ioc2);
 
 struct request;
-typedef void (rq_end_io_fn)(struct request *);
+typedef void (rq_end_io_fn)(struct request *, int);
 
 struct request_list {
 	int count[2];
@@ -560,7 +560,7 @@ extern void register_disk(struct gendisk *dev);
 extern void generic_make_request(struct bio *bio);
 extern void blk_put_request(struct request *);
 extern void __blk_put_request(request_queue_t *, struct request *);
-extern void blk_end_sync_rq(struct request *rq);
+extern void blk_end_sync_rq(struct request *rq, int error);
 extern void blk_attempt_remerge(request_queue_t *, struct request *);
 extern struct request *blk_get_request(request_queue_t *, int, gfp_t);
 extern void blk_insert_request(request_queue_t *, struct request *, int, void *);
@@ -614,7 +614,7 @@ static inline void blk_run_address_space(struct address_space *mapping)
  */
 extern int end_that_request_first(struct request *, int, int);
 extern int end_that_request_chunk(struct request *, int, int);
-extern void end_that_request_last(struct request *);
+extern void end_that_request_last(struct request *, int);
 extern void end_request(struct request *req, int uptodate);
 
 /*