block: change request end_io handler to pass back a return value
Everything is just converted to returning RQ_END_IO_NONE, and there should be no functional changes with this patch. This is in preparation for allowing the end_io handler to pass ownership back to the block layer, rather than retain ownership of the request. Reviewed-by: Keith Busch <kbusch@kernel.org> Signed-off-by: Jens Axboe <axboe@kernel.dk>
This commit is contained in:
parent
4b6a5d9cea
commit
de671d6116
13 changed files with 65 additions and 29 deletions
|
|
@ -217,7 +217,8 @@ static void blk_flush_complete_seq(struct request *rq,
|
|||
blk_kick_flush(q, fq, cmd_flags);
|
||||
}
|
||||
|
||||
static void flush_end_io(struct request *flush_rq, blk_status_t error)
|
||||
static enum rq_end_io_ret flush_end_io(struct request *flush_rq,
|
||||
blk_status_t error)
|
||||
{
|
||||
struct request_queue *q = flush_rq->q;
|
||||
struct list_head *running;
|
||||
|
|
@ -231,7 +232,7 @@ static void flush_end_io(struct request *flush_rq, blk_status_t error)
|
|||
if (!req_ref_put_and_test(flush_rq)) {
|
||||
fq->rq_status = error;
|
||||
spin_unlock_irqrestore(&fq->mq_flush_lock, flags);
|
||||
return;
|
||||
return RQ_END_IO_NONE;
|
||||
}
|
||||
|
||||
blk_account_io_flush(flush_rq);
|
||||
|
|
@ -268,6 +269,7 @@ static void flush_end_io(struct request *flush_rq, blk_status_t error)
|
|||
}
|
||||
|
||||
spin_unlock_irqrestore(&fq->mq_flush_lock, flags);
|
||||
return RQ_END_IO_NONE;
|
||||
}
|
||||
|
||||
bool is_flush_rq(struct request *rq)
|
||||
|
|
@ -353,7 +355,8 @@ static void blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq,
|
|||
blk_flush_queue_rq(flush_rq, false);
|
||||
}
|
||||
|
||||
static void mq_flush_data_end_io(struct request *rq, blk_status_t error)
|
||||
static enum rq_end_io_ret mq_flush_data_end_io(struct request *rq,
|
||||
blk_status_t error)
|
||||
{
|
||||
struct request_queue *q = rq->q;
|
||||
struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
|
||||
|
|
@ -375,6 +378,7 @@ static void mq_flush_data_end_io(struct request *rq, blk_status_t error)
|
|||
spin_unlock_irqrestore(&fq->mq_flush_lock, flags);
|
||||
|
||||
blk_mq_sched_restart(hctx);
|
||||
return RQ_END_IO_NONE;
|
||||
}
|
||||
|
||||
/**
|
||||
|
|
|
|||
|
|
@ -1001,7 +1001,8 @@ inline void __blk_mq_end_request(struct request *rq, blk_status_t error)
|
|||
|
||||
if (rq->end_io) {
|
||||
rq_qos_done(rq->q, rq);
|
||||
rq->end_io(rq, error);
|
||||
if (rq->end_io(rq, error) == RQ_END_IO_FREE)
|
||||
blk_mq_free_request(rq);
|
||||
} else {
|
||||
blk_mq_free_request(rq);
|
||||
}
|
||||
|
|
@ -1295,12 +1296,13 @@ struct blk_rq_wait {
|
|||
blk_status_t ret;
|
||||
};
|
||||
|
||||
static void blk_end_sync_rq(struct request *rq, blk_status_t ret)
|
||||
static enum rq_end_io_ret blk_end_sync_rq(struct request *rq, blk_status_t ret)
|
||||
{
|
||||
struct blk_rq_wait *wait = rq->end_io_data;
|
||||
|
||||
wait->ret = ret;
|
||||
complete(&wait->done);
|
||||
return RQ_END_IO_NONE;
|
||||
}
|
||||
|
||||
bool blk_rq_is_poll(struct request *rq)
|
||||
|
|
@ -1534,10 +1536,12 @@ static bool blk_mq_req_expired(struct request *rq, unsigned long *next)
|
|||
|
||||
void blk_mq_put_rq_ref(struct request *rq)
|
||||
{
|
||||
if (is_flush_rq(rq))
|
||||
rq->end_io(rq, 0);
|
||||
else if (req_ref_put_and_test(rq))
|
||||
if (is_flush_rq(rq)) {
|
||||
if (rq->end_io(rq, 0) == RQ_END_IO_FREE)
|
||||
blk_mq_free_request(rq);
|
||||
} else if (req_ref_put_and_test(rq)) {
|
||||
__blk_mq_free_request(rq);
|
||||
}
|
||||
}
|
||||
|
||||
static bool blk_mq_check_expired(struct request *rq, void *priv)
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue