blk-mq: pair blk_mq_start_request / blk_mq_requeue_request
Make sure we have a proper pairing between starting and requeueing
requests.  Move the dma drain and REQ_END setup into blk_mq_start_request,
and make sure blk_mq_requeue_request properly undoes them, giving us
a pair of functions to prepare and unprepare a request without leaving
side effects.

Together this ensures we always clean up properly after
BLK_MQ_RQ_QUEUE_BUSY returns from ->queue_rq.

Signed-off-by: Christoph Hellwig <[email protected]>
Signed-off-by: Jens Axboe <[email protected]>
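
To see the contract from the driver side, consider a hypothetical
->queue_rq implementation (a sketch only; the example_* device, helpers,
and names are invented here and are not part of this commit).  A driver
whose hardware ring is full returns BLK_MQ_RQ_QUEUE_BUSY and relies on
the core to unprepare the request, so the next dispatch can start it
again from a clean state:

/*
 * Hypothetical ->queue_rq, illustrating the pairing this commit
 * establishes.  All example_* names are invented for this sketch.
 */
static int example_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *rq)
{
	struct example_dev *dev = hctx->driver_data;

	/*
	 * Ring full: report BUSY.  The core then calls
	 * blk_mq_requeue_request(), which clears REQ_END and drops the
	 * dma drain segment again, so the request goes through
	 * blk_mq_start_request() cleanly on the next dispatch.
	 */
	if (example_ring_full(dev))
		return BLK_MQ_RQ_QUEUE_BUSY;

	example_submit(dev, rq);

	/* Kick the hardware at the end of the series, per REQ_END. */
	if (rq->cmd_flags & REQ_END)
		example_ring_doorbell(dev);

	return BLK_MQ_RQ_QUEUE_OK;
}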

Christoph Hellwig authored and axboe committed Feb 11, 2014
1 parent 1e93b8c  commit 49f5baa
Showing 1 changed file with 26 additions and 23 deletions.

block/blk-mq.c
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -377,7 +377,7 @@ void blk_mq_complete_request(struct request *rq)
 }
 EXPORT_SYMBOL(blk_mq_complete_request);
 
-static void blk_mq_start_request(struct request *rq)
+static void blk_mq_start_request(struct request *rq, bool last)
 {
 	struct request_queue *q = rq->q;
 
@@ -390,6 +390,25 @@ static void blk_mq_start_request(struct request *rq)
 	 */
 	rq->deadline = jiffies + q->rq_timeout;
 	set_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
+
+	if (q->dma_drain_size && blk_rq_bytes(rq)) {
+		/*
+		 * Make sure space for the drain appears.  We know we can do
+		 * this because max_hw_segments has been adjusted to be one
+		 * fewer than the device can handle.
+		 */
+		rq->nr_phys_segments++;
+	}
+
+	/*
+	 * Flag the last request in the series so that drivers know when IO
+	 * should be kicked off, if they don't do it on a per-request basis.
+	 *
+	 * Note: the flag isn't the only condition drivers should do kick off.
+	 * If drive is busy, the last request might not have the bit set.
+	 */
+	if (last)
+		rq->cmd_flags |= REQ_END;
 }
 
 static void blk_mq_requeue_request(struct request *rq)
@@ -398,6 +417,11 @@ static void blk_mq_requeue_request(struct request *rq)
 
 	trace_block_rq_requeue(q, rq);
 	clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
+
+	rq->cmd_flags &= ~REQ_END;
+
+	if (q->dma_drain_size && blk_rq_bytes(rq))
+		rq->nr_phys_segments--;
 }
 
 struct blk_mq_timeout_data {
@@ -565,29 +589,8 @@ static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
 
 		rq = list_first_entry(&rq_list, struct request, queuelist);
 		list_del_init(&rq->queuelist);
-		blk_mq_start_request(rq);
 
-		if (q->dma_drain_size && blk_rq_bytes(rq)) {
-			/*
-			 * make sure space for the drain appears we
-			 * know we can do this because max_hw_segments
-			 * has been adjusted to be one fewer than the
-			 * device can handle
-			 */
-			rq->nr_phys_segments++;
-		}
-
-		/*
-		 * Last request in the series. Flag it as such, this
-		 * enables drivers to know when IO should be kicked off,
-		 * if they don't do it on a per-request basis.
-		 *
-		 * Note: the flag isn't the only condition drivers
-		 * should do kick off. If drive is busy, the last
-		 * request might not have the bit set.
-		 */
-		if (list_empty(&rq_list))
-			rq->cmd_flags |= REQ_END;
+		blk_mq_start_request(rq, list_empty(&rq_list));
 
 		ret = q->mq_ops->queue_rq(hctx, rq);
 		switch (ret) {
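
The hunk above is truncated at the switch statement.  For context, the
body of that switch at this point in history reads roughly as follows
(a sketch reconstructed from the surrounding code of this era, not part
of the change).  The BLK_MQ_RQ_QUEUE_BUSY arm is where the new pairing
matters: the request is unprepared and put back on the local list for a
later run.

		ret = q->mq_ops->queue_rq(hctx, rq);
		switch (ret) {
		case BLK_MQ_RQ_QUEUE_OK:
			queued++;
			continue;
		case BLK_MQ_RQ_QUEUE_BUSY:
			/* Put the request back; requeue undoes start. */
			list_add(&rq->queuelist, &rq_list);
			blk_mq_requeue_request(rq);
			break;
		default:
			pr_err("blk-mq: bad return on queue: %d\n", ret);
		case BLK_MQ_RQ_QUEUE_ERROR:
			rq->errors = -EIO;
			blk_mq_end_io(rq, rq->errors);
			break;
		}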
