Skip to content

Commit

Permalink
block/mq-deadline: Micro-optimize the batching algorithm
Browse files Browse the repository at this point in the history
When dispatching the first request of a batch, the deadline_move_request()
call clears .next_rq[] for the opposite data direction. .next_rq[] is not
restored when changing data direction. Fix this by not clearing .next_rq[]
and by keeping track of the data direction of a batch in a variable instead.

This patch is a micro-optimization because:
- The number of deadline_next_request() calls for the read direction is
  halved.
- The number of times that deadline_next_request() returns NULL is reduced.

Cc: Damien Le Moal <[email protected]>
Cc: Hannes Reinecke <[email protected]>
Cc: Christoph Hellwig <[email protected]>
Cc: Ming Lei <[email protected]>
Cc: Johannes Thumshirn <[email protected]>
Cc: Himanshu Madhani <[email protected]>
Signed-off-by: Bart Van Assche <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Jens Axboe <[email protected]>
  • Loading branch information
bvanassche authored and axboe committed Jun 21, 2021
1 parent 0775758 commit d672d32
Showing 1 changed file with 5 additions and 6 deletions.
11 changes: 5 additions & 6 deletions block/mq-deadline.c
Original file line number Diff line number Diff line change
Expand Up @@ -53,6 +53,8 @@ struct deadline_data {
struct rb_root sort_list[DD_DIR_COUNT];
struct list_head fifo_list[DD_DIR_COUNT];

/* Data direction of latest dispatched request. */
enum dd_data_dir last_dir;
/*
* next in sort order. read, write or both are NULL
*/
Expand Down Expand Up @@ -179,8 +181,6 @@ deadline_move_request(struct deadline_data *dd, struct request *rq)
{
const enum dd_data_dir data_dir = rq_data_dir(rq);

dd->next_rq[DD_READ] = NULL;
dd->next_rq[DD_WRITE] = NULL;
dd->next_rq[data_dir] = deadline_latter_request(rq);

/*
Expand Down Expand Up @@ -292,10 +292,7 @@ static struct request *__dd_dispatch_request(struct deadline_data *dd)
/*
* batches are currently reads XOR writes
*/
rq = deadline_next_request(dd, DD_WRITE);
if (!rq)
rq = deadline_next_request(dd, DD_READ);

rq = deadline_next_request(dd, dd->last_dir);
if (rq && dd->batching < dd->fifo_batch)
	/* we have a next request and are still entitled to batch */
goto dispatch_request;
Expand Down Expand Up @@ -361,6 +358,7 @@ static struct request *__dd_dispatch_request(struct deadline_data *dd)
if (!rq)
return NULL;

dd->last_dir = data_dir;
dd->batching = 0;

dispatch_request:
Expand Down Expand Up @@ -473,6 +471,7 @@ static int dd_init_sched(struct request_queue *q, struct elevator_type *e)
dd->fifo_expire[DD_WRITE] = write_expire;
dd->writes_starved = writes_starved;
dd->front_merges = 1;
dd->last_dir = DD_WRITE;
dd->fifo_batch = fifo_batch;
spin_lock_init(&dd->lock);
spin_lock_init(&dd->zone_lock);
Expand Down

0 comments on commit d672d32

Please sign in to comment.