
Commit cfd6971

Algodev-github authored and axboe committed
block, bfq: reduce latency during request-pool saturation
This patch introduces a heuristic that reduces latency when the I/O-request pool is saturated. This goal is achieved by disabling device idling, for non-weight-raised queues, when there are weight-raised queues with pending or in-flight requests. In fact, as explained in more detail in the comment on the function bfq_bfqq_may_idle(), this reduces the rate at which processes associated with non-weight-raised queues grab requests from the pool, thereby increasing the probability that processes associated with weight-raised queues get a request immediately (or at least soon) when they need one. Along the same line, if there are weight-raised queues, then this patch halves the service rate of async (write) requests for non-weight-raised queues.

Signed-off-by: Paolo Valente <[email protected]>
Signed-off-by: Arianna Avanzini <[email protected]>
Signed-off-by: Jens Axboe <[email protected]>
1 parent bcd5642 commit cfd6971
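
In short, the patch hinges on a new wr_busy_queues counter in struct bfq_data. The snippet below is a condensed, illustrative restatement of the two rules described above, using the names that appear in the diff that follows; it is not itself part of the patch.

	/*
	 * Illustrative restatement of the two rules added by this patch
	 * (condensed from the diff below, not a separate implementation).
	 *
	 * 1) In bfq_serv_to_charge(): while weight-raised queues are busy,
	 *    async requests are charged twice as much service, i.e. they
	 *    are effectively served at half the rate.
	 */
	if (bfqq->bfqd->wr_busy_queues == 0)
		return blk_rq_sectors(rq) * bfq_async_charge_factor;
	return blk_rq_sectors(rq) * 2 * bfq_async_charge_factor;

	/*
	 * 2) In bfq_bfqq_may_idle(): idling is no longer treated as a
	 *    throughput boost while any weight-raised queue is busy, so
	 *    the device is not idled for non-weight-raised queues and they
	 *    drain the request pool more slowly.
	 */
	idling_boosts_thr_without_issues = idling_boosts_thr &&
		bfqd->wr_busy_queues == 0;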

File tree

1 file changed (+63, -3)


block/bfq-iosched.c

Lines changed: 63 additions & 3 deletions
@@ -420,6 +420,8 @@ struct bfq_data {
	 * queue in service, even if it is idling).
	 */
	int busy_queues;
+	/* number of weight-raised busy @bfq_queues */
+	int wr_busy_queues;
	/* number of queued requests */
	int queued;
	/* number of requests dispatched and waiting for completion */
@@ -2490,6 +2492,9 @@ static void bfq_del_bfqq_busy(struct bfq_data *bfqd, struct bfq_queue *bfqq,

	bfqd->busy_queues--;

+	if (bfqq->wr_coeff > 1)
+		bfqd->wr_busy_queues--;
+
	bfqg_stats_update_dequeue(bfqq_group(bfqq));

	bfq_deactivate_bfqq(bfqd, bfqq, true, expiration);
@@ -2506,6 +2511,9 @@ static void bfq_add_bfqq_busy(struct bfq_data *bfqd, struct bfq_queue *bfqq)

	bfq_mark_bfqq_busy(bfqq);
	bfqd->busy_queues++;
+
+	if (bfqq->wr_coeff > 1)
+		bfqd->wr_busy_queues++;
 }

 #ifdef CONFIG_BFQ_GROUP_IOSCHED
@@ -3779,7 +3787,16 @@ static unsigned long bfq_serv_to_charge(struct request *rq,
	if (bfq_bfqq_sync(bfqq) || bfqq->wr_coeff > 1)
		return blk_rq_sectors(rq);

-	return blk_rq_sectors(rq) * bfq_async_charge_factor;
+	/*
+	 * If there are no weight-raised queues, then amplify service
+	 * by just the async charge factor; otherwise amplify service
+	 * by twice the async charge factor, to further reduce latency
+	 * for weight-raised queues.
+	 */
+	if (bfqq->bfqd->wr_busy_queues == 0)
+		return blk_rq_sectors(rq) * bfq_async_charge_factor;
+
+	return blk_rq_sectors(rq) * 2 * bfq_async_charge_factor;
 }

 /**
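
As a rough worked example of the hunk above (the factor value is assumed for illustration, not taken from this patch): if bfq_async_charge_factor were 10, an 8-sector async write would be charged 8 * 10 = 80 sectors of service while wr_busy_queues == 0, but 8 * 2 * 10 = 160 sectors as soon as at least one weight-raised queue is busy, i.e. async service for non-weight-raised queues proceeds at half its normal rate.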
@@ -4234,6 +4251,7 @@ static void bfq_add_request(struct request *rq)
		bfqq->wr_coeff = bfqd->bfq_wr_coeff;
		bfqq->wr_cur_max_time = bfq_wr_duration(bfqd);

+		bfqd->wr_busy_queues++;
		bfqq->entity.prio_changed = 1;
	}
	if (prev != bfqq->next_rq)
@@ -4474,6 +4492,8 @@ static void bfq_requests_merged(struct request_queue *q, struct request *rq,
 /* Must be called with bfqq != NULL */
 static void bfq_bfqq_end_wr(struct bfq_queue *bfqq)
 {
+	if (bfq_bfqq_busy(bfqq))
+		bfqq->bfqd->wr_busy_queues--;
	bfqq->wr_coeff = 1;
	bfqq->wr_cur_max_time = 0;
	bfqq->last_wr_start_finish = jiffies;
@@ -5497,7 +5517,8 @@ static bool bfq_may_expire_for_budg_timeout(struct bfq_queue *bfqq)
 static bool bfq_bfqq_may_idle(struct bfq_queue *bfqq)
 {
	struct bfq_data *bfqd = bfqq->bfqd;
-	bool idling_boosts_thr, asymmetric_scenario;
+	bool idling_boosts_thr, idling_boosts_thr_without_issues,
+	     asymmetric_scenario;

	if (bfqd->strict_guarantees)
		return true;
@@ -5519,6 +5540,44 @@ static bool bfq_bfqq_may_idle(struct bfq_queue *bfqq)
	 */
	idling_boosts_thr = !bfqd->hw_tag || bfq_bfqq_IO_bound(bfqq);

+	/*
+	 * The value of the next variable,
+	 * idling_boosts_thr_without_issues, is equal to that of
+	 * idling_boosts_thr, unless a special case holds. In this
+	 * special case, described below, idling may cause problems to
+	 * weight-raised queues.
+	 *
+	 * When the request pool is saturated (e.g., in the presence
+	 * of write hogs), if the processes associated with
+	 * non-weight-raised queues ask for requests at a lower rate,
+	 * then processes associated with weight-raised queues have a
+	 * higher probability to get a request from the pool
+	 * immediately (or at least soon) when they need one. Thus
+	 * they have a higher probability to actually get a fraction
+	 * of the device throughput proportional to their high
+	 * weight. This is especially true with NCQ-capable drives,
+	 * which enqueue several requests in advance, and further
+	 * reorder internally-queued requests.
+	 *
+	 * For this reason, we force to false the value of
+	 * idling_boosts_thr_without_issues if there are weight-raised
+	 * busy queues. In this case, and if bfqq is not weight-raised,
+	 * this guarantees that the device is not idled for bfqq (if,
+	 * instead, bfqq is weight-raised, then idling will be
+	 * guaranteed by another variable, see below). Combined with
+	 * the timestamping rules of BFQ (see [1] for details), this
+	 * behavior causes bfqq, and hence any sync non-weight-raised
+	 * queue, to get a lower number of requests served, and thus
+	 * to ask for a lower number of requests from the request
+	 * pool, before the busy weight-raised queues get served
+	 * again. This often mitigates starvation problems in the
+	 * presence of heavy write workloads and NCQ, thereby
+	 * guaranteeing a higher application and system responsiveness
+	 * in these hostile scenarios.
+	 */
+	idling_boosts_thr_without_issues = idling_boosts_thr &&
+		bfqd->wr_busy_queues == 0;
+
	/*
	 * There is then a case where idling must be performed not for
	 * throughput concerns, but to preserve service guarantees. To
@@ -5593,7 +5652,7 @@ static bool bfq_bfqq_may_idle(struct bfq_queue *bfqq)
	 * is necessary to preserve service guarantees.
	 */
	return bfq_bfqq_sync(bfqq) &&
-		(idling_boosts_thr || asymmetric_scenario);
+		(idling_boosts_thr_without_issues || asymmetric_scenario);
 }

 /*
@@ -6801,6 +6860,7 @@ static int bfq_init_queue(struct request_queue *q, struct elevator_type *e)
					     * high-definition compressed
					     * video.
					     */
+	bfqd->wr_busy_queues = 0;

	/*
	 * Begin by assuming, optimistically, that the device is a
