@@ -420,6 +420,8 @@ struct bfq_data {
 	 * queue in service, even if it is idling).
 	 */
 	int busy_queues;
+	/* number of weight-raised busy @bfq_queues */
+	int wr_busy_queues;
 	/* number of queued requests */
 	int queued;
 	/* number of requests dispatched and waiting for completion */
@@ -2490,6 +2492,9 @@ static void bfq_del_bfqq_busy(struct bfq_data *bfqd, struct bfq_queue *bfqq,
 
 	bfqd->busy_queues--;
 
+	if (bfqq->wr_coeff > 1)
+		bfqd->wr_busy_queues--;
+
 	bfqg_stats_update_dequeue(bfqq_group(bfqq));
 
 	bfq_deactivate_bfqq(bfqd, bfqq, true, expiration);
@@ -2506,6 +2511,9 @@ static void bfq_add_bfqq_busy(struct bfq_data *bfqd, struct bfq_queue *bfqq)
 
 	bfq_mark_bfqq_busy(bfqq);
 	bfqd->busy_queues++;
+
+	if (bfqq->wr_coeff > 1)
+		bfqd->wr_busy_queues++;
 }
 
 #ifdef CONFIG_BFQ_GROUP_IOSCHED
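The two hunks above keep bfqd->wr_busy_queues equal to the number of queues that are currently both busy and weight-raised (wr_coeff > 1). As a rough illustration of that bookkeeping, here is a small user-space sketch; the toy_* structs and functions are made-up stand-ins for the kernel types, not bfq's actual API.

/*
 * Minimal user-space model of the invariant maintained above:
 * wr_busy_queues counts queues that are both busy and weight-raised.
 */
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

struct toy_bfqd { int busy_queues; int wr_busy_queues; };
struct toy_bfqq { bool busy; unsigned int wr_coeff; };

static void toy_del_busy(struct toy_bfqd *bfqd, struct toy_bfqq *bfqq)
{
	bfqq->busy = false;
	bfqd->busy_queues--;
	if (bfqq->wr_coeff > 1)		/* mirrors bfq_del_bfqq_busy() above */
		bfqd->wr_busy_queues--;
}

static void toy_add_busy(struct toy_bfqd *bfqd, struct toy_bfqq *bfqq)
{
	bfqq->busy = true;
	bfqd->busy_queues++;
	if (bfqq->wr_coeff > 1)		/* mirrors bfq_add_bfqq_busy() above */
		bfqd->wr_busy_queues++;
}

int main(void)
{
	struct toy_bfqd bfqd = { 0, 0 };
	struct toy_bfqq plain = { false, 1 };	/* not weight-raised */
	struct toy_bfqq raised = { false, 30 };	/* weight-raised */

	toy_add_busy(&bfqd, &plain);
	toy_add_busy(&bfqd, &raised);
	assert(bfqd.busy_queues == 2 && bfqd.wr_busy_queues == 1);

	toy_del_busy(&bfqd, &raised);
	assert(bfqd.busy_queues == 1 && bfqd.wr_busy_queues == 0);

	printf("busy=%d wr_busy=%d\n", bfqd.busy_queues, bfqd.wr_busy_queues);
	return 0;
}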
@@ -3779,7 +3787,16 @@ static unsigned long bfq_serv_to_charge(struct request *rq,
 	if (bfq_bfqq_sync(bfqq) || bfqq->wr_coeff > 1)
 		return blk_rq_sectors(rq);
 
-	return blk_rq_sectors(rq) * bfq_async_charge_factor;
+	/*
+	 * If there are no weight-raised queues, then amplify service
+	 * by just the async charge factor; otherwise amplify service
+	 * by twice the async charge factor, to further reduce latency
+	 * for weight-raised queues.
+	 */
+	if (bfqq->bfqd->wr_busy_queues == 0)
+		return blk_rq_sectors(rq) * bfq_async_charge_factor;
+
+	return blk_rq_sectors(rq) * 2 * bfq_async_charge_factor;
 }
 
 /**
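The hunk above doubles the async charge whenever at least one weight-raised queue is busy, so async I/O burns through its budget faster and weight-raised queues get served sooner. A minimal user-space sketch of that rule follows; the toy_* names are invented for illustration, and the charge factor of 10 is only an assumed value for the example, not necessarily the kernel's constant.

/*
 * Sketch of the charging rule: sync or weight-raised requests are
 * charged their size; async requests are amplified, and amplified
 * twice as much while any weight-raised queue is busy.
 */
#include <stdbool.h>
#include <stdio.h>

static const unsigned long toy_async_charge_factor = 10;	/* assumed */

static unsigned long toy_serv_to_charge(unsigned long sectors, bool sync,
					unsigned int wr_coeff,
					int wr_busy_queues)
{
	if (sync || wr_coeff > 1)
		return sectors;

	if (wr_busy_queues == 0)
		return sectors * toy_async_charge_factor;

	return sectors * 2 * toy_async_charge_factor;
}

int main(void)
{
	/* 8-sector async request: no weight-raised queues vs. one present */
	printf("%lu\n", toy_serv_to_charge(8, false, 1, 0));	/* 80 */
	printf("%lu\n", toy_serv_to_charge(8, false, 1, 1));	/* 160 */
	return 0;
}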
@@ -4234,6 +4251,7 @@ static void bfq_add_request(struct request *rq)
 			bfqq->wr_coeff = bfqd->bfq_wr_coeff;
 			bfqq->wr_cur_max_time = bfq_wr_duration(bfqd);
 
+			bfqd->wr_busy_queues++;
 			bfqq->entity.prio_changed = 1;
 		}
 		if (prev != bfqq->next_rq)
@@ -4474,6 +4492,8 @@ static void bfq_requests_merged(struct request_queue *q, struct request *rq,
 /* Must be called with bfqq != NULL */
 static void bfq_bfqq_end_wr(struct bfq_queue *bfqq)
 {
+	if (bfq_bfqq_busy(bfqq))
+		bfqq->bfqd->wr_busy_queues--;
 	bfqq->wr_coeff = 1;
 	bfqq->wr_cur_max_time = 0;
 	bfqq->last_wr_start_finish = jiffies;
@@ -5497,7 +5517,8 @@ static bool bfq_may_expire_for_budg_timeout(struct bfq_queue *bfqq)
 static bool bfq_bfqq_may_idle(struct bfq_queue *bfqq)
 {
 	struct bfq_data *bfqd = bfqq->bfqd;
-	bool idling_boosts_thr, asymmetric_scenario;
+	bool idling_boosts_thr, idling_boosts_thr_without_issues,
+	     asymmetric_scenario;
 
 	if (bfqd->strict_guarantees)
 		return true;
@@ -5519,6 +5540,44 @@ static bool bfq_bfqq_may_idle(struct bfq_queue *bfqq)
 	 */
 	idling_boosts_thr = !bfqd->hw_tag || bfq_bfqq_IO_bound(bfqq);
 
+	/*
+	 * The value of the next variable,
+	 * idling_boosts_thr_without_issues, is equal to that of
+	 * idling_boosts_thr, unless a special case holds. In this
+	 * special case, described below, idling may cause problems to
+	 * weight-raised queues.
+	 *
+	 * When the request pool is saturated (e.g., in the presence
+	 * of write hogs), if the processes associated with
+	 * non-weight-raised queues ask for requests at a lower rate,
+	 * then processes associated with weight-raised queues have a
+	 * higher probability to get a request from the pool
+	 * immediately (or at least soon) when they need one. Thus
+	 * they have a higher probability to actually get a fraction
+	 * of the device throughput proportional to their high
+	 * weight. This is especially true with NCQ-capable drives,
+	 * which enqueue several requests in advance, and further
+	 * reorder internally-queued requests.
+	 *
+	 * For this reason, we force to false the value of
+	 * idling_boosts_thr_without_issues if there are weight-raised
+	 * busy queues. In this case, and if bfqq is not weight-raised,
+	 * this guarantees that the device is not idled for bfqq (if,
+	 * instead, bfqq is weight-raised, then idling will be
+	 * guaranteed by another variable, see below). Combined with
+	 * the timestamping rules of BFQ (see [1] for details), this
+	 * behavior causes bfqq, and hence any sync non-weight-raised
+	 * queue, to get a lower number of requests served, and thus
+	 * to ask for a lower number of requests from the request
+	 * pool, before the busy weight-raised queues get served
+	 * again. This often mitigates starvation problems in the
+	 * presence of heavy write workloads and NCQ, thereby
+	 * guaranteeing a higher application and system responsiveness
+	 * in these hostile scenarios.
+	 */
+	idling_boosts_thr_without_issues = idling_boosts_thr &&
+		bfqd->wr_busy_queues == 0;
+
 	/*
 	 * There is then a case where idling must be performed not for
 	 * throughput concerns, but to preserve service guarantees. To
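The comment and assignment above make the throughput-boost reason for idling count only when no weight-raised queue is busy; the flag then feeds the return statement changed in the next hunk. Below is a compressed user-space sketch of that decision; toy_may_idle and its parameters are invented for illustration and omit the asymmetry computation.

/* Sketch: idling_boosts_thr is neutralized while wr queues are busy. */
#include <stdbool.h>
#include <stdio.h>

static bool toy_may_idle(bool sync, bool idling_boosts_thr,
			 bool asymmetric_scenario, int wr_busy_queues)
{
	bool idling_boosts_thr_without_issues =
		idling_boosts_thr && wr_busy_queues == 0;

	return sync && (idling_boosts_thr_without_issues ||
			asymmetric_scenario);
}

int main(void)
{
	/* same sync queue: idling allowed without wr queues, denied with one */
	printf("%d\n", toy_may_idle(true, true, false, 0));	/* 1 */
	printf("%d\n", toy_may_idle(true, true, false, 1));	/* 0 */
	return 0;
}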
@@ -5593,7 +5652,7 @@ static bool bfq_bfqq_may_idle(struct bfq_queue *bfqq)
 	 * is necessary to preserve service guarantees.
 	 */
 	return bfq_bfqq_sync(bfqq) &&
-		(idling_boosts_thr || asymmetric_scenario);
+		(idling_boosts_thr_without_issues || asymmetric_scenario);
 }
 
 /*
@@ -6801,6 +6860,7 @@ static int bfq_init_queue(struct request_queue *q, struct elevator_type *e)
 					      * high-definition compressed
 					      * video.
 					      */
+	bfqd->wr_busy_queues = 0;
 
 	/*
 	 * Begin by assuming, optimistically, that the device is a