Skip to content

Commit 26edb30

Browse files
krisman (Gabriel Krisman Bertazi)
authored and axboe (Jens Axboe) committed
sbitmap: Try each queue to wake up at least one waiter
Jan reported the new algorithm as merged might be problematic if the queue being awakened becomes empty between the waitqueue_active inside sbq_wake_ptr check and the wake up. If that happens, wake_up_nr will not wake up any waiter and we lose too many wake-ups. In order to guarantee progress, we need to wake up at least one waiter here, if there are any. This now requires trying to wake up from every queue. Instead of walking through all the queues with sbq_wake_ptr, this call moves the wake up inside that function. In a previous version of the patch, I found that updating wake_index several times when walking through queues had a measurable overhead. This ensures we only update it once, at the end. Fixes: 4f8126b ("sbitmap: Use single per-bitmap counting to wake up queued tags") Reported-by: Jan Kara <[email protected]> Signed-off-by: Gabriel Krisman Bertazi <[email protected]> Reviewed-by: Jan Kara <[email protected]> Link: https://lore.kernel.org/r/[email protected] Signed-off-by: Jens Axboe <[email protected]>
1 parent ee7dc86 commit 26edb30

File tree

1 file changed

+12
-16
lines changed

1 file changed

+12
-16
lines changed

lib/sbitmap.c

Lines changed: 12 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -560,12 +560,12 @@ void sbitmap_queue_min_shallow_depth(struct sbitmap_queue *sbq,
560560
}
561561
EXPORT_SYMBOL_GPL(sbitmap_queue_min_shallow_depth);
562562

563-
static struct sbq_wait_state *sbq_wake_ptr(struct sbitmap_queue *sbq)
563+
static void __sbitmap_queue_wake_up(struct sbitmap_queue *sbq, int nr)
564564
{
565565
int i, wake_index;
566566

567567
if (!atomic_read(&sbq->ws_active))
568-
return NULL;
568+
return;
569569

570570
wake_index = atomic_read(&sbq->wake_index);
571571
for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
@@ -579,20 +579,22 @@ static struct sbq_wait_state *sbq_wake_ptr(struct sbitmap_queue *sbq)
579579
*/
580580
wake_index = sbq_index_inc(wake_index);
581581

582-
if (waitqueue_active(&ws->wait)) {
583-
if (wake_index != atomic_read(&sbq->wake_index))
584-
atomic_set(&sbq->wake_index, wake_index);
585-
return ws;
586-
}
582+
/*
583+
* It is sufficient to wake up at least one waiter to
584+
* guarantee forward progress.
585+
*/
586+
if (waitqueue_active(&ws->wait) &&
587+
wake_up_nr(&ws->wait, nr))
588+
break;
587589
}
588590

589-
return NULL;
591+
if (wake_index != atomic_read(&sbq->wake_index))
592+
atomic_set(&sbq->wake_index, wake_index);
590593
}
591594

592595
void sbitmap_queue_wake_up(struct sbitmap_queue *sbq, int nr)
593596
{
594597
unsigned int wake_batch = READ_ONCE(sbq->wake_batch);
595-
struct sbq_wait_state *ws = NULL;
596598
unsigned int wakeups;
597599

598600
if (!atomic_read(&sbq->ws_active))
@@ -604,16 +606,10 @@ void sbitmap_queue_wake_up(struct sbitmap_queue *sbq, int nr)
604606
do {
605607
if (atomic_read(&sbq->completion_cnt) - wakeups < wake_batch)
606608
return;
607-
608-
if (!ws) {
609-
ws = sbq_wake_ptr(sbq);
610-
if (!ws)
611-
return;
612-
}
613609
} while (!atomic_try_cmpxchg(&sbq->wakeup_cnt,
614610
&wakeups, wakeups + wake_batch));
615611

616-
wake_up_nr(&ws->wait, wake_batch);
612+
__sbitmap_queue_wake_up(sbq, wake_batch);
617613
}
618614
EXPORT_SYMBOL_GPL(sbitmap_queue_wake_up);
619615

0 commit comments

Comments
 (0)