@@ -30,7 +30,7 @@ struct Sched {
	M*	midle;	// idle m's waiting for work
	int32	nmidle;	// number of idle m's waiting for work
-	int32	mlocked;	// number of locked m's waiting for work
+	int32	nmidlelocked;	// number of locked m's waiting for work
	int32	mcount;	// number of m's that have been created

	P*	pidle;	// idle P's
@@ -95,7 +95,7 @@ static void stoplockedm(void);
static void startlockedm(G*);
static void sysmon(void);
static uint32 retake(int64);
-static void inclocked(int32);
+static void incidlelocked(int32);
static void checkdead(void);
static void exitsyscall0(G*);
static void park0(G*);
@@ -1019,7 +1019,7 @@ stoplockedm(void)
		p = releasep();
		handoffp(p);
	}
-	inclocked(1);
+	incidlelocked(1);
	// Wait until another thread schedules lockedg again.
	runtime·notesleep(&m->park);
	runtime·noteclear(&m->park);
@@ -1042,7 +1042,7 @@ startlockedm(G *gp)
	if(mp->nextp)
		runtime·throw("startlockedm: m has p");
	// directly handoff current P to the locked m
-	inclocked(-1);
+	incidlelocked(-1);
	p = releasep();
	mp->nextp = p;
	runtime·notewakeup(&mp->park);
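The two hunks above are the two sides of a park/handoff handshake: stoplockedm releases its P, bumps the idle-locked count and sleeps on m->park, while startlockedm decrements the count, publishes its P through mp->nextp and wakes the parked M. Below is a minimal standalone sketch of that handshake, not the runtime's code: a pthread mutex/condvar pair stands in for runtime·note, and the M and P types are reduced to a stand-in struct and an int id.

/*
 * Sketch of the park/handoff handshake (stand-in types, not runtime code).
 */
#include <pthread.h>
#include <stdio.h>

struct note {
	pthread_mutex_t mu;
	pthread_cond_t  cond;
	int             woken;
};

struct m_stub {
	struct note park;
	int         nextp;	/* P handed to this M by the startlockedm side */
};

static void notesleep(struct note *n) {
	pthread_mutex_lock(&n->mu);
	while(!n->woken)
		pthread_cond_wait(&n->cond, &n->mu);
	pthread_mutex_unlock(&n->mu);
}

static void notewakeup(struct note *n) {
	pthread_mutex_lock(&n->mu);
	n->woken = 1;
	pthread_cond_signal(&n->cond);
	pthread_mutex_unlock(&n->mu);
}

static struct m_stub locked_m = {
	.park = { PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, 0 },
	.nextp = -1,
};

static void* locked_m_main(void *arg) {
	(void)arg;
	/* stoplockedm side: wait until another thread schedules us again. */
	notesleep(&locked_m.park);
	printf("locked M woke up with P %d\n", locked_m.nextp);
	return NULL;
}

int main(void) {
	pthread_t t;
	pthread_create(&t, NULL, locked_m_main, NULL);

	/* startlockedm side: hand our P directly to the locked M, then wake it. */
	locked_m.nextp = 7;	/* stand-in for p = releasep() */
	notewakeup(&locked_m.park);

	pthread_join(t, NULL);
	return 0;
}

The property the sketch mirrors is that the P is published in nextp before the wakeup, under the same lock that guards the sleep, so the parked M can pick it up as soon as notesleep returns.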
@@ -1485,7 +1485,7 @@ void
	p = releasep();
	handoffp(p);
	if(g->isbackground)	// do not consider blocked scavenger for deadlock detection
-		inclocked(1);
+		incidlelocked(1);

	// Resave for traceback during blocked call.
	save(runtime·getcallerpc(&dummy), runtime·getcallersp(&dummy));
@@ -1505,7 +1505,7 @@ runtime·exitsyscall(void)
	m->locks++;	// see comment in entersyscall

	if(g->isbackground)	// do not consider blocked scavenger for deadlock detection
-		inclocked(-1);
+		incidlelocked(-1);

	if(exitsyscallfast()) {
		// There's a cpu for us, so we can run.
@@ -2159,10 +2159,10 @@ releasep(void)
}

static void
-inclocked(int32 v)
+incidlelocked(int32 v)
{
	runtime·lock(&runtime·sched);
-	runtime·sched.mlocked += v;
+	runtime·sched.nmidlelocked += v;
	if(v > 0)
		checkdead();
	runtime·unlock(&runtime·sched);
@@ -2177,12 +2177,12 @@ checkdead(void)
	int32 run, grunning, s;

	// -1 for sysmon
-	run = runtime·sched.mcount - runtime·sched.nmidle - runtime·sched.mlocked - 1;
+	run = runtime·sched.mcount - runtime·sched.nmidle - runtime·sched.nmidlelocked - 1;
	if(run > 0)
		return;
	if(run < 0) {
-		runtime·printf("checkdead: nmidle=%d mlocked=%d mcount=%d\n",
-			runtime·sched.nmidle, runtime·sched.mlocked, runtime·sched.mcount);
+		runtime·printf("checkdead: nmidle=%d nmidlelocked=%d mcount=%d\n",
+			runtime·sched.nmidle, runtime·sched.nmidlelocked, runtime·sched.mcount);
		runtime·throw("checkdead: inconsistent counts");
	}
	grunning = 0;
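The renamed counter feeds directly into the arithmetic above: an M counts as running unless it is on the idle list or parked waiting for its locked G, and sysmon is excluded. A small self-contained sketch of that bookkeeping, using a stand-in struct rather than runtime·sched:

/* Sketch of the "how many M's can still run goroutines" count. */
#include <stdio.h>

struct sched_counters {
	int mcount;		/* M's that have been created */
	int nmidle;		/* idle M's waiting for work */
	int nmidlelocked;	/* idle locked M's waiting for their G */
};

static int running_ms(struct sched_counters *s) {
	/* -1 for sysmon, which is always running but never executes G's. */
	return s->mcount - s->nmidle - s->nmidlelocked - 1;
}

int main(void) {
	struct sched_counters s = { .mcount = 4, .nmidle = 2, .nmidlelocked = 1 };
	int run = running_ms(&s);
	if(run > 0)
		printf("run=%d: some M can still execute goroutines\n", run);
	else if(run == 0)
		printf("run=0: no M left to run goroutines -> possible deadlock\n");
	else
		printf("run=%d: inconsistent counts\n", run);
	return 0;
}

With the example counters (4 M's, 2 idle, 1 idle-locked), run is 0, which is the state in which checkdead goes on to inspect the goroutine states before deciding whether to throw.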
@@ -2238,7 +2238,18 @@ sysmon(void)
		if(lastpoll != 0 && lastpoll + 10*1000*1000 > now) {
			runtime·cas64(&runtime·sched.lastpoll, lastpoll, now);
			gp = runtime·netpoll(false);	// non-blocking
-			injectglist(gp);
+			if(gp) {
+				// Need to decrement number of idle locked M's
+				// (pretending that one more is running) before injectglist.
+				// Otherwise it can lead to the following situation:
+				// injectglist grabs all P's but before it starts M's to run the P's,
+				// another M returns from syscall, finishes running its G,
+				// observes that there is no work to do and no other running M's
+				// and reports deadlock.
+				incidlelocked(-1);
+				injectglist(gp);
+				incidlelocked(1);
+			}
		}
		// retake P's blocked in syscalls
		// and preempt long running G's
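The comment added above describes a bracketing idiom: temporarily pretend one more M is running across the window in which new work exists but no M has yet been started for it, so a concurrent checkdead cannot observe "work but nobody running" and misfire. Below is a minimal sketch of that idiom under stated assumptions: incidlelocked, injectglist and the counters are simplified stand-ins guarded by a pthread mutex, not the runtime's definitions.

/* Sketch of the incidlelocked(-1) ... incidlelocked(1) bracket. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t sched_lock = PTHREAD_MUTEX_INITIALIZER;
static int nmidlelocked = 1;	/* pretend one locked M is currently parked */
static int pending_goroutines;	/* stand-in for the injected G list */

static void checkdead_stub(void) {
	/* The real checkdead inspects all scheduler counters; here we only
	 * show that the check runs under the lock when the count rises. */
	printf("checkdead: nmidlelocked=%d\n", nmidlelocked);
}

static void incidlelocked(int v) {
	pthread_mutex_lock(&sched_lock);
	nmidlelocked += v;
	if(v > 0)
		checkdead_stub();
	pthread_mutex_unlock(&sched_lock);
}

static void injectglist_stub(int n) {
	/* Stand-in: make n goroutines runnable. The gap between making them
	 * visible and starting M's to run them is what the bracket covers. */
	pending_goroutines += n;
}

int main(void) {
	int gp = 3;	/* pretend netpoll returned 3 runnable G's */
	if(gp) {
		incidlelocked(-1);	/* pretend one more M is running */
		injectglist_stub(gp);
		incidlelocked(1);	/* restore the count */
	}
	printf("pending=%d nmidlelocked=%d\n", pending_goroutines, nmidlelocked);
	return 0;
}

The same bracket appears again in the retake hunk below, around the CAS that takes a P away from an M blocked in a syscall.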
@@ -2284,15 +2295,16 @@ retake(int64 now)
			if(p->runqhead == p->runqtail &&
				runtime·atomicload(&runtime·sched.nmspinning) + runtime·atomicload(&runtime·sched.npidle) > 0)
				continue;
-			// Need to increment number of locked M's before the CAS.
+			// Need to decrement number of idle locked M's
+			// (pretending that one more is running) before the CAS.
			// Otherwise the M from which we retake can exit the syscall,
			// increment nmidle and report deadlock.
-			inclocked(-1);
+			incidlelocked(-1);
			if(runtime·cas(&p->status, s, Pidle)) {
				n++;
				handoffp(p);
			}
-			inclocked(1);
+			incidlelocked(1);
		} else if(s == Prunning) {
			// Preempt G if it's running for more than 10ms.
			if(pd->when + 10*1000*1000 > now)