Skip to content

Commit d3e2e7b

Browse files
hao022 authored and Kernel Patches Daemon committed
bpf: avoid hashtab deadlock with try_lock
The commit 20b6cc3 ("bpf: Avoid hashtab deadlock with map_locked"), try to fix deadlock, but in some case, the deadlock occurs: * CPUn in task context with K1, and taking lock. * CPUn interrupted by NMI context, with K2. * They are using the same bucket, but different map_locked. | Task | +---v----+ | CPUn | +---^----+ | | NMI Anyway, the lockdep still warn: [ 36.092222] ================================ [ 36.092230] WARNING: inconsistent lock state [ 36.092234] 6.1.0-rc5+ #81 Tainted: G E [ 36.092236] -------------------------------- [ 36.092237] inconsistent {INITIAL USE} -> {IN-NMI} usage. [ 36.092238] perf/1515 [HC1[1]:SC0[0]:HE0:SE1] takes: [ 36.092242] ffff888341acd1a0 (&htab->lockdep_key){....}-{2:2}, at: htab_lock_bucket+0x4d/0x58 [ 36.092253] {INITIAL USE} state was registered at: [ 36.092255] mark_usage+0x1d/0x11d [ 36.092262] __lock_acquire+0x3c9/0x6ed [ 36.092266] lock_acquire+0x23d/0x29a [ 36.092270] _raw_spin_lock_irqsave+0x43/0x7f [ 36.092274] htab_lock_bucket+0x4d/0x58 [ 36.092276] htab_map_delete_elem+0x82/0xfb [ 36.092278] map_delete_elem+0x156/0x1ac [ 36.092282] __sys_bpf+0x138/0xb71 [ 36.092285] __do_sys_bpf+0xd/0x15 [ 36.092288] do_syscall_64+0x6d/0x84 [ 36.092291] entry_SYSCALL_64_after_hwframe+0x63/0xcd [ 36.092295] irq event stamp: 120346 [ 36.092296] hardirqs last enabled at (120345): [<ffffffff8180b97f>] _raw_spin_unlock_irq+0x24/0x39 [ 36.092299] hardirqs last disabled at (120346): [<ffffffff81169e85>] generic_exec_single+0x40/0xb9 [ 36.092303] softirqs last enabled at (120268): [<ffffffff81c00347>] __do_softirq+0x347/0x387 [ 36.092307] softirqs last disabled at (120133): [<ffffffff810ba4f0>] __irq_exit_rcu+0x67/0xc6 [ 36.092311] [ 36.092311] other info that might help us debug this: [ 36.092312] Possible unsafe locking scenario: [ 36.092312] [ 36.092313] CPU0 [ 36.092313] ---- [ 36.092314] lock(&htab->lockdep_key); [ 36.092315] <Interrupt> [ 36.092316] lock(&htab->lockdep_key); [ 36.092318] [ 36.092318] *** DEADLOCK *** [ 36.092318] [ 
36.092318] 3 locks held by perf/1515: [ 36.092320] #0: ffff8881b9805cc0 (&cpuctx_mutex){+.+.}-{4:4}, at: perf_event_ctx_lock_nested+0x8e/0xba [ 36.092327] #1: ffff8881075ecc20 (&event->child_mutex){+.+.}-{4:4}, at: perf_event_for_each_child+0x35/0x76 [ 36.092332] #2: ffff8881b9805c20 (&cpuctx_lock){-.-.}-{2:2}, at: perf_ctx_lock+0x12/0x27 [ 36.092339] [ 36.092339] stack backtrace: [ 36.092341] CPU: 0 PID: 1515 Comm: perf Tainted: G E 6.1.0-rc5+ #81 [ 36.092344] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS rel-1.16.0-0-gd239552ce722-prebuilt.qemu.org 04/01/2014 [ 36.092349] Call Trace: [ 36.092351] <NMI> [ 36.092354] dump_stack_lvl+0x57/0x81 [ 36.092359] lock_acquire+0x1f4/0x29a [ 36.092363] ? handle_pmi_common+0x13f/0x1f0 [ 36.092366] ? htab_lock_bucket+0x4d/0x58 [ 36.092371] _raw_spin_lock_irqsave+0x43/0x7f [ 36.092374] ? htab_lock_bucket+0x4d/0x58 [ 36.092377] htab_lock_bucket+0x4d/0x58 [ 36.092379] htab_map_update_elem+0x11e/0x220 [ 36.092386] bpf_prog_f3a535ca81a8128a_bpf_prog2+0x3e/0x42 [ 36.092392] trace_call_bpf+0x177/0x215 [ 36.092398] perf_trace_run_bpf_submit+0x52/0xaa [ 36.092403] ? x86_pmu_stop+0x97/0x97 [ 36.092407] perf_trace_nmi_handler+0xb7/0xe0 [ 36.092415] nmi_handle+0x116/0x254 [ 36.092418] ? 
x86_pmu_stop+0x97/0x97 [ 36.092423] default_do_nmi+0x3d/0xf6 [ 36.092428] exc_nmi+0xa1/0x109 [ 36.092432] end_repeat_nmi+0x16/0x67 [ 36.092436] RIP: 0010:wrmsrl+0xd/0x1b [ 36.092441] Code: 04 01 00 00 c6 84 07 48 01 00 00 01 5b e9 46 15 80 00 5b c3 cc cc cc cc c3 cc cc cc cc 48 89 f2 89 f9 89 f0 48 c1 ea 20 0f 30 <66> 90 c3 cc cc cc cc 31 d2 e9 2f 04 49 00 0f 1f 44 00 00 40 0f6 [ 36.092443] RSP: 0018:ffffc900043dfc48 EFLAGS: 00000002 [ 36.092445] RAX: 000000000000000f RBX: ffff8881b96153e0 RCX: 000000000000038f [ 36.092447] RDX: 0000000000000007 RSI: 000000070000000f RDI: 000000000000038f [ 36.092449] RBP: 000000070000000f R08: ffffffffffffffff R09: ffff8881053bdaa8 [ 36.092451] R10: ffff8881b9805d40 R11: 0000000000000005 R12: ffff8881b9805c00 [ 36.092452] R13: 0000000000000000 R14: 0000000000000000 R15: ffff8881075ec970 [ 36.092460] ? wrmsrl+0xd/0x1b [ 36.092465] ? wrmsrl+0xd/0x1b [ 36.092469] </NMI> [ 36.092469] <TASK> [ 36.092470] __intel_pmu_enable_all.constprop.0+0x7c/0xaf [ 36.092475] event_function+0xb6/0xd3 [ 36.092478] ? cpu_to_node+0x1a/0x1a [ 36.092482] ? cpu_to_node+0x1a/0x1a [ 36.092485] remote_function+0x1e/0x4c [ 36.092489] generic_exec_single+0x48/0xb9 [ 36.092492] ? __lock_acquire+0x666/0x6ed [ 36.092497] smp_call_function_single+0xbf/0x106 [ 36.092499] ? cpu_to_node+0x1a/0x1a [ 36.092504] ? kvm_sched_clock_read+0x5/0x11 [ 36.092508] ? __perf_event_task_sched_in+0x13d/0x13d [ 36.092513] cpu_function_call+0x47/0x69 [ 36.092516] ? perf_event_update_time+0x52/0x52 [ 36.092519] event_function_call+0x89/0x117 [ 36.092521] ? __perf_event_task_sched_in+0x13d/0x13d [ 36.092526] ? _perf_event_disable+0x4a/0x4a [ 36.092528] perf_event_for_each_child+0x3d/0x76 [ 36.092532] ? _perf_event_disable+0x4a/0x4a [ 36.092533] _perf_ioctl+0x564/0x590 [ 36.092537] ? __lock_release+0xd5/0x1b0 [ 36.092543] ? 
perf_event_ctx_lock_nested+0x8e/0xba [ 36.092547] perf_ioctl+0x42/0x5f [ 36.092551] vfs_ioctl+0x1e/0x2f [ 36.092554] __do_sys_ioctl+0x66/0x89 [ 36.092559] do_syscall_64+0x6d/0x84 [ 36.092563] entry_SYSCALL_64_after_hwframe+0x63/0xcd [ 36.092566] RIP: 0033:0x7fe7110f362b [ 36.092569] Code: 0f 1e fa 48 8b 05 5d b8 2c 00 64 c7 00 26 00 00 00 48 c7 c0 ff ff ff ff c3 66 0f 1f 44 00 00 f3 0f 1e fa b8 10 00 00 00 0f 05 <48> 3d 01 f0 ff ff 73 01 c3 48 8b 0d 2d b8 2c 00 f7 d8 64 89 018 [ 36.092570] RSP: 002b:00007ffebb8e4b08 EFLAGS: 00000246 ORIG_RAX: 0000000000000010 [ 36.092573] RAX: ffffffffffffffda RBX: 0000000000002400 RCX: 00007fe7110f362b [ 36.092575] RDX: 0000000000000000 RSI: 0000000000002400 RDI: 0000000000000013 [ 36.092576] RBP: 00007ffebb8e4b40 R08: 0000000000000001 R09: 000055c1db4a5b40 [ 36.092577] R10: 0000000000000000 R11: 0000000000000246 R12: 0000000000000000 [ 36.092579] R13: 000055c1db3b2a30 R14: 0000000000000000 R15: 0000000000000000 [ 36.092586] </TASK> Cc: Alexei Starovoitov <[email protected]> Cc: Daniel Borkmann <[email protected]> Cc: Andrii Nakryiko <[email protected]> Cc: Martin KaFai Lau <[email protected]> Cc: Song Liu <[email protected]> Cc: Yonghong Song <[email protected]> Cc: John Fastabend <[email protected]> Cc: KP Singh <[email protected]> Cc: Stanislav Fomichev <[email protected]> Cc: Hao Luo <[email protected]> Cc: Jiri Olsa <[email protected]> Signed-off-by: Tonghao Zhang <[email protected]>
1 parent c0abf79 commit d3e2e7b

File tree

1 file changed: +36 additions, -60 deletions

kernel/bpf/hashtab.c

Lines changed: 36 additions & 60 deletions
Original file line numberDiff line numberDiff line change
@@ -80,9 +80,6 @@ struct bucket {
8080
raw_spinlock_t raw_lock;
8181
};
8282

83-
#define HASHTAB_MAP_LOCK_COUNT 8
84-
#define HASHTAB_MAP_LOCK_MASK (HASHTAB_MAP_LOCK_COUNT - 1)
85-
8683
struct bpf_htab {
8784
struct bpf_map map;
8885
struct bpf_mem_alloc ma;
@@ -104,7 +101,6 @@ struct bpf_htab {
104101
u32 elem_size; /* size of each element in bytes */
105102
u32 hashrnd;
106103
struct lock_class_key lockdep_key;
107-
int __percpu *map_locked[HASHTAB_MAP_LOCK_COUNT];
108104
};
109105

110106
/* each htab element is struct htab_elem + key + value */
@@ -146,35 +142,26 @@ static void htab_init_buckets(struct bpf_htab *htab)
146142
}
147143
}
148144

149-
static inline int htab_lock_bucket(const struct bpf_htab *htab,
150-
struct bucket *b, u32 hash,
145+
static inline int htab_lock_bucket(struct bucket *b,
151146
unsigned long *pflags)
152147
{
153148
unsigned long flags;
154149

155-
hash = hash & HASHTAB_MAP_LOCK_MASK;
156-
157-
preempt_disable();
158-
if (unlikely(__this_cpu_inc_return(*(htab->map_locked[hash])) != 1)) {
159-
__this_cpu_dec(*(htab->map_locked[hash]));
160-
preempt_enable();
161-
return -EBUSY;
150+
if (in_nmi()) {
151+
if (!raw_spin_trylock_irqsave(&b->raw_lock, flags))
152+
return -EBUSY;
153+
} else {
154+
raw_spin_lock_irqsave(&b->raw_lock, flags);
162155
}
163156

164-
raw_spin_lock_irqsave(&b->raw_lock, flags);
165157
*pflags = flags;
166-
167158
return 0;
168159
}
169160

170-
static inline void htab_unlock_bucket(const struct bpf_htab *htab,
171-
struct bucket *b, u32 hash,
161+
static inline void htab_unlock_bucket(struct bucket *b,
172162
unsigned long flags)
173163
{
174-
hash = hash & HASHTAB_MAP_LOCK_MASK;
175164
raw_spin_unlock_irqrestore(&b->raw_lock, flags);
176-
__this_cpu_dec(*(htab->map_locked[hash]));
177-
preempt_enable();
178165
}
179166

180167
static bool htab_lru_map_delete_node(void *arg, struct bpf_lru_node *node);
@@ -467,7 +454,7 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
467454
bool percpu_lru = (attr->map_flags & BPF_F_NO_COMMON_LRU);
468455
bool prealloc = !(attr->map_flags & BPF_F_NO_PREALLOC);
469456
struct bpf_htab *htab;
470-
int err, i;
457+
int err;
471458

472459
htab = bpf_map_area_alloc(sizeof(*htab), NUMA_NO_NODE);
473460
if (!htab)
@@ -512,15 +499,6 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
512499
if (!htab->buckets)
513500
goto free_htab;
514501

515-
for (i = 0; i < HASHTAB_MAP_LOCK_COUNT; i++) {
516-
htab->map_locked[i] = bpf_map_alloc_percpu(&htab->map,
517-
sizeof(int),
518-
sizeof(int),
519-
GFP_USER);
520-
if (!htab->map_locked[i])
521-
goto free_map_locked;
522-
}
523-
524502
if (htab->map.map_flags & BPF_F_ZERO_SEED)
525503
htab->hashrnd = 0;
526504
else
@@ -548,13 +526,13 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
548526
if (htab->use_percpu_counter) {
549527
err = percpu_counter_init(&htab->pcount, 0, GFP_KERNEL);
550528
if (err)
551-
goto free_map_locked;
529+
goto free_buckets;
552530
}
553531

554532
if (prealloc) {
555533
err = prealloc_init(htab);
556534
if (err)
557-
goto free_map_locked;
535+
goto free_buckets;
558536

559537
if (!percpu && !lru) {
560538
/* lru itself can remove the least used element, so
@@ -567,24 +545,23 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
567545
} else {
568546
err = bpf_mem_alloc_init(&htab->ma, htab->elem_size, false);
569547
if (err)
570-
goto free_map_locked;
548+
goto free_buckets;
571549
if (percpu) {
572550
err = bpf_mem_alloc_init(&htab->pcpu_ma,
573551
round_up(htab->map.value_size, 8), true);
574552
if (err)
575-
goto free_map_locked;
553+
goto free_buckets;
576554
}
577555
}
578556

579557
return &htab->map;
580558

581559
free_prealloc:
582560
prealloc_destroy(htab);
583-
free_map_locked:
561+
free_buckets:
584562
if (htab->use_percpu_counter)
585563
percpu_counter_destroy(&htab->pcount);
586-
for (i = 0; i < HASHTAB_MAP_LOCK_COUNT; i++)
587-
free_percpu(htab->map_locked[i]);
564+
588565
bpf_map_area_free(htab->buckets);
589566
bpf_mem_alloc_destroy(&htab->pcpu_ma);
590567
bpf_mem_alloc_destroy(&htab->ma);
@@ -781,7 +758,7 @@ static bool htab_lru_map_delete_node(void *arg, struct bpf_lru_node *node)
781758
b = __select_bucket(htab, tgt_l->hash);
782759
head = &b->head;
783760

784-
ret = htab_lock_bucket(htab, b, tgt_l->hash, &flags);
761+
ret = htab_lock_bucket(b, &flags);
785762
if (ret)
786763
return false;
787764

@@ -792,7 +769,7 @@ static bool htab_lru_map_delete_node(void *arg, struct bpf_lru_node *node)
792769
break;
793770
}
794771

795-
htab_unlock_bucket(htab, b, tgt_l->hash, flags);
772+
htab_unlock_bucket(b, flags);
796773

797774
return l == tgt_l;
798775
}
@@ -1106,7 +1083,7 @@ static int htab_map_update_elem(struct bpf_map *map, void *key, void *value,
11061083
*/
11071084
}
11081085

1109-
ret = htab_lock_bucket(htab, b, hash, &flags);
1086+
ret = htab_lock_bucket(b, &flags);
11101087
if (ret)
11111088
return ret;
11121089

@@ -1151,7 +1128,7 @@ static int htab_map_update_elem(struct bpf_map *map, void *key, void *value,
11511128
}
11521129
ret = 0;
11531130
err:
1154-
htab_unlock_bucket(htab, b, hash, flags);
1131+
htab_unlock_bucket(b, flags);
11551132
return ret;
11561133
}
11571134

@@ -1197,7 +1174,7 @@ static int htab_lru_map_update_elem(struct bpf_map *map, void *key, void *value,
11971174
copy_map_value(&htab->map,
11981175
l_new->key + round_up(map->key_size, 8), value);
11991176

1200-
ret = htab_lock_bucket(htab, b, hash, &flags);
1177+
ret = htab_lock_bucket(b, &flags);
12011178
if (ret)
12021179
return ret;
12031180

@@ -1218,7 +1195,7 @@ static int htab_lru_map_update_elem(struct bpf_map *map, void *key, void *value,
12181195
ret = 0;
12191196

12201197
err:
1221-
htab_unlock_bucket(htab, b, hash, flags);
1198+
htab_unlock_bucket(b, flags);
12221199

12231200
if (ret)
12241201
htab_lru_push_free(htab, l_new);
@@ -1254,7 +1231,7 @@ static int __htab_percpu_map_update_elem(struct bpf_map *map, void *key,
12541231
b = __select_bucket(htab, hash);
12551232
head = &b->head;
12561233

1257-
ret = htab_lock_bucket(htab, b, hash, &flags);
1234+
ret = htab_lock_bucket(b, &flags);
12581235
if (ret)
12591236
return ret;
12601237

@@ -1279,7 +1256,7 @@ static int __htab_percpu_map_update_elem(struct bpf_map *map, void *key,
12791256
}
12801257
ret = 0;
12811258
err:
1282-
htab_unlock_bucket(htab, b, hash, flags);
1259+
htab_unlock_bucket(b, flags);
12831260
return ret;
12841261
}
12851262

@@ -1320,7 +1297,7 @@ static int __htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key,
13201297
return -ENOMEM;
13211298
}
13221299

1323-
ret = htab_lock_bucket(htab, b, hash, &flags);
1300+
ret = htab_lock_bucket(b, &flags);
13241301
if (ret)
13251302
return ret;
13261303

@@ -1344,7 +1321,7 @@ static int __htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key,
13441321
}
13451322
ret = 0;
13461323
err:
1347-
htab_unlock_bucket(htab, b, hash, flags);
1324+
htab_unlock_bucket(b, flags);
13481325
if (l_new)
13491326
bpf_lru_push_free(&htab->lru, &l_new->lru_node);
13501327
return ret;
@@ -1383,7 +1360,7 @@ static int htab_map_delete_elem(struct bpf_map *map, void *key)
13831360
b = __select_bucket(htab, hash);
13841361
head = &b->head;
13851362

1386-
ret = htab_lock_bucket(htab, b, hash, &flags);
1363+
ret = htab_lock_bucket(b, &flags);
13871364
if (ret)
13881365
return ret;
13891366

@@ -1396,7 +1373,7 @@ static int htab_map_delete_elem(struct bpf_map *map, void *key)
13961373
ret = -ENOENT;
13971374
}
13981375

1399-
htab_unlock_bucket(htab, b, hash, flags);
1376+
htab_unlock_bucket(b, flags);
14001377
return ret;
14011378
}
14021379

@@ -1419,7 +1396,7 @@ static int htab_lru_map_delete_elem(struct bpf_map *map, void *key)
14191396
b = __select_bucket(htab, hash);
14201397
head = &b->head;
14211398

1422-
ret = htab_lock_bucket(htab, b, hash, &flags);
1399+
ret = htab_lock_bucket(b, &flags);
14231400
if (ret)
14241401
return ret;
14251402

@@ -1430,7 +1407,7 @@ static int htab_lru_map_delete_elem(struct bpf_map *map, void *key)
14301407
else
14311408
ret = -ENOENT;
14321409

1433-
htab_unlock_bucket(htab, b, hash, flags);
1410+
htab_unlock_bucket(b, flags);
14341411
if (l)
14351412
htab_lru_push_free(htab, l);
14361413
return ret;
@@ -1493,7 +1470,6 @@ static void htab_map_free_timers(struct bpf_map *map)
14931470
static void htab_map_free(struct bpf_map *map)
14941471
{
14951472
struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
1496-
int i;
14971473

14981474
/* bpf_free_used_maps() or close(map_fd) will trigger this map_free callback.
14991475
* bpf_free_used_maps() is called after bpf prog is no longer executing.
@@ -1515,10 +1491,10 @@ static void htab_map_free(struct bpf_map *map)
15151491
bpf_map_area_free(htab->buckets);
15161492
bpf_mem_alloc_destroy(&htab->pcpu_ma);
15171493
bpf_mem_alloc_destroy(&htab->ma);
1494+
15181495
if (htab->use_percpu_counter)
15191496
percpu_counter_destroy(&htab->pcount);
1520-
for (i = 0; i < HASHTAB_MAP_LOCK_COUNT; i++)
1521-
free_percpu(htab->map_locked[i]);
1497+
15221498
lockdep_unregister_key(&htab->lockdep_key);
15231499
bpf_map_area_free(htab);
15241500
}
@@ -1562,7 +1538,7 @@ static int __htab_map_lookup_and_delete_elem(struct bpf_map *map, void *key,
15621538
b = __select_bucket(htab, hash);
15631539
head = &b->head;
15641540

1565-
ret = htab_lock_bucket(htab, b, hash, &bflags);
1541+
ret = htab_lock_bucket(b, &bflags);
15661542
if (ret)
15671543
return ret;
15681544

@@ -1600,7 +1576,7 @@ static int __htab_map_lookup_and_delete_elem(struct bpf_map *map, void *key,
16001576
free_htab_elem(htab, l);
16011577
}
16021578

1603-
htab_unlock_bucket(htab, b, hash, bflags);
1579+
htab_unlock_bucket(b, bflags);
16041580

16051581
if (is_lru_map && l)
16061582
htab_lru_push_free(htab, l);
@@ -1718,7 +1694,7 @@ __htab_map_lookup_and_delete_batch(struct bpf_map *map,
17181694
head = &b->head;
17191695
/* do not grab the lock unless need it (bucket_cnt > 0). */
17201696
if (locked) {
1721-
ret = htab_lock_bucket(htab, b, batch, &flags);
1697+
ret = htab_lock_bucket(b, &flags);
17221698
if (ret) {
17231699
rcu_read_unlock();
17241700
bpf_enable_instrumentation();
@@ -1741,7 +1717,7 @@ __htab_map_lookup_and_delete_batch(struct bpf_map *map,
17411717
/* Note that since bucket_cnt > 0 here, it is implicit
17421718
* that the locked was grabbed, so release it.
17431719
*/
1744-
htab_unlock_bucket(htab, b, batch, flags);
1720+
htab_unlock_bucket(b, flags);
17451721
rcu_read_unlock();
17461722
bpf_enable_instrumentation();
17471723
goto after_loop;
@@ -1752,7 +1728,7 @@ __htab_map_lookup_and_delete_batch(struct bpf_map *map,
17521728
/* Note that since bucket_cnt > 0 here, it is implicit
17531729
* that the locked was grabbed, so release it.
17541730
*/
1755-
htab_unlock_bucket(htab, b, batch, flags);
1731+
htab_unlock_bucket(b, flags);
17561732
rcu_read_unlock();
17571733
bpf_enable_instrumentation();
17581734
kvfree(keys);
@@ -1813,7 +1789,7 @@ __htab_map_lookup_and_delete_batch(struct bpf_map *map,
18131789
dst_val += value_size;
18141790
}
18151791

1816-
htab_unlock_bucket(htab, b, batch, flags);
1792+
htab_unlock_bucket(b, flags);
18171793
locked = false;
18181794

18191795
while (node_to_free) {

0 commit comments