@@ -80,9 +80,6 @@ struct bucket {
 	raw_spinlock_t raw_lock;
 };
 
-#define HASHTAB_MAP_LOCK_COUNT 8
-#define HASHTAB_MAP_LOCK_MASK (HASHTAB_MAP_LOCK_COUNT - 1)
-
 struct bpf_htab {
 	struct bpf_map map;
 	struct bpf_mem_alloc ma;
@@ -104,7 +101,6 @@ struct bpf_htab {
 	u32 elem_size;	/* size of each element in bytes */
 	u32 hashrnd;
 	struct lock_class_key lockdep_key;
-	int __percpu *map_locked[HASHTAB_MAP_LOCK_COUNT];
 };
 
 /* each htab element is struct htab_elem + key + value */
@@ -146,35 +142,26 @@ static void htab_init_buckets(struct bpf_htab *htab)
 	}
 }
 
-static inline int htab_lock_bucket(const struct bpf_htab *htab,
-				   struct bucket *b, u32 hash,
+static inline int htab_lock_bucket(struct bucket *b,
 				   unsigned long *pflags)
 {
 	unsigned long flags;
 
-	hash = hash & HASHTAB_MAP_LOCK_MASK;
-
-	preempt_disable();
-	if (unlikely(__this_cpu_inc_return(*(htab->map_locked[hash])) != 1)) {
-		__this_cpu_dec(*(htab->map_locked[hash]));
-		preempt_enable();
-		return -EBUSY;
+	if (in_nmi()) {
+		if (!raw_spin_trylock_irqsave(&b->raw_lock, flags))
+			return -EBUSY;
+	} else {
+		raw_spin_lock_irqsave(&b->raw_lock, flags);
 	}
 
-	raw_spin_lock_irqsave(&b->raw_lock, flags);
 	*pflags = flags;
-
 	return 0;
 }
 
-static inline void htab_unlock_bucket(const struct bpf_htab *htab,
-				      struct bucket *b, u32 hash,
+static inline void htab_unlock_bucket(struct bucket *b,
 				      unsigned long flags)
 {
-	hash = hash & HASHTAB_MAP_LOCK_MASK;
 	raw_spin_unlock_irqrestore(&b->raw_lock, flags);
-	__this_cpu_dec(*(htab->map_locked[hash]));
-	preempt_enable();
 }
 
 static bool htab_lru_map_delete_node(void *arg, struct bpf_lru_node *node);
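
For readability, here is how the two helpers read once the hunk above is applied — a sketch assembled from the '+' and context lines, with comments that reflect one reading of the change rather than text from the patch itself:

/* Take the bucket's raw_spinlock directly. In NMI context the lock may
 * already be held by the interrupted context on this CPU, so only trylock
 * there and report -EBUSY instead of spinning forever.
 */
static inline int htab_lock_bucket(struct bucket *b, unsigned long *pflags)
{
	unsigned long flags;

	if (in_nmi()) {
		if (!raw_spin_trylock_irqsave(&b->raw_lock, flags))
			return -EBUSY;
	} else {
		raw_spin_lock_irqsave(&b->raw_lock, flags);
	}

	*pflags = flags;
	return 0;
}

static inline void htab_unlock_bucket(struct bucket *b, unsigned long flags)
{
	raw_spin_unlock_irqrestore(&b->raw_lock, flags);
}

Callers now pass only the bucket and a flags pointer; the per-CPU map_locked counters, the hash masking, and the explicit preempt_disable()/preempt_enable() pair are all gone.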
@@ -467,7 +454,7 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
 	bool percpu_lru = (attr->map_flags & BPF_F_NO_COMMON_LRU);
 	bool prealloc = !(attr->map_flags & BPF_F_NO_PREALLOC);
 	struct bpf_htab *htab;
-	int err, i;
+	int err;
 
 	htab = bpf_map_area_alloc(sizeof(*htab), NUMA_NO_NODE);
 	if (!htab)
@@ -512,15 +499,6 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
 	if (!htab->buckets)
 		goto free_htab;
 
-	for (i = 0; i < HASHTAB_MAP_LOCK_COUNT; i++) {
-		htab->map_locked[i] = bpf_map_alloc_percpu(&htab->map,
-							   sizeof(int),
-							   sizeof(int),
-							   GFP_USER);
-		if (!htab->map_locked[i])
-			goto free_map_locked;
-	}
-
 	if (htab->map.map_flags & BPF_F_ZERO_SEED)
 		htab->hashrnd = 0;
 	else
@@ -548,13 +526,13 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
 	if (htab->use_percpu_counter) {
 		err = percpu_counter_init(&htab->pcount, 0, GFP_KERNEL);
 		if (err)
-			goto free_map_locked;
+			goto free_buckets;
 	}
 
 	if (prealloc) {
 		err = prealloc_init(htab);
 		if (err)
-			goto free_map_locked;
+			goto free_buckets;
 
 		if (!percpu && !lru) {
 			/* lru itself can remove the least used element, so
@@ -567,24 +545,23 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
 	} else {
 		err = bpf_mem_alloc_init(&htab->ma, htab->elem_size, false);
 		if (err)
-			goto free_map_locked;
+			goto free_buckets;
 		if (percpu) {
 			err = bpf_mem_alloc_init(&htab->pcpu_ma,
 						 round_up(htab->map.value_size, 8), true);
 			if (err)
-				goto free_map_locked;
+				goto free_buckets;
 		}
 	}
 
 	return &htab->map;
 
 free_prealloc:
 	prealloc_destroy(htab);
-free_map_locked:
+free_buckets:
 	if (htab->use_percpu_counter)
 		percpu_counter_destroy(&htab->pcount);
-	for (i = 0; i < HASHTAB_MAP_LOCK_COUNT; i++)
-		free_percpu(htab->map_locked[i]);
+
 	bpf_map_area_free(htab->buckets);
 	bpf_mem_alloc_destroy(&htab->pcpu_ma);
 	bpf_mem_alloc_destroy(&htab->ma);
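
With free_map_locked renamed to free_buckets, the error unwind at the tail of htab_map_alloc() reads as below — a sketch of only the labels visible in this hunk; whatever follows (the free_htab path) is not touched by this diff and is not shown:

free_prealloc:
	prealloc_destroy(htab);
free_buckets:
	if (htab->use_percpu_counter)
		percpu_counter_destroy(&htab->pcount);

	bpf_map_area_free(htab->buckets);
	bpf_mem_alloc_destroy(&htab->pcpu_ma);
	bpf_mem_alloc_destroy(&htab->ma);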
@@ -781,7 +758,7 @@ static bool htab_lru_map_delete_node(void *arg, struct bpf_lru_node *node)
 	b = __select_bucket(htab, tgt_l->hash);
 	head = &b->head;
 
-	ret = htab_lock_bucket(htab, b, tgt_l->hash, &flags);
+	ret = htab_lock_bucket(b, &flags);
 	if (ret)
 		return false;
 
@@ -792,7 +769,7 @@ static bool htab_lru_map_delete_node(void *arg, struct bpf_lru_node *node)
 			break;
 	}
 
-	htab_unlock_bucket(htab, b, tgt_l->hash, flags);
+	htab_unlock_bucket(b, flags);
 
 	return l == tgt_l;
 }
@@ -1106,7 +1083,7 @@ static int htab_map_update_elem(struct bpf_map *map, void *key, void *value,
 		 */
 	}
 
-	ret = htab_lock_bucket(htab, b, hash, &flags);
+	ret = htab_lock_bucket(b, &flags);
 	if (ret)
 		return ret;
 
@@ -1151,7 +1128,7 @@ static int htab_map_update_elem(struct bpf_map *map, void *key, void *value,
 	}
 	ret = 0;
 err:
-	htab_unlock_bucket(htab, b, hash, flags);
+	htab_unlock_bucket(b, flags);
 	return ret;
 }
 
@@ -1197,7 +1174,7 @@ static int htab_lru_map_update_elem(struct bpf_map *map, void *key, void *value,
 	copy_map_value(&htab->map,
 		       l_new->key + round_up(map->key_size, 8), value);
 
-	ret = htab_lock_bucket(htab, b, hash, &flags);
+	ret = htab_lock_bucket(b, &flags);
 	if (ret)
 		return ret;
 
@@ -1218,7 +1195,7 @@ static int htab_lru_map_update_elem(struct bpf_map *map, void *key, void *value,
 	ret = 0;
 
 err:
-	htab_unlock_bucket(htab, b, hash, flags);
+	htab_unlock_bucket(b, flags);
 
 	if (ret)
 		htab_lru_push_free(htab, l_new);
@@ -1254,7 +1231,7 @@ static int __htab_percpu_map_update_elem(struct bpf_map *map, void *key,
 	b = __select_bucket(htab, hash);
 	head = &b->head;
 
-	ret = htab_lock_bucket(htab, b, hash, &flags);
+	ret = htab_lock_bucket(b, &flags);
 	if (ret)
 		return ret;
 
@@ -1279,7 +1256,7 @@ static int __htab_percpu_map_update_elem(struct bpf_map *map, void *key,
 	}
 	ret = 0;
 err:
-	htab_unlock_bucket(htab, b, hash, flags);
+	htab_unlock_bucket(b, flags);
 	return ret;
 }
 
@@ -1320,7 +1297,7 @@ static int __htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key,
 			return -ENOMEM;
 	}
 
-	ret = htab_lock_bucket(htab, b, hash, &flags);
+	ret = htab_lock_bucket(b, &flags);
 	if (ret)
 		return ret;
 
@@ -1344,7 +1321,7 @@ static int __htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key,
 	}
 	ret = 0;
 err:
-	htab_unlock_bucket(htab, b, hash, flags);
+	htab_unlock_bucket(b, flags);
 	if (l_new)
 		bpf_lru_push_free(&htab->lru, &l_new->lru_node);
 	return ret;
@@ -1383,7 +1360,7 @@ static int htab_map_delete_elem(struct bpf_map *map, void *key)
 	b = __select_bucket(htab, hash);
 	head = &b->head;
 
-	ret = htab_lock_bucket(htab, b, hash, &flags);
+	ret = htab_lock_bucket(b, &flags);
 	if (ret)
 		return ret;
 
@@ -1396,7 +1373,7 @@ static int htab_map_delete_elem(struct bpf_map *map, void *key)
 		ret = -ENOENT;
 	}
 
-	htab_unlock_bucket(htab, b, hash, flags);
+	htab_unlock_bucket(b, flags);
 	return ret;
 }
 
@@ -1419,7 +1396,7 @@ static int htab_lru_map_delete_elem(struct bpf_map *map, void *key)
 	b = __select_bucket(htab, hash);
 	head = &b->head;
 
-	ret = htab_lock_bucket(htab, b, hash, &flags);
+	ret = htab_lock_bucket(b, &flags);
 	if (ret)
 		return ret;
 
@@ -1430,7 +1407,7 @@ static int htab_lru_map_delete_elem(struct bpf_map *map, void *key)
 	else
 		ret = -ENOENT;
 
-	htab_unlock_bucket(htab, b, hash, flags);
+	htab_unlock_bucket(b, flags);
 	if (l)
 		htab_lru_push_free(htab, l);
 	return ret;
@@ -1493,7 +1470,6 @@ static void htab_map_free_timers(struct bpf_map *map)
 static void htab_map_free(struct bpf_map *map)
 {
 	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
-	int i;
 
 	/* bpf_free_used_maps() or close(map_fd) will trigger this map_free callback.
 	 * bpf_free_used_maps() is called after bpf prog is no longer executing.
@@ -1515,10 +1491,10 @@ static void htab_map_free(struct bpf_map *map)
 	bpf_map_area_free(htab->buckets);
 	bpf_mem_alloc_destroy(&htab->pcpu_ma);
 	bpf_mem_alloc_destroy(&htab->ma);
+
 	if (htab->use_percpu_counter)
 		percpu_counter_destroy(&htab->pcount);
-	for (i = 0; i < HASHTAB_MAP_LOCK_COUNT; i++)
-		free_percpu(htab->map_locked[i]);
+
 	lockdep_unregister_key(&htab->lockdep_key);
 	bpf_map_area_free(htab);
 }
@@ -1562,7 +1538,7 @@ static int __htab_map_lookup_and_delete_elem(struct bpf_map *map, void *key,
 	b = __select_bucket(htab, hash);
 	head = &b->head;
 
-	ret = htab_lock_bucket(htab, b, hash, &bflags);
+	ret = htab_lock_bucket(b, &bflags);
 	if (ret)
 		return ret;
 
@@ -1600,7 +1576,7 @@ static int __htab_map_lookup_and_delete_elem(struct bpf_map *map, void *key,
 			free_htab_elem(htab, l);
 	}
 
-	htab_unlock_bucket(htab, b, hash, bflags);
+	htab_unlock_bucket(b, bflags);
 
 	if (is_lru_map && l)
 		htab_lru_push_free(htab, l);
@@ -1718,7 +1694,7 @@ __htab_map_lookup_and_delete_batch(struct bpf_map *map,
 	head = &b->head;
 	/* do not grab the lock unless need it (bucket_cnt > 0). */
 	if (locked) {
-		ret = htab_lock_bucket(htab, b, batch, &flags);
+		ret = htab_lock_bucket(b, &flags);
 		if (ret) {
 			rcu_read_unlock();
 			bpf_enable_instrumentation();
@@ -1741,7 +1717,7 @@ __htab_map_lookup_and_delete_batch(struct bpf_map *map,
 		/* Note that since bucket_cnt > 0 here, it is implicit
 		 * that the locked was grabbed, so release it.
 		 */
-		htab_unlock_bucket(htab, b, batch, flags);
+		htab_unlock_bucket(b, flags);
 		rcu_read_unlock();
 		bpf_enable_instrumentation();
 		goto after_loop;
@@ -1752,7 +1728,7 @@ __htab_map_lookup_and_delete_batch(struct bpf_map *map,
 		/* Note that since bucket_cnt > 0 here, it is implicit
 		 * that the locked was grabbed, so release it.
 		 */
-		htab_unlock_bucket(htab, b, batch, flags);
+		htab_unlock_bucket(b, flags);
 		rcu_read_unlock();
 		bpf_enable_instrumentation();
 		kvfree(keys);
@@ -1813,7 +1789,7 @@ __htab_map_lookup_and_delete_batch(struct bpf_map *map,
 		dst_val += value_size;
 	}
 
-	htab_unlock_bucket(htab, b, batch, flags);
+	htab_unlock_bucket(b, flags);
 	locked = false;
 
 	while (node_to_free) {