@@ -185,7 +185,7 @@ EXPORT_SYMBOL(ib_cache_gid_parse_type_str);
 
 static struct ib_gid_table *rdma_gid_table(struct ib_device *device, u8 port)
 {
-	return device->cache.ports[port - rdma_start_port(device)].gid;
+	return device->port_data[port].cache.gid;
 }
 
 static bool is_gid_entry_free(const struct ib_gid_table_entry *entry)
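
The hunk above is the heart of the patch: per-port cache state moves out of the zero-based device->cache.ports[] side array and into device->port_data[], which is indexed by the logical port number itself, so every accessor drops the port - rdma_start_port(device) translation. Roughly, the structures involved look like the sketch below, reconstructed from the accessors in this file rather than quoted from the header change:

/* Sketch only, inferred from the .cache and .immutable accessors here. */
struct ib_port_cache {
	u64			 subnet_prefix;
	struct ib_pkey_cache	*pkey;
	struct ib_gid_table	*gid;
	u8			 lmc;
	enum ib_port_state	 port_state;
};

struct ib_port_data {
	struct ib_port_immutable immutable;
	struct ib_port_cache	 cache;	/* replaces device->cache.ports[] */
};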
@@ -765,7 +765,7 @@ static struct ib_gid_table *alloc_gid_table(int sz)
 	return NULL;
 }
 
-static void release_gid_table(struct ib_device *device, u8 port,
+static void release_gid_table(struct ib_device *device,
 			      struct ib_gid_table *table)
 {
 	bool leak = false;
@@ -863,31 +863,27 @@ static void gid_table_reserve_default(struct ib_device *ib_dev, u8 port,
 
 static void gid_table_release_one(struct ib_device *ib_dev)
 {
-	struct ib_gid_table *table;
-	u8 port;
+	unsigned int p;
 
-	for (port = 0; port < ib_dev->phys_port_cnt; port++) {
-		table = ib_dev->cache.ports[port].gid;
-		release_gid_table(ib_dev, port, table);
-		ib_dev->cache.ports[port].gid = NULL;
+	rdma_for_each_port (ib_dev, p) {
+		release_gid_table(ib_dev, ib_dev->port_data[p].cache.gid);
+		ib_dev->port_data[p].cache.gid = NULL;
 	}
 }
 
 static int _gid_table_setup_one(struct ib_device *ib_dev)
 {
-	u8 port;
 	struct ib_gid_table *table;
+	unsigned int rdma_port;
 
-	for (port = 0; port < ib_dev->phys_port_cnt; port++) {
-		u8 rdma_port = port + rdma_start_port(ib_dev);
-
+	rdma_for_each_port (ib_dev, rdma_port) {
 		table = alloc_gid_table(
 			ib_dev->port_data[rdma_port].immutable.gid_tbl_len);
 		if (!table)
 			goto rollback_table_setup;
 
 		gid_table_reserve_default(ib_dev, rdma_port, table);
-		ib_dev->cache.ports[port].gid = table;
+		ib_dev->port_data[rdma_port].cache.gid = table;
 	}
 	return 0;
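
The open-coded loops removed above iterated a zero-based index (0 .. phys_port_cnt) and translated it to a logical port number by hand; rdma_for_each_port() yields logical port numbers directly. As a sketch, it behaves like the loop below (an illustrative equivalent only; the in-tree macro additionally enforces that the iterator is an unsigned int):

/* Illustrative expansion of rdma_for_each_port(ib_dev, p): visit every
 * valid logical port, from rdma_start_port() (0 for switches, 1 for
 * HCAs) through rdma_end_port(), inclusive.
 */
unsigned int p;

for (p = rdma_start_port(ib_dev); p <= rdma_end_port(ib_dev); p++)
	use_port(ib_dev, p);	/* use_port() stands in for the loop body */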
@@ -898,14 +894,11 @@ static int _gid_table_setup_one(struct ib_device *ib_dev)
 
 static void gid_table_cleanup_one(struct ib_device *ib_dev)
 {
-	struct ib_gid_table *table;
-	u8 port;
+	unsigned int p;
 
-	for (port = 0; port < ib_dev->phys_port_cnt; port++) {
-		table = ib_dev->cache.ports[port].gid;
-		cleanup_gid_table_port(ib_dev, port + rdma_start_port(ib_dev),
-				       table);
-	}
+	rdma_for_each_port (ib_dev, p)
+		cleanup_gid_table_port(ib_dev, p,
+				       ib_dev->port_data[p].cache.gid);
 }
 
 static int gid_table_setup_one(struct ib_device *ib_dev)
@@ -983,17 +976,17 @@ const struct ib_gid_attr *rdma_find_gid(struct ib_device *device,
 	unsigned long mask = GID_ATTR_FIND_MASK_GID |
 			     GID_ATTR_FIND_MASK_GID_TYPE;
 	struct ib_gid_attr gid_attr_val = {.ndev = ndev, .gid_type = gid_type};
-	u8 p;
+	unsigned int p;
 
 	if (ndev)
 		mask |= GID_ATTR_FIND_MASK_NETDEV;
 
-	for (p = 0; p < device->phys_port_cnt; p++) {
+	rdma_for_each_port (device, p) {
 		struct ib_gid_table *table;
 		unsigned long flags;
 		int index;
 
-		table = device->cache.ports[p].gid;
+		table = device->port_data[p].cache.gid;
 		read_lock_irqsave(&table->rwlock, flags);
 		index = find_gid(table, gid, &gid_attr_val, false, mask, NULL);
 		if (index >= 0) {
@@ -1025,7 +1018,7 @@ int ib_get_cached_pkey(struct ib_device *device,
 
 	read_lock_irqsave(&device->cache.lock, flags);
 
-	cache = device->cache.ports[port_num - rdma_start_port(device)].pkey;
+	cache = device->port_data[port_num].cache.pkey;
 
 	if (index < 0 || index >= cache->table_len)
 		ret = -EINVAL;
@@ -1043,14 +1036,12 @@ int ib_get_cached_subnet_prefix(struct ib_device *device,
 			       u64 *sn_pfx)
 {
 	unsigned long flags;
-	int p;
 
 	if (!rdma_is_port_valid(device, port_num))
 		return -EINVAL;
 
-	p = port_num - rdma_start_port(device);
 	read_lock_irqsave(&device->cache.lock, flags);
-	*sn_pfx = device->cache.ports[p].subnet_prefix;
+	*sn_pfx = device->port_data[port_num].cache.subnet_prefix;
 	read_unlock_irqrestore(&device->cache.lock, flags);
 
 	return 0;
@@ -1073,7 +1064,7 @@ int ib_find_cached_pkey(struct ib_device *device,
 
 	read_lock_irqsave(&device->cache.lock, flags);
 
-	cache = device->cache.ports[port_num - rdma_start_port(device)].pkey;
+	cache = device->port_data[port_num].cache.pkey;
 
 	*index = -1;
 
@@ -1113,7 +1104,7 @@ int ib_find_exact_cached_pkey(struct ib_device *device,
 
 	read_lock_irqsave(&device->cache.lock, flags);
 
-	cache = device->cache.ports[port_num - rdma_start_port(device)].pkey;
+	cache = device->port_data[port_num].cache.pkey;
 
 	*index = -1;
 
@@ -1141,7 +1132,7 @@ int ib_get_cached_lmc(struct ib_device *device,
 		return -EINVAL;
 
 	read_lock_irqsave(&device->cache.lock, flags);
-	*lmc = device->cache.ports[port_num - rdma_start_port(device)].lmc;
+	*lmc = device->port_data[port_num].cache.lmc;
 	read_unlock_irqrestore(&device->cache.lock, flags);
 
 	return ret;
@@ -1159,8 +1150,7 @@ int ib_get_cached_port_state(struct ib_device *device,
 		return -EINVAL;
 
 	read_lock_irqsave(&device->cache.lock, flags);
-	*port_state = device->cache.ports[port_num
-		- rdma_start_port(device)].port_state;
+	*port_state = device->port_data[port_num].cache.port_state;
 	read_unlock_irqrestore(&device->cache.lock, flags);
 
 	return ret;
@@ -1361,16 +1351,13 @@ static void ib_cache_update(struct ib_device *device,
 
 	write_lock_irq(&device->cache.lock);
 
-	old_pkey_cache = device->cache.ports[port -
-					     rdma_start_port(device)].pkey;
+	old_pkey_cache = device->port_data[port].cache.pkey;
 
-	device->cache.ports[port - rdma_start_port(device)].pkey = pkey_cache;
-	device->cache.ports[port - rdma_start_port(device)].lmc = tprops->lmc;
-	device->cache.ports[port - rdma_start_port(device)].port_state =
-		tprops->state;
+	device->port_data[port].cache.pkey = pkey_cache;
+	device->port_data[port].cache.lmc = tprops->lmc;
+	device->port_data[port].cache.port_state = tprops->state;
 
-	device->cache.ports[port - rdma_start_port(device)].subnet_prefix =
-		tprops->subnet_prefix;
+	device->port_data[port].cache.subnet_prefix = tprops->subnet_prefix;
 
 	write_unlock_irq(&device->cache.lock);
 
 	if (enforce_security)
@@ -1433,19 +1420,9 @@ int ib_cache_setup_one(struct ib_device *device)
 
 	rwlock_init(&device->cache.lock);
 
-	device->cache.ports =
-		kcalloc(rdma_end_port(device) - rdma_start_port(device) + 1,
-			sizeof(*device->cache.ports),
-			GFP_KERNEL);
-	if (!device->cache.ports)
-		return -ENOMEM;
-
 	err = gid_table_setup_one(device);
-	if (err) {
-		kfree(device->cache.ports);
-		device->cache.ports = NULL;
+	if (err)
 		return err;
-	}
 
 	rdma_for_each_port (device, p)
 		ib_cache_update(device, p, true);
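
The deleted kcalloc() and its error unwinding are unnecessary once the cache is embedded in port_data[]: that array is sized and allocated when the ib_device itself is created, and freed together with it. Conceptually it is owned like the sketch below (an illustration of the ownership model, not the actual allocation code in core/device.c):

/* Sketch: port_data[] is allocated once with the device, indexed by
 * logical port number (hence rdma_end_port() + 1 entries), so
 * ib_cache_setup_one() no longer owns any per-port allocation and has
 * nothing to unwind on error.
 */
dev->port_data = kcalloc(rdma_end_port(dev) + 1,
			 sizeof(*dev->port_data), GFP_KERNEL);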
@@ -1458,22 +1435,18 @@ int ib_cache_setup_one(struct ib_device *device)
 
 void ib_cache_release_one(struct ib_device *device)
 {
-	int p;
-
-	if (!device->cache.ports)
-		return;
+	unsigned int p;
 
 	/*
 	 * The release function frees all the cache elements.
 	 * This function should be called as part of freeing
 	 * all the device's resources when the cache could no
 	 * longer be accessed.
 	 */
-	for (p = 0; p <= rdma_end_port(device) - rdma_start_port(device); ++p)
-		kfree(device->cache.ports[p].pkey);
+	rdma_for_each_port (device, p)
+		kfree(device->port_data[p].cache.pkey);
 
 	gid_table_release_one(device);
-	kfree(device->cache.ports);
 }
 
 void ib_cache_cleanup_one(struct ib_device *device)