Skip to content

Commit 8faea9f

Browse files
committed
RDMA/cache: Move the cache per-port data into the main ib_port_data
Like the other cases, there is no real reason to have another array just for the cache. This larger conversion gets its own patch. Signed-off-by: Jason Gunthorpe <[email protected]>
1 parent 8ceb135 commit 8faea9f

File tree

2 files changed

+33
-59
lines changed

2 files changed

+33
-59
lines changed

drivers/infiniband/core/cache.c

Lines changed: 31 additions & 58 deletions
Original file line number | Diff line number | Diff line change
@@ -185,7 +185,7 @@ EXPORT_SYMBOL(ib_cache_gid_parse_type_str);
185185

186186
static struct ib_gid_table *rdma_gid_table(struct ib_device *device, u8 port)
187187
{
188-
return device->cache.ports[port - rdma_start_port(device)].gid;
188+
return device->port_data[port].cache.gid;
189189
}
190190

191191
static bool is_gid_entry_free(const struct ib_gid_table_entry *entry)
@@ -765,7 +765,7 @@ static struct ib_gid_table *alloc_gid_table(int sz)
765765
return NULL;
766766
}
767767

768-
static void release_gid_table(struct ib_device *device, u8 port,
768+
static void release_gid_table(struct ib_device *device,
769769
struct ib_gid_table *table)
770770
{
771771
bool leak = false;
@@ -863,31 +863,27 @@ static void gid_table_reserve_default(struct ib_device *ib_dev, u8 port,
863863

864864
static void gid_table_release_one(struct ib_device *ib_dev)
865865
{
866-
struct ib_gid_table *table;
867-
u8 port;
866+
unsigned int p;
868867

869-
for (port = 0; port < ib_dev->phys_port_cnt; port++) {
870-
table = ib_dev->cache.ports[port].gid;
871-
release_gid_table(ib_dev, port, table);
872-
ib_dev->cache.ports[port].gid = NULL;
868+
rdma_for_each_port (ib_dev, p) {
869+
release_gid_table(ib_dev, ib_dev->port_data[p].cache.gid);
870+
ib_dev->port_data[p].cache.gid = NULL;
873871
}
874872
}
875873

876874
static int _gid_table_setup_one(struct ib_device *ib_dev)
877875
{
878-
u8 port;
879876
struct ib_gid_table *table;
877+
unsigned int rdma_port;
880878

881-
for (port = 0; port < ib_dev->phys_port_cnt; port++) {
882-
u8 rdma_port = port + rdma_start_port(ib_dev);
883-
879+
rdma_for_each_port (ib_dev, rdma_port) {
884880
table = alloc_gid_table(
885881
ib_dev->port_data[rdma_port].immutable.gid_tbl_len);
886882
if (!table)
887883
goto rollback_table_setup;
888884

889885
gid_table_reserve_default(ib_dev, rdma_port, table);
890-
ib_dev->cache.ports[port].gid = table;
886+
ib_dev->port_data[rdma_port].cache.gid = table;
891887
}
892888
return 0;
893889

@@ -898,14 +894,11 @@ static int _gid_table_setup_one(struct ib_device *ib_dev)
898894

899895
static void gid_table_cleanup_one(struct ib_device *ib_dev)
900896
{
901-
struct ib_gid_table *table;
902-
u8 port;
897+
unsigned int p;
903898

904-
for (port = 0; port < ib_dev->phys_port_cnt; port++) {
905-
table = ib_dev->cache.ports[port].gid;
906-
cleanup_gid_table_port(ib_dev, port + rdma_start_port(ib_dev),
907-
table);
908-
}
899+
rdma_for_each_port (ib_dev, p)
900+
cleanup_gid_table_port(ib_dev, p,
901+
ib_dev->port_data[p].cache.gid);
909902
}
910903

911904
static int gid_table_setup_one(struct ib_device *ib_dev)
@@ -983,17 +976,17 @@ const struct ib_gid_attr *rdma_find_gid(struct ib_device *device,
983976
unsigned long mask = GID_ATTR_FIND_MASK_GID |
984977
GID_ATTR_FIND_MASK_GID_TYPE;
985978
struct ib_gid_attr gid_attr_val = {.ndev = ndev, .gid_type = gid_type};
986-
u8 p;
979+
unsigned int p;
987980

988981
if (ndev)
989982
mask |= GID_ATTR_FIND_MASK_NETDEV;
990983

991-
for (p = 0; p < device->phys_port_cnt; p++) {
984+
rdma_for_each_port(device, p) {
992985
struct ib_gid_table *table;
993986
unsigned long flags;
994987
int index;
995988

996-
table = device->cache.ports[p].gid;
989+
table = device->port_data[p].cache.gid;
997990
read_lock_irqsave(&table->rwlock, flags);
998991
index = find_gid(table, gid, &gid_attr_val, false, mask, NULL);
999992
if (index >= 0) {
@@ -1025,7 +1018,7 @@ int ib_get_cached_pkey(struct ib_device *device,
10251018

10261019
read_lock_irqsave(&device->cache.lock, flags);
10271020

1028-
cache = device->cache.ports[port_num - rdma_start_port(device)].pkey;
1021+
cache = device->port_data[port_num].cache.pkey;
10291022

10301023
if (index < 0 || index >= cache->table_len)
10311024
ret = -EINVAL;
@@ -1043,14 +1036,12 @@ int ib_get_cached_subnet_prefix(struct ib_device *device,
10431036
u64 *sn_pfx)
10441037
{
10451038
unsigned long flags;
1046-
int p;
10471039

10481040
if (!rdma_is_port_valid(device, port_num))
10491041
return -EINVAL;
10501042

1051-
p = port_num - rdma_start_port(device);
10521043
read_lock_irqsave(&device->cache.lock, flags);
1053-
*sn_pfx = device->cache.ports[p].subnet_prefix;
1044+
*sn_pfx = device->port_data[port_num].cache.subnet_prefix;
10541045
read_unlock_irqrestore(&device->cache.lock, flags);
10551046

10561047
return 0;
@@ -1073,7 +1064,7 @@ int ib_find_cached_pkey(struct ib_device *device,
10731064

10741065
read_lock_irqsave(&device->cache.lock, flags);
10751066

1076-
cache = device->cache.ports[port_num - rdma_start_port(device)].pkey;
1067+
cache = device->port_data[port_num].cache.pkey;
10771068

10781069
*index = -1;
10791070

@@ -1113,7 +1104,7 @@ int ib_find_exact_cached_pkey(struct ib_device *device,
11131104

11141105
read_lock_irqsave(&device->cache.lock, flags);
11151106

1116-
cache = device->cache.ports[port_num - rdma_start_port(device)].pkey;
1107+
cache = device->port_data[port_num].cache.pkey;
11171108

11181109
*index = -1;
11191110

@@ -1141,7 +1132,7 @@ int ib_get_cached_lmc(struct ib_device *device,
11411132
return -EINVAL;
11421133

11431134
read_lock_irqsave(&device->cache.lock, flags);
1144-
*lmc = device->cache.ports[port_num - rdma_start_port(device)].lmc;
1135+
*lmc = device->port_data[port_num].cache.lmc;
11451136
read_unlock_irqrestore(&device->cache.lock, flags);
11461137

11471138
return ret;
@@ -1159,8 +1150,7 @@ int ib_get_cached_port_state(struct ib_device *device,
11591150
return -EINVAL;
11601151

11611152
read_lock_irqsave(&device->cache.lock, flags);
1162-
*port_state = device->cache.ports[port_num
1163-
- rdma_start_port(device)].port_state;
1153+
*port_state = device->port_data[port_num].cache.port_state;
11641154
read_unlock_irqrestore(&device->cache.lock, flags);
11651155

11661156
return ret;
@@ -1361,16 +1351,13 @@ static void ib_cache_update(struct ib_device *device,
13611351

13621352
write_lock_irq(&device->cache.lock);
13631353

1364-
old_pkey_cache = device->cache.ports[port -
1365-
rdma_start_port(device)].pkey;
1354+
old_pkey_cache = device->port_data[port].cache.pkey;
13661355

1367-
device->cache.ports[port - rdma_start_port(device)].pkey = pkey_cache;
1368-
device->cache.ports[port - rdma_start_port(device)].lmc = tprops->lmc;
1369-
device->cache.ports[port - rdma_start_port(device)].port_state =
1370-
tprops->state;
1356+
device->port_data[port].cache.pkey = pkey_cache;
1357+
device->port_data[port].cache.lmc = tprops->lmc;
1358+
device->port_data[port].cache.port_state = tprops->state;
13711359

1372-
device->cache.ports[port - rdma_start_port(device)].subnet_prefix =
1373-
tprops->subnet_prefix;
1360+
device->port_data[port].cache.subnet_prefix = tprops->subnet_prefix;
13741361
write_unlock_irq(&device->cache.lock);
13751362

13761363
if (enforce_security)
@@ -1433,19 +1420,9 @@ int ib_cache_setup_one(struct ib_device *device)
14331420

14341421
rwlock_init(&device->cache.lock);
14351422

1436-
device->cache.ports =
1437-
kcalloc(rdma_end_port(device) - rdma_start_port(device) + 1,
1438-
sizeof(*device->cache.ports),
1439-
GFP_KERNEL);
1440-
if (!device->cache.ports)
1441-
return -ENOMEM;
1442-
14431423
err = gid_table_setup_one(device);
1444-
if (err) {
1445-
kfree(device->cache.ports);
1446-
device->cache.ports = NULL;
1424+
if (err)
14471425
return err;
1448-
}
14491426

14501427
rdma_for_each_port (device, p)
14511428
ib_cache_update(device, p, true);
@@ -1458,22 +1435,18 @@ int ib_cache_setup_one(struct ib_device *device)
14581435

14591436
void ib_cache_release_one(struct ib_device *device)
14601437
{
1461-
int p;
1462-
1463-
if (!device->cache.ports)
1464-
return;
1438+
unsigned int p;
14651439

14661440
/*
14671441
* The release function frees all the cache elements.
14681442
* This function should be called as part of freeing
14691443
* all the device's resources when the cache could no
14701444
* longer be accessed.
14711445
*/
1472-
for (p = 0; p <= rdma_end_port(device) - rdma_start_port(device); ++p)
1473-
kfree(device->cache.ports[p].pkey);
1446+
rdma_for_each_port (device, p)
1447+
kfree(device->port_data[p].cache.pkey);
14741448

14751449
gid_table_release_one(device);
1476-
kfree(device->cache.ports);
14771450
}
14781451

14791452
void ib_cache_cleanup_one(struct ib_device *device)

include/rdma/ib_verbs.h

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -2186,7 +2186,6 @@ struct ib_port_cache {
21862186
struct ib_cache {
21872187
rwlock_t lock;
21882188
struct ib_event_handler event_handler;
2189-
struct ib_port_cache *ports;
21902189
};
21912190

21922191
struct iw_cm_verbs;
@@ -2203,6 +2202,8 @@ struct ib_port_data {
22032202

22042203
spinlock_t pkey_list_lock;
22052204
struct list_head pkey_list;
2205+
2206+
struct ib_port_cache cache;
22062207
};
22072208

22082209
/* rdma netdev type - specifies protocol type */

0 commit comments

Comments (0)