@@ -370,12 +370,10 @@ static bool vxlan_mdb_is_valid_source(const struct nlattr *attr, __be16 proto,
 	return true;
 }
 
-static void vxlan_mdb_config_group_set(struct vxlan_mdb_config *cfg,
-				       const struct br_mdb_entry *entry,
-				       const struct nlattr *source_attr)
+static void vxlan_mdb_group_set(struct vxlan_mdb_entry_key *group,
+				const struct br_mdb_entry *entry,
+				const struct nlattr *source_attr)
 {
-	struct vxlan_mdb_entry_key *group = &cfg->group;
-
 	switch (entry->addr.proto) {
 	case htons(ETH_P_IP):
 		group->dst.sa.sa_family = AF_INET;
@@ -503,7 +501,7 @@ static int vxlan_mdb_config_attrs_init(struct vxlan_mdb_config *cfg,
 				       entry->addr.proto, extack))
 		return -EINVAL;
 
-	vxlan_mdb_config_group_set(cfg, entry, mdbe_attrs[MDBE_ATTR_SOURCE]);
+	vxlan_mdb_group_set(&cfg->group, entry, mdbe_attrs[MDBE_ATTR_SOURCE]);
 
 	/* rtnetlink code only validates that IPv4 group address is
 	 * multicast.
@@ -927,23 +925,20 @@ vxlan_mdb_nlmsg_src_list_size(const struct vxlan_mdb_entry_key *group,
 	return nlmsg_size;
 }
 
-static size_t vxlan_mdb_nlmsg_size(const struct vxlan_dev *vxlan,
-				   const struct vxlan_mdb_entry *mdb_entry,
-				   const struct vxlan_mdb_remote *remote)
+static size_t
+vxlan_mdb_nlmsg_remote_size(const struct vxlan_dev *vxlan,
+			    const struct vxlan_mdb_entry *mdb_entry,
+			    const struct vxlan_mdb_remote *remote)
 {
 	const struct vxlan_mdb_entry_key *group = &mdb_entry->key;
 	struct vxlan_rdst *rd = rtnl_dereference(remote->rd);
 	size_t nlmsg_size;
 
-	nlmsg_size = NLMSG_ALIGN(sizeof(struct br_port_msg)) +
-		     /* MDBA_MDB */
-		     nla_total_size(0) +
-		     /* MDBA_MDB_ENTRY */
-		     nla_total_size(0) +
 		     /* MDBA_MDB_ENTRY_INFO */
-		     nla_total_size(sizeof(struct br_mdb_entry)) +
+	nlmsg_size = nla_total_size(sizeof(struct br_mdb_entry)) +
 		     /* MDBA_MDB_EATTR_TIMER */
 		     nla_total_size(sizeof(u32));
+
 	/* MDBA_MDB_EATTR_SOURCE */
 	if (vxlan_mdb_is_sg(group))
 		nlmsg_size += nla_total_size(vxlan_addr_size(&group->dst));
@@ -971,6 +966,19 @@ static size_t vxlan_mdb_nlmsg_size(const struct vxlan_dev *vxlan,
 	return nlmsg_size;
 }
 
+static size_t vxlan_mdb_nlmsg_size(const struct vxlan_dev *vxlan,
+				   const struct vxlan_mdb_entry *mdb_entry,
+				   const struct vxlan_mdb_remote *remote)
+{
+	return NLMSG_ALIGN(sizeof(struct br_port_msg)) +
+	       /* MDBA_MDB */
+	       nla_total_size(0) +
+	       /* MDBA_MDB_ENTRY */
+	       nla_total_size(0) +
+	       /* Remote entry */
+	       vxlan_mdb_nlmsg_remote_size(vxlan, mdb_entry, remote);
+}
+
 static int vxlan_mdb_nlmsg_fill(const struct vxlan_dev *vxlan,
 				struct sk_buff *skb,
 				const struct vxlan_mdb_entry *mdb_entry,
@@ -1298,6 +1306,156 @@ int vxlan_mdb_del(struct net_device *dev, struct nlattr *tb[],
 	return err;
 }
 
+static const struct nla_policy vxlan_mdbe_attrs_get_pol[MDBE_ATTR_MAX + 1] = {
+	[MDBE_ATTR_SOURCE] = NLA_POLICY_RANGE(NLA_BINARY,
+					      sizeof(struct in_addr),
+					      sizeof(struct in6_addr)),
+	[MDBE_ATTR_SRC_VNI] = NLA_POLICY_FULL_RANGE(NLA_U32, &vni_range),
+};
+
+static int vxlan_mdb_get_parse(struct net_device *dev, struct nlattr *tb[],
+			       struct vxlan_mdb_entry_key *group,
+			       struct netlink_ext_ack *extack)
+{
+	struct br_mdb_entry *entry = nla_data(tb[MDBA_GET_ENTRY]);
+	struct nlattr *mdbe_attrs[MDBE_ATTR_MAX + 1];
+	struct vxlan_dev *vxlan = netdev_priv(dev);
+	int err;
+
+	memset(group, 0, sizeof(*group));
+	group->vni = vxlan->default_dst.remote_vni;
+
+	if (!tb[MDBA_GET_ENTRY_ATTRS]) {
+		vxlan_mdb_group_set(group, entry, NULL);
+		return 0;
+	}
+
+	err = nla_parse_nested(mdbe_attrs, MDBE_ATTR_MAX,
+			       tb[MDBA_GET_ENTRY_ATTRS],
+			       vxlan_mdbe_attrs_get_pol, extack);
+	if (err)
+		return err;
+
+	if (mdbe_attrs[MDBE_ATTR_SOURCE] &&
+	    !vxlan_mdb_is_valid_source(mdbe_attrs[MDBE_ATTR_SOURCE],
+				       entry->addr.proto, extack))
+		return -EINVAL;
+
+	vxlan_mdb_group_set(group, entry, mdbe_attrs[MDBE_ATTR_SOURCE]);
+
+	if (mdbe_attrs[MDBE_ATTR_SRC_VNI])
+		group->vni =
+			cpu_to_be32(nla_get_u32(mdbe_attrs[MDBE_ATTR_SRC_VNI]));
+
+	return 0;
+}
+
+static struct sk_buff *
+vxlan_mdb_get_reply_alloc(const struct vxlan_dev *vxlan,
+			  const struct vxlan_mdb_entry *mdb_entry)
+{
+	struct vxlan_mdb_remote *remote;
+	size_t nlmsg_size;
+
+	nlmsg_size = NLMSG_ALIGN(sizeof(struct br_port_msg)) +
+		     /* MDBA_MDB */
+		     nla_total_size(0) +
+		     /* MDBA_MDB_ENTRY */
+		     nla_total_size(0);
+
+	list_for_each_entry(remote, &mdb_entry->remotes, list)
+		nlmsg_size += vxlan_mdb_nlmsg_remote_size(vxlan, mdb_entry,
+							  remote);
+
+	return nlmsg_new(nlmsg_size, GFP_KERNEL);
+}
+
+static int
+vxlan_mdb_get_reply_fill(const struct vxlan_dev *vxlan,
+			 struct sk_buff *skb,
+			 const struct vxlan_mdb_entry *mdb_entry,
+			 u32 portid, u32 seq)
+{
+	struct nlattr *mdb_nest, *mdb_entry_nest;
+	struct vxlan_mdb_remote *remote;
+	struct br_port_msg *bpm;
+	struct nlmsghdr *nlh;
+	int err;
+
+	nlh = nlmsg_put(skb, portid, seq, RTM_NEWMDB, sizeof(*bpm), 0);
+	if (!nlh)
+		return -EMSGSIZE;
+
+	bpm = nlmsg_data(nlh);
+	memset(bpm, 0, sizeof(*bpm));
+	bpm->family = AF_BRIDGE;
+	bpm->ifindex = vxlan->dev->ifindex;
+	mdb_nest = nla_nest_start_noflag(skb, MDBA_MDB);
+	if (!mdb_nest) {
+		err = -EMSGSIZE;
+		goto cancel;
+	}
+	mdb_entry_nest = nla_nest_start_noflag(skb, MDBA_MDB_ENTRY);
+	if (!mdb_entry_nest) {
+		err = -EMSGSIZE;
+		goto cancel;
+	}
+
+	list_for_each_entry(remote, &mdb_entry->remotes, list) {
+		err = vxlan_mdb_entry_info_fill(vxlan, skb, mdb_entry, remote);
+		if (err)
+			goto cancel;
+	}
+
+	nla_nest_end(skb, mdb_entry_nest);
+	nla_nest_end(skb, mdb_nest);
+	nlmsg_end(skb, nlh);
+
+	return 0;
+
+cancel:
+	nlmsg_cancel(skb, nlh);
+	return err;
+}
+
+int vxlan_mdb_get(struct net_device *dev, struct nlattr *tb[], u32 portid,
+		  u32 seq, struct netlink_ext_ack *extack)
+{
+	struct vxlan_dev *vxlan = netdev_priv(dev);
+	struct vxlan_mdb_entry *mdb_entry;
+	struct vxlan_mdb_entry_key group;
+	struct sk_buff *skb;
+	int err;
+
+	ASSERT_RTNL();
+
+	err = vxlan_mdb_get_parse(dev, tb, &group, extack);
+	if (err)
+		return err;
+
+	mdb_entry = vxlan_mdb_entry_lookup(vxlan, &group);
+	if (!mdb_entry) {
+		NL_SET_ERR_MSG_MOD(extack, "MDB entry not found");
+		return -ENOENT;
+	}
+
+	skb = vxlan_mdb_get_reply_alloc(vxlan, mdb_entry);
+	if (!skb)
+		return -ENOMEM;
+
+	err = vxlan_mdb_get_reply_fill(vxlan, skb, mdb_entry, portid, seq);
+	if (err) {
+		NL_SET_ERR_MSG_MOD(extack, "Failed to fill MDB get reply");
+		goto free;
+	}
+
+	return rtnl_unicast(skb, dev_net(dev), portid);
+
+free:
+	kfree_skb(skb);
+	return err;
+}
+
 struct vxlan_mdb_entry *vxlan_mdb_entry_skb_get(struct vxlan_dev *vxlan,
 						struct sk_buff *skb,
 						__be32 src_vni)
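Note: this hunk only adds the VXLAN-side handler; for RTM_GETMDB requests to reach vxlan_mdb_get(), the callback still has to be registered in the device's net_device_ops. The sketch below illustrates how such a registration typically looks. It is an assumption for context, not part of this diff: the ops structure name is made up, only the MDB-related members are shown, and the exact wiring in the driver may differ.

	/* Illustrative sketch (not from this commit): registering the new
	 * per-entry MDB get handler next to the existing add/del handlers.
	 * rtnetlink dispatches RTM_GETMDB requests for this device to the
	 * .ndo_mdb_get callback.
	 */
	static const struct net_device_ops example_netdev_ops = {
		/* ... other callbacks elided ... */
		.ndo_mdb_add	= vxlan_mdb_add,
		.ndo_mdb_del	= vxlan_mdb_del,
		.ndo_mdb_get	= vxlan_mdb_get,
	};

Once hooked up this way, a per-entry query is dispatched to vxlan_mdb_get(), which parses the requested group key under RTNL, looks up the entry, sizes the reply with vxlan_mdb_get_reply_alloc(), fills it with vxlan_mdb_get_reply_fill() and unicasts it back to the requesting socket.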