Commit 39285e1

Taehee Yoo authored and davem330 committed
net: team: do not use dynamic lockdep key
The team interface has used a dynamic lockdep key to avoid false-positive lockdep deadlock detection. Virtual interfaces such as team usually have their own lock for protecting private data, and these interfaces can be nested:

team0
  |
team1

Each interface's lock is actually different (team0->lock and team1->lock), so the following sequence is perfectly safe:

mutex_lock(&team0->lock);
mutex_lock(&team1->lock);
mutex_unlock(&team1->lock);
mutex_unlock(&team0->lock);

But lockdep warns about a deadlock, because it treats the two locks as the same lock class. This is a false-positive lockdep warning. To avoid it, the team interface started to use a dynamic lockdep key.

That fixed the false positive but introduced a new problem. Whenever a new team interface is created, it registers (creates) a dynamic lockdep key and uses it. The number of lockdep keys is limited, so if enough team interfaces are created, all lockdep keys are consumed; lockdep then stops working and warns about it.

To fix this, team interfaces now use a subclass instead of a dynamic key. When a new team interface is created, it does not register (create) a new lockdep key, but uses a subclass of an existing key instead. The bonding interface already does this for a similar case. As in bonding, the subclass value is 'dev->nested_level', which indicates the depth of the device in the stacked interface graph.

'dev->nested_level' is protected by RTNL and RCU, so 'mutex_lock_nested()' on 'team->lock' requires RTNL or RCU. In the current code 'team->lock' is usually acquired under RTNL, so there is no problem with using 'dev->nested_level'. The 'team_nl_team_get()' and 'lb_stats_refresh()' functions acquire 'team->lock' without RTNL, but they do not iterate their own ports in a nested way, so they do not need the nested lock.

Reproducer:

for i in {0..1000}
do
	ip link add team$i type team
	ip link add dummy$i master team$i type dummy
	ip link set dummy$i up
	ip link set team$i up
done

Splat looks like:
BUG: MAX_LOCKDEP_ENTRIES too low!
turning off the locking correctness validator.
Please attach the output of /proc/lock_stat to the bug report
CPU: 0 PID: 4104 Comm: ip Not tainted 6.5.0-rc7+ #45
Call Trace:
 <TASK>
 dump_stack_lvl+0x64/0xb0
 add_lock_to_list+0x30d/0x5e0
 check_prev_add+0x73a/0x23a0
 ...
 sock_def_readable+0xfe/0x4f0
 netlink_broadcast+0x76b/0xac0
 nlmsg_notify+0x69/0x1d0
 dev_open+0xed/0x130
 ...

Reported-by: [email protected]
Fixes: 369f61b ("team: fix nested locking lockdep warning")
Signed-off-by: Taehee Yoo <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
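For illustration, a minimal sketch of the locking scheme described above. It is not taken from the patch: the demo_* names and the standalone struct are stand-ins; only mutex_lock_nested(), ASSERT_RTNL() and dev->nested_level are the real kernel facilities the change relies on, and the CONFIG_LOCKDEP split mirrors the new team_lock() helper.

/* Sketch: all instances share one (static) lock class; the device's depth
 * in the stacking graph is passed as the lockdep subclass, so locking an
 * upper and then a lower device no longer looks recursive to lockdep.
 * Assumes mutex_init(&team->lock) was done at init time, as team_init()
 * now does.
 */
#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>

struct demo_team {			/* hypothetical stand-in for struct team */
	struct net_device *dev;
	struct mutex lock;
};

static void demo_team_lock(struct demo_team *team)
{
#ifdef CONFIG_LOCKDEP
	ASSERT_RTNL();			/* dev->nested_level is stable under RTNL */
	mutex_lock_nested(&team->lock, team->dev->nested_level);
#else
	mutex_lock(&team->lock);
#endif
}

static void demo_nested(struct demo_team *team0, struct demo_team *team1)
{
	/* team0 is stacked on top of team1, as in the commit message */
	demo_team_lock(team0);		/* e.g. nested_level 1 */
	demo_team_lock(team1);		/* e.g. nested_level 0: a different subclass */
	mutex_unlock(&team1->lock);
	mutex_unlock(&team0->lock);
}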
1 parent a5e2151 commit 39285e1

File tree: 3 files changed (+85, -60 lines)


drivers/net/team/team.c

Lines changed: 54 additions & 57 deletions
@@ -1135,8 +1135,8 @@ static int team_port_add(struct team *team, struct net_device *port_dev,
 			 struct netlink_ext_ack *extack)
 {
 	struct net_device *dev = team->dev;
-	struct team_port *port;
 	char *portname = port_dev->name;
+	struct team_port *port;
 	int err;

 	if (port_dev->flags & IFF_LOOPBACK) {
@@ -1203,18 +1203,31 @@ static int team_port_add(struct team *team, struct net_device *port_dev,

 	memcpy(port->orig.dev_addr, port_dev->dev_addr, port_dev->addr_len);

-	err = team_port_enter(team, port);
+	err = dev_open(port_dev, extack);
 	if (err) {
-		netdev_err(dev, "Device %s failed to enter team mode\n",
+		netdev_dbg(dev, "Device %s opening failed\n",
 			   portname);
-		goto err_port_enter;
+		goto err_dev_open;
 	}

-	err = dev_open(port_dev, extack);
+	err = team_upper_dev_link(team, port, extack);
 	if (err) {
-		netdev_dbg(dev, "Device %s opening failed\n",
+		netdev_err(dev, "Device %s failed to set upper link\n",
 			   portname);
-		goto err_dev_open;
+		goto err_set_upper_link;
+	}
+
+	/* lockdep subclass variable(dev->nested_level) was updated by
+	 * team_upper_dev_link().
+	 */
+	team_unlock(team);
+	team_lock(team);
+
+	err = team_port_enter(team, port);
+	if (err) {
+		netdev_err(dev, "Device %s failed to enter team mode\n",
+			   portname);
+		goto err_port_enter;
 	}

 	err = vlan_vids_add_by_dev(port_dev, dev);
@@ -1242,13 +1255,6 @@ static int team_port_add(struct team *team, struct net_device *port_dev,
 		goto err_handler_register;
 	}

-	err = team_upper_dev_link(team, port, extack);
-	if (err) {
-		netdev_err(dev, "Device %s failed to set upper link\n",
-			   portname);
-		goto err_set_upper_link;
-	}
-
 	err = __team_option_inst_add_port(team, port);
 	if (err) {
 		netdev_err(dev, "Device %s failed to add per-port options\n",
@@ -1295,9 +1301,6 @@ static int team_port_add(struct team *team, struct net_device *port_dev,
 	__team_option_inst_del_port(team, port);

 err_option_port_add:
-	team_upper_dev_unlink(team, port);
-
-err_set_upper_link:
 	netdev_rx_handler_unregister(port_dev);

 err_handler_register:
@@ -1307,13 +1310,16 @@ static int team_port_add(struct team *team, struct net_device *port_dev,
 	vlan_vids_del_by_dev(port_dev, dev);

 err_vids_add:
+	team_port_leave(team, port);
+
+err_port_enter:
+	team_upper_dev_unlink(team, port);
+
+err_set_upper_link:
 	dev_close(port_dev);

 err_dev_open:
-	team_port_leave(team, port);
 	team_port_set_orig_dev_addr(port);
-
-err_port_enter:
 	dev_set_mtu(port_dev, port->orig.mtu);

 err_set_mtu:
@@ -1616,6 +1622,7 @@ static int team_init(struct net_device *dev)
 	int err;

 	team->dev = dev;
+	mutex_init(&team->lock);
 	team_set_no_mode(team);
 	team->notifier_ctx = false;

@@ -1643,8 +1650,6 @@ static int team_init(struct net_device *dev)
 		goto err_options_register;
 	netif_carrier_off(dev);

-	lockdep_register_key(&team->team_lock_key);
-	__mutex_init(&team->lock, "team->team_lock_key", &team->team_lock_key);
 	netdev_lockdep_set_classes(dev);

 	return 0;
@@ -1665,7 +1670,7 @@ static void team_uninit(struct net_device *dev)
 	struct team_port *port;
 	struct team_port *tmp;

-	mutex_lock(&team->lock);
+	team_lock(team);
 	list_for_each_entry_safe(port, tmp, &team->port_list, list)
 		team_port_del(team, port->dev);

@@ -1674,9 +1679,8 @@ static void team_uninit(struct net_device *dev)
 	team_mcast_rejoin_fini(team);
 	team_notify_peers_fini(team);
 	team_queue_override_fini(team);
-	mutex_unlock(&team->lock);
+	team_unlock(team);
 	netdev_change_features(dev);
-	lockdep_unregister_key(&team->team_lock_key);
 }

 static void team_destructor(struct net_device *dev)
@@ -1790,18 +1794,18 @@ static void team_set_rx_mode(struct net_device *dev)

 static int team_set_mac_address(struct net_device *dev, void *p)
 {
-	struct sockaddr *addr = p;
 	struct team *team = netdev_priv(dev);
+	struct sockaddr *addr = p;
 	struct team_port *port;

 	if (dev->type == ARPHRD_ETHER && !is_valid_ether_addr(addr->sa_data))
 		return -EADDRNOTAVAIL;
 	dev_addr_set(dev, addr->sa_data);
-	mutex_lock(&team->lock);
+	team_lock(team);
 	list_for_each_entry(port, &team->port_list, list)
 		if (team->ops.port_change_dev_addr)
 			team->ops.port_change_dev_addr(team, port);
-	mutex_unlock(&team->lock);
+	team_unlock(team);
 	return 0;
 }

@@ -1815,7 +1819,7 @@ static int team_change_mtu(struct net_device *dev, int new_mtu)
 	 * Alhough this is reader, it's guarded by team lock. It's not possible
 	 * to traverse list in reverse under rcu_read_lock
 	 */
-	mutex_lock(&team->lock);
+	team_lock(team);
 	team->port_mtu_change_allowed = true;
 	list_for_each_entry(port, &team->port_list, list) {
 		err = dev_set_mtu(port->dev, new_mtu);
@@ -1826,7 +1830,7 @@ static int team_change_mtu(struct net_device *dev, int new_mtu)
 		}
 	}
 	team->port_mtu_change_allowed = false;
-	mutex_unlock(&team->lock);
+	team_unlock(team);

 	dev->mtu = new_mtu;

@@ -1836,7 +1840,7 @@ static int team_change_mtu(struct net_device *dev, int new_mtu)
 	list_for_each_entry_continue_reverse(port, &team->port_list, list)
 		dev_set_mtu(port->dev, dev->mtu);
 	team->port_mtu_change_allowed = false;
-	mutex_unlock(&team->lock);
+	team_unlock(team);

 	return err;
 }
@@ -1890,20 +1894,20 @@ static int team_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
 	 * Alhough this is reader, it's guarded by team lock. It's not possible
 	 * to traverse list in reverse under rcu_read_lock
 	 */
-	mutex_lock(&team->lock);
+	team_lock(team);
 	list_for_each_entry(port, &team->port_list, list) {
 		err = vlan_vid_add(port->dev, proto, vid);
 		if (err)
 			goto unwind;
 	}
-	mutex_unlock(&team->lock);
+	team_unlock(team);

 	return 0;

 unwind:
 	list_for_each_entry_continue_reverse(port, &team->port_list, list)
 		vlan_vid_del(port->dev, proto, vid);
-	mutex_unlock(&team->lock);
+	team_unlock(team);

 	return err;
 }
@@ -1913,10 +1917,10 @@ static int team_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
 	struct team *team = netdev_priv(dev);
 	struct team_port *port;

-	mutex_lock(&team->lock);
+	team_lock(team);
 	list_for_each_entry(port, &team->port_list, list)
 		vlan_vid_del(port->dev, proto, vid);
-	mutex_unlock(&team->lock);
+	team_unlock(team);

 	return 0;
 }
@@ -1938,9 +1942,9 @@ static void team_netpoll_cleanup(struct net_device *dev)
 {
 	struct team *team = netdev_priv(dev);

-	mutex_lock(&team->lock);
+	team_lock(team);
 	__team_netpoll_cleanup(team);
-	mutex_unlock(&team->lock);
+	team_unlock(team);
 }

 static int team_netpoll_setup(struct net_device *dev,
@@ -1950,15 +1954,15 @@ static int team_netpoll_setup(struct net_device *dev,
 	struct team_port *port;
 	int err = 0;

-	mutex_lock(&team->lock);
+	team_lock(team);
 	list_for_each_entry(port, &team->port_list, list) {
 		err = __team_port_enable_netpoll(port);
 		if (err) {
 			__team_netpoll_cleanup(team);
 			break;
 		}
 	}
-	mutex_unlock(&team->lock);
+	team_unlock(team);
 	return err;
 }
 #endif
@@ -1969,9 +1973,9 @@ static int team_add_slave(struct net_device *dev, struct net_device *port_dev,
 	struct team *team = netdev_priv(dev);
 	int err;

-	mutex_lock(&team->lock);
+	team_lock(team);
 	err = team_port_add(team, port_dev, extack);
-	mutex_unlock(&team->lock);
+	team_unlock(team);

 	if (!err)
 		netdev_change_features(dev);
@@ -1984,19 +1988,12 @@ static int team_del_slave(struct net_device *dev, struct net_device *port_dev)
 	struct team *team = netdev_priv(dev);
 	int err;

-	mutex_lock(&team->lock);
+	team_lock(team);
 	err = team_port_del(team, port_dev);
-	mutex_unlock(&team->lock);
-
-	if (err)
-		return err;
+	team_unlock(team);

-	if (netif_is_team_master(port_dev)) {
-		lockdep_unregister_key(&team->team_lock_key);
-		lockdep_register_key(&team->team_lock_key);
-		lockdep_set_class(&team->lock, &team->team_lock_key);
-	}
-	netdev_change_features(dev);
+	if (!err)
+		netdev_change_features(dev);

 	return err;
 }
@@ -2316,13 +2313,13 @@ static struct team *team_nl_team_get(struct genl_info *info)
 	}

 	team = netdev_priv(dev);
-	mutex_lock(&team->lock);
+	__team_lock(team);
 	return team;
 }

 static void team_nl_team_put(struct team *team)
 {
-	mutex_unlock(&team->lock);
+	team_unlock(team);
 	dev_put(team->dev);
 }

@@ -2984,9 +2981,9 @@ static void team_port_change_check(struct team_port *port, bool linkup)
 {
 	struct team *team = port->team;

-	mutex_lock(&team->lock);
+	team_lock(team);
 	__team_port_change_check(port, linkup);
-	mutex_unlock(&team->lock);
+	team_unlock(team);
 }

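One detail of the team_port_add() reordering above deserves a note: team_upper_dev_link() is what updates dev->nested_level, and the mutex held at that point was taken with the old value as its lockdep subclass, so (per the comment in the diff) the lock is dropped and re-taken before team_port_enter() runs. A rough sketch of the resulting ordering, written as a comment; only the function names that appear in the diff are real:

/*
 * team_add_slave()
 *   team_lock(team);                  subclass = current dev->nested_level
 *   team_port_add()
 *     dev_open(port_dev, extack);
 *     team_upper_dev_link(team, port, extack);   updates dev->nested_level
 *     team_unlock(team);
 *     team_lock(team);                re-acquired with the updated subclass
 *     team_port_enter(team, port);
 *     ... vlan vids, rx handler, per-port options ...
 *   team_unlock(team);
 */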

drivers/net/team/team_mode_loadbalance.c

Lines changed: 2 additions & 2 deletions
@@ -478,7 +478,7 @@ static void lb_stats_refresh(struct work_struct *work)
 	team = lb_priv_ex->team;
 	lb_priv = get_lb_priv(team);

-	if (!mutex_trylock(&team->lock)) {
+	if (!team_trylock(team)) {
 		schedule_delayed_work(&lb_priv_ex->stats.refresh_dw, 0);
 		return;
 	}
@@ -515,7 +515,7 @@ static void lb_stats_refresh(struct work_struct *work)
 	schedule_delayed_work(&lb_priv_ex->stats.refresh_dw,
 			      (lb_priv_ex->stats.refresh_interval * HZ) / 10);

-	mutex_unlock(&team->lock);
+	team_unlock(team);
 }

 static void lb_stats_refresh_interval_get(struct team *team,

include/linux/if_team.h

Lines changed: 29 additions & 1 deletion
@@ -221,10 +221,38 @@ struct team {
 		atomic_t count_pending;
 		struct delayed_work dw;
 	} mcast_rejoin;
-	struct lock_class_key team_lock_key;
 	long mode_priv[TEAM_MODE_PRIV_LONGS];
 };

+static inline void __team_lock(struct team *team)
+{
+	mutex_lock(&team->lock);
+}
+
+static inline int team_trylock(struct team *team)
+{
+	return mutex_trylock(&team->lock);
+}
+
+#ifdef CONFIG_LOCKDEP
+static inline void team_lock(struct team *team)
+{
+	ASSERT_RTNL();
+	mutex_lock_nested(&team->lock, team->dev->nested_level);
+}
+
+#else
+static inline void team_lock(struct team *team)
+{
+	__team_lock(team);
+}
+#endif
+
+static inline void team_unlock(struct team *team)
+{
+	mutex_unlock(&team->lock);
+}
+
 static inline int team_dev_queue_xmit(struct team *team, struct team_port *port,
 				      struct sk_buff *skb)
 {
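As a usage note for the helpers above (a sketch; the demo_* callers are hypothetical, the helpers are the ones added by this patch): paths that run under RTNL and may touch nested devices take team_lock()/team_unlock(), while the two non-RTNL paths, which never iterate ports in a nested way, take the plain mutex.

#include <linux/if_team.h>

/* Hedged sketch of which helper fits which context. */
static void demo_rtnl_path(struct team *team)
{
	team_lock(team);	/* under RTNL; subclass follows dev->nested_level */
	/* ... walk team->port_list; port devices may themselves be team devices ... */
	team_unlock(team);
}

static void demo_genl_path(struct team *team)
{
	__team_lock(team);	/* no RTNL here, but no nested iteration either */
	/* ... */
	team_unlock(team);
}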
