Skip to content

Commit 4977134

Browse files
Paolo AbeniNipaLocal
Paolo Abeni
authored and
NipaLocal
committed
udp_tunnel: use static call for GRO hooks when possible
It's quite common to have a single UDP tunnel type active in the whole system. In such a case we can replace the indirect call for the UDP tunnel GRO callback with a static call. Add the related accounting in the control path and switch to static call when possible. To keep the code simple use a static array for the registered tunnel types, and size such array based on the kernel config. Note that there are valid kernel configurations leading to UDP_MAX_TUNNEL_TYPES == 0 even with IS_ENABLED(CONFIG_NET_UDP_TUNNEL). Explicitly skip the accounting in such a case, to avoid a compile warning when accessing "udp_tunnel_gro_types". Signed-off-by: Paolo Abeni <[email protected]> Reviewed-by: Willem de Bruijn <[email protected]> Signed-off-by: NipaLocal <nipa@local>
1 parent 2a2de41 commit 4977134

File tree

2 files changed

+138
-1
lines changed

2 files changed

+138
-1
lines changed

include/net/udp_tunnel.h

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -193,13 +193,16 @@ static inline int udp_tunnel_handle_offloads(struct sk_buff *skb, bool udp_csum)
193193

194194
#if IS_ENABLED(CONFIG_NET_UDP_TUNNEL)
195195
void udp_tunnel_update_gro_lookup(struct net *net, struct sock *sk, bool add);
196+
void udp_tunnel_update_gro_rcv(struct sock *sk, bool add);
196197
#else
197198
static inline void udp_tunnel_update_gro_lookup(struct net *net,
198199
struct sock *sk, bool add) {}
200+
static inline void udp_tunnel_update_gro_rcv(struct sock *sk, bool add) {}
199201
#endif
200202

201203
static inline void udp_tunnel_cleanup_gro(struct sock *sk)
202204
{
205+
udp_tunnel_update_gro_rcv(sk, false);
203206
udp_tunnel_update_gro_lookup(sock_net(sk), sk, false);
204207
}
205208

@@ -212,6 +215,7 @@ static inline void udp_tunnel_encap_enable(struct sock *sk)
212215
if (READ_ONCE(sk->sk_family) == PF_INET6)
213216
ipv6_stub->udpv6_encap_enable();
214217
#endif
218+
udp_tunnel_update_gro_rcv(sk, true);
215219
udp_encap_enable();
216220
}
217221

net/ipv4/udp_offload.c

Lines changed: 134 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -15,6 +15,38 @@
1515
#include <net/udp_tunnel.h>
1616

1717
#if IS_ENABLED(CONFIG_NET_UDP_TUNNEL)
18+
19+
/*
20+
* Dummy GRO tunnel callback, exists mainly to avoid dangling/NULL
21+
* values for the udp tunnel static call.
22+
*/
23+
static struct sk_buff *dummy_gro_rcv(struct sock *sk,
24+
struct list_head *head,
25+
struct sk_buff *skb)
26+
{
27+
NAPI_GRO_CB(skb)->flush = 1;
28+
return NULL;
29+
}
30+
31+
typedef struct sk_buff *(*udp_tunnel_gro_rcv_t)(struct sock *sk,
32+
struct list_head *head,
33+
struct sk_buff *skb);
34+
35+
struct udp_tunnel_type_entry {
36+
udp_tunnel_gro_rcv_t gro_receive;
37+
refcount_t count;
38+
};
39+
40+
#define UDP_MAX_TUNNEL_TYPES (IS_ENABLED(CONFIG_GENEVE) + \
41+
IS_ENABLED(CONFIG_VXLAN) * 2 + \
42+
IS_ENABLED(CONFIG_NET_FOU) * 2 + \
43+
IS_ENABLED(CONFIG_XFRM) * 2)
44+
45+
DEFINE_STATIC_CALL(udp_tunnel_gro_rcv, dummy_gro_rcv);
46+
static DEFINE_STATIC_KEY_FALSE(udp_tunnel_static_call);
47+
static struct mutex udp_tunnel_gro_type_lock;
48+
static struct udp_tunnel_type_entry udp_tunnel_gro_types[UDP_MAX_TUNNEL_TYPES];
49+
static unsigned int udp_tunnel_gro_type_nr;
1850
static DEFINE_SPINLOCK(udp_tunnel_gro_lock);
1951

2052
void udp_tunnel_update_gro_lookup(struct net *net, struct sock *sk, bool add)
@@ -43,6 +75,105 @@ void udp_tunnel_update_gro_lookup(struct net *net, struct sock *sk, bool add)
4375
spin_unlock(&udp_tunnel_gro_lock);
4476
}
4577
EXPORT_SYMBOL_GPL(udp_tunnel_update_gro_lookup);
78+
79+
/*
 * Track the tunnel gro_receive callback attached to @sk. When exactly
 * one tunnel type is registered system-wide, route GRO through a static
 * call to it; otherwise fall back to the indirect call path.
 */
void udp_tunnel_update_gro_rcv(struct sock *sk, bool add)
{
	struct udp_tunnel_type_entry *entry = NULL;
	struct udp_sock *up = udp_sk(sk);
	int i, prev_nr;

	/* No slots compiled in, or no tunnel callback: nothing to track. */
	if (!UDP_MAX_TUNNEL_TYPES || !up->gro_receive)
		return;

	mutex_lock(&udp_tunnel_gro_type_lock);

	/* A past overflow permanently disabled the static call. */
	if (udp_tunnel_gro_type_nr > UDP_MAX_TUNNEL_TYPES)
		goto out;

	/* Entries are unique per callback, so the first match is the one. */
	for (i = 0; i < udp_tunnel_gro_type_nr; i++) {
		if (udp_tunnel_gro_types[i].gro_receive == up->gro_receive) {
			entry = &udp_tunnel_gro_types[i];
			break;
		}
	}

	prev_nr = udp_tunnel_gro_type_nr;
	if (add) {
		/* Known tunnel type: just bump its usage count. */
		if (entry) {
			refcount_inc(&entry->count);
			goto out;
		}

		if (unlikely(udp_tunnel_gro_type_nr == UDP_MAX_TUNNEL_TYPES)) {
			pr_err_once("Too many UDP tunnel types, please increase UDP_MAX_TUNNEL_TYPES\n");
			/* Poison the counter so the static call never enables. */
			udp_tunnel_gro_type_nr = UDP_MAX_TUNNEL_TYPES + 1;
		} else {
			entry = &udp_tunnel_gro_types[udp_tunnel_gro_type_nr++];
			refcount_set(&entry->count, 1);
			entry->gro_receive = up->gro_receive;
		}
	} else {
		/*
		 * The stack only cleans up tunnels it successfully added,
		 * so the lookup on removal should never fail.
		 */
		if (WARN_ON_ONCE(!entry))
			goto out;

		if (!refcount_dec_and_test(&entry->count))
			goto out;

		/* Keep the array gapless so the enabled tunnel always has id 0. */
		*entry = udp_tunnel_gro_types[--udp_tunnel_gro_type_nr];
	}

	/* Toggle the static call on transitions to/from a single type. */
	if (udp_tunnel_gro_type_nr == 1) {
		static_call_update(udp_tunnel_gro_rcv,
				   udp_tunnel_gro_types[0].gro_receive);
		static_branch_enable(&udp_tunnel_static_call);
	} else if (prev_nr == 1) {
		static_branch_disable(&udp_tunnel_static_call);
		static_call_update(udp_tunnel_gro_rcv, dummy_gro_rcv);
	}

out:
	mutex_unlock(&udp_tunnel_gro_type_lock);
}
EXPORT_SYMBOL_GPL(udp_tunnel_update_gro_rcv);
146+
147+
static void udp_tunnel_gro_init(void)
148+
{
149+
mutex_init(&udp_tunnel_gro_type_lock);
150+
}
151+
152+
static struct sk_buff *udp_tunnel_gro_rcv(struct sock *sk,
153+
struct list_head *head,
154+
struct sk_buff *skb)
155+
{
156+
if (static_branch_likely(&udp_tunnel_static_call)) {
157+
if (unlikely(gro_recursion_inc_test(skb))) {
158+
NAPI_GRO_CB(skb)->flush |= 1;
159+
return NULL;
160+
}
161+
return static_call(udp_tunnel_gro_rcv)(sk, head, skb);
162+
}
163+
return call_gro_receive_sk(udp_sk(sk)->gro_receive, sk, head, skb);
164+
}
165+
166+
#else
167+
168+
static void udp_tunnel_gro_init(void) {}
169+
170+
static struct sk_buff *udp_tunnel_gro_rcv(struct sock *sk,
171+
struct list_head *head,
172+
struct sk_buff *skb)
173+
{
174+
return call_gro_receive_sk(udp_sk(sk)->gro_receive, sk, head, skb);
175+
}
176+
46177
#endif
47178

48179
static struct sk_buff *__skb_udp_tunnel_segment(struct sk_buff *skb,
@@ -654,7 +785,7 @@ struct sk_buff *udp_gro_receive(struct list_head *head, struct sk_buff *skb,
654785

655786
skb_gro_pull(skb, sizeof(struct udphdr)); /* pull encapsulating udp header */
656787
skb_gro_postpull_rcsum(skb, uh, sizeof(struct udphdr));
657-
pp = call_gro_receive_sk(udp_sk(sk)->gro_receive, sk, head, skb);
788+
pp = udp_tunnel_gro_rcv(sk, head, skb);
658789

659790
out:
660791
skb_gro_flush_final(skb, pp, flush);
@@ -804,5 +935,7 @@ int __init udpv4_offload_init(void)
804935
.gro_complete = udp4_gro_complete,
805936
},
806937
};
938+
939+
udp_tunnel_gro_init();
807940
return inet_add_offload(&net_hotdata.udpv4_offload, IPPROTO_UDP);
808941
}

0 commit comments

Comments
 (0)