@@ -52,11 +52,6 @@ static struct socket *__mptcp_nmpc_socket(const struct mptcp_sock *msk)
 	return msk->subflow;
 }
 
-static bool __mptcp_needs_tcp_fallback(const struct mptcp_sock *msk)
-{
-	return msk->first && !sk_is_mptcp(msk->first);
-}
-
 static struct socket *mptcp_is_tcpsk(struct sock *sk)
 {
 	struct socket *sock = sk->sk_socket;
@@ -94,7 +89,7 @@ static struct socket *__mptcp_tcp_fallback(struct mptcp_sock *msk)
 	if (unlikely(sock))
 		return sock;
 
-	if (likely(!__mptcp_needs_tcp_fallback(msk)))
+	if (likely(!__mptcp_check_fallback(msk)))
 		return NULL;
 
 	return msk->subflow;
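The old helper guessed fallback per call from the state of the first subflow; __mptcp_check_fallback() instead reads state that is latched once, when fallback actually happens. Its definition is outside this diff; a minimal sketch, assuming the series adds an MPTCP_FALLBACK_DONE bit to msk->flags:

/* Sketch only: assumes an MPTCP_FALLBACK_DONE flag bit in protocol.h.
 * The bit is set once and never cleared, so the test needs no socket
 * lock and is safe from any context.
 */
static inline bool __mptcp_check_fallback(const struct mptcp_sock *msk)
{
	return test_bit(MPTCP_FALLBACK_DONE, &msk->flags);
}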
@@ -133,6 +128,11 @@ static struct socket *__mptcp_socket_create(struct mptcp_sock *msk, int state)
 	list_add(&subflow->node, &msk->conn_list);
 	subflow->request_mptcp = 1;
 
+	/* accept() will wait on first subflow sk_wq, and we always wake up
+	 * via msk->sk_socket
+	 */
+	RCU_INIT_POINTER(msk->first->sk_wq, &sk->sk_socket->wq);
+
 set_state:
 	if (state != MPTCP_SAME_STATE)
 		inet_sk_state_store(sk, state);
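Why the sk_wq override works: a task blocking in accept() on the first subflow sleeps on whatever its sk->sk_wq points to, so redirecting that pointer makes wakeups on the MPTCP socket's queue reach it. Simplified from include/net/sock.h:

/* Simplified from include/net/sock.h: sk_sleep() resolves through
 * sk->sk_wq, so after the RCU_INIT_POINTER() above a waiter on the
 * first subflow actually sleeps on the MPTCP socket's wait queue.
 */
static inline wait_queue_head_t *sk_sleep(struct sock *sk)
{
	return &rcu_dereference_raw(sk->sk_wq)->wait;
}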
@@ -229,6 +229,15 @@ static bool __mptcp_move_skbs_from_subflow(struct mptcp_sock *msk,
 		if (!skb)
 			break;
 
+		if (__mptcp_check_fallback(msk)) {
+			/* if we are running under the workqueue, TCP could have
+			 * collapsed skbs between dummy map creation and now;
+			 * be sure to adjust the size
+			 */
+			map_remaining = skb->len;
+			subflow->map_data_len = skb->len;
+		}
+
 		offset = seq - TCP_SKB_CB(skb)->seq;
 		fin = TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN;
 		if (fin) {
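Context for the size fix-up: on fallback there is no DSS option on the wire, so the subflow presumably fabricates a "dummy" mapping covering exactly the skb at the head of the receive queue. A hypothetical sketch of that idea (the real code would live in subflow.c, which this diff does not show; field names follow struct mptcp_subflow_context):

/* Hypothetical sketch: fabricate a mapping covering one skb. If TCP
 * later collapses skbs, skb->len grows, which is why the hunk above
 * re-reads the length instead of trusting the stored map_data_len.
 */
static void fallback_dummy_map(struct mptcp_subflow_context *subflow,
			       const struct sk_buff *skb, u64 next_data_seq)
{
	subflow->map_seq = next_data_seq;	/* next in-order data seq */
	subflow->map_data_len = skb->len;	/* may be stale after collapse */
	subflow->map_valid = 1;
}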
@@ -466,8 +475,15 @@ static void mptcp_clean_una(struct sock *sk)
 {
 	struct mptcp_sock *msk = mptcp_sk(sk);
 	struct mptcp_data_frag *dtmp, *dfrag;
-	u64 snd_una = atomic64_read(&msk->snd_una);
 	bool cleaned = false;
+	u64 snd_una;
+
+	/* on fallback we just need to ignore snd_una, as this is really
+	 * plain TCP
+	 */
+	if (__mptcp_check_fallback(msk))
+		atomic64_set(&msk->snd_una, msk->write_seq);
+	snd_una = atomic64_read(&msk->snd_una);
 
 	list_for_each_entry_safe(dfrag, dtmp, &msk->rtx_queue, list) {
 		if (after64(dfrag->data_seq + dfrag->data_len, snd_una))
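The effect of pinning snd_una to write_seq: with the two equal, every queued fragment fails the after64() test below, so the whole rtx_queue is reclaimed. That is correct on fallback, because the single plain-TCP subflow already owns retransmissions. A minimal model of the per-fragment check:

/* Minimal model of the check in the loop above: with
 * snd_una == write_seq, data_seq + data_len never exceeds snd_una for
 * queued data, so no fragment survives the cleanup.
 */
static bool dfrag_unacked(const struct mptcp_data_frag *dfrag, u64 snd_una)
{
	return after64(dfrag->data_seq + dfrag->data_len, snd_una);
}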
@@ -740,7 +756,6 @@ static int mptcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
 	int mss_now = 0, size_goal = 0, ret = 0;
 	struct mptcp_sock *msk = mptcp_sk(sk);
 	struct page_frag *pfrag;
-	struct socket *ssock;
 	size_t copied = 0;
 	struct sock *ssk;
 	bool tx_ok;
@@ -759,15 +774,6 @@ static int mptcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
 		goto out;
 	}
 
-fallback:
-	ssock = __mptcp_tcp_fallback(msk);
-	if (unlikely(ssock)) {
-		release_sock(sk);
-		pr_debug("fallback passthrough");
-		ret = sock_sendmsg(ssock, msg);
-		return ret >= 0 ? ret + copied : (copied ? copied : ret);
-	}
-
 	pfrag = sk_page_frag(sk);
restart:
 	mptcp_clean_una(sk);
@@ -819,17 +825,6 @@ static int mptcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
 			}
 			break;
 		}
-		if (ret == 0 && unlikely(__mptcp_needs_tcp_fallback(msk))) {
-			/* Can happen for passive sockets:
-			 * 3WHS negotiated MPTCP, but first packet after is
-			 * plain TCP (e.g. due to middlebox filtering unknown
-			 * options).
-			 *
-			 * Fall back to TCP.
-			 */
-			release_sock(ssk);
-			goto fallback;
-		}
 
 		copied += ret;
 
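With the passive-fallback branch gone from the send path, something must still latch the fallback state when a passive socket turns out to be plain TCP. A plausible counterpart to __mptcp_check_fallback(), under the same flag-bit assumption as the earlier sketch:

/* Sketch only, same MPTCP_FALLBACK_DONE assumption as above: called
 * once from the subflow code when MPTCP negotiation fails, after which
 * every __mptcp_check_fallback() caller takes the plain-TCP path.
 */
static inline void __mptcp_do_fallback(struct mptcp_sock *msk)
{
	if (test_bit(MPTCP_FALLBACK_DONE, &msk->flags))
		return;
	set_bit(MPTCP_FALLBACK_DONE, &msk->flags);
}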
@@ -972,7 +967,6 @@ static int mptcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
 			 int nonblock, int flags, int *addr_len)
 {
 	struct mptcp_sock *msk = mptcp_sk(sk);
-	struct socket *ssock;
 	int copied = 0;
 	int target;
 	long timeo;
@@ -981,16 +975,6 @@ static int mptcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
 		return -EOPNOTSUPP;
 
 	lock_sock(sk);
-	ssock = __mptcp_tcp_fallback(msk);
-	if (unlikely(ssock)) {
-fallback:
-		release_sock(sk);
-		pr_debug("fallback-read subflow=%p",
-			 mptcp_subflow_ctx(ssock->sk));
-		copied = sock_recvmsg(ssock, msg, flags);
-		return copied;
-	}
-
 	timeo = sock_rcvtimeo(sk, nonblock);
 
 	len = min_t(size_t, len, INT_MAX);
@@ -1056,9 +1040,6 @@ static int mptcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
 
 		pr_debug("block timeout %ld", timeo);
 		mptcp_wait_data(sk, &timeo);
-		ssock = __mptcp_tcp_fallback(msk);
-		if (unlikely(ssock))
-			goto fallback;
 	}
 
 	if (skb_queue_empty(&sk->sk_receive_queue)) {
@@ -1335,8 +1316,6 @@ static void mptcp_subflow_shutdown(struct sock *ssk, int how,
 		break;
 	}
 
-	/* Wake up anyone sleeping in poll. */
-	ssk->sk_state_change(ssk);
-
 	release_sock(ssk);
 }
 
@@ -1660,12 +1639,6 @@ void mptcp_finish_connect(struct sock *ssk)
 	sk = subflow->conn;
 	msk = mptcp_sk(sk);
 
-	if (!subflow->mp_capable) {
-		MPTCP_INC_STATS(sock_net(sk),
-				MPTCP_MIB_MPCAPABLEACTIVEFALLBACK);
-		return;
-	}
-
 	pr_debug("msk=%p, token=%u", sk, subflow->token);
 
 	mptcp_crypto_key_sha(subflow->remote_key, NULL, &ack_seq);
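The MPCAPABLEACTIVEFALLBACK accounting does not simply disappear with this hunk; presumably it moves next to the point where fallback is latched, e.g. where the active handshake result is first known in subflow.c. A hypothetical sketch of that placement:

/* Hypothetical placement: bump the MIB counter and latch fallback where
 * the active handshake outcome is decided, instead of in
 * mptcp_finish_connect(). __mptcp_do_fallback() is the earlier sketch.
 */
static void subflow_fallback_active(struct sock *ssk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	struct mptcp_sock *msk = mptcp_sk(subflow->conn);

	MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_MPCAPABLEACTIVEFALLBACK);
	__mptcp_do_fallback(msk);
}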
@@ -1971,23 +1944,10 @@ static __poll_t mptcp_poll(struct file *file, struct socket *sock,
 {
 	struct sock *sk = sock->sk;
 	struct mptcp_sock *msk;
-	struct socket *ssock;
 	__poll_t mask = 0;
 
 	msk = mptcp_sk(sk);
-	lock_sock(sk);
-	ssock = __mptcp_tcp_fallback(msk);
-	if (!ssock)
-		ssock = __mptcp_nmpc_socket(msk);
-	if (ssock) {
-		mask = ssock->ops->poll(file, ssock, wait);
-		release_sock(sk);
-		return mask;
-	}
-
-	release_sock(sk);
 	sock_poll_wait(file, sock, wait);
-	lock_sock(sk);
 
 	if (test_bit(MPTCP_DATA_READY, &msk->flags))
 		mask = EPOLLIN | EPOLLRDNORM;
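Dropping lock_sock() from poll is safe because the wait-queue registration itself provides the ordering: sock_poll_wait() adds the waiter before the flag tests, with a barrier pairing against the wakeup side. Roughly as in include/net/sock.h of this era:

/* Roughly as in include/net/sock.h: register on the socket's wait queue
 * first; the barrier pairs with skwq_has_sleeper() on the wakeup side,
 * so a wakeup racing with the flag tests above is never lost.
 */
static inline void sock_poll_wait(struct file *filp, struct socket *sock,
				  poll_table *p)
{
	if (!poll_does_not_wait(p)) {
		poll_wait(filp, &sock->wq.wait, p);
		smp_mb();
	}
}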
@@ -1997,27 +1957,18 @@ static __poll_t mptcp_poll(struct file *file, struct socket *sock,
 	if (sk->sk_shutdown & RCV_SHUTDOWN)
 		mask |= EPOLLIN | EPOLLRDNORM | EPOLLRDHUP;
 
-	release_sock(sk);
-
 	return mask;
 }
 
 static int mptcp_shutdown(struct socket *sock, int how)
 {
 	struct mptcp_sock *msk = mptcp_sk(sock->sk);
 	struct mptcp_subflow_context *subflow;
-	struct socket *ssock;
 	int ret = 0;
 
 	pr_debug("sk=%p, how=%d", msk, how);
 
 	lock_sock(sock->sk);
-	ssock = __mptcp_tcp_fallback(msk);
-	if (ssock) {
-		release_sock(sock->sk);
-		return inet_shutdown(ssock, how);
-	}
-
 	if (how == SHUT_WR || how == SHUT_RDWR)
 		inet_sk_state_store(sock->sk, TCP_FIN_WAIT1);
 
@@ -2043,6 +1994,9 @@ static int mptcp_shutdown(struct socket *sock, int how)
 		mptcp_subflow_shutdown(tcp_sk, how, 1, msk->write_seq);
 	}
 
+	/* Wake up anyone sleeping in poll. */
+	sock->sk->sk_state_change(sock->sk);
+
 out_unlock:
 	release_sock(sock->sk);
 
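Moving the wakeup from per-subflow shutdown to mptcp_shutdown() matches the poll rework: pollers now sleep only on the msk socket's wait queue, so the state-change notification must fire on that socket. For reference, the stock sk_state_change handler just wakes that queue (sock_def_wakeup() in net/core/sock.c):

/* Default sk_state_change handler, as in net/core/sock.c: wake every
 * sleeper on the socket's wait queue under the RCU read lock.
 */
static void sock_def_wakeup(struct sock *sk)
{
	struct socket_wq *wq;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (skwq_has_sleeper(wq))
		wake_up_interruptible_all(&wq->wait);
	rcu_read_unlock();
}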