mptcp: do not queue excessive data on subflows
author	Paolo Abeni <pabeni@redhat.com>
Wed, 20 Jan 2021 14:39:12 +0000 (15:39 +0100)
committer	Jakub Kicinski <kuba@kernel.org>
Sat, 23 Jan 2021 03:21:02 +0000 (19:21 -0800)
The current packet scheduler can enqueue up to sndbuf
data on each subflow. If the send buffer is large and
the subflows are not symmetric, this could lead to
suboptimal aggregate bandwidth utilization.

Limit the amount of queued data to the maximum send
window.
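
As an illustration only, here is a minimal user-space C sketch of the scheduling rule
this patch moves to: skip a subflow whose peer-advertised window is zero, and cap the
burst queued on the chosen subflow at that window rather than at the free space left in
the (possibly much larger) send buffer. The names (subflow_state, subflow_sendable,
pick_burst, SEND_BURST_SIZE) are hypothetical stand-ins, not the kernel symbols that
appear in the diff below.

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

#define SEND_BURST_SIZE (64 * 1024)	/* stand-in for MPTCP_SEND_BURST_SIZE */

/* Per-subflow state relevant to the scheduling decision. */
struct subflow_state {
	uint32_t snd_wnd;	/* peer-advertised send window, like tcp_sk(ssk)->snd_wnd */
	size_t   wspace;	/* free local send buffer, like sk_stream_wspace(ssk) */
};

/* A subflow is eligible only if it has buffer space and a non-zero window. */
static int subflow_sendable(const struct subflow_state *sf)
{
	return sf->wspace > 0 && sf->snd_wnd > 0;
}

/* Queue at most the advertised window on the picked subflow, bounded by the burst cap. */
static size_t pick_burst(const struct subflow_state *sf)
{
	size_t burst = sf->snd_wnd;

	return burst < SEND_BURST_SIZE ? burst : SEND_BURST_SIZE;
}

int main(void)
{
	struct subflow_state slow    = { .snd_wnd = 16 * 1024, .wspace = 1 << 20 };
	struct subflow_state stalled = { .snd_wnd = 0,         .wspace = 1 << 20 };

	if (subflow_sendable(&slow))
		printf("slow subflow: burst capped at %zu bytes\n", pick_burst(&slow));
	if (!subflow_sendable(&stalled))
		printf("zero-window subflow skipped by the scheduler\n");
	return 0;
}

The design point the sketch tries to capture: sk_stream_wspace() only reflects room in
the local send buffer, while tcp_sk()->snd_wnd is the window last advertised by the
peer, so bounding the per-subflow burst by snd_wnd keeps a slow or stalled subflow from
hoarding data that other subflows could deliver sooner.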

Reviewed-by: Mat Martineau <mathew.j.martineau@linux.intel.com>
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
net/mptcp/protocol.c

index d07e60330df569662cd1c3d482a90f749eb9f7a4..e741201acc98f626b15eda332e4e79eed2be8828 100644
@@ -1389,7 +1389,7 @@ static struct sock *mptcp_subflow_get_send(struct mptcp_sock *msk)
                        continue;
 
                nr_active += !subflow->backup;
-               if (!sk_stream_memory_free(subflow->tcp_sock))
+               if (!sk_stream_memory_free(subflow->tcp_sock) || !tcp_sk(ssk)->snd_wnd)
                        continue;
 
                pace = READ_ONCE(ssk->sk_pacing_rate);
@@ -1415,7 +1415,7 @@ static struct sock *mptcp_subflow_get_send(struct mptcp_sock *msk)
        if (send_info[0].ssk) {
                msk->last_snd = send_info[0].ssk;
                msk->snd_burst = min_t(int, MPTCP_SEND_BURST_SIZE,
-                                      sk_stream_wspace(msk->last_snd));
+                                      tcp_sk(msk->last_snd)->snd_wnd);
                return msk->last_snd;
        }