tcp: refactor setting the initial congestion window
author     Yuchung Cheng <ycheng@google.com>
           Mon, 29 Apr 2019 22:46:20 +0000 (15:46 -0700)
committer  David S. Miller <davem@davemloft.net>
           Wed, 1 May 2019 15:47:54 +0000 (11:47 -0400)
Relocate the congestion window initialization from tcp_init_metrics()
to tcp_init_transfer() to improve code readability.

Signed-off-by: Yuchung Cheng <ycheng@google.com>
Signed-off-by: Neal Cardwell <ncardwell@google.com>
Signed-off-by: Soheil Hassas Yeganeh <soheil@google.com>
Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
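
The logic being relocated is small: if the SYN or SYN-ACK needed more than one retransmission, the transfer starts with cwnd = 1 as RFC 5681 suggests; otherwise the normal initial window is used. Below is a minimal stand-alone sketch of that decision, assuming a hypothetical helper name and an illustrative default window of 10 segments; it is not the kernel code itself, only an illustration of the comment carried over into tcp_init_transfer() in the diff below.

#include <stdio.h>

/* Decide the congestion window used when the transfer starts.
 * Cut cwnd to 1 per RFC 5681 only if the SYN or SYN-ACK was
 * retransmitted more than once, since RFC 6298's 1 s initial RTO
 * makes a single spurious retransmission fairly common.
 */
static unsigned int choose_initial_cwnd(unsigned int syn_retransmits,
					int undo_marker,
					unsigned int normal_init_cwnd)
{
	if (syn_retransmits > 1 && undo_marker)
		return 1;
	return normal_init_cwnd;
}

int main(void)
{
	/* An illustrative default of 10 segments is assumed here. */
	printf("0 retransmits -> cwnd %u\n", choose_initial_cwnd(0, 0, 10));
	printf("1 retransmit  -> cwnd %u\n", choose_initial_cwnd(1, 1, 10));
	printf("2 retransmits -> cwnd %u\n", choose_initial_cwnd(2, 1, 10));
	return 0;
}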
net/ipv4/tcp.c
net/ipv4/tcp_input.c
net/ipv4/tcp_metrics.c

net/ipv4/tcp.c
index f7567a3698eb4d6c24dbe6a2f110413ce14d24a7..1fa15beb83806696a6b4f5eddb7e45c13a2dea45 100644 (file)
@@ -457,18 +457,6 @@ void tcp_init_sock(struct sock *sk)
 }
 EXPORT_SYMBOL(tcp_init_sock);
 
-void tcp_init_transfer(struct sock *sk, int bpf_op)
-{
-       struct inet_connection_sock *icsk = inet_csk(sk);
-
-       tcp_mtup_init(sk);
-       icsk->icsk_af_ops->rebuild_header(sk);
-       tcp_init_metrics(sk);
-       tcp_call_bpf(sk, bpf_op, 0, NULL);
-       tcp_init_congestion_control(sk);
-       tcp_init_buffer_space(sk);
-}
-
 static void tcp_tx_timestamp(struct sock *sk, u16 tsflags)
 {
        struct sk_buff *skb = tcp_write_queue_tail(sk);
net/ipv4/tcp_input.c
index 706a99ec73f6fffd9e367ad16b72d1b4281d7e39..077d9abdfcf503b39c6d7e15fdec18ce408c4119 100644 (file)
@@ -5647,6 +5647,32 @@ discard:
 }
 EXPORT_SYMBOL(tcp_rcv_established);
 
+void tcp_init_transfer(struct sock *sk, int bpf_op)
+{
+       struct inet_connection_sock *icsk = inet_csk(sk);
+       struct tcp_sock *tp = tcp_sk(sk);
+
+       tcp_mtup_init(sk);
+       icsk->icsk_af_ops->rebuild_header(sk);
+       tcp_init_metrics(sk);
+
+       /* Initialize the congestion window to start the transfer.
+        * Cut cwnd down to 1 per RFC5681 if SYN or SYN-ACK has been
+        * retransmitted. In light of RFC6298 more aggressive 1sec
+        * initRTO, we only reset cwnd when more than 1 SYN/SYN-ACK
+        * retransmission has occurred.
+        */
+       if (tp->total_retrans > 1 && tp->undo_marker)
+               tp->snd_cwnd = 1;
+       else
+               tp->snd_cwnd = tcp_init_cwnd(tp, __sk_dst_get(sk));
+       tp->snd_cwnd_stamp = tcp_jiffies32;
+
+       tcp_call_bpf(sk, bpf_op, 0, NULL);
+       tcp_init_congestion_control(sk);
+       tcp_init_buffer_space(sk);
+}
+
 void tcp_finish_connect(struct sock *sk, struct sk_buff *skb)
 {
        struct tcp_sock *tp = tcp_sk(sk);
net/ipv4/tcp_metrics.c
index d4d687330e2ba3a6fc094b5b4d9581e1e9f9196b..c4848e7a0aad123bcb6589be6247fc5007796699 100644 (file)
@@ -512,16 +512,6 @@ reset:
 
                inet_csk(sk)->icsk_rto = TCP_TIMEOUT_FALLBACK;
        }
-       /* Cut cwnd down to 1 per RFC5681 if SYN or SYN-ACK has been
-        * retransmitted. In light of RFC6298 more aggressive 1sec
-        * initRTO, we only reset cwnd when more than 1 SYN/SYN-ACK
-        * retransmission has occurred.
-        */
-       if (tp->total_retrans > 1 && tp->undo_marker)
-               tp->snd_cwnd = 1;
-       else
-               tp->snd_cwnd = tcp_init_cwnd(tp, dst);
-       tp->snd_cwnd_stamp = tcp_jiffies32;
 }
 
 bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst)
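
For reference, tcp_init_cwnd() (defined in net/ipv4/tcp_metrics.c) supplies the "normal" starting window used in the branch above. The sketch below is a stand-alone paraphrase of its behavior as I understand it, assuming a per-route initcwnd override, a default of 10 segments, and a clamp to snd_cwnd_clamp; treat these details as assumptions rather than a copy of the kernel implementation.

#include <stdio.h>

#define SKETCH_INIT_CWND 10U	/* assumed default, cf. TCP_INIT_CWND */

/* route_initcwnd == 0 means no per-route "initcwnd" metric is set. */
static unsigned int sketch_tcp_init_cwnd(unsigned int route_initcwnd,
					 unsigned int snd_cwnd_clamp)
{
	unsigned int cwnd = route_initcwnd ? route_initcwnd : SKETCH_INIT_CWND;

	/* Never start above the socket's congestion window clamp. */
	return cwnd < snd_cwnd_clamp ? cwnd : snd_cwnd_clamp;
}

int main(void)
{
	printf("no route metric       -> %u\n", sketch_tcp_init_cwnd(0, 65535));
	printf("route initcwnd 20     -> %u\n", sketch_tcp_init_cwnd(20, 65535));
	printf("clamped at 4 segments -> %u\n", sketch_tcp_init_cwnd(20, 4));
	return 0;
}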