tcp: move tcp_enter_cwr()
author    Yuchung Cheng <ycheng@google.com>
          Sun, 2 Sep 2012 17:38:02 +0000 (17:38 +0000)
committer David S. Miller <davem@davemloft.net>
          Mon, 3 Sep 2012 18:34:02 +0000 (14:34 -0400)
To prepare for replacing rate halving with the PRR algorithm in the CWR state.
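
For background, PRR (RFC 6937) replaces rate halving's per-ACK cwnd clamp
with a sending quota proportional to the data actually delivered during
recovery.  The sketch below only illustrates the RFC's per-ACK computation,
in whole segments, with prior_cwnd standing in for the RFC's RecoverFS
(flight size when recovery starts, assumed non-zero); it is not code from
this patch or from the follow-up patches.

#include <stdint.h>

static uint32_t prr_sndcnt(uint32_t pipe, uint32_t ssthresh,
			   uint32_t prior_cwnd, uint32_t prr_delivered,
			   uint32_t prr_out, uint32_t newly_delivered)
{
	int64_t sndcnt;

	if (pipe > ssthresh) {
		/* Proportional phase: spread the reduction over the whole
		 * recovery so cwnd converges on ssthresh as data is
		 * delivered (CEIL(prr_delivered * ssthresh / RecoverFS)
		 * - prr_out in the RFC).
		 */
		sndcnt = ((int64_t)prr_delivered * ssthresh +
			  prior_cwnd - 1) / prior_cwnd;
		sndcnt -= prr_out;
	} else {
		/* Slow-start reduction bound (PRR-SSRB): rebuild pipe
		 * toward ssthresh at most one segment faster than the
		 * delivery rate.
		 */
		int64_t limit = (int64_t)prr_delivered - prr_out;

		if (limit < newly_delivered)
			limit = newly_delivered;
		limit += 1;
		sndcnt = ssthresh - pipe;
		if (sndcnt > limit)
			sndcnt = limit;
	}
	return sndcnt > 0 ? (uint32_t)sndcnt : 0;
}

During recovery the sender would then transmit at most sndcnt new segments
per ACK, instead of reducing cwnd by one segment on every second ACK as
rate halving does.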

Signed-off-by: Yuchung Cheng <ycheng@google.com>
Acked-by: Neal Cardwell <ncardwell@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
net/ipv4/tcp_input.c

index 8c304a4007987c9794f851acc68c056a9b41ed78..3ab0c7573a0b905f4dba285a951dbb1fe9878144 100644
@@ -743,29 +743,6 @@ __u32 tcp_init_cwnd(const struct tcp_sock *tp, const struct dst_entry *dst)
        return min_t(__u32, cwnd, tp->snd_cwnd_clamp);
 }
 
-/* Set slow start threshold and cwnd not falling to slow start */
-void tcp_enter_cwr(struct sock *sk, const int set_ssthresh)
-{
-       struct tcp_sock *tp = tcp_sk(sk);
-       const struct inet_connection_sock *icsk = inet_csk(sk);
-
-       tp->prior_ssthresh = 0;
-       tp->bytes_acked = 0;
-       if (icsk->icsk_ca_state < TCP_CA_CWR) {
-               tp->undo_marker = 0;
-               if (set_ssthresh)
-                       tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk);
-               tp->snd_cwnd = min(tp->snd_cwnd,
-                                  tcp_packets_in_flight(tp) + 1U);
-               tp->snd_cwnd_cnt = 0;
-               tp->high_seq = tp->snd_nxt;
-               tp->snd_cwnd_stamp = tcp_time_stamp;
-               TCP_ECN_queue_cwr(tp);
-
-               tcp_set_ca_state(sk, TCP_CA_CWR);
-       }
-}
-
 /*
  * Packet counting of FACK is based on in-order assumptions, therefore TCP
  * disables it when reordering is detected
@@ -2741,6 +2718,29 @@ static inline void tcp_complete_cwr(struct sock *sk)
        tcp_ca_event(sk, CA_EVENT_COMPLETE_CWR);
 }
 
+/* Set slow start threshold and cwnd not falling to slow start */
+void tcp_enter_cwr(struct sock *sk, const int set_ssthresh)
+{
+       struct tcp_sock *tp = tcp_sk(sk);
+       const struct inet_connection_sock *icsk = inet_csk(sk);
+
+       tp->prior_ssthresh = 0;
+       tp->bytes_acked = 0;
+       if (icsk->icsk_ca_state < TCP_CA_CWR) {
+               tp->undo_marker = 0;
+               if (set_ssthresh)
+                       tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk);
+               tp->snd_cwnd = min(tp->snd_cwnd,
+                                  tcp_packets_in_flight(tp) + 1U);
+               tp->snd_cwnd_cnt = 0;
+               tp->high_seq = tp->snd_nxt;
+               tp->snd_cwnd_stamp = tcp_time_stamp;
+               TCP_ECN_queue_cwr(tp);
+
+               tcp_set_ca_state(sk, TCP_CA_CWR);
+       }
+}
+
 static void tcp_try_keep_open(struct sock *sk)
 {
        struct tcp_sock *tp = tcp_sk(sk);