tcp: PRR uses CRB mode by default and SS mode conditionally
[firefly-linux-kernel-4.4.55.git] net/ipv4/tcp_input.c
index c9ab964189a0162c7de19d4319f6c3e56194117b..ad1482dd215ef00616ffb15a7086092b091852d3 100644
@@ -109,6 +109,7 @@ int sysctl_tcp_invalid_ratelimit __read_mostly = HZ/2;
 #define FLAG_SYN_ACKED         0x10 /* This ACK acknowledged SYN.              */
 #define FLAG_DATA_SACKED       0x20 /* New SACK.                               */
 #define FLAG_ECE               0x40 /* ECE in this ACK                         */
+#define FLAG_LOST_RETRANS      0x80 /* This ACK marks some retransmission lost */
 #define FLAG_SLOWPATH          0x100 /* Do not skip RFC checks for window update.*/
 #define FLAG_ORIG_SACK_ACKED   0x200 /* Never retransmitted data are (s)acked  */
 #define FLAG_SND_UNA_ADVANCED  0x400 /* Snd_una was changed (!= FLAG_DATA_ACKED) */
@@ -359,7 +360,7 @@ static void tcp_grow_window(struct sock *sk, const struct sk_buff *skb)
        /* Check #1 */
        if (tp->rcv_ssthresh < tp->window_clamp &&
            (int)tp->rcv_ssthresh < tcp_space(sk) &&
-           !sk_under_memory_pressure(sk)) {
+           !tcp_under_memory_pressure(sk)) {
                int incr;
 
                /* Check #2. Increase window, if skb with such overhead
@@ -446,7 +447,7 @@ static void tcp_clamp_window(struct sock *sk)
 
        if (sk->sk_rcvbuf < sysctl_tcp_rmem[2] &&
            !(sk->sk_userlocks & SOCK_RCVBUF_LOCK) &&
-           !sk_under_memory_pressure(sk) &&
+           !tcp_under_memory_pressure(sk) &&
            sk_memory_allocated(sk) < sk_prot_mem_limits(sk, 0)) {
                sk->sk_rcvbuf = min(atomic_read(&sk->sk_rmem_alloc),
                                    sysctl_tcp_rmem[2]);
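
Both hunks above swap the generic sk_under_memory_pressure() for a
TCP-specific helper (the remaining call sites are converted near the end
of this patch). A sketch of the helper's likely shape, modeled on the
include/net/tcp.h addition from the same series; the memcg field names
are assumptions from that era:

    static inline bool tcp_under_memory_pressure(const struct sock *sk)
    {
            /* Sockets accounted to a memcg track that cgroup's pressure */
            if (mem_cgroup_sockets_enabled && sk->sk_cgrp)
                    return !!sk->sk_cgrp->memory_pressure;

            /* Everyone else consults the global TCP pressure flag */
            return tcp_memory_pressure;
    }
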
@@ -1037,7 +1038,7 @@ static bool tcp_is_sackblock_valid(struct tcp_sock *tp, bool is_dsack,
  * highest SACK block). Also calculate the lowest snd_nxt among the remaining
  * retransmitted skbs to avoid some costly processing per ACK.
  */
-static void tcp_mark_lost_retrans(struct sock *sk)
+static void tcp_mark_lost_retrans(struct sock *sk, int *flag)
 {
        const struct inet_connection_sock *icsk = inet_csk(sk);
        struct tcp_sock *tp = tcp_sk(sk);
@@ -1078,7 +1079,7 @@ static void tcp_mark_lost_retrans(struct sock *sk)
                if (after(received_upto, ack_seq)) {
                        TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS;
                        tp->retrans_out -= tcp_skb_pcount(skb);
-
+                       *flag |= FLAG_LOST_RETRANS;
                        tcp_skb_mark_lost_uncond_verify(tp, skb);
                        NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPLOSTRETRANSMIT);
                } else {
@@ -1130,7 +1131,12 @@ static bool tcp_check_dsack(struct sock *sk, const struct sk_buff *ack_skb,
 struct tcp_sacktag_state {
        int     reord;
        int     fack_count;
-       long    rtt_us; /* RTT measured by SACKing never-retransmitted data */
+       /* Timestamps for earliest and latest never-retransmitted segment
+        * that was SACKed. RTO needs the earliest RTT to stay conservative,
+        * but congestion control should still get an accurate delay signal.
+        */
+       struct skb_mstamp first_sackt;
+       struct skb_mstamp last_sackt;
        int     flag;
 };
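
A worked example of why two timestamps are kept (numbers hypothetical;
the consuming code is in the tcp_clean_rtx_queue() hunk further down):

    /* Two never-retransmitted segments leave at t = 0us and t = 20000us
     * and a single SACK covering both arrives at t = 100000us:
     *
     *   skb_mstamp_us_delta(&now, &sack->first_sackt) = 100000us
     *     -> sack_rtt_us: the longest sample, so RTO stays conservative
     *   skb_mstamp_us_delta(&now, &sack->last_sackt)  =  80000us
     *     -> ca_rtt_us: the freshest sample, fed to congestion control
     */
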
 
@@ -1233,14 +1239,9 @@ static u8 tcp_sacktag_one(struct sock *sk,
                                                           state->reord);
                                if (!after(end_seq, tp->high_seq))
                                        state->flag |= FLAG_ORIG_SACK_ACKED;
-                               /* Pick the earliest sequence sacked for RTT */
-                               if (state->rtt_us < 0) {
-                                       struct skb_mstamp now;
-
-                                       skb_mstamp_get(&now);
-                                       state->rtt_us = skb_mstamp_us_delta(&now,
-                                                               xmit_time);
-                               }
+                               if (state->first_sackt.v64 == 0)
+                                       state->first_sackt = *xmit_time;
+                               state->last_sackt = *xmit_time;
                        }
 
                        if (sacked & TCPCB_LOST) {
@@ -1316,16 +1317,12 @@ static bool tcp_shifted_skb(struct sock *sk, struct sk_buff *skb,
         * code can come after this skb later on it's better to keep
         * setting gso_size to something.
         */
-       if (!skb_shinfo(prev)->gso_size) {
-               skb_shinfo(prev)->gso_size = mss;
-               skb_shinfo(prev)->gso_type = sk->sk_gso_type;
-       }
+       if (!TCP_SKB_CB(prev)->tcp_gso_size)
+               TCP_SKB_CB(prev)->tcp_gso_size = mss;
 
        /* CHECKME: To clear or not to clear? Mimics normal skb currently */
-       if (tcp_skb_pcount(skb) <= 1) {
-               skb_shinfo(skb)->gso_size = 0;
-               skb_shinfo(skb)->gso_type = 0;
-       }
+       if (tcp_skb_pcount(skb) <= 1)
+               TCP_SKB_CB(skb)->tcp_gso_size = 0;
 
        /* Difference in this won't matter, both ACKed by the same cumul. ACK */
        TCP_SKB_CB(prev)->sacked |= (TCP_SKB_CB(skb)->sacked & TCPCB_EVER_RETRANS);
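
These hunks read the segment size from the TCP control block instead of
skb_shinfo(). The tcp_skb_mss() accessor used by the tcp_mark_head_lost()
hunk below presumably becomes a one-liner over the same field; a sketch,
assuming the include/net/tcp.h definition from this series:

    static inline int tcp_skb_mss(const struct sk_buff *skb)
    {
            /* gso_size now lives in TCP_SKB_CB until transmit time, so
             * SACK processing no longer touches the skb_shinfo() area.
             */
            return TCP_SKB_CB(skb)->tcp_gso_size;
    }
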
@@ -1634,7 +1631,7 @@ static int tcp_sack_cache_ok(const struct tcp_sock *tp, const struct tcp_sack_bl
 
 static int
 tcp_sacktag_write_queue(struct sock *sk, const struct sk_buff *ack_skb,
-                       u32 prior_snd_una, long *sack_rtt_us)
+                       u32 prior_snd_una, struct tcp_sacktag_state *state)
 {
        struct tcp_sock *tp = tcp_sk(sk);
        const unsigned char *ptr = (skb_transport_header(ack_skb) +
@@ -1642,7 +1639,6 @@ tcp_sacktag_write_queue(struct sock *sk, const struct sk_buff *ack_skb,
        struct tcp_sack_block_wire *sp_wire = (struct tcp_sack_block_wire *)(ptr+2);
        struct tcp_sack_block sp[TCP_NUM_SACKS];
        struct tcp_sack_block *cache;
-       struct tcp_sacktag_state state;
        struct sk_buff *skb;
        int num_sacks = min(TCP_NUM_SACKS, (ptr[1] - TCPOLEN_SACK_BASE) >> 3);
        int used_sacks;
@@ -1650,9 +1646,8 @@ tcp_sacktag_write_queue(struct sock *sk, const struct sk_buff *ack_skb,
        int i, j;
        int first_sack_index;
 
-       state.flag = 0;
-       state.reord = tp->packets_out;
-       state.rtt_us = -1L;
+       state->flag = 0;
+       state->reord = tp->packets_out;
 
        if (!tp->sacked_out) {
                if (WARN_ON(tp->fackets_out))
@@ -1663,7 +1658,7 @@ tcp_sacktag_write_queue(struct sock *sk, const struct sk_buff *ack_skb,
        found_dup_sack = tcp_check_dsack(sk, ack_skb, sp_wire,
                                         num_sacks, prior_snd_una);
        if (found_dup_sack)
-               state.flag |= FLAG_DSACKING_ACK;
+               state->flag |= FLAG_DSACKING_ACK;
 
        /* Eliminate too old ACKs, but take into
         * account more or less fresh ones, they can
@@ -1728,7 +1723,7 @@ tcp_sacktag_write_queue(struct sock *sk, const struct sk_buff *ack_skb,
        }
 
        skb = tcp_write_queue_head(sk);
-       state.fack_count = 0;
+       state->fack_count = 0;
        i = 0;
 
        if (!tp->sacked_out) {
@@ -1762,10 +1757,10 @@ tcp_sacktag_write_queue(struct sock *sk, const struct sk_buff *ack_skb,
 
                        /* Head todo? */
                        if (before(start_seq, cache->start_seq)) {
-                               skb = tcp_sacktag_skip(skb, sk, &state,
+                               skb = tcp_sacktag_skip(skb, sk, state,
                                                       start_seq);
                                skb = tcp_sacktag_walk(skb, sk, next_dup,
-                                                      &state,
+                                                      state,
                                                       start_seq,
                                                       cache->start_seq,
                                                       dup_sack);
@@ -1776,7 +1771,7 @@ tcp_sacktag_write_queue(struct sock *sk, const struct sk_buff *ack_skb,
                                goto advance_sp;
 
                        skb = tcp_maybe_skipping_dsack(skb, sk, next_dup,
-                                                      &state,
+                                                      state,
                                                       cache->end_seq);
 
                        /* ...tail remains todo... */
@@ -1785,12 +1780,12 @@ tcp_sacktag_write_queue(struct sock *sk, const struct sk_buff *ack_skb,
                                skb = tcp_highest_sack(sk);
                                if (!skb)
                                        break;
-                               state.fack_count = tp->fackets_out;
+                               state->fack_count = tp->fackets_out;
                                cache++;
                                goto walk;
                        }
 
-                       skb = tcp_sacktag_skip(skb, sk, &state, cache->end_seq);
+                       skb = tcp_sacktag_skip(skb, sk, state, cache->end_seq);
                        /* Check overlap against next cached too (past this one already) */
                        cache++;
                        continue;
@@ -1800,12 +1795,12 @@ tcp_sacktag_write_queue(struct sock *sk, const struct sk_buff *ack_skb,
                        skb = tcp_highest_sack(sk);
                        if (!skb)
                                break;
-                       state.fack_count = tp->fackets_out;
+                       state->fack_count = tp->fackets_out;
                }
-               skb = tcp_sacktag_skip(skb, sk, &state, start_seq);
+               skb = tcp_sacktag_skip(skb, sk, state, start_seq);
 
 walk:
-               skb = tcp_sacktag_walk(skb, sk, next_dup, &state,
+               skb = tcp_sacktag_walk(skb, sk, next_dup, state,
                                       start_seq, end_seq, dup_sack);
 
 advance_sp:
@@ -1820,11 +1815,11 @@ advance_sp:
        for (j = 0; j < used_sacks; j++)
                tp->recv_sack_cache[i++] = sp[j];
 
-       if ((state.reord < tp->fackets_out) &&
+       if ((state->reord < tp->fackets_out) &&
            ((inet_csk(sk)->icsk_ca_state != TCP_CA_Loss) || tp->undo_marker))
-               tcp_update_reordering(sk, tp->fackets_out - state.reord, 0);
+               tcp_update_reordering(sk, tp->fackets_out - state->reord, 0);
 
-       tcp_mark_lost_retrans(sk);
+       tcp_mark_lost_retrans(sk, &state->flag);
        tcp_verify_left_out(tp);
 out:
 
@@ -1834,8 +1829,7 @@ out:
        WARN_ON((int)tp->retrans_out < 0);
        WARN_ON((int)tcp_packets_in_flight(tp) < 0);
 #endif
-       *sack_rtt_us = state.rtt_us;
-       return state.flag;
+       return state->flag;
 }
 
 /* Limits sacked_out so that sum with lost_out isn't ever larger than
@@ -2255,7 +2249,7 @@ static void tcp_mark_head_lost(struct sock *sk, int packets, int mark_head)
                            (oldcnt >= packets))
                                break;
 
-                       mss = skb_shinfo(skb)->gso_size;
+                       mss = tcp_skb_mss(skb);
                        err = tcp_fragment(sk, skb, (packets - oldcnt) * mss,
                                           mss, GFP_ATOMIC);
                        if (err < 0)
@@ -2482,15 +2476,14 @@ static bool tcp_try_undo_loss(struct sock *sk, bool frto_undo)
        return false;
 }
 
-/* The cwnd reduction in CWR and Recovery use the PRR algorithm
- * https://datatracker.ietf.org/doc/draft-ietf-tcpm-proportional-rate-reduction/
+/* The cwnd reduction in CWR and Recovery uses the PRR algorithm in RFC 6937.
  * It computes the number of packets to send (sndcnt) based on packets newly
  * delivered:
 *   1) If the number of packets in flight is larger than ssthresh, PRR
 *     spreads the cwnd reductions across a full RTT.
- *   2) If packets in flight is lower than ssthresh (such as due to excess
- *     losses and/or application stalls), do not perform any further cwnd
- *     reductions, but instead slow start up to ssthresh.
+ *   2) Otherwise PRR uses packet conservation to send as much as delivered.
+ *      But when the retransmits are acked without further losses, PRR
+ *      slow starts cwnd up to ssthresh to speed up the recovery.
  */
 static void tcp_init_cwnd_reduction(struct sock *sk)
 {
@@ -2507,7 +2500,7 @@ static void tcp_init_cwnd_reduction(struct sock *sk)
 }
 
 static void tcp_cwnd_reduction(struct sock *sk, const int prior_unsacked,
-                              int fast_rexmit)
+                              int fast_rexmit, int flag)
 {
        struct tcp_sock *tp = tcp_sk(sk);
        int sndcnt = 0;
@@ -2516,16 +2509,18 @@ static void tcp_cwnd_reduction(struct sock *sk, const int prior_unsacked,
                                 (tp->packets_out - tp->sacked_out);
 
        tp->prr_delivered += newly_acked_sacked;
-       if (tcp_packets_in_flight(tp) > tp->snd_ssthresh) {
+       if (delta < 0) {
                u64 dividend = (u64)tp->snd_ssthresh * tp->prr_delivered +
                               tp->prior_cwnd - 1;
                sndcnt = div_u64(dividend, tp->prior_cwnd) - tp->prr_out;
-       } else {
+       } else if ((flag & FLAG_RETRANS_DATA_ACKED) &&
+                  !(flag & FLAG_LOST_RETRANS)) {
                sndcnt = min_t(int, delta,
                               max_t(int, tp->prr_delivered - tp->prr_out,
                                     newly_acked_sacked) + 1);
+       } else {
+               sndcnt = min(delta, newly_acked_sacked);
        }
-
        sndcnt = max(sndcnt, (fast_rexmit ? 1 : 0));
        tp->snd_cwnd = tcp_packets_in_flight(tp) + sndcnt;
 }
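
A standalone model of the three branches may help; prr_sndcnt() below is
a hypothetical user-space stand-in for the tcp_sock fields. Worked
example: with prior_cwnd = 20 and ssthresh = 14 (a 0.7 beta), the first
branch lets out roughly ssthresh/prior_cwnd = 0.7 packets per packet
delivered, so cwnd glides from 20 down to 14 over about one RTT. The
final else is the CRB (conservative reduction bound) mode from the patch
title, now the default; the middle branch is the conditional SS (slow
start) mode:

    static int prr_sndcnt(int ssthresh, int prior_cwnd, int in_flight,
                          int prr_delivered, int prr_out,
                          int newly_acked_sacked,
                          int retrans_acked, int retrans_lost)
    {
            int delta = ssthresh - in_flight;
            int sndcnt;

            if (delta < 0) {
                    /* Proportional mode: ~ssthresh/prior_cwnd per pkt */
                    sndcnt = (ssthresh * prr_delivered + prior_cwnd - 1) /
                             prior_cwnd - prr_out;
            } else if (retrans_acked && !retrans_lost) {
                    /* SS mode: grow like slow start, capped at delta */
                    int inc = prr_delivered - prr_out;

                    if (inc < newly_acked_sacked)
                            inc = newly_acked_sacked;
                    sndcnt = inc + 1;
                    if (sndcnt > delta)
                            sndcnt = delta;
            } else {
                    /* CRB mode: send no more than was just delivered */
                    sndcnt = delta < newly_acked_sacked ?
                             delta : newly_acked_sacked;
            }
            return sndcnt > 0 ? sndcnt : 0;
    }
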
@@ -2555,6 +2550,7 @@ void tcp_enter_cwr(struct sock *sk)
                tcp_set_ca_state(sk, TCP_CA_CWR);
        }
 }
+EXPORT_SYMBOL(tcp_enter_cwr);
 
 static void tcp_try_keep_open(struct sock *sk)
 {
@@ -2585,7 +2581,7 @@ static void tcp_try_to_open(struct sock *sk, int flag, const int prior_unsacked)
        if (inet_csk(sk)->icsk_ca_state != TCP_CA_CWR) {
                tcp_try_keep_open(sk);
        } else {
-               tcp_cwnd_reduction(sk, prior_unsacked, 0);
+               tcp_cwnd_reduction(sk, prior_unsacked, 0, flag);
        }
 }
 
@@ -2682,7 +2678,7 @@ static void tcp_enter_recovery(struct sock *sk, bool ece_ack)
        tp->prior_ssthresh = 0;
        tcp_init_undo(tp);
 
-       if (inet_csk(sk)->icsk_ca_state < TCP_CA_CWR) {
+       if (!tcp_in_cwnd_reduction(sk)) {
                if (!ece_ack)
                        tp->prior_ssthresh = tcp_current_ssthresh(sk);
                tcp_init_cwnd_reduction(sk);
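
tcp_in_cwnd_reduction() is true only in the CWR and Recovery states, so
unlike the old icsk_ca_state < TCP_CA_CWR test it also lets a socket
coming from CA_Loss re-initialize the PRR state. A sketch of the helper,
assuming the include/net/tcp.h definition of this era:

    static inline bool tcp_in_cwnd_reduction(const struct sock *sk)
    {
            return (TCPF_CA_CWR | TCPF_CA_Recovery) &
                   (1 << inet_csk(sk)->icsk_ca_state);
    }
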
@@ -2742,7 +2738,7 @@ static void tcp_process_loss(struct sock *sk, int flag, bool is_dupack)
 
 /* Undo during fast recovery after partial ACK. */
 static bool tcp_try_undo_partial(struct sock *sk, const int acked,
-                                const int prior_unsacked)
+                                const int prior_unsacked, int flag)
 {
        struct tcp_sock *tp = tcp_sk(sk);
 
@@ -2758,7 +2754,7 @@ static bool tcp_try_undo_partial(struct sock *sk, const int acked,
                 * mark more packets lost or retransmit more.
                 */
                if (tp->retrans_out) {
-                       tcp_cwnd_reduction(sk, prior_unsacked, 0);
+                       tcp_cwnd_reduction(sk, prior_unsacked, 0, flag);
                        return true;
                }
 
@@ -2845,7 +2841,7 @@ static void tcp_fastretrans_alert(struct sock *sk, const int acked,
                        if (tcp_is_reno(tp) && is_dupack)
                                tcp_add_reno_sack(sk);
                } else {
-                       if (tcp_try_undo_partial(sk, acked, prior_unsacked))
+                       if (tcp_try_undo_partial(sk, acked, prior_unsacked, flag))
                                return;
                        /* Partial ACK arrived. Force fast retransmit. */
                        do_lost = tcp_is_reno(tp) ||
@@ -2858,9 +2854,10 @@ static void tcp_fastretrans_alert(struct sock *sk, const int acked,
                break;
        case TCP_CA_Loss:
                tcp_process_loss(sk, flag, is_dupack);
-               if (icsk->icsk_ca_state != TCP_CA_Open)
+               if (icsk->icsk_ca_state != TCP_CA_Open &&
+                   !(flag & FLAG_LOST_RETRANS))
                        return;
-               /* Fall through to processing in Open state. */
+               /* Change state if cwnd is undone or retransmits are lost */
        default:
                if (tcp_is_reno(tp)) {
                        if (flag & FLAG_SND_UNA_ADVANCED)
@@ -2895,7 +2892,7 @@ static void tcp_fastretrans_alert(struct sock *sk, const int acked,
 
        if (do_lost)
                tcp_update_scoreboard(sk, fast_rexmit);
-       tcp_cwnd_reduction(sk, prior_unsacked, fast_rexmit);
+       tcp_cwnd_reduction(sk, prior_unsacked, fast_rexmit, flag);
        tcp_xmit_retransmit_queue(sk);
 }
 
@@ -3055,7 +3052,8 @@ static void tcp_ack_tstamp(struct sock *sk, struct sk_buff *skb,
  * arrived at the other end.
  */
 static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
-                              u32 prior_snd_una, long sack_rtt_us)
+                              u32 prior_snd_una,
+                              struct tcp_sacktag_state *sack)
 {
        const struct inet_connection_sock *icsk = inet_csk(sk);
        struct skb_mstamp first_ackt, last_ackt, now;
@@ -3063,8 +3061,9 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
        u32 prior_sacked = tp->sacked_out;
        u32 reord = tp->packets_out;
        bool fully_acked = true;
-       long ca_seq_rtt_us = -1L;
+       long sack_rtt_us = -1L;
        long seq_rtt_us = -1L;
+       long ca_rtt_us = -1L;
        struct sk_buff *skb;
        u32 pkts_acked = 0;
        bool rtt_update;
@@ -3153,15 +3152,16 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
        skb_mstamp_get(&now);
        if (likely(first_ackt.v64)) {
                seq_rtt_us = skb_mstamp_us_delta(&now, &first_ackt);
-               ca_seq_rtt_us = skb_mstamp_us_delta(&now, &last_ackt);
+               ca_rtt_us = skb_mstamp_us_delta(&now, &last_ackt);
+       }
+       if (sack->first_sackt.v64) {
+               sack_rtt_us = skb_mstamp_us_delta(&now, &sack->first_sackt);
+               ca_rtt_us = skb_mstamp_us_delta(&now, &sack->last_sackt);
        }
 
        rtt_update = tcp_ack_update_rtt(sk, flag, seq_rtt_us, sack_rtt_us);
 
        if (flag & FLAG_ACKED) {
-               const struct tcp_congestion_ops *ca_ops
-                       = inet_csk(sk)->icsk_ca_ops;
-
                tcp_rearm_rto(sk);
                if (unlikely(icsk->icsk_mtup.probe_size &&
                             !after(tp->mtu_probe.probe_seq_end, tp->snd_una))) {
@@ -3184,11 +3184,6 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
 
                tp->fackets_out -= min(pkts_acked, tp->fackets_out);
 
-               if (ca_ops->pkts_acked) {
-                       long rtt_us = min_t(ulong, ca_seq_rtt_us, sack_rtt_us);
-                       ca_ops->pkts_acked(sk, pkts_acked, rtt_us);
-               }
-
        } else if (skb && rtt_update && sack_rtt_us >= 0 &&
                   sack_rtt_us > skb_mstamp_us_delta(&now, &skb->skb_mstamp)) {
                /* Do not re-arm RTO if the sack RTT is measured from data sent
@@ -3198,6 +3193,9 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
                tcp_rearm_rto(sk);
        }
 
+       if (icsk->icsk_ca_ops->pkts_acked)
+               icsk->icsk_ca_ops->pkts_acked(sk, pkts_acked, ca_rtt_us);
+
 #if FASTRETRANS_DEBUG > 0
        WARN_ON((int)tp->sacked_out < 0);
        WARN_ON((int)tp->lost_out < 0);
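
Moving the pkts_acked() call out of the FLAG_ACKED branch means
congestion-control modules now hear about SACK-only ACKs too, and
ca_rtt_us carries the freshest sample (last_ackt or last_sackt) instead
of min(cumulative-ACK RTT, SACK RTT). A hypothetical module-side
consumer, assuming the u32/s32 hook signature of this era:

    static u32 example_min_rtt_us = U32_MAX;

    /* example_pkts_acked is hypothetical; .pkts_acked is the real hook */
    static void example_pkts_acked(struct sock *sk, u32 num_acked,
                                   s32 rtt_us)
    {
            /* rtt_us < 0 means this ACK produced no valid RTT sample */
            if (rtt_us > 0 && (u32)rtt_us < example_min_rtt_us)
                    example_min_rtt_us = rtt_us;
    }
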
@@ -3238,7 +3236,7 @@ static void tcp_ack_probe(struct sock *sk)
                 * This function is not for random use!
                 */
        } else {
-               unsigned long when = inet_csk_rto_backoff(icsk, TCP_RTO_MAX);
+               unsigned long when = tcp_probe0_when(sk, TCP_RTO_MAX);
 
                inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
                                          when, TCP_RTO_MAX);
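
tcp_probe0_when() replaces the raw RTO backoff so the zero-window probe
timer is derived from a clamped base rather than a possibly tiny
icsk_rto. A sketch of the helper pair as presumably added to
include/net/tcp.h by the companion patch:

    static inline unsigned long tcp_probe0_base(const struct sock *sk)
    {
            return max_t(unsigned long, inet_csk(sk)->icsk_rto,
                         TCP_RTO_MIN);
    }

    static inline unsigned long tcp_probe0_when(const struct sock *sk,
                                                unsigned long max_when)
    {
            u64 when = (u64)tcp_probe0_base(sk) <<
                       inet_csk(sk)->icsk_backoff;

            return (unsigned long)min_t(u64, when, max_when);
    }
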
@@ -3466,6 +3464,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
 {
        struct inet_connection_sock *icsk = inet_csk(sk);
        struct tcp_sock *tp = tcp_sk(sk);
+       struct tcp_sacktag_state sack_state;
        u32 prior_snd_una = tp->snd_una;
        u32 ack_seq = TCP_SKB_CB(skb)->seq;
        u32 ack = TCP_SKB_CB(skb)->ack_seq;
@@ -3474,7 +3473,8 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
        int prior_packets = tp->packets_out;
        const int prior_unsacked = tp->packets_out - tp->sacked_out;
        int acked = 0; /* Number of packets newly acked */
-       long sack_rtt_us = -1L;
+
+       sack_state.first_sackt.v64 = 0;
 
        /* We very likely will need to access write queue head. */
        prefetchw(sk->sk_write_queue.next);
@@ -3538,7 +3538,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
 
                if (TCP_SKB_CB(skb)->sacked)
                        flag |= tcp_sacktag_write_queue(sk, skb, prior_snd_una,
-                                                       &sack_rtt_us);
+                                                       &sack_state);
 
                if (tcp_ecn_rcv_ecn_echo(tp, tcp_hdr(skb))) {
                        flag |= FLAG_ECE;
@@ -3563,7 +3563,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
        /* See if we can take anything off of the retransmit queue. */
        acked = tp->packets_out;
        flag |= tcp_clean_rtx_queue(sk, prior_fackets, prior_snd_una,
-                                   sack_rtt_us);
+                                   &sack_state);
        acked -= tp->packets_out;
 
        /* Advance cwnd if state allows */
@@ -3615,7 +3615,7 @@ old_ack:
         */
        if (TCP_SKB_CB(skb)->sacked) {
                flag |= tcp_sacktag_write_queue(sk, skb, prior_snd_una,
-                                               &sack_rtt_us);
+                                               &sack_state);
                tcp_fastretrans_alert(sk, acked, prior_unsacked,
                                      is_dupack, flag);
        }
@@ -4514,10 +4514,12 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
 
                if (eaten <= 0) {
 queue_and_out:
-                       if (eaten < 0 &&
-                           tcp_try_rmem_schedule(sk, skb, skb->truesize))
-                               goto drop;
-
+                       if (eaten < 0) {
+                               if (skb_queue_len(&sk->sk_receive_queue) == 0)
+                                       sk_forced_mem_schedule(sk, skb->truesize);
+                               else if (tcp_try_rmem_schedule(sk, skb, skb->truesize))
+                                       goto drop;
+                       }
                        eaten = tcp_queue_rcv(sk, skb, 0, &fragstolen);
                }
                tcp_rcv_nxt_update(tp, TCP_SKB_CB(skb)->end_seq);
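
The new branch guarantees that an empty receive queue can always accept
at least one segment, so a connection under global memory pressure keeps
making forward progress instead of deadlocking with its peer. A hedged
sketch of sk_forced_mem_schedule(), which this series adds elsewhere
(the accounting call's exact signature varies across kernel versions):

    void sk_forced_mem_schedule(struct sock *sk, int size)
    {
            int amt;

            if (size <= sk->sk_forward_alloc)
                    return;
            /* Charge unconditionally: never fails, unlike sk_mem_schedule() */
            amt = sk_mem_pages(size);
            sk->sk_forward_alloc += amt * SK_MEM_QUANTUM;
            sk_memory_allocated_add(sk, amt);
    }
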
@@ -4788,7 +4790,7 @@ static int tcp_prune_queue(struct sock *sk)
 
        if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
                tcp_clamp_window(sk);
-       else if (sk_under_memory_pressure(sk))
+       else if (tcp_under_memory_pressure(sk))
                tp->rcv_ssthresh = min(tp->rcv_ssthresh, 4U * tp->advmss);
 
        tcp_collapse_ofo_queue(sk);
@@ -4832,7 +4834,7 @@ static bool tcp_should_expand_sndbuf(const struct sock *sk)
                return false;
 
        /* If we are under global TCP memory pressure, do not expand.  */
-       if (sk_under_memory_pressure(sk))
+       if (tcp_under_memory_pressure(sk))
                return false;
 
        /* If we are under soft global TCP memory pressure, do not expand.  */
@@ -6067,6 +6069,23 @@ static bool tcp_syn_flood_action(struct sock *sk,
        return want_cookie;
 }
 
+static void tcp_reqsk_record_syn(const struct sock *sk,
+                                struct request_sock *req,
+                                const struct sk_buff *skb)
+{
+       if (tcp_sk(sk)->save_syn) {
+               u32 len = skb_network_header_len(skb) + tcp_hdrlen(skb);
+               u32 *copy;
+
+               copy = kmalloc(len + sizeof(u32), GFP_ATOMIC);
+               if (copy) {
+                       copy[0] = len;
+                       memcpy(&copy[1], skb_network_header(skb), len);
+                       req->saved_syn = copy;
+               }
+       }
+}
+
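
The saved block is a u32 length followed by the raw network and TCP
headers of the SYN. Userspace opts in and reads it back with the
TCP_SAVE_SYN / TCP_SAVED_SYN socket options added by the same series; a
usage sketch (listen_fd and conn_fd are assumed descriptors):

    int on = 1;
    unsigned char syn[512];
    socklen_t len = sizeof(syn);

    /* opt in on the listener before SYNs arrive */
    setsockopt(listen_fd, IPPROTO_TCP, TCP_SAVE_SYN, &on, sizeof(on));

    /* later, on the accept()ed socket */
    if (getsockopt(conn_fd, IPPROTO_TCP, TCP_SAVED_SYN, syn, &len) == 0) {
            /* syn[0..len-1] holds the SYN's IP (v4 or v6) + TCP headers */
    }
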
 int tcp_conn_request(struct request_sock_ops *rsk_ops,
                     const struct tcp_request_sock_ops *af_ops,
                     struct sock *sk, struct sk_buff *skb)
@@ -6199,6 +6218,7 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops,
                tcp_rsk(req)->tfo_listener = false;
                af_ops->queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
        }
+       tcp_reqsk_record_syn(sk, req, skb);
 
        return 0;