/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol (TCP).
 *
 * Authors:	Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 *
 * Changes:	Pedro Roque	:	Retransmit queue handled by TCP.
 *				:	Fragmentation on mtu decrease
 *				:	Segment collapse on retransmit
 *
 *		Linus Torvalds	:	send_delayed_ack
 *		David S. Miller	:	Charge memory using the right skb
 *					during syn/ack processing.
 *		David S. Miller :	Output engine completely rewritten.
 *		Andrea Arcangeli:	SYNACK carry ts_recent in tsecr.
 *		Cacophonix Gaul :	draft-minshall-nagle-01
 *		J Hadi Salim	:	ECN support
 */
#define pr_fmt(fmt) "TCP: " fmt

#include <net/tcp.h>

#include <linux/compiler.h>
#include <linux/gfp.h>
#include <linux/module.h>
/* People can turn this off for buggy TCPs found in printers etc. */
int sysctl_tcp_retrans_collapse __read_mostly = 1;

/* People can turn this on to work with those rare, broken TCPs that
 * interpret the window field as a signed quantity.
 */
int sysctl_tcp_workaround_signed_windows __read_mostly = 0;

/* Default TSQ limit of two TSO segments */
int sysctl_tcp_limit_output_bytes __read_mostly = 131072;

/* This limits the percentage of the congestion window which we
 * will allow a single TSO frame to consume.  Building TSO frames
 * which are too large can cause TCP streams to be bursty.
 */
int sysctl_tcp_tso_win_divisor __read_mostly = 3;

int sysctl_tcp_mtu_probing __read_mostly = 0;
int sysctl_tcp_base_mss __read_mostly = TCP_BASE_MSS;

/* By default, RFC2861 behavior. */
int sysctl_tcp_slow_start_after_idle __read_mostly = 1;
static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
			   int push_one, gfp_t gfp);

/* Account for new data that has been sent to the network. */
static void tcp_event_new_data_sent(struct sock *sk, const struct sk_buff *skb)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	unsigned int prior_packets = tp->packets_out;

	tcp_advance_send_head(sk, skb);
	tp->snd_nxt = TCP_SKB_CB(skb)->end_seq;

	tp->packets_out += tcp_skb_pcount(skb);
	if (!prior_packets || icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS ||
	    icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
		tcp_rearm_rto(sk);
	}
}
/* SND.NXT, if window was not shrunk.
 * If window has been shrunk, what should we make? It is not clear at all.
 * Using SND.UNA we will fail to open window, SND.NXT is out of window. :-(
 * Anything in between SND.UNA...SND.UNA+SND.WND also can be already
 * invalid. OK, let's make this for now:
 */
static inline __u32 tcp_acceptable_seq(const struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);

	if (!before(tcp_wnd_end(tp), tp->snd_nxt))
		return tp->snd_nxt;
	else
		return tcp_wnd_end(tp);
}
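/* Illustrative sketch (not part of this file): before()/after() compare
 * 32-bit sequence numbers modulo 2^32 by checking the sign of the
 * unsigned difference, so the comparison stays correct across sequence
 * wraparound. A minimal standalone version:
 */
#if 0
#include <stdint.h>
#include <stdbool.h>

static bool seq_before(uint32_t seq1, uint32_t seq2)
{
	/* negative as s32 means seq1 precedes seq2 in sequence space */
	return (int32_t)(seq1 - seq2) < 0;
}

/* Example: seq1 = 0xFFFFFFF0 just before wrap, seq2 = 0x00000010 just
 * after; seq1 - seq2 = 0xFFFFFFE0 is negative as an s32, so
 * seq_before(seq1, seq2) is true, as expected.
 */
#endif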
/* Calculate mss to advertise in SYN segment.
 * RFC1122, RFC1063, draft-ietf-tcpimpl-pmtud-01 state that:
 *
 * 1. It is independent of path mtu.
 * 2. Ideally, it is maximal possible segment size i.e. 65535-40.
 * 3. For IPv4 it is reasonable to calculate it from maximal MTU of
 *    attached devices, because some buggy hosts are confused by
 *    large MTUs.
 * 4. We do not do 3; we advertise an MSS calculated from the first
 *    hop device mtu, but allow it to be raised to ip_rt_min_advmss.
 *    This may be overridden via information stored in routing table.
 * 5. Value 65535 for MSS is valid in IPv6 and means "as large as possible,
 *    probably even Jumbo".
 */
static __u16 tcp_advertise_mss(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	const struct dst_entry *dst = __sk_dst_get(sk);
	int mss = tp->advmss;

	if (dst) {
		unsigned int metric = dst_metric_advmss(dst);

		if (metric < mss) {
			mss = metric;
			tp->advmss = mss;
		}
	}

	return (__u16)mss;
}
/* RFC2861. Reset CWND after idle period longer than RTO to "restart window".
 * This is the first part of cwnd validation mechanism.
 */
static void tcp_cwnd_restart(struct sock *sk, const struct dst_entry *dst)
{
	struct tcp_sock *tp = tcp_sk(sk);
	s32 delta = tcp_time_stamp - tp->lsndtime;
	u32 restart_cwnd = tcp_init_cwnd(tp, dst);
	u32 cwnd = tp->snd_cwnd;

	tcp_ca_event(sk, CA_EVENT_CWND_RESTART);

	tp->snd_ssthresh = tcp_current_ssthresh(sk);
	restart_cwnd = min(restart_cwnd, cwnd);

	/* Halve cwnd once per RTO of idle time, but never below restart_cwnd */
	while ((delta -= inet_csk(sk)->icsk_rto) > 0 && cwnd > restart_cwnd)
		cwnd >>= 1;
	tp->snd_cwnd = max(cwnd, restart_cwnd);
	tp->snd_cwnd_stamp = tcp_time_stamp;
	tp->snd_cwnd_used = 0;
}
/* Congestion state accounting after a packet has been sent. */
static void tcp_event_data_sent(struct tcp_sock *tp,
				struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	const u32 now = tcp_time_stamp;

	if (sysctl_tcp_slow_start_after_idle &&
	    (!tp->packets_out && (s32)(now - tp->lsndtime) > icsk->icsk_rto))
		tcp_cwnd_restart(sk, __sk_dst_get(sk));

	tp->lsndtime = now;

	/* If it is a reply for ato after last received
	 * packet, enter pingpong mode.
	 */
	if ((u32)(now - icsk->icsk_ack.lrcvtime) < icsk->icsk_ack.ato)
		icsk->icsk_ack.pingpong = 1;
}
/* Account for an ACK we sent. */
static inline void tcp_event_ack_sent(struct sock *sk, unsigned int pkts)
{
	tcp_dec_quickack_mode(sk, pkts);
	inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
}
/* Determine a window scaling and initial window to offer.
 * Based on the assumption that the given amount of space
 * will be offered. Store the results in the tp structure.
 * NOTE: for smooth operation initial space offering should
 * be a multiple of mss if possible. We assume here that mss >= 1.
 * This MUST be enforced by all callers.
 */
void tcp_select_initial_window(int __space, __u32 mss,
			       __u32 *rcv_wnd, __u32 *window_clamp,
			       int wscale_ok, __u8 *rcv_wscale,
			       __u32 init_rcv_wnd)
{
	unsigned int space = (__space < 0 ? 0 : __space);

	/* If no clamp set the clamp to the max possible scaled window */
	if (*window_clamp == 0)
		(*window_clamp) = (65535 << 14);
	space = min(*window_clamp, space);

	/* Quantize space offering to a multiple of mss if possible. */
	if (space > mss)
		space = (space / mss) * mss;

	/* NOTE: offering an initial window larger than 32767
	 * will break some buggy TCP stacks. If the admin tells us
	 * it is likely we could be speaking with such a buggy stack
	 * we will truncate our initial window offering to 32K-1
	 * unless the remote has sent us a window scaling option,
	 * which we interpret as a sign the remote TCP is not
	 * misinterpreting the window field as a signed quantity.
	 */
	if (sysctl_tcp_workaround_signed_windows)
		(*rcv_wnd) = min(space, MAX_TCP_WINDOW);
	else
		(*rcv_wnd) = space;

	(*rcv_wscale) = 0;
	if (wscale_ok) {
		/* Set window scaling on max possible window
		 * See RFC1323 for an explanation of the limit to 14
		 */
		space = max_t(u32, sysctl_tcp_rmem[2], sysctl_rmem_max);
		space = min_t(u32, space, *window_clamp);
		while (space > 65535 && (*rcv_wscale) < 14) {
			space >>= 1;
			(*rcv_wscale)++;
		}
	}

	/* Set initial window to a value enough for senders starting with
	 * initial congestion window of sysctl_tcp_default_init_rwnd. Place
	 * a limit on the initial window when mss is larger than 1460.
	 */
	if (mss > (1 << *rcv_wscale)) {
		int init_cwnd = sysctl_tcp_default_init_rwnd;
		if (mss > 1460)
			init_cwnd = max_t(u32, (1460 * init_cwnd) / mss, 2);
		/* when initializing use the value from init_rcv_wnd
		 * rather than the default from above
		 */
		if (init_rcv_wnd)
			*rcv_wnd = min(*rcv_wnd, init_rcv_wnd * mss);
		else
			*rcv_wnd = min(*rcv_wnd, init_cwnd * mss);
	}

	/* Set the clamp no higher than max representable value */
	(*window_clamp) = min(65535U << (*rcv_wscale), *window_clamp);
}
EXPORT_SYMBOL(tcp_select_initial_window);
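/* Illustrative sketch (not part of this file): how the loop above picks
 * rcv_wscale. For an assumed 4 MB receive buffer the space is halved,
 * bumping the shift, until it fits in the 16-bit window field:
 * 4194304 >> 7 = 32768 <= 65535, so rcv_wscale ends up as 7.
 */
#if 0
#include <stdint.h>

static uint8_t pick_rcv_wscale(uint32_t space)
{
	uint8_t wscale = 0;

	while (space > 65535 && wscale < 14) {	/* RFC 1323 caps the shift at 14 */
		space >>= 1;
		wscale++;
	}
	return wscale;
}
#endif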
/* Choose a new window to advertise, update state in tcp_sock for the
 * socket, and return result with RFC1323 scaling applied. The return
 * value can be stuffed directly into th->window for an outgoing
 * frame.
 */
static u16 tcp_select_window(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	u32 cur_win = tcp_receive_window(tp);
	u32 new_win = __tcp_select_window(sk);

	/* Never shrink the offered window */
	if (new_win < cur_win) {
		/* Danger Will Robinson!
		 * Don't update rcv_wup/rcv_wnd here or else
		 * we will not be able to advertise a zero
		 * window in time. --DaveM
		 *
		 * Relax Will Robinson.
		 */
		new_win = ALIGN(cur_win, 1 << tp->rx_opt.rcv_wscale);
	}
	tp->rcv_wnd = new_win;
	tp->rcv_wup = tp->rcv_nxt;

	/* Make sure we do not exceed the maximum possible
	 * scaled window.
	 */
	if (!tp->rx_opt.rcv_wscale && sysctl_tcp_workaround_signed_windows)
		new_win = min(new_win, MAX_TCP_WINDOW);
	else
		new_win = min(new_win, (65535U << tp->rx_opt.rcv_wscale));

	/* RFC1323 scaling applied */
	new_win >>= tp->rx_opt.rcv_wscale;

	/* If we advertise zero window, disable fast path. */
	if (new_win == 0)
		tp->pred_flags = 0;

	return new_win;
}
/* Packet ECN state for a SYN-ACK */
static inline void TCP_ECN_send_synack(const struct tcp_sock *tp, struct sk_buff *skb)
{
	TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_CWR;
	if (!(tp->ecn_flags & TCP_ECN_OK))
		TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_ECE;
}

/* Packet ECN state for a SYN. */
static inline void TCP_ECN_send_syn(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);

	tp->ecn_flags = 0;
	if (sock_net(sk)->ipv4.sysctl_tcp_ecn == 1) {
		TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_ECE | TCPHDR_CWR;
		tp->ecn_flags = TCP_ECN_OK;
	}
}

static __inline__ void
TCP_ECN_make_synack(const struct request_sock *req, struct tcphdr *th)
{
	if (inet_rsk(req)->ecn_ok)
		th->ece = 1;
}

/* Set up ECN state for a packet on an ESTABLISHED socket that is about to
 * be sent.
 */
static inline void TCP_ECN_send(struct sock *sk, struct sk_buff *skb,
				int tcp_header_len)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (tp->ecn_flags & TCP_ECN_OK) {
		/* Not-retransmitted data segment: set ECT and inject CWR. */
		if (skb->len != tcp_header_len &&
		    !before(TCP_SKB_CB(skb)->seq, tp->snd_nxt)) {
			INET_ECN_xmit(sk);
			if (tp->ecn_flags & TCP_ECN_QUEUE_CWR) {
				tp->ecn_flags &= ~TCP_ECN_QUEUE_CWR;
				tcp_hdr(skb)->cwr = 1;
				skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;
			}
		} else {
			/* ACK or retransmitted segment: clear ECT|CE */
			INET_ECN_dontxmit(sk);
		}
		if (tp->ecn_flags & TCP_ECN_DEMAND_CWR)
			tcp_hdr(skb)->ece = 1;
	}
}
/* Constructs common control bits of non-data skb. If SYN/FIN is present,
 * auto increment end seqno.
 */
static void tcp_init_nondata_skb(struct sk_buff *skb, u32 seq, u8 flags)
{
	skb->ip_summed = CHECKSUM_PARTIAL;
	skb->csum = 0;

	TCP_SKB_CB(skb)->tcp_flags = flags;
	TCP_SKB_CB(skb)->sacked = 0;

	skb_shinfo(skb)->gso_segs = 1;
	skb_shinfo(skb)->gso_size = 0;
	skb_shinfo(skb)->gso_type = 0;

	TCP_SKB_CB(skb)->seq = seq;
	if (flags & (TCPHDR_SYN | TCPHDR_FIN))
		seq++;
	TCP_SKB_CB(skb)->end_seq = seq;
}
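/* Worked example for the SYN/FIN accounting above: a bare SYN built with
 * seq = 100 gets end_seq = 101, because SYN and FIN each consume one unit
 * of sequence space despite carrying no data; a pure ACK keeps
 * end_seq == seq.
 */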
static inline bool tcp_urg_mode(const struct tcp_sock *tp)
{
	return tp->snd_una != tp->snd_up;
}

#define OPTION_SACK_ADVERTISE	(1 << 0)
#define OPTION_TS		(1 << 1)
#define OPTION_MD5		(1 << 2)
#define OPTION_WSCALE		(1 << 3)
#define OPTION_FAST_OPEN_COOKIE	(1 << 8)

struct tcp_out_options {
	u16 options;		/* bit field of OPTION_* */
	u16 mss;		/* 0 to disable */
	u8 ws;			/* window scale, 0 to disable */
	u8 num_sack_blocks;	/* number of SACK blocks to include */
	u8 hash_size;		/* bytes in hash_location */
	__u8 *hash_location;	/* temporary pointer, overloaded */
	__u32 tsval, tsecr;	/* need to include OPTION_TS */
	struct tcp_fastopen_cookie *fastopen_cookie;	/* Fast open cookie */
};
/* Write previously computed TCP options to the packet.
 *
 * Beware: Something in the Internet is very sensitive to the ordering of
 * TCP options, we learned this the hard way, so be careful here.
 * Luckily we can at least blame others for their non-compliance but from
 * an interoperability perspective it seems that we're somewhat stuck with
 * the ordering which we have been using if we want to keep working with
 * those broken things (not that it currently hurts anybody as there isn't
 * a particular reason why the ordering would need to be changed).
 *
 * At least SACK_PERM as the first option is known to lead to a disaster
 * (but it may well be that other scenarios fail similarly).
 */
static void tcp_options_write(__be32 *ptr, struct tcp_sock *tp,
			      struct tcp_out_options *opts)
{
	u16 options = opts->options;	/* mungable copy */

	if (unlikely(OPTION_MD5 & options)) {
		*ptr++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
			       (TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
		/* overload cookie hash location */
		opts->hash_location = (__u8 *)ptr;
		ptr += 4;
	}

	if (unlikely(opts->mss)) {
		*ptr++ = htonl((TCPOPT_MSS << 24) |
			       (TCPOLEN_MSS << 16) |
			       opts->mss);
	}

	if (likely(OPTION_TS & options)) {
		if (unlikely(OPTION_SACK_ADVERTISE & options)) {
			*ptr++ = htonl((TCPOPT_SACK_PERM << 24) |
				       (TCPOLEN_SACK_PERM << 16) |
				       (TCPOPT_TIMESTAMP << 8) |
				       TCPOLEN_TIMESTAMP);
			options &= ~OPTION_SACK_ADVERTISE;
		} else {
			*ptr++ = htonl((TCPOPT_NOP << 24) |
				       (TCPOPT_NOP << 16) |
				       (TCPOPT_TIMESTAMP << 8) |
				       TCPOLEN_TIMESTAMP);
		}
		*ptr++ = htonl(opts->tsval);
		*ptr++ = htonl(opts->tsecr);
	}

	if (unlikely(OPTION_SACK_ADVERTISE & options)) {
		*ptr++ = htonl((TCPOPT_NOP << 24) |
			       (TCPOPT_NOP << 16) |
			       (TCPOPT_SACK_PERM << 8) |
			       TCPOLEN_SACK_PERM);
	}

	if (unlikely(OPTION_WSCALE & options)) {
		*ptr++ = htonl((TCPOPT_NOP << 24) |
			       (TCPOPT_WINDOW << 16) |
			       (TCPOLEN_WINDOW << 8) |
			       opts->ws);
	}

	if (unlikely(opts->num_sack_blocks)) {
		struct tcp_sack_block *sp = tp->rx_opt.dsack ?
			tp->duplicate_sack : tp->selective_acks;
		int this_sack;

		*ptr++ = htonl((TCPOPT_NOP << 24) |
			       (TCPOPT_NOP << 16) |
			       (TCPOPT_SACK << 8) |
			       (TCPOLEN_SACK_BASE + (opts->num_sack_blocks *
						     TCPOLEN_SACK_PERBLOCK)));

		for (this_sack = 0; this_sack < opts->num_sack_blocks;
		     ++this_sack) {
			*ptr++ = htonl(sp[this_sack].start_seq);
			*ptr++ = htonl(sp[this_sack].end_seq);
		}

		tp->rx_opt.dsack = 0;
	}

	if (unlikely(OPTION_FAST_OPEN_COOKIE & options)) {
		struct tcp_fastopen_cookie *foc = opts->fastopen_cookie;

		*ptr++ = htonl((TCPOPT_EXP << 24) |
			       ((TCPOLEN_EXP_FASTOPEN_BASE + foc->len) << 16) |
			       TCPOPT_FASTOPEN_MAGIC);

		memcpy(ptr, foc->val, foc->len);
		if ((foc->len & 3) == 2) {
			u8 *align = ((u8 *)ptr) + foc->len;
			align[0] = align[1] = TCPOPT_NOP;
		}
		ptr += (foc->len + 3) >> 2;
	}
}
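/* Illustrative sketch (not part of this file): each option word above
 * packs kind/length/value big-endian into a 32-bit chunk. For the MSS
 * option (kind 2, length 4) with mss = 1460 (0x05B4):
 *
 *   htonl((2 << 24) | (4 << 16) | 1460)  ->  bytes 02 04 05 B4 on the wire
 */
#if 0
#include <stdint.h>
#include <arpa/inet.h>

static uint32_t build_mss_option(uint16_t mss)
{
	return htonl((2u << 24) | (4u << 16) | mss);	/* kind, len, value */
}
#endif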
/* Compute TCP options for SYN packets. This is not the final
 * network wire format yet.
 */
static unsigned int tcp_syn_options(struct sock *sk, struct sk_buff *skb,
				    struct tcp_out_options *opts,
				    struct tcp_md5sig_key **md5)
{
	struct tcp_sock *tp = tcp_sk(sk);
	unsigned int remaining = MAX_TCP_OPTION_SPACE;
	struct tcp_fastopen_request *fastopen = tp->fastopen_req;

#ifdef CONFIG_TCP_MD5SIG
	*md5 = tp->af_specific->md5_lookup(sk, sk);
	if (*md5) {
		opts->options |= OPTION_MD5;
		remaining -= TCPOLEN_MD5SIG_ALIGNED;
	}
#else
	*md5 = NULL;
#endif

	/* We always get an MSS option. The option bytes which will be seen in
	 * normal data packets should timestamps be used, must be in the MSS
	 * advertised. But we subtract them from tp->mss_cache so that
	 * calculations in tcp_sendmsg are simpler etc. So account for this
	 * fact here if necessary. If we don't do this correctly, as a
	 * receiver we won't recognize data packets as being full sized when we
	 * should, and thus we won't abide by the delayed ACK rules correctly.
	 * SACKs don't matter, we never delay an ACK when we have any of those
	 * going out. */
	opts->mss = tcp_advertise_mss(sk);
	remaining -= TCPOLEN_MSS_ALIGNED;

	if (likely(sysctl_tcp_timestamps && *md5 == NULL)) {
		opts->options |= OPTION_TS;
		opts->tsval = TCP_SKB_CB(skb)->when + tp->tsoffset;
		opts->tsecr = tp->rx_opt.ts_recent;
		remaining -= TCPOLEN_TSTAMP_ALIGNED;
	}
	if (likely(sysctl_tcp_window_scaling)) {
		opts->ws = tp->rx_opt.rcv_wscale;
		opts->options |= OPTION_WSCALE;
		remaining -= TCPOLEN_WSCALE_ALIGNED;
	}
	if (likely(sysctl_tcp_sack)) {
		opts->options |= OPTION_SACK_ADVERTISE;
		if (unlikely(!(OPTION_TS & opts->options)))
			remaining -= TCPOLEN_SACKPERM_ALIGNED;
	}

	if (fastopen && fastopen->cookie.len >= 0) {
		u32 need = TCPOLEN_EXP_FASTOPEN_BASE + fastopen->cookie.len;
		need = (need + 3) & ~3U;	/* Align to 32 bits */
		if (remaining >= need) {
			opts->options |= OPTION_FAST_OPEN_COOKIE;
			opts->fastopen_cookie = &fastopen->cookie;
			remaining -= need;
			tp->syn_fastopen = 1;
		}
	}

	return MAX_TCP_OPTION_SPACE - remaining;
}
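/* Worked example of the accounting above for a typical SYN without MD5:
 * remaining starts at MAX_TCP_OPTION_SPACE (40 bytes); MSS takes 4,
 * timestamps 12 (aligned), window scale 4 (3 bytes + 1 NOP), and
 * SACK-permitted rides in the timestamp block for free, leaving
 * 40 - 4 - 12 - 4 = 20 bytes for a Fast Open cookie.
 */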
/* Set up TCP options for SYN-ACKs. */
static unsigned int tcp_synack_options(struct sock *sk,
				       struct request_sock *req,
				       unsigned int mss, struct sk_buff *skb,
				       struct tcp_out_options *opts,
				       struct tcp_md5sig_key **md5,
				       struct tcp_fastopen_cookie *foc)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	unsigned int remaining = MAX_TCP_OPTION_SPACE;

#ifdef CONFIG_TCP_MD5SIG
	*md5 = tcp_rsk(req)->af_specific->md5_lookup(sk, req);
	if (*md5) {
		opts->options |= OPTION_MD5;
		remaining -= TCPOLEN_MD5SIG_ALIGNED;

		/* We can't fit any SACK blocks in a packet with MD5 + TS
		 * options. There was discussion about disabling SACK
		 * rather than TS in order to fit in better with old,
		 * buggy kernels, but that was deemed to be unnecessary.
		 */
		ireq->tstamp_ok &= !ireq->sack_ok;
	}
#else
	*md5 = NULL;
#endif

	/* We always send an MSS option. */
	opts->mss = mss;
	remaining -= TCPOLEN_MSS_ALIGNED;

	if (likely(ireq->wscale_ok)) {
		opts->ws = ireq->rcv_wscale;
		opts->options |= OPTION_WSCALE;
		remaining -= TCPOLEN_WSCALE_ALIGNED;
	}
	if (likely(ireq->tstamp_ok)) {
		opts->options |= OPTION_TS;
		opts->tsval = TCP_SKB_CB(skb)->when;
		opts->tsecr = req->ts_recent;
		remaining -= TCPOLEN_TSTAMP_ALIGNED;
	}
	if (likely(ireq->sack_ok)) {
		opts->options |= OPTION_SACK_ADVERTISE;
		if (unlikely(!ireq->tstamp_ok))
			remaining -= TCPOLEN_SACKPERM_ALIGNED;
	}
	if (foc != NULL) {
		u32 need = TCPOLEN_EXP_FASTOPEN_BASE + foc->len;
		need = (need + 3) & ~3U;	/* Align to 32 bits */
		if (remaining >= need) {
			opts->options |= OPTION_FAST_OPEN_COOKIE;
			opts->fastopen_cookie = foc;
			remaining -= need;
		}
	}

	return MAX_TCP_OPTION_SPACE - remaining;
}
/* Compute TCP options for ESTABLISHED sockets. This is not the
 * final wire format yet.
 */
static unsigned int tcp_established_options(struct sock *sk, struct sk_buff *skb,
					    struct tcp_out_options *opts,
					    struct tcp_md5sig_key **md5)
{
	struct tcp_skb_cb *tcb = skb ? TCP_SKB_CB(skb) : NULL;
	struct tcp_sock *tp = tcp_sk(sk);
	unsigned int size = 0;
	unsigned int eff_sacks;

#ifdef CONFIG_TCP_MD5SIG
	*md5 = tp->af_specific->md5_lookup(sk, sk);
	if (unlikely(*md5)) {
		opts->options |= OPTION_MD5;
		size += TCPOLEN_MD5SIG_ALIGNED;
	}
#else
	*md5 = NULL;
#endif

	if (likely(tp->rx_opt.tstamp_ok)) {
		opts->options |= OPTION_TS;
		opts->tsval = tcb ? tcb->when + tp->tsoffset : 0;
		opts->tsecr = tp->rx_opt.ts_recent;
		size += TCPOLEN_TSTAMP_ALIGNED;
	}

	eff_sacks = tp->rx_opt.num_sacks + tp->rx_opt.dsack;
	if (unlikely(eff_sacks)) {
		const unsigned int remaining = MAX_TCP_OPTION_SPACE - size;
		opts->num_sack_blocks =
			min_t(unsigned int, eff_sacks,
			      (remaining - TCPOLEN_SACK_BASE_ALIGNED) /
			      TCPOLEN_SACK_PERBLOCK);
		size += TCPOLEN_SACK_BASE_ALIGNED +
			opts->num_sack_blocks * TCPOLEN_SACK_PERBLOCK;
	}

	return size;
}
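/* Worked example of the SACK sizing above: with timestamps in use,
 * size = 12 so remaining = 40 - 12 = 28 bytes; after the 4-byte aligned
 * SACK base, (28 - 4) / 8 = 3 blocks of TCPOLEN_SACK_PERBLOCK (8) fit.
 * Without timestamps, (40 - 4) / 8 allows 4 blocks.
 */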
/* TCP SMALL QUEUES (TSQ)
 *
 * TSQ goal is to keep a small amount of skbs per tcp flow in tx queues (qdisc+dev)
 * to reduce RTT and bufferbloat.
 * We do this using a special skb destructor (tcp_wfree).
 *
 * It's important that tcp_wfree() can be replaced by sock_wfree() in the
 * event skb needs to be reallocated in a driver.
 * The invariant being skb->truesize subtracted from sk->sk_wmem_alloc
 *
 * Since transmit from skb destructor is forbidden, we use a tasklet
 * to process all sockets that eventually need to send more skbs.
 * We use one tasklet per cpu, with its own queue of sockets.
 */
struct tsq_tasklet {
	struct tasklet_struct	tasklet;
	struct list_head	head; /* queue of tcp sockets */
};
static DEFINE_PER_CPU(struct tsq_tasklet, tsq_tasklet);
static void tcp_tsq_handler(struct sock *sk)
{
	if ((1 << sk->sk_state) &
	    (TCPF_ESTABLISHED | TCPF_FIN_WAIT1 | TCPF_CLOSING |
	     TCPF_CLOSE_WAIT | TCPF_LAST_ACK))
		tcp_write_xmit(sk, tcp_current_mss(sk), tcp_sk(sk)->nonagle,
			       0, GFP_ATOMIC);
}

/*
 * One tasklet per cpu tries to send more skbs.
 * We run in tasklet context but need to disable irqs when
 * transferring tsq->head because tcp_wfree() might
 * interrupt us (non NAPI drivers)
 */
static void tcp_tasklet_func(unsigned long data)
{
	struct tsq_tasklet *tsq = (struct tsq_tasklet *)data;
	LIST_HEAD(list);
	unsigned long flags;
	struct list_head *q, *n;
	struct tcp_sock *tp;
	struct sock *sk;

	local_irq_save(flags);
	list_splice_init(&tsq->head, &list);
	local_irq_restore(flags);

	list_for_each_safe(q, n, &list) {
		tp = list_entry(q, struct tcp_sock, tsq_node);
		list_del(&tp->tsq_node);

		sk = (struct sock *)tp;
		bh_lock_sock(sk);

		if (!sock_owned_by_user(sk)) {
			tcp_tsq_handler(sk);
		} else {
			/* defer the work to tcp_release_cb() */
			set_bit(TCP_TSQ_DEFERRED, &tp->tsq_flags);
		}
		bh_unlock_sock(sk);

		clear_bit(TSQ_QUEUED, &tp->tsq_flags);

		sk_free(sk);
	}
}
#define TCP_DEFERRED_ALL ((1UL << TCP_TSQ_DEFERRED) |		\
			  (1UL << TCP_WRITE_TIMER_DEFERRED) |	\
			  (1UL << TCP_DELACK_TIMER_DEFERRED) |	\
			  (1UL << TCP_MTU_REDUCED_DEFERRED))

/**
 * tcp_release_cb - tcp release_sock() callback
 * @sk: socket
 *
 * called from release_sock() to perform protocol dependent
 * actions before socket release.
 */
void tcp_release_cb(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	unsigned long flags, nflags;

	/* perform an atomic operation only if at least one flag is set */
	do {
		flags = tp->tsq_flags;
		if (!(flags & TCP_DEFERRED_ALL))
			return;
		nflags = flags & ~TCP_DEFERRED_ALL;
	} while (cmpxchg(&tp->tsq_flags, flags, nflags) != flags);
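	/* Worked example of the loop above: suppose tsq_flags has
	 * TCP_TSQ_DEFERRED and TCP_DELACK_TIMER_DEFERRED set. nflags keeps
	 * only the non-deferred bits; if another context sets a new bit
	 * between the read and the cmpxchg(), the exchange fails, flags is
	 * re-read, and we retry, so no deferred bit is ever lost or
	 * handled twice.
	 */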
	if (flags & (1UL << TCP_TSQ_DEFERRED))
		tcp_tsq_handler(sk);

	/* Here begins the tricky part :
	 * We are called from release_sock() with :
	 * 1) BH disabled
	 * 2) sk_lock.slock spinlock held
	 * 3) socket owned by us (sk->sk_lock.owned == 1)
	 *
	 * But following code is meant to be called from BH handlers,
	 * so we should keep BH disabled, but early release socket ownership
	 */
	sock_release_ownership(sk);

	if (flags & (1UL << TCP_WRITE_TIMER_DEFERRED)) {
		tcp_write_timer_handler(sk);
		__sock_put(sk);
	}
	if (flags & (1UL << TCP_DELACK_TIMER_DEFERRED)) {
		tcp_delack_timer_handler(sk);
		__sock_put(sk);
	}
	if (flags & (1UL << TCP_MTU_REDUCED_DEFERRED)) {
		sk->sk_prot->mtu_reduced(sk);
		__sock_put(sk);
	}
}
EXPORT_SYMBOL(tcp_release_cb);
void __init tcp_tasklet_init(void)
{
	int i;

	for_each_possible_cpu(i) {
		struct tsq_tasklet *tsq = &per_cpu(tsq_tasklet, i);

		INIT_LIST_HEAD(&tsq->head);
		tasklet_init(&tsq->tasklet,
			     tcp_tasklet_func,
			     (unsigned long)tsq);
	}
}

/*
 * Write buffer destructor automatically called from kfree_skb.
 * We can't xmit new skbs from this context, as we might already
 * hold qdisc lock.
 */
void tcp_wfree(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;
	struct tcp_sock *tp = tcp_sk(sk);

	if (test_and_clear_bit(TSQ_THROTTLED, &tp->tsq_flags) &&
	    !test_and_set_bit(TSQ_QUEUED, &tp->tsq_flags)) {
		unsigned long flags;
		struct tsq_tasklet *tsq;

		/* Keep a ref on socket.
		 * This last ref will be released in tcp_tasklet_func()
		 */
		atomic_sub(skb->truesize - 1, &sk->sk_wmem_alloc);

		/* queue this socket to tasklet queue */
		local_irq_save(flags);
		tsq = &__get_cpu_var(tsq_tasklet);
		list_add(&tp->tsq_node, &tsq->head);
		tasklet_schedule(&tsq->tasklet);
		local_irq_restore(flags);
		return;
	}
	sock_wfree(skb);
}
/* This routine actually transmits TCP packets queued in by
 * tcp_do_sendmsg().  This is used by both the initial
 * transmission and possible later retransmissions.
 * All SKB's seen here are completely headerless. It is our
 * job to build the TCP header, and pass the packet down to
 * IP so it can do the same plus pass the packet off to the
 * device.
 *
 * We are working here with either a clone of the original
 * SKB, or a fresh unique copy made by the retransmit engine.
 */
static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
			    gfp_t gfp_mask)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	struct inet_sock *inet;
	struct tcp_sock *tp;
	struct tcp_skb_cb *tcb;
	struct tcp_out_options opts;
	unsigned int tcp_options_size, tcp_header_size;
	struct tcp_md5sig_key *md5;
	struct tcphdr *th;
	int err;

	BUG_ON(!skb || !tcp_skb_pcount(skb));

	/* If congestion control is doing timestamping, we must
	 * take such a timestamp before we potentially clone/copy.
	 */
	if (icsk->icsk_ca_ops->flags & TCP_CONG_RTT_STAMP)
		__net_timestamp(skb);

	if (likely(clone_it)) {
		const struct sk_buff *fclone = skb + 1;

		if (unlikely(skb->fclone == SKB_FCLONE_ORIG &&
			     fclone->fclone == SKB_FCLONE_CLONE))
			NET_INC_STATS_BH(sock_net(sk),
					 LINUX_MIB_TCPSPURIOUSRTXHOSTQUEUES);

		if (unlikely(skb_cloned(skb)))
			skb = pskb_copy(skb, gfp_mask);
		else
			skb = skb_clone(skb, gfp_mask);
		if (unlikely(!skb))
			return -ENOBUFS;
	}

	inet = inet_sk(sk);
	tp = tcp_sk(sk);
	tcb = TCP_SKB_CB(skb);
	memset(&opts, 0, sizeof(opts));

	if (unlikely(tcb->tcp_flags & TCPHDR_SYN))
		tcp_options_size = tcp_syn_options(sk, skb, &opts, &md5);
	else
		tcp_options_size = tcp_established_options(sk, skb, &opts,
							   &md5);
	tcp_header_size = tcp_options_size + sizeof(struct tcphdr);

	if (tcp_packets_in_flight(tp) == 0)
		tcp_ca_event(sk, CA_EVENT_TX_START);

	/* if no packet is in qdisc/device queue, then allow XPS to select
	 * another queue.
	 */
	skb->ooo_okay = sk_wmem_alloc_get(sk) == 0;

	skb_push(skb, tcp_header_size);
	skb_reset_transport_header(skb);

	skb_orphan(skb);
	skb->sk = sk;
	skb->destructor = tcp_wfree;
	atomic_add(skb->truesize, &sk->sk_wmem_alloc);

	/* Build TCP header and checksum it. */
	th = tcp_hdr(skb);
	th->source		= inet->inet_sport;
	th->dest		= inet->inet_dport;
	th->seq			= htonl(tcb->seq);
	th->ack_seq		= htonl(tp->rcv_nxt);
	*(((__be16 *)th) + 6)	= htons(((tcp_header_size >> 2) << 12) |
					tcb->tcp_flags);

	if (unlikely(tcb->tcp_flags & TCPHDR_SYN)) {
		/* RFC1323: The window in SYN & SYN/ACK segments
		 * is never scaled.
		 */
		th->window	= htons(min(tp->rcv_wnd, 65535U));
	} else {
		th->window	= htons(tcp_select_window(sk));
	}
	th->check		= 0;
	th->urg_ptr		= 0;

	/* The urg_mode check is necessary during a below snd_una win probe */
	if (unlikely(tcp_urg_mode(tp) && before(tcb->seq, tp->snd_up))) {
		if (before(tp->snd_up, tcb->seq + 0x10000)) {
			th->urg_ptr = htons(tp->snd_up - tcb->seq);
			th->urg = 1;
		} else if (after(tcb->seq + 0xFFFF, tp->snd_nxt)) {
			th->urg_ptr = htons(0xFFFF);
			th->urg = 1;
		}
	}

	tcp_options_write((__be32 *)(th + 1), tp, &opts);
	if (likely((tcb->tcp_flags & TCPHDR_SYN) == 0))
		TCP_ECN_send(sk, skb, tcp_header_size);

#ifdef CONFIG_TCP_MD5SIG
	/* Calculate the MD5 hash, as we have all we need now */
	if (md5) {
		sk_nocaps_add(sk, NETIF_F_GSO_MASK);
		tp->af_specific->calc_md5_hash(opts.hash_location,
					       md5, sk, NULL, skb);
	}
#endif

	icsk->icsk_af_ops->send_check(sk, skb);

	if (likely(tcb->tcp_flags & TCPHDR_ACK))
		tcp_event_ack_sent(sk, tcp_skb_pcount(skb));

	if (skb->len != tcp_header_size)
		tcp_event_data_sent(tp, sk);

	if (after(tcb->end_seq, tp->snd_nxt) || tcb->seq == tcb->end_seq)
		TCP_ADD_STATS(sock_net(sk), TCP_MIB_OUTSEGS,
			      tcp_skb_pcount(skb));

	err = icsk->icsk_af_ops->queue_xmit(skb, &inet->cork.fl);
	if (likely(err <= 0))
		return err;

	tcp_enter_cwr(sk, 1);

	return net_xmit_eval(err);
}
/* This routine just queues the buffer for sending.
 *
 * NOTE: probe0 timer is not checked, do not forget tcp_push_pending_frames,
 * otherwise socket can stall.
 */
static void tcp_queue_skb(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);

	/* Advance write_seq and place onto the write_queue. */
	tp->write_seq = TCP_SKB_CB(skb)->end_seq;
	skb_header_release(skb);
	tcp_add_write_queue_tail(sk, skb);
	sk->sk_wmem_queued += skb->truesize;
	sk_mem_charge(sk, skb->truesize);
}
/* Initialize TSO segments for a packet. */
static void tcp_set_skb_tso_segs(const struct sock *sk, struct sk_buff *skb,
				 unsigned int mss_now)
{
	/* Make sure we own this skb before messing gso_size/gso_segs */
	WARN_ON_ONCE(skb_cloned(skb));

	if (skb->len <= mss_now || !sk_can_gso(sk) ||
	    skb->ip_summed == CHECKSUM_NONE) {
		/* Avoid the costly divide in the normal
		 * non-TSO case.
		 */
		skb_shinfo(skb)->gso_segs = 1;
		skb_shinfo(skb)->gso_size = 0;
		skb_shinfo(skb)->gso_type = 0;
	} else {
		skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss_now);
		skb_shinfo(skb)->gso_size = mss_now;
		skb_shinfo(skb)->gso_type = sk->sk_gso_type;
	}
}
/* When a modification to fackets out becomes necessary, we need to check
 * skb is counted to fackets_out or not.
 */
static void tcp_adjust_fackets_out(struct sock *sk, const struct sk_buff *skb,
				   int decr)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (!tp->sacked_out || tcp_is_reno(tp))
		return;

	if (after(tcp_highest_sack_seq(tp), TCP_SKB_CB(skb)->seq))
		tp->fackets_out -= decr;
}

/* Pcount in the middle of the write queue got changed, we need to do various
 * tweaks to fix counters
 */
static void tcp_adjust_pcount(struct sock *sk, const struct sk_buff *skb, int decr)
{
	struct tcp_sock *tp = tcp_sk(sk);

	tp->packets_out -= decr;

	if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)
		tp->sacked_out -= decr;
	if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS)
		tp->retrans_out -= decr;
	if (TCP_SKB_CB(skb)->sacked & TCPCB_LOST)
		tp->lost_out -= decr;

	/* Reno case is special. Sigh... */
	if (tcp_is_reno(tp) && decr > 0)
		tp->sacked_out -= min_t(u32, tp->sacked_out, decr);

	tcp_adjust_fackets_out(sk, skb, decr);

	if (tp->lost_skb_hint &&
	    before(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(tp->lost_skb_hint)->seq) &&
	    (tcp_is_fack(tp) || (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)))
		tp->lost_cnt_hint -= decr;

	tcp_verify_left_out(tp);
}
/* Function to create two new TCP segments. Shrinks the given segment
 * to the specified size and appends a new segment with the rest of the
 * packet to the list. This won't be called frequently, I hope.
 * Remember, these are still headerless SKBs at this point.
 */
int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len,
		 unsigned int mss_now)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *buff;
	int nsize, old_factor;
	int nlen;
	u8 flags;

	if (WARN_ON(len > skb->len))
		return -EINVAL;

	nsize = skb_headlen(skb) - len;
	if (nsize < 0)
		nsize = 0;

	if (skb_unclone(skb, GFP_ATOMIC))
		return -ENOMEM;

	/* Get a new skb... force flag on. */
	buff = sk_stream_alloc_skb(sk, nsize, GFP_ATOMIC);
	if (buff == NULL)
		return -ENOMEM; /* We'll just try again later. */

	sk->sk_wmem_queued += buff->truesize;
	sk_mem_charge(sk, buff->truesize);
	nlen = skb->len - len - nsize;
	buff->truesize += nlen;
	skb->truesize -= nlen;

	/* Correct the sequence numbers. */
	TCP_SKB_CB(buff)->seq = TCP_SKB_CB(skb)->seq + len;
	TCP_SKB_CB(buff)->end_seq = TCP_SKB_CB(skb)->end_seq;
	TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq;

	/* PSH and FIN should only be set in the second packet. */
	flags = TCP_SKB_CB(skb)->tcp_flags;
	TCP_SKB_CB(skb)->tcp_flags = flags & ~(TCPHDR_FIN | TCPHDR_PSH);
	TCP_SKB_CB(buff)->tcp_flags = flags;
	TCP_SKB_CB(buff)->sacked = TCP_SKB_CB(skb)->sacked;

	if (!skb_shinfo(skb)->nr_frags && skb->ip_summed != CHECKSUM_PARTIAL) {
		/* Copy and checksum data tail into the new buffer. */
		buff->csum = csum_partial_copy_nocheck(skb->data + len,
						       skb_put(buff, nsize),
						       nsize, 0);

		skb_trim(skb, len);

		skb->csum = csum_block_sub(skb->csum, buff->csum, len);
	} else {
		skb->ip_summed = CHECKSUM_PARTIAL;
		skb_split(skb, buff, len);
	}

	buff->ip_summed = skb->ip_summed;

	/* Looks stupid, but our code really uses the 'when' of
	 * skbs, which it never sent before. --ANK
	 */
	TCP_SKB_CB(buff)->when = TCP_SKB_CB(skb)->when;
	buff->tstamp = skb->tstamp;

	old_factor = tcp_skb_pcount(skb);

	/* Fix up tso_factor for both original and new SKB. */
	tcp_set_skb_tso_segs(sk, skb, mss_now);
	tcp_set_skb_tso_segs(sk, buff, mss_now);

	/* If this packet has been sent out already, we must
	 * adjust the various packet counters.
	 */
	if (!before(tp->snd_nxt, TCP_SKB_CB(buff)->end_seq)) {
		int diff = old_factor - tcp_skb_pcount(skb) -
			tcp_skb_pcount(buff);

		if (diff)
			tcp_adjust_pcount(sk, skb, diff);
	}

	/* Link BUFF into the send queue. */
	skb_header_release(buff);
	tcp_insert_write_queue_after(skb, buff, sk);

	return 0;
}
/* This is similar to __pskb_pull_head() (it will go to core/skbuff.c
 * eventually). The difference is that pulled data is not copied, but
 * immediately discarded.
 */
static void __pskb_trim_head(struct sk_buff *skb, int len)
{
	int i, k, eat;

	eat = min_t(int, len, skb_headlen(skb));
	if (eat) {
		__skb_pull(skb, eat);
		len -= eat;
		if (!len)
			return;
	}
	eat = len;
	k = 0;
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int size = skb_frag_size(&skb_shinfo(skb)->frags[i]);

		if (size <= eat) {
			skb_frag_unref(skb, i);
			eat -= size;
		} else {
			skb_shinfo(skb)->frags[k] = skb_shinfo(skb)->frags[i];
			if (eat) {
				skb_shinfo(skb)->frags[k].page_offset += eat;
				skb_frag_size_sub(&skb_shinfo(skb)->frags[k], eat);
				eat = 0;
			}
			k++;
		}
	}
	skb_shinfo(skb)->nr_frags = k;

	skb_reset_tail_pointer(skb);
	skb->data_len -= len;
	skb->len = skb->data_len;
}
/* Remove acked data from a packet in the transmit queue. */
int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len)
{
	if (skb_unclone(skb, GFP_ATOMIC))
		return -ENOMEM;

	__pskb_trim_head(skb, len);

	TCP_SKB_CB(skb)->seq += len;
	skb->ip_summed = CHECKSUM_PARTIAL;

	skb->truesize -= len;
	sk->sk_wmem_queued -= len;
	sk_mem_uncharge(sk, len);
	sock_set_flag(sk, SOCK_QUEUE_SHRUNK);

	/* Any change of skb->len requires recalculation of tso factor. */
	if (tcp_skb_pcount(skb) > 1)
		tcp_set_skb_tso_segs(sk, skb, tcp_skb_mss(skb));

	return 0;
}
/* Calculate MSS not accounting any TCP options. */
static inline int __tcp_mtu_to_mss(struct sock *sk, int pmtu)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	const struct inet_connection_sock *icsk = inet_csk(sk);
	int mss_now;

	/* Calculate base mss without TCP options:
	   It is MMS_S - sizeof(tcphdr) of rfc1122
	 */
	mss_now = pmtu - icsk->icsk_af_ops->net_header_len - sizeof(struct tcphdr);

	/* IPv6 adds a frag_hdr in case RTAX_FEATURE_ALLFRAG is set */
	if (icsk->icsk_af_ops->net_frag_header_len) {
		const struct dst_entry *dst = __sk_dst_get(sk);

		if (dst && dst_allfrag(dst))
			mss_now -= icsk->icsk_af_ops->net_frag_header_len;
	}

	/* Clamp it (mss_clamp does not include tcp options) */
	if (mss_now > tp->rx_opt.mss_clamp)
		mss_now = tp->rx_opt.mss_clamp;

	/* Now subtract optional transport overhead */
	mss_now -= icsk->icsk_ext_hdr_len;

	/* Then reserve room for full set of TCP options and 8 bytes of data */
	if (mss_now < 48)
		mss_now = 48;
	return mss_now;
}

/* Calculate MSS. Not accounting for SACKs here. */
int tcp_mtu_to_mss(struct sock *sk, int pmtu)
{
	/* Subtract TCP options size, not including SACKs */
	return __tcp_mtu_to_mss(sk, pmtu) -
	       (tcp_sk(sk)->tcp_header_len - sizeof(struct tcphdr));
}
/* Inverse of above */
int tcp_mss_to_mtu(struct sock *sk, int mss)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	const struct inet_connection_sock *icsk = inet_csk(sk);
	int mtu;

	mtu = mss +
	      tp->tcp_header_len +
	      icsk->icsk_ext_hdr_len +
	      icsk->icsk_af_ops->net_header_len;

	/* IPv6 adds a frag_hdr in case RTAX_FEATURE_ALLFRAG is set */
	if (icsk->icsk_af_ops->net_frag_header_len) {
		const struct dst_entry *dst = __sk_dst_get(sk);

		if (dst && dst_allfrag(dst))
			mtu += icsk->icsk_af_ops->net_frag_header_len;
	}
	return mtu;
}
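/* Worked example for the two conversions above (IPv4, no extension
 * headers): __tcp_mtu_to_mss(1500) = 1500 - 20 (IP) - 20 (TCP) = 1460,
 * and tcp_mss_to_mtu(1460) adds the same headers back to return 1500.
 * With timestamps negotiated, tcp_mtu_to_mss() also subtracts the 12
 * option bytes, leaving 1448 bytes of payload per segment.
 */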
/* MTU probing init per socket */
void tcp_mtup_init(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);

	icsk->icsk_mtup.enabled = sysctl_tcp_mtu_probing > 1;
	icsk->icsk_mtup.search_high = tp->rx_opt.mss_clamp + sizeof(struct tcphdr) +
				      icsk->icsk_af_ops->net_header_len;
	icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, sysctl_tcp_base_mss);
	icsk->icsk_mtup.probe_size = 0;
}
EXPORT_SYMBOL(tcp_mtup_init);
/* This function synchronizes snd mss to current pmtu/exthdr set.

   tp->rx_opt.user_mss is mss set by user by TCP_MAXSEG. It does NOT
   account for TCP options, but includes only bare TCP header.

   tp->rx_opt.mss_clamp is mss negotiated at connection setup.
   It is minimum of user_mss and mss received with SYN.
   It also does not include TCP options.

   inet_csk(sk)->icsk_pmtu_cookie is the last pmtu seen by this function.

   tp->mss_cache is current effective sending mss, including
   all tcp options except for SACKs. It is evaluated,
   taking into account current pmtu, but never exceeds
   tp->rx_opt.mss_clamp.

   NOTE1. rfc1122 clearly states that advertised MSS
   DOES NOT include either tcp or ip options.

   NOTE2. inet_csk(sk)->icsk_pmtu_cookie and tp->mss_cache
   are READ ONLY outside this function. --ANK (980731)
 */
unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	int mss_now;

	if (icsk->icsk_mtup.search_high > pmtu)
		icsk->icsk_mtup.search_high = pmtu;

	mss_now = tcp_mtu_to_mss(sk, pmtu);
	mss_now = tcp_bound_to_half_wnd(tp, mss_now);

	/* And store cached results */
	icsk->icsk_pmtu_cookie = pmtu;
	if (icsk->icsk_mtup.enabled)
		mss_now = min(mss_now, tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_low));
	tp->mss_cache = mss_now;

	return mss_now;
}
EXPORT_SYMBOL(tcp_sync_mss);
/* Compute the current effective MSS, taking SACKs and IP options,
 * and even PMTU discovery events into account.
 */
unsigned int tcp_current_mss(struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	const struct dst_entry *dst = __sk_dst_get(sk);
	u32 mss_now;
	unsigned int header_len;
	struct tcp_out_options opts;
	struct tcp_md5sig_key *md5;

	mss_now = tp->mss_cache;

	if (dst) {
		u32 mtu = dst_mtu(dst);
		if (mtu != inet_csk(sk)->icsk_pmtu_cookie)
			mss_now = tcp_sync_mss(sk, mtu);
	}

	header_len = tcp_established_options(sk, NULL, &opts, &md5) +
		     sizeof(struct tcphdr);
	/* The mss_cache is sized based on tp->tcp_header_len, which assumes
	 * some common options. If this is an odd packet (because we have SACK
	 * blocks etc) then our calculated header_len will be different, and
	 * we have to adjust mss_now correspondingly */
	if (header_len != tp->tcp_header_len) {
		int delta = (int) header_len - tp->tcp_header_len;
		mss_now -= delta;
	}

	return mss_now;
}
/* Congestion window validation. (RFC2861) */
static void tcp_cwnd_validate(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (tp->packets_out >= tp->snd_cwnd) {
		/* Network is fed fully. */
		tp->snd_cwnd_used = 0;
		tp->snd_cwnd_stamp = tcp_time_stamp;
	} else {
		/* Network starves. */
		if (tp->packets_out > tp->snd_cwnd_used)
			tp->snd_cwnd_used = tp->packets_out;

		if (sysctl_tcp_slow_start_after_idle &&
		    (s32)(tcp_time_stamp - tp->snd_cwnd_stamp) >= inet_csk(sk)->icsk_rto)
			tcp_cwnd_application_limited(sk);
	}
}
/* Returns the portion of skb which can be sent right away without
 * introducing MSS oddities to segment boundaries. In rare cases where
 * mss_now != mss_cache, we will request caller to create a small skb
 * per input skb which could be mostly avoided here (if desired).
 *
 * We explicitly want to create a request for splitting write queue tail
 * to a small skb for Nagle purposes while avoiding unnecessary modulos,
 * thus all the complexity (cwnd_len is always MSS multiple which we
 * return whenever allowed by the other factors). Basically we need the
 * modulo only when the receiver window alone is the limiting factor or
 * when we would be allowed to send the split-due-to-Nagle skb fully.
 */
static unsigned int tcp_mss_split_point(const struct sock *sk, const struct sk_buff *skb,
					unsigned int mss_now, unsigned int max_segs)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	u32 needed, window, max_len;

	window = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq;
	max_len = mss_now * max_segs;

	if (likely(max_len <= window && skb != tcp_write_queue_tail(sk)))
		return max_len;

	needed = min(skb->len, window);

	if (max_len <= needed)
		return max_len;

	return needed - needed % mss_now;
}
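/* Worked example of the split point above: with mss_now = 1448 and a
 * 10000-byte window as the limiting factor, needed = 10000 and the
 * function returns 10000 - (10000 % 1448) = 8688, exactly six full
 * segments, so the receiver window never slices a segment mid-MSS.
 */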
/* Can at least one segment of SKB be sent right now, according to the
 * congestion window rules? If so, return how many segments are allowed.
 */
static inline unsigned int tcp_cwnd_test(const struct tcp_sock *tp,
					 const struct sk_buff *skb)
{
	u32 in_flight, cwnd;

	/* Don't be strict about the congestion window for the final FIN. */
	if ((TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) &&
	    tcp_skb_pcount(skb) == 1)
		return 1;

	in_flight = tcp_packets_in_flight(tp);
	cwnd = tp->snd_cwnd;
	if (in_flight < cwnd)
		return (cwnd - in_flight);

	return 0;
}
/* Initialize TSO state of a skb.
 * This must be invoked the first time we consider transmitting
 * SKB onto the wire.
 */
static int tcp_init_tso_segs(const struct sock *sk, struct sk_buff *skb,
			     unsigned int mss_now)
{
	int tso_segs = tcp_skb_pcount(skb);

	if (!tso_segs || (tso_segs > 1 && tcp_skb_mss(skb) != mss_now)) {
		tcp_set_skb_tso_segs(sk, skb, mss_now);
		tso_segs = tcp_skb_pcount(skb);
	}
	return tso_segs;
}
/* Minshall's variant of the Nagle send check. */
static inline bool tcp_minshall_check(const struct tcp_sock *tp)
{
	return after(tp->snd_sml, tp->snd_una) &&
		!after(tp->snd_sml, tp->snd_nxt);
}

/* Return false, if packet can be sent now without violating Nagle's rules:
 * 1. It is full sized.
 * 2. Or it contains FIN. (already checked by caller)
 * 3. Or TCP_CORK is not set, and TCP_NODELAY is set.
 * 4. Or TCP_CORK is not set, and all sent packets are ACKed.
 *    With Minshall's modification: all sent small packets are ACKed.
 */
static inline bool tcp_nagle_check(const struct tcp_sock *tp,
				   const struct sk_buff *skb,
				   unsigned int mss_now, int nonagle)
{
	return skb->len < mss_now &&
		((nonagle & TCP_NAGLE_CORK) ||
		 (!nonagle && tp->packets_out && tcp_minshall_check(tp)));
}
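/* Example of the check above: a 100-byte tail skb with TCP_CORK set, or
 * with a small sent-but-unacked segment still in flight (Minshall's
 * rule), returns true and is held back; a full-sized segment always
 * returns false and may go out immediately.
 */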
/* Return true if the Nagle test allows this packet to be
 * sent now.
 */
static inline bool tcp_nagle_test(const struct tcp_sock *tp, const struct sk_buff *skb,
				  unsigned int cur_mss, int nonagle)
{
	/* Nagle rule does not apply to frames, which sit in the middle of the
	 * write_queue (they have no chances to get new data).
	 *
	 * This is implemented in the callers, where they modify the 'nonagle'
	 * argument based upon the location of SKB in the send queue.
	 */
	if (nonagle & TCP_NAGLE_PUSH)
		return true;

	/* Don't use the nagle rule for urgent data (or for the final FIN). */
	if (tcp_urg_mode(tp) || (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN))
		return true;

	if (!tcp_nagle_check(tp, skb, cur_mss, nonagle))
		return true;

	return false;
}

/* Does at least the first segment of SKB fit into the send window? */
static bool tcp_snd_wnd_test(const struct tcp_sock *tp,
			     const struct sk_buff *skb,
			     unsigned int cur_mss)
{
	u32 end_seq = TCP_SKB_CB(skb)->end_seq;

	if (skb->len > cur_mss)
		end_seq = TCP_SKB_CB(skb)->seq + cur_mss;

	return !after(end_seq, tcp_wnd_end(tp));
}
/* This checks if the data bearing packet SKB (usually tcp_send_head(sk))
 * should be put on the wire right now. If so, it returns the number of
 * packets allowed by the congestion window.
 */
static unsigned int tcp_snd_test(const struct sock *sk, struct sk_buff *skb,
				 unsigned int cur_mss, int nonagle)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	unsigned int cwnd_quota;

	tcp_init_tso_segs(sk, skb, cur_mss);

	if (!tcp_nagle_test(tp, skb, cur_mss, nonagle))
		return 0;

	cwnd_quota = tcp_cwnd_test(tp, skb);
	if (cwnd_quota && !tcp_snd_wnd_test(tp, skb, cur_mss))
		cwnd_quota = 0;

	return cwnd_quota;
}

/* Test if sending is allowed right now. */
bool tcp_may_send_now(struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *skb = tcp_send_head(sk);

	return skb &&
		tcp_snd_test(sk, skb, tcp_current_mss(sk),
			     (tcp_skb_is_last(sk, skb) ?
			      tp->nonagle : TCP_NAGLE_PUSH));
}
/* Trim TSO SKB to LEN bytes, put the remaining data into a new packet
 * which is put after SKB on the list. It is very much like
 * tcp_fragment() except that it may make several kinds of assumptions
 * in order to speed up the splitting operation. In particular, we
 * know that all the data is in scatter-gather pages, and that the
 * packet has never been sent out before (and thus is not cloned).
 */
static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len,
			unsigned int mss_now, gfp_t gfp)
{
	struct sk_buff *buff;
	int nlen = skb->len - len;
	u8 flags;

	/* All of a TSO frame must be composed of paged data. */
	if (skb->len != skb->data_len)
		return tcp_fragment(sk, skb, len, mss_now);

	buff = sk_stream_alloc_skb(sk, 0, gfp);
	if (unlikely(buff == NULL))
		return -ENOMEM;

	sk->sk_wmem_queued += buff->truesize;
	sk_mem_charge(sk, buff->truesize);
	buff->truesize += nlen;
	skb->truesize -= nlen;

	/* Correct the sequence numbers. */
	TCP_SKB_CB(buff)->seq = TCP_SKB_CB(skb)->seq + len;
	TCP_SKB_CB(buff)->end_seq = TCP_SKB_CB(skb)->end_seq;
	TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq;

	/* PSH and FIN should only be set in the second packet. */
	flags = TCP_SKB_CB(skb)->tcp_flags;
	TCP_SKB_CB(skb)->tcp_flags = flags & ~(TCPHDR_FIN | TCPHDR_PSH);
	TCP_SKB_CB(buff)->tcp_flags = flags;

	/* This packet was never sent out yet, so no SACK bits. */
	TCP_SKB_CB(buff)->sacked = 0;

	buff->ip_summed = skb->ip_summed = CHECKSUM_PARTIAL;
	skb_split(skb, buff, len);

	/* Fix up tso_factor for both original and new SKB. */
	tcp_set_skb_tso_segs(sk, skb, mss_now);
	tcp_set_skb_tso_segs(sk, buff, mss_now);

	/* Link BUFF into the send queue. */
	skb_header_release(buff);
	tcp_insert_write_queue_after(skb, buff, sk);

	return 0;
}
/* Try to defer sending, if possible, in order to minimize the amount
 * of TSO splitting we do. View it as a kind of TSO Nagle test.
 *
 * This algorithm is from John Heffner.
 */
static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);
	const struct inet_connection_sock *icsk = inet_csk(sk);
	u32 send_win, cong_win, limit, in_flight;
	int win_divisor;

	if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
		goto send_now;

	if (icsk->icsk_ca_state != TCP_CA_Open)
		goto send_now;

	/* Defer for less than two clock ticks. */
	if (tp->tso_deferred &&
	    (((u32)jiffies << 1) >> 1) - (tp->tso_deferred >> 1) > 1)
		goto send_now;

	in_flight = tcp_packets_in_flight(tp);

	BUG_ON(tcp_skb_pcount(skb) <= 1 || (tp->snd_cwnd <= in_flight));

	send_win = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq;

	/* From in_flight test above, we know that cwnd > in_flight. */
	cong_win = (tp->snd_cwnd - in_flight) * tp->mss_cache;

	limit = min(send_win, cong_win);

	/* If a full-sized TSO skb can be sent, do it. */
	if (limit >= min_t(unsigned int, sk->sk_gso_max_size,
			   tp->xmit_size_goal_segs * tp->mss_cache))
		goto send_now;

	/* Middle in queue won't get any more data, full sendable already? */
	if ((skb != tcp_write_queue_tail(sk)) && (limit >= skb->len))
		goto send_now;

	win_divisor = ACCESS_ONCE(sysctl_tcp_tso_win_divisor);
	if (win_divisor) {
		u32 chunk = min(tp->snd_wnd, tp->snd_cwnd * tp->mss_cache);

		/* If at least some fraction of a window is available,
		 * just use it.
		 */
		chunk /= win_divisor;
		if (limit >= chunk)
			goto send_now;
	} else {
		/* Different approach, try not to defer past a single
		 * ACK. Receiver should ACK every other full sized
		 * frame, so if we have space for more than 3 frames
		 * then send now.
		 */
		if (limit > tcp_max_tso_deferred_mss(tp) * tp->mss_cache)
			goto send_now;
	}

	/* Ok, it looks like it is advisable to defer.
	 * Do not rearm the timer if already set to not break TCP ACK clocking.
	 */
	if (!tp->tso_deferred)
		tp->tso_deferred = 1 | (jiffies << 1);

	return true;

send_now:
	tp->tso_deferred = 0;
	return false;
}
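/* Worked example of the tso_deferred encoding above: the timestamp is
 * stored as (jiffies << 1) | 1 so the value can never be 0, letting
 * tso_deferred == 0 mean "not deferring". The age test shifts both
 * sides down by one, so
 * (((u32)jiffies << 1) >> 1) - (tp->tso_deferred >> 1)
 * yields the elapsed ticks with the flag bit masked off.
 */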
/* Create a new MTU probe if we are ready.
 * MTU probe is regularly attempting to increase the path MTU by
 * deliberately sending larger packets. This discovers routing
 * changes resulting in larger path MTUs.
 *
 * Returns 0 if we should wait to probe (no cwnd available),
 *         1 if a probe was sent,
 *         -1 otherwise
 */
static int tcp_mtu_probe(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct sk_buff *skb, *nskb, *next;
	int len;
	int probe_size;
	int size_needed;
	int copy;
	int mss_now;

	/* Not currently probing/verifying,
	 * not in recovery,
	 * have enough cwnd, and
	 * not SACKing (the variable headers throw things off) */
	if (!icsk->icsk_mtup.enabled ||
	    icsk->icsk_mtup.probe_size ||
	    inet_csk(sk)->icsk_ca_state != TCP_CA_Open ||
	    tp->snd_cwnd < 11 ||
	    tp->rx_opt.num_sacks || tp->rx_opt.dsack)
		return -1;

	/* Very simple search strategy: just double the MSS. */
	mss_now = tcp_current_mss(sk);
	probe_size = 2 * tp->mss_cache;
	size_needed = probe_size + (tp->reordering + 1) * tp->mss_cache;
	if (probe_size > tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_high)) {
		/* TODO: set timer for probe_converge_event */
		return -1;
	}

	/* Have enough data in the send queue to probe? */
	if (tp->write_seq - tp->snd_nxt < size_needed)
		return -1;

	if (tp->snd_wnd < size_needed)
		return -1;
	if (after(tp->snd_nxt + size_needed, tcp_wnd_end(tp)))
		return 0;

	/* Do we need to wait to drain cwnd? With none in flight, don't stall */
	if (tcp_packets_in_flight(tp) + 2 > tp->snd_cwnd) {
		if (!tcp_packets_in_flight(tp))
			return -1;
		else
			return 0;
	}

	/* We're allowed to probe. Build it now. */
	if ((nskb = sk_stream_alloc_skb(sk, probe_size, GFP_ATOMIC)) == NULL)
		return -1;
	sk->sk_wmem_queued += nskb->truesize;
	sk_mem_charge(sk, nskb->truesize);

	skb = tcp_send_head(sk);

	TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(skb)->seq;
	TCP_SKB_CB(nskb)->end_seq = TCP_SKB_CB(skb)->seq + probe_size;
	TCP_SKB_CB(nskb)->tcp_flags = TCPHDR_ACK;
	TCP_SKB_CB(nskb)->sacked = 0;
	nskb->csum = 0;
	nskb->ip_summed = skb->ip_summed;

	tcp_insert_write_queue_before(nskb, skb, sk);

	len = 0;
	tcp_for_write_queue_from_safe(skb, next, sk) {
		copy = min_t(int, skb->len, probe_size - len);
		if (nskb->ip_summed)
			skb_copy_bits(skb, 0, skb_put(nskb, copy), copy);
		else
			nskb->csum = skb_copy_and_csum_bits(skb, 0,
							    skb_put(nskb, copy),
							    copy, 0);

		if (skb->len <= copy) {
			/* We've eaten all the data from this skb.
			 * Throw it away. */
			TCP_SKB_CB(nskb)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags;
			tcp_unlink_write_queue(skb, sk);
			sk_wmem_free_skb(sk, skb);
		} else {
			TCP_SKB_CB(nskb)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags &
						       ~(TCPHDR_FIN|TCPHDR_PSH);
			if (!skb_shinfo(skb)->nr_frags) {
				skb_pull(skb, copy);
				if (skb->ip_summed != CHECKSUM_PARTIAL)
					skb->csum = csum_partial(skb->data,
								 skb->len, 0);
			} else {
				__pskb_trim_head(skb, copy);
				tcp_set_skb_tso_segs(sk, skb, mss_now);
			}
			TCP_SKB_CB(skb)->seq += copy;
		}

		len += copy;

		if (len >= probe_size)
			break;
	}
	tcp_init_tso_segs(sk, nskb, nskb->len);

	/* We're ready to send. If this fails, the probe will
	 * be resegmented into mss-sized pieces by tcp_write_xmit(). */
	TCP_SKB_CB(nskb)->when = tcp_time_stamp;
	if (!tcp_transmit_skb(sk, nskb, 1, GFP_ATOMIC)) {
		/* Decrement cwnd here because we are sending
		 * effectively two packets. */
		tp->snd_cwnd--;
		tcp_event_new_data_sent(sk, nskb);

		icsk->icsk_mtup.probe_size = tcp_mss_to_mtu(sk, nskb->len);
		tp->mtu_probe.probe_seq_start = TCP_SKB_CB(nskb)->seq;
		tp->mtu_probe.probe_seq_end = TCP_SKB_CB(nskb)->end_seq;

		return 1;
	}

	return -1;
}
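/* Worked example of the probe sizing above: with mss_cache = 1400 and
 * reordering = 3, probe_size = 2800 and size_needed = 2800 + 4 * 1400 =
 * 8400 bytes, so at least that much unsent data must be queued (and
 * covered by the send window) before a probe is attempted.
 */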
/* This routine writes packets to the network. It advances the
 * send_head. This happens as incoming acks open up the remote
 * window for us.
 *
 * LARGESEND note: !tcp_urg_mode is overkill, only frames between
 * snd_up-64k-mss .. snd_up cannot be large. However, taking into
 * account rare use of URG, this is not a big flaw.
 *
 * Send at most one packet when push_one > 0. Temporarily ignore
 * cwnd limit to force at most one packet out when push_one == 2.
 *
 * Returns true, if no segments are in flight and we have queued segments,
 * but cannot send anything now because of SWS or another problem.
 */
static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
			   int push_one, gfp_t gfp)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *skb;
	unsigned int tso_segs, sent_pkts;
	int cwnd_quota;
	int result;

	sent_pkts = 0;

	if (!push_one) {
		/* Do MTU probing. */
		result = tcp_mtu_probe(sk);
		if (!result) {
			return false;
		} else if (result > 0) {
			sent_pkts = 1;
		}
	}

	while ((skb = tcp_send_head(sk))) {
		unsigned int limit;

		tso_segs = tcp_init_tso_segs(sk, skb, mss_now);
		BUG_ON(!tso_segs);

		if (unlikely(tp->repair) && tp->repair_queue == TCP_SEND_QUEUE)
			goto repair; /* Skip network transmission */

		cwnd_quota = tcp_cwnd_test(tp, skb);
		if (!cwnd_quota) {
			if (push_one == 2)
				/* Force out a loss probe pkt. */
				cwnd_quota = 1;
			else
				break;
		}

		if (unlikely(!tcp_snd_wnd_test(tp, skb, mss_now)))
			break;

		if (tso_segs == 1) {
			if (unlikely(!tcp_nagle_test(tp, skb, mss_now,
						     (tcp_skb_is_last(sk, skb) ?
						      nonagle : TCP_NAGLE_PUSH))))
				break;
		} else {
			if (!push_one && tcp_tso_should_defer(sk, skb))
				break;
		}

		/* TCP Small Queues :
		 * Control number of packets in qdisc/devices to two packets / or ~1 ms.
		 * This allows for :
		 *  - better RTT estimation and ACK scheduling
		 *  - faster recovery
		 * Alas, some drivers / subsystems require a fair amount
		 * of queued bytes to ensure line rate.
		 * One example is wifi aggregation (802.11 AMPDU)
		 */
		limit = max_t(unsigned int, sysctl_tcp_limit_output_bytes,
			      sk->sk_pacing_rate >> 10);
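		/* Worked example: at sk_pacing_rate = 125000000 bytes/s
		 * (1 Gbit/s), rate >> 10 is ~122 KB, roughly one
		 * millisecond worth of data; sysctl_tcp_limit_output_bytes
		 * (default 131072) provides the floor for slower flows.
		 */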
		if (atomic_read(&sk->sk_wmem_alloc) > limit) {
			set_bit(TSQ_THROTTLED, &tp->tsq_flags);
			/* It is possible TX completion already happened
			 * before we set TSQ_THROTTLED, so we must
			 * test again the condition.
			 * We abuse smp_mb__after_clear_bit() because
			 * there is no smp_mb__after_set_bit() yet
			 */
			smp_mb__after_clear_bit();
			if (atomic_read(&sk->sk_wmem_alloc) > limit)
				break;
		}

		limit = mss_now;
		if (tso_segs > 1 && !tcp_urg_mode(tp))
			limit = tcp_mss_split_point(sk, skb, mss_now,
						    min_t(unsigned int,
							  cwnd_quota,
							  sk->sk_gso_max_segs));

		if (skb->len > limit &&
		    unlikely(tso_fragment(sk, skb, limit, mss_now, gfp)))
			break;

		TCP_SKB_CB(skb)->when = tcp_time_stamp;

		if (unlikely(tcp_transmit_skb(sk, skb, 1, gfp)))
			break;

repair:
		/* Advance the send_head. This one is sent out.
		 * This call will increment packets_out.
		 */
		tcp_event_new_data_sent(sk, skb);

		tcp_minshall_update(tp, mss_now, skb);
		sent_pkts += tcp_skb_pcount(skb);

		if (push_one)
			break;
	}

	if (likely(sent_pkts)) {
		if (tcp_in_cwnd_reduction(sk))
			tp->prr_out += sent_pkts;

		/* Send one loss probe per tail loss episode. */
		if (push_one != 2)
			tcp_schedule_loss_probe(sk);
		tcp_cwnd_validate(sk);
		return false;
	}
	return (push_one == 2) || (!tp->packets_out && tcp_send_head(sk));
}
bool tcp_schedule_loss_probe(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	u32 timeout, tlp_time_stamp, rto_time_stamp;
	u32 rtt = tp->srtt >> 3;

	if (WARN_ON(icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS))
		return false;
	/* No consecutive loss probes. */
	if (WARN_ON(icsk->icsk_pending == ICSK_TIME_LOSS_PROBE)) {
		tcp_rearm_rto(sk);
		return false;
	}
	/* Don't do any loss probe on a Fast Open connection before 3WHS
	 * finishes.
	 */
	if (sk->sk_state == TCP_SYN_RECV)
		return false;

	/* TLP is only scheduled when next timer event is RTO. */
	if (icsk->icsk_pending != ICSK_TIME_RETRANS)
		return false;

	/* Schedule a loss probe in 2*RTT for SACK capable connections
	 * in Open state, that are either limited by cwnd or application.
	 */
	if (sysctl_tcp_early_retrans < 3 || !rtt || !tp->packets_out ||
	    !tcp_is_sack(tp) || inet_csk(sk)->icsk_ca_state != TCP_CA_Open)
		return false;

	if ((tp->snd_cwnd > tcp_packets_in_flight(tp)) &&
	     tcp_send_head(sk))
		return false;

	/* Probe timeout is at least 1.5*rtt + TCP_DELACK_MAX to account
	 * for delayed ack when there's one outstanding packet.
	 */
	timeout = rtt << 1;
	if (tp->packets_out == 1)
		timeout = max_t(u32, timeout,
				(rtt + (rtt >> 1) + TCP_DELACK_MAX));
	timeout = max_t(u32, timeout, msecs_to_jiffies(10));

	/* If RTO is shorter, just schedule TLP in its place. */
	tlp_time_stamp = tcp_time_stamp + timeout;
	rto_time_stamp = (u32)inet_csk(sk)->icsk_timeout;
	if ((s32)(tlp_time_stamp - rto_time_stamp) > 0) {
		s32 delta = rto_time_stamp - tcp_time_stamp;
		if (delta > 0)
			timeout = delta;
	}

	inet_csk_reset_xmit_timer(sk, ICSK_TIME_LOSS_PROBE, timeout,
				  TCP_RTO_MAX);
	return true;
}
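/* Worked example of the PTO above: with srtt = 40 ms the base timeout is
 * 2 * rtt = 80 ms; with exactly one packet in flight it is raised to
 * max(80 ms, 1.5 * 40 ms + TCP_DELACK_MAX (200 ms)) = 260 ms to cover a
 * delayed ACK, and in all cases to at least 10 ms.
 */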
/* When probe timeout (PTO) fires, send a new segment if one exists, else
 * retransmit the last segment.
 */
void tcp_send_loss_probe(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *skb;
	int pcount;
	int mss = tcp_current_mss(sk);
	int err = -1;

	if (tcp_send_head(sk) != NULL) {
		err = tcp_write_xmit(sk, mss, TCP_NAGLE_OFF, 2, GFP_ATOMIC);
		goto rearm_timer;
	}

	/* At most one outstanding TLP retransmission. */
	if (tp->tlp_high_seq)
		goto rearm_timer;

	/* Retransmit last segment. */
	skb = tcp_write_queue_tail(sk);
	if (WARN_ON(!skb))
		goto rearm_timer;

	pcount = tcp_skb_pcount(skb);
	if (WARN_ON(!pcount))
		goto rearm_timer;

	if ((pcount > 1) && (skb->len > (pcount - 1) * mss)) {
		if (unlikely(tcp_fragment(sk, skb, (pcount - 1) * mss, mss)))
			goto rearm_timer;
		skb = tcp_write_queue_tail(sk);
	}

	if (WARN_ON(!skb || !tcp_skb_pcount(skb)))
		goto rearm_timer;

	/* Probe with zero data doesn't trigger fast recovery. */
	if (skb->len > 0)
		err = __tcp_retransmit_skb(sk, skb);

	/* Record snd_nxt for loss detection. */
	if (likely(!err))
		tp->tlp_high_seq = tp->snd_nxt;

rearm_timer:
	inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
				  inet_csk(sk)->icsk_rto,
				  TCP_RTO_MAX);

	if (likely(!err))
		NET_INC_STATS_BH(sock_net(sk),
				 LINUX_MIB_TCPLOSSPROBES);
}
2057 /* Push out any pending frames which were held back due to
2058 * TCP_CORK or attempt at coalescing tiny packets.
2059 * The socket must be locked by the caller.
2061 void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
2064 /* If we are closed, the bytes will have to remain here.
2065 * In time closedown will finish, we empty the write queue and
2066 * all will be happy.
2068 if (unlikely(sk->sk_state == TCP_CLOSE))
2071 if (tcp_write_xmit(sk, cur_mss, nonagle, 0,
2072 sk_gfp_atomic(sk, GFP_ATOMIC)))
2073 tcp_check_probe_timer(sk);
2076 /* Send _single_ skb sitting at the send head. This function relies on
2077 * __tcp_push_pending_frames() to set up the probe timer etc.
2079 void tcp_push_one(struct sock *sk, unsigned int mss_now)
2081 struct sk_buff *skb = tcp_send_head(sk);
2083 BUG_ON(!skb || skb->len < mss_now);
2085 tcp_write_xmit(sk, mss_now, TCP_NAGLE_PUSH, 1, sk->sk_allocation);
2088 /* This function returns the amount that we can raise the
2089 * usable window based on the following constraints
2091 * 1. The window can never be shrunk once it is offered (RFC 793)
2092 * 2. We limit memory per socket
2095 * "the suggested [SWS] avoidance algorithm for the receiver is to keep
2096 * RECV.NEXT + RCV.WIN fixed until:
2097 * RCV.BUFF - RCV.USER - RCV.WINDOW >= min(1/2 RCV.BUFF, MSS)"
2099 * i.e. don't raise the right edge of the window until you can raise
2100 * it at least MSS bytes.
2102 * Unfortunately, the recommended algorithm breaks header prediction,
2103 * since header prediction assumes th->window stays fixed.
2105 * Strictly speaking, keeping th->window fixed violates the receiver
2106 * side SWS prevention criteria. The problem is that under this rule
2107 * a stream of single byte packets will cause the right side of the
2108 * window to always advance by a single byte.
2110 * Of course, if the sender implements sender side SWS prevention
2111 * then this will not be a problem.
2113 * BSD seems to make the following compromise:
2115 * If the free space is less than 1/4 of the maximum
2116 * space available and the free space is less than 1/2 mss,
2117 * then set the window to 0.
2118 * [ Actually, bsd uses MSS and 1/4 of maximal _window_ ]
2119 * Otherwise, just prevent the window from shrinking
2120 * and from being larger than the largest representable value.
2122 * This prevents incremental opening of the window in the regime
2123 * where TCP is limited by the speed of the reader side taking
2124 * data out of the TCP receive queue. It does nothing about
2125 * those cases where the window is constrained on the sender side
2126 * because the pipeline is full.
2128 * BSD also seems to "accidentally" limit itself to windows that are a
2129 * multiple of MSS, at least until the free space gets quite small.
2130 * This would appear to be a side effect of the mbuf implementation.
2131 * Combining these two algorithms results in the observed behavior
2132 * of having a fixed window size at almost all times.
2134 * Below we obtain similar behavior by forcing the offered window to
2135 * a multiple of the mss when it is feasible to do so.
2137 * Note, we don't "adjust" for variable option bytes such as SACK blocks.
2138 * Regular, fixed-size options like TIMESTAMP are taken into account.
2140 u32 __tcp_select_window(struct sock *sk)
2142 struct inet_connection_sock *icsk = inet_csk(sk);
2143 struct tcp_sock *tp = tcp_sk(sk);
2144 /* MSS for the peer's data. Previous versions used mss_clamp
2145 * here. I don't know if the value based on our guesses
2146 * of peer's MSS is better for the performance. It's more correct
2147 * but may be worse for the performance because of rcv_mss
2148 * fluctuations. --SAW 1998/11/1
2150 int mss = icsk->icsk_ack.rcv_mss;
2151 int free_space = tcp_space(sk);
2152 int full_space = min_t(int, tp->window_clamp, tcp_full_space(sk));
2155 if (mss > full_space)
2158 if (free_space < (full_space >> 1)) {
2159 icsk->icsk_ack.quick = 0;
2161 if (sk_under_memory_pressure(sk))
2162 tp->rcv_ssthresh = min(tp->rcv_ssthresh,
2165 if (free_space < mss)
2169 if (free_space > tp->rcv_ssthresh)
2170 free_space = tp->rcv_ssthresh;
2172 /* Don't do rounding if we are using window scaling, since the
2173 * scaled window will not line up with the MSS boundary anyway.
2175 window = tp->rcv_wnd;
2176 if (tp->rx_opt.rcv_wscale) {
2177 window = free_space;
2179 /* Advertise enough space so that it won't get scaled away.
2180 * Important case: prevent zero window announcement if
2181 * 1<<rcv_wscale > mss.
2183 if (((window >> tp->rx_opt.rcv_wscale) << tp->rx_opt.rcv_wscale) != window)
2184 window = (((window >> tp->rx_opt.rcv_wscale) + 1)
2185 << tp->rx_opt.rcv_wscale);
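/* Illustrative example: with rcv_wscale = 7 the advertised window has a
 * granularity of 128 bytes. If free_space were 100 bytes, truncation
 * alone would yield 100 >> 7 == 0, i.e. a zero window; rounding up to
 * (0 + 1) << 7 = 128 keeps the scaled window field at 1 instead.
 */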
2187 /* Get the largest window that is a nice multiple of mss.
2188 * Window clamp already applied above.
2189 * If our current window offering is within 1 mss of the
2190 * free space we just keep it. This prevents the divide
2191 * and multiply from happening most of the time.
2192 * We also don't do any window rounding when the free space
2193 * is too small.
2194 */
2195 if (window <= free_space - mss || window > free_space)
2196 window = (free_space / mss) * mss;
2197 else if (mss == full_space &&
2198 free_space > window + (full_space >> 1))
2199 window = free_space;
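/* Illustrative example (figures assumed): with mss = 1000,
 * free_space = 6200 and a previously offered window of 5000,
 * 5000 <= 6200 - 1000 holds, so the window is rounded to
 * (6200 / 1000) * 1000 = 6000. Had free_space been 5400, the old offer
 * is within one mss of it and is kept, skipping the divide and multiply.
 */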
2205 /* Collapses two adjacent SKBs during retransmission. */
2206 static void tcp_collapse_retrans(struct sock *sk, struct sk_buff *skb)
2208 struct tcp_sock *tp = tcp_sk(sk);
2209 struct sk_buff *next_skb = tcp_write_queue_next(sk, skb);
2210 int skb_size, next_skb_size;
2212 skb_size = skb->len;
2213 next_skb_size = next_skb->len;
2215 BUG_ON(tcp_skb_pcount(skb) != 1 || tcp_skb_pcount(next_skb) != 1);
2217 tcp_highest_sack_combine(sk, next_skb, skb);
2219 tcp_unlink_write_queue(next_skb, sk);
2221 skb_copy_from_linear_data(next_skb, skb_put(skb, next_skb_size),
2224 if (next_skb->ip_summed == CHECKSUM_PARTIAL)
2225 skb->ip_summed = CHECKSUM_PARTIAL;
2227 if (skb->ip_summed != CHECKSUM_PARTIAL)
2228 skb->csum = csum_block_add(skb->csum, next_skb->csum, skb_size);
2230 /* Update sequence range on original skb. */
2231 TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(next_skb)->end_seq;
2233 /* Merge over control information. This moves PSH/FIN etc. over */
2234 TCP_SKB_CB(skb)->tcp_flags |= TCP_SKB_CB(next_skb)->tcp_flags;
2236 /* All done, get rid of second SKB and account for it so
2237 * packet counting does not break.
2239 TCP_SKB_CB(skb)->sacked |= TCP_SKB_CB(next_skb)->sacked & TCPCB_EVER_RETRANS;
2241 /* changed transmit queue under us so clear hints */
2242 tcp_clear_retrans_hints_partial(tp);
2243 if (next_skb == tp->retransmit_skb_hint)
2244 tp->retransmit_skb_hint = skb;
2246 tcp_adjust_pcount(sk, next_skb, tcp_skb_pcount(next_skb));
2248 sk_wmem_free_skb(sk, next_skb);
2251 /* Check if coalescing SKBs is legal. */
2252 static bool tcp_can_collapse(const struct sock *sk, const struct sk_buff *skb)
2254 if (tcp_skb_pcount(skb) > 1)
2256 /* TODO: SACK collapsing could be used to remove this condition */
2257 if (skb_shinfo(skb)->nr_frags != 0)
2259 if (skb_cloned(skb))
2261 if (skb == tcp_send_head(sk))
2263 /* Some heuristics for collapsing over SACKed data could be invented */
2264 if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)
2270 /* Collapse packets in the retransmit queue to create fewer
2271 * packets on the wire. This is only done on retransmission.
2273 static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *to,
2276 struct tcp_sock *tp = tcp_sk(sk);
2277 struct sk_buff *skb = to, *tmp;
2280 if (!sysctl_tcp_retrans_collapse)
2282 if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)
2285 tcp_for_write_queue_from_safe(skb, tmp, sk) {
2286 if (!tcp_can_collapse(sk, skb))
2298 /* Punt if not enough space exists in the first SKB for
2299 * the data in the second
2301 if (skb->len > skb_availroom(to))
2304 if (after(TCP_SKB_CB(skb)->end_seq, tcp_wnd_end(tp)))
2307 tcp_collapse_retrans(sk, to);
2311 /* This retransmits one SKB. Policy decisions and retransmit queue
2312 * state updates are done by the caller. Returns non-zero if an
2313 * error occurred which prevented the send.
2315 int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
2317 struct tcp_sock *tp = tcp_sk(sk);
2318 struct inet_connection_sock *icsk = inet_csk(sk);
2319 unsigned int cur_mss;
2321 /* Inconclusive MTU probe */
2322 if (icsk->icsk_mtup.probe_size) {
2323 icsk->icsk_mtup.probe_size = 0;
2326 /* Do not send more than we queued. 1/4 is reserved for possible
2327 * copying overhead: fragmentation, tunneling, mangling etc.
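* Illustrative figures (assumed, not from the source): with
* sk_wmem_queued at 100 KB, the retransmit below is skipped once
* sk_wmem_alloc exceeds min(100 KB + 25 KB, sk_sndbuf).
*/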
2329 if (atomic_read(&sk->sk_wmem_alloc) >
2330 min(sk->sk_wmem_queued + (sk->sk_wmem_queued >> 2), sk->sk_sndbuf))
2333 if (before(TCP_SKB_CB(skb)->seq, tp->snd_una)) {
2334 if (before(TCP_SKB_CB(skb)->end_seq, tp->snd_una))
2336 if (tcp_trim_head(sk, skb, tp->snd_una - TCP_SKB_CB(skb)->seq))
2340 if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk))
2341 return -EHOSTUNREACH; /* Routing failure or similar. */
2343 cur_mss = tcp_current_mss(sk);
2345 /* If receiver has shrunk his window, and skb is out of
2346 * new window, do not retransmit it. The exception is the
2347 * case when the window is shrunk to zero. In this case
2348 * our retransmit serves as a zero window probe.
2350 if (!before(TCP_SKB_CB(skb)->seq, tcp_wnd_end(tp)) &&
2351 TCP_SKB_CB(skb)->seq != tp->snd_una)
2354 if (skb->len > cur_mss) {
2355 if (tcp_fragment(sk, skb, cur_mss, cur_mss))
2356 return -ENOMEM; /* We'll try again later. */
2358 int oldpcount = tcp_skb_pcount(skb);
2360 if (unlikely(oldpcount > 1)) {
2361 if (skb_unclone(skb, GFP_ATOMIC))
2363 tcp_init_tso_segs(sk, skb, cur_mss);
2364 tcp_adjust_pcount(sk, skb, oldpcount - tcp_skb_pcount(skb));
2368 tcp_retrans_try_collapse(sk, skb, cur_mss);
2370 /* Some Solaris stacks overoptimize and ignore the FIN on a
2371 * retransmit when old data is attached. So strip it off
2372 * since it is cheap to do so and saves bytes on the network.
2375 (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) &&
2376 tp->snd_una == (TCP_SKB_CB(skb)->end_seq - 1)) {
2377 if (!pskb_trim(skb, 0)) {
2378 /* Reuse, even though it does some unnecessary work */
2379 tcp_init_nondata_skb(skb, TCP_SKB_CB(skb)->end_seq - 1,
2380 TCP_SKB_CB(skb)->tcp_flags);
2381 skb->ip_summed = CHECKSUM_NONE;
2385 /* Make a copy, if the first transmission SKB clone we made
2386 * is still in somebody's hands, else make a clone.
2388 TCP_SKB_CB(skb)->when = tcp_time_stamp;
2390 /* make sure skb->data is aligned on arches that require it
2391 * and check if ack-trimming & collapsing extended the headroom
2392 * beyond what csum_start can cover.
2394 if (unlikely((NET_IP_ALIGN && ((unsigned long)skb->data & 3)) ||
2395 skb_headroom(skb) >= 0xFFFF)) {
2396 struct sk_buff *nskb = __pskb_copy(skb, MAX_TCP_HEADER,
2398 return nskb ? tcp_transmit_skb(sk, nskb, 0, GFP_ATOMIC) :
2401 return tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
2405 int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
2407 struct tcp_sock *tp = tcp_sk(sk);
2408 int err = __tcp_retransmit_skb(sk, skb);
2411 /* Update global TCP statistics. */
2412 TCP_INC_STATS(sock_net(sk), TCP_MIB_RETRANSSEGS);
2414 tp->total_retrans++;
2416 #if FASTRETRANS_DEBUG > 0
2417 if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS) {
2418 net_dbg_ratelimited("retrans_out leaked\n");
2421 if (!tp->retrans_out)
2422 tp->lost_retrans_low = tp->snd_nxt;
2423 TCP_SKB_CB(skb)->sacked |= TCPCB_RETRANS;
2424 tp->retrans_out += tcp_skb_pcount(skb);
2426 /* Save stamp of the first retransmit. */
2427 if (!tp->retrans_stamp)
2428 tp->retrans_stamp = TCP_SKB_CB(skb)->when;
2430 tp->undo_retrans += tcp_skb_pcount(skb);
2432 /* snd_nxt is stored to detect loss of retransmitted segment,
2433 * see tcp_input.c tcp_sacktag_write_queue().
2435 TCP_SKB_CB(skb)->ack_seq = tp->snd_nxt;
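/* The skb's ack_seq field is unused on the transmit side, so it is
 * repurposed here: if a later SACK covers data beyond this recorded
 * snd_nxt, the retransmission itself must have been lost.
 */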
2440 /* Check if forward retransmits are possible in the current
2441 * window/congestion state.
2443 static bool tcp_can_forward_retransmit(struct sock *sk)
2445 const struct inet_connection_sock *icsk = inet_csk(sk);
2446 const struct tcp_sock *tp = tcp_sk(sk);
2448 /* Forward retransmissions are possible only during Recovery. */
2449 if (icsk->icsk_ca_state != TCP_CA_Recovery)
2452 /* No forward retransmissions in Reno are possible. */
2453 if (tcp_is_reno(tp))
2456 /* Yeah, we have to make a difficult choice between forward transmission
2457 * and retransmission... Both ways have their merits...
2459 * For now we do not retransmit anything, while we have some new
2460 * segments to send. In the other cases, follow rule 3 for
2461 * NextSeg() specified in RFC3517.
2464 if (tcp_may_send_now(sk))
2470 /* This gets called after a retransmit timeout, and the initially
2471 * retransmitted data is acknowledged. It tries to continue
2472 * resending the rest of the retransmit queue, until either
2473 * we've sent it all or the congestion window limit is reached.
2474 * If doing SACK, the first ACK which comes back for a timeout
2475 * based retransmit packet might feed us FACK information again.
2476 * If so, we use it to avoid unnecessary retransmissions.
2478 void tcp_xmit_retransmit_queue(struct sock *sk)
2480 const struct inet_connection_sock *icsk = inet_csk(sk);
2481 struct tcp_sock *tp = tcp_sk(sk);
2482 struct sk_buff *skb;
2483 struct sk_buff *hole = NULL;
2486 int fwd_rexmitting = 0;
2488 if (!tp->packets_out)
2492 tp->retransmit_high = tp->snd_una;
2494 if (tp->retransmit_skb_hint) {
2495 skb = tp->retransmit_skb_hint;
2496 last_lost = TCP_SKB_CB(skb)->end_seq;
2497 if (after(last_lost, tp->retransmit_high))
2498 last_lost = tp->retransmit_high;
2500 skb = tcp_write_queue_head(sk);
2501 last_lost = tp->snd_una;
2504 tcp_for_write_queue_from(skb, sk) {
2505 __u8 sacked = TCP_SKB_CB(skb)->sacked;
2507 if (skb == tcp_send_head(sk))
2509 /* we could do better than to assign each time */
2511 tp->retransmit_skb_hint = skb;
2513 /* Assume this retransmit will generate
2514 * only one packet for congestion window
2515 * calculation purposes. This works because
2516 * tcp_retransmit_skb() will chop up the
2517 * packet to be MSS sized and all the
2518 * packet counting works out.
2520 if (tcp_packets_in_flight(tp) >= tp->snd_cwnd)
2523 if (fwd_rexmitting) {
2525 if (!before(TCP_SKB_CB(skb)->seq, tcp_highest_sack_seq(tp)))
2527 mib_idx = LINUX_MIB_TCPFORWARDRETRANS;
2529 } else if (!before(TCP_SKB_CB(skb)->seq, tp->retransmit_high)) {
2530 tp->retransmit_high = last_lost;
2531 if (!tcp_can_forward_retransmit(sk))
2533 /* Backtrack if necessary to a non-LOST-marked skb */
2541 } else if (!(sacked & TCPCB_LOST)) {
2542 if (hole == NULL && !(sacked & (TCPCB_SACKED_RETRANS|TCPCB_SACKED_ACKED)))
2547 last_lost = TCP_SKB_CB(skb)->end_seq;
2548 if (icsk->icsk_ca_state != TCP_CA_Loss)
2549 mib_idx = LINUX_MIB_TCPFASTRETRANS;
2551 mib_idx = LINUX_MIB_TCPSLOWSTARTRETRANS;
2554 if (sacked & (TCPCB_SACKED_ACKED|TCPCB_SACKED_RETRANS))
2557 if (tcp_retransmit_skb(sk, skb)) {
2558 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPRETRANSFAIL);
2561 NET_INC_STATS_BH(sock_net(sk), mib_idx);
2563 if (tcp_in_cwnd_reduction(sk))
2564 tp->prr_out += tcp_skb_pcount(skb);
2566 if (skb == tcp_write_queue_head(sk))
2567 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
2568 inet_csk(sk)->icsk_rto,
2573 /* Send a FIN. The caller locks the socket for us. This cannot be
2574 * allowed to fail queueing a FIN frame under any circumstances.
2576 void tcp_send_fin(struct sock *sk)
2578 struct tcp_sock *tp = tcp_sk(sk);
2579 struct sk_buff *skb = tcp_write_queue_tail(sk);
2582 /* Optimization, tack on the FIN if we have a queue of
2583 * unsent frames. But be careful about outgoing SACKs
2584 * and IP options.
2586 mss_now = tcp_current_mss(sk);
2588 if (tcp_send_head(sk) != NULL) {
2589 TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_FIN;
2590 TCP_SKB_CB(skb)->end_seq++;
2593 /* Socket is locked, keep trying until memory is available. */
2595 skb = alloc_skb_fclone(MAX_TCP_HEADER,
2602 /* Reserve space for headers and prepare control bits. */
2603 skb_reserve(skb, MAX_TCP_HEADER);
2604 /* FIN eats a sequence byte, write_seq advanced by tcp_queue_skb(). */
2605 tcp_init_nondata_skb(skb, tp->write_seq,
2606 TCPHDR_ACK | TCPHDR_FIN);
2607 tcp_queue_skb(sk, skb);
2609 __tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_OFF);
2612 /* We get here when a process closes a file descriptor (either due to
2613 * an explicit close() or as a byproduct of exit()'ing) and there
2614 * was unread data in the receive queue. This behavior is recommended
2615 * by RFC 2525, section 2.17. -DaveM
2617 void tcp_send_active_reset(struct sock *sk, gfp_t priority)
2619 struct sk_buff *skb;
2621 /* NOTE: No TCP options attached and we never retransmit this. */
2622 skb = alloc_skb(MAX_TCP_HEADER, priority);
2624 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTFAILED);
2628 /* Reserve space for headers and prepare control bits. */
2629 skb_reserve(skb, MAX_TCP_HEADER);
2630 tcp_init_nondata_skb(skb, tcp_acceptable_seq(sk),
2631 TCPHDR_ACK | TCPHDR_RST);
2633 TCP_SKB_CB(skb)->when = tcp_time_stamp;
2634 if (tcp_transmit_skb(sk, skb, 0, priority))
2635 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTFAILED);
2637 TCP_INC_STATS(sock_net(sk), TCP_MIB_OUTRSTS);
2640 /* Send a crossed SYN-ACK during socket establishment.
2641 * WARNING: This routine must only be called when we have already sent
2642 * a SYN packet that crossed the incoming SYN that caused this routine
2643 * to get called. If this assumption fails then the initial rcv_wnd
2644 * and rcv_wscale values will not be correct.
2646 int tcp_send_synack(struct sock *sk)
2648 struct sk_buff *skb;
2650 skb = tcp_write_queue_head(sk);
2651 if (skb == NULL || !(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)) {
2652 pr_debug("%s: wrong queue state\n", __func__);
2655 if (!(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_ACK)) {
2656 if (skb_cloned(skb)) {
2657 struct sk_buff *nskb = skb_copy(skb, GFP_ATOMIC);
2660 tcp_unlink_write_queue(skb, sk);
2661 skb_header_release(nskb);
2662 __tcp_add_write_queue_head(sk, nskb);
2663 sk_wmem_free_skb(sk, skb);
2664 sk->sk_wmem_queued += nskb->truesize;
2665 sk_mem_charge(sk, nskb->truesize);
2669 TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_ACK;
2670 TCP_ECN_send_synack(tcp_sk(sk), skb);
2672 TCP_SKB_CB(skb)->when = tcp_time_stamp;
2673 return tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
2677 * tcp_make_synack - Prepare a SYN-ACK.
2678 * @sk: listener socket
2679 * @dst: dst entry attached to the SYNACK
2680 * @req: request_sock pointer
2681 * @foc: TCP Fast Open cookie to include, if any
2682 * Allocate one skb and build a SYNACK packet.
2683 * @dst is consumed: the caller should not use it again.
2685 struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
2686 struct request_sock *req,
2687 struct tcp_fastopen_cookie *foc)
2689 struct tcp_out_options opts;
2690 struct inet_request_sock *ireq = inet_rsk(req);
2691 struct tcp_sock *tp = tcp_sk(sk);
2693 struct sk_buff *skb;
2694 struct tcp_md5sig_key *md5;
2695 int tcp_header_size;
2698 skb = sock_wmalloc(sk, MAX_TCP_HEADER + 15, 1, GFP_ATOMIC);
2699 if (unlikely(!skb)) {
2703 /* Reserve space for headers. */
2704 skb_reserve(skb, MAX_TCP_HEADER);
2706 skb_dst_set(skb, dst);
2707 security_skb_owned_by(skb, sk);
2709 mss = dst_metric_advmss(dst);
2710 if (tp->rx_opt.user_mss && tp->rx_opt.user_mss < mss)
2711 mss = tp->rx_opt.user_mss;
2713 if (req->rcv_wnd == 0) { /* ignored for retransmitted syns */
2715 /* Set this up on the first call only */
2716 req->window_clamp = tp->window_clamp ? : dst_metric(dst, RTAX_WINDOW);
2718 /* limit the window selection if the user enforces a smaller rx buffer */
2719 if (sk->sk_userlocks & SOCK_RCVBUF_LOCK &&
2720 (req->window_clamp > tcp_full_space(sk) || req->window_clamp == 0))
2721 req->window_clamp = tcp_full_space(sk);
2723 /* tcp_full_space because it is guaranteed to be the first packet */
2724 tcp_select_initial_window(tcp_full_space(sk),
2725 mss - (ireq->tstamp_ok ? TCPOLEN_TSTAMP_ALIGNED : 0),
2730 dst_metric(dst, RTAX_INITRWND));
2731 ireq->rcv_wscale = rcv_wscale;
2734 memset(&opts, 0, sizeof(opts));
2735 #ifdef CONFIG_SYN_COOKIES
2736 if (unlikely(req->cookie_ts))
2737 TCP_SKB_CB(skb)->when = cookie_init_timestamp(req);
2740 TCP_SKB_CB(skb)->when = tcp_time_stamp;
2741 tcp_header_size = tcp_synack_options(sk, req, mss, skb, &opts, &md5,
2744 skb_push(skb, tcp_header_size);
2745 skb_reset_transport_header(skb);
2748 memset(th, 0, sizeof(struct tcphdr));
2751 TCP_ECN_make_synack(req, th);
2752 th->source = ireq->loc_port;
2753 th->dest = ireq->rmt_port;
2754 /* Setting of flags is superfluous here for callers (and ECE is
2755 * not even correctly set)
2757 tcp_init_nondata_skb(skb, tcp_rsk(req)->snt_isn,
2758 TCPHDR_SYN | TCPHDR_ACK);
2760 th->seq = htonl(TCP_SKB_CB(skb)->seq);
2761 /* XXX data is queued and acked as is. No buffer/window check */
2762 th->ack_seq = htonl(tcp_rsk(req)->rcv_nxt);
2764 /* RFC1323: The window in SYN & SYN/ACK segments is never scaled. */
2765 th->window = htons(min(req->rcv_wnd, 65535U));
2766 tcp_options_write((__be32 *)(th + 1), tp, &opts);
2767 th->doff = (tcp_header_size >> 2);
2768 TCP_ADD_STATS(sock_net(sk), TCP_MIB_OUTSEGS, tcp_skb_pcount(skb));
2770 #ifdef CONFIG_TCP_MD5SIG
2771 /* Okay, we have all we need - do the md5 hash if needed */
2773 tcp_rsk(req)->af_specific->calc_md5_hash(opts.hash_location,
2774 md5, NULL, req, skb);
2780 EXPORT_SYMBOL(tcp_make_synack);
2782 /* Do all connect socket setups that can be done AF independent. */
2783 void tcp_connect_init(struct sock *sk)
2785 const struct dst_entry *dst = __sk_dst_get(sk);
2786 struct tcp_sock *tp = tcp_sk(sk);
2789 /* We'll fix this up when we get a response from the other end.
2790 * See tcp_input.c:tcp_rcv_state_process case TCP_SYN_SENT.
2792 tp->tcp_header_len = sizeof(struct tcphdr) +
2793 (sysctl_tcp_timestamps ? TCPOLEN_TSTAMP_ALIGNED : 0);
2795 #ifdef CONFIG_TCP_MD5SIG
2796 if (tp->af_specific->md5_lookup(sk, sk) != NULL)
2797 tp->tcp_header_len += TCPOLEN_MD5SIG_ALIGNED;
2800 /* If user gave his TCP_MAXSEG, record it to clamp */
2801 if (tp->rx_opt.user_mss)
2802 tp->rx_opt.mss_clamp = tp->rx_opt.user_mss;
2805 tcp_sync_mss(sk, dst_mtu(dst));
2807 if (!tp->window_clamp)
2808 tp->window_clamp = dst_metric(dst, RTAX_WINDOW);
2809 tp->advmss = dst_metric_advmss(dst);
2810 if (tp->rx_opt.user_mss && tp->rx_opt.user_mss < tp->advmss)
2811 tp->advmss = tp->rx_opt.user_mss;
2813 tcp_initialize_rcv_mss(sk);
2815 /* limit the window selection if the user enforces a smaller rx buffer */
2816 if (sk->sk_userlocks & SOCK_RCVBUF_LOCK &&
2817 (tp->window_clamp > tcp_full_space(sk) || tp->window_clamp == 0))
2818 tp->window_clamp = tcp_full_space(sk);
2820 tcp_select_initial_window(tcp_full_space(sk),
2821 tp->advmss - (tp->rx_opt.ts_recent_stamp ? tp->tcp_header_len - sizeof(struct tcphdr) : 0),
2824 sysctl_tcp_window_scaling,
2826 dst_metric(dst, RTAX_INITRWND));
2828 tp->rx_opt.rcv_wscale = rcv_wscale;
2829 tp->rcv_ssthresh = tp->rcv_wnd;
2832 sock_reset_flag(sk, SOCK_DONE);
2835 tp->snd_una = tp->write_seq;
2836 tp->snd_sml = tp->write_seq;
2837 tp->snd_up = tp->write_seq;
2838 tp->snd_nxt = tp->write_seq;
2840 if (likely(!tp->repair))
2843 tp->rcv_tstamp = tcp_time_stamp;
2844 tp->rcv_wup = tp->rcv_nxt;
2845 tp->copied_seq = tp->rcv_nxt;
2847 inet_csk(sk)->icsk_rto = TCP_TIMEOUT_INIT;
2848 inet_csk(sk)->icsk_retransmits = 0;
2849 tcp_clear_retrans(tp);
2852 static void tcp_connect_queue_skb(struct sock *sk, struct sk_buff *skb)
2854 struct tcp_sock *tp = tcp_sk(sk);
2855 struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);
2857 tcb->end_seq += skb->len;
2858 skb_header_release(skb);
2859 __tcp_add_write_queue_tail(sk, skb);
2860 sk->sk_wmem_queued += skb->truesize;
2861 sk_mem_charge(sk, skb->truesize);
2862 tp->write_seq = tcb->end_seq;
2863 tp->packets_out += tcp_skb_pcount(skb);
2866 /* Build and send a SYN with data and (cached) Fast Open cookie. However,
2867 * queue a data-only packet after the regular SYN, such that regular SYNs
2868 * are retransmitted on timeouts. Also if the remote SYN-ACK acknowledges
2869 * only the SYN sequence, the data are retransmitted in the first ACK.
2870 * If the cookie is not cached or another error occurs, it falls back
2871 * to sending a regular SYN with a Fast Open cookie request option.
2873 static int tcp_send_syn_data(struct sock *sk, struct sk_buff *syn)
2875 struct tcp_sock *tp = tcp_sk(sk);
2876 struct tcp_fastopen_request *fo = tp->fastopen_req;
2877 int syn_loss = 0, space, i, err = 0, iovlen = fo->data->msg_iovlen;
2878 struct sk_buff *syn_data = NULL, *data;
2879 unsigned long last_syn_loss = 0;
2881 tp->rx_opt.mss_clamp = tp->advmss; /* If MSS is not cached */
2882 tcp_fastopen_cache_get(sk, &tp->rx_opt.mss_clamp, &fo->cookie,
2883 &syn_loss, &last_syn_loss);
2884 /* Recurring FO SYN losses: revert to regular handshake temporarily */
2886 time_before(jiffies, last_syn_loss + (60*HZ << syn_loss))) {
2887 fo->cookie.len = -1;
2891 if (sysctl_tcp_fastopen & TFO_CLIENT_NO_COOKIE)
2892 fo->cookie.len = -1;
2893 else if (fo->cookie.len <= 0)
2896 /* MSS for SYN-data is based on cached MSS and bounded by PMTU and
2897 * user-MSS. Reserve maximum option space for middleboxes that add
2898 * private TCP options. The cost is reduced data space in SYN :(
2900 if (tp->rx_opt.user_mss && tp->rx_opt.user_mss < tp->rx_opt.mss_clamp)
2901 tp->rx_opt.mss_clamp = tp->rx_opt.user_mss;
2902 space = __tcp_mtu_to_mss(sk, inet_csk(sk)->icsk_pmtu_cookie) -
2903 MAX_TCP_OPTION_SPACE;
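/* Illustrative example (figures assumed, not from the source): if
 * __tcp_mtu_to_mss() yields 1460 bytes for the cached path MTU,
 * reserving MAX_TCP_OPTION_SPACE (40 bytes) leaves a budget of 1420
 * bytes of SYN payload, further capped below by fo->size and by an
 * order-0 skb allocation.
 */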
2905 space = min_t(size_t, space, fo->size);
2907 /* limit to order-0 allocations */
2908 space = min_t(size_t, space, SKB_MAX_HEAD(MAX_TCP_HEADER));
2910 syn_data = skb_copy_expand(syn, MAX_TCP_HEADER, space,
2912 if (syn_data == NULL)
2915 for (i = 0; i < iovlen && syn_data->len < space; ++i) {
2916 struct iovec *iov = &fo->data->msg_iov[i];
2917 unsigned char __user *from = iov->iov_base;
2918 int len = iov->iov_len;
2920 if (syn_data->len + len > space)
2921 len = space - syn_data->len;
2922 else if (i + 1 == iovlen)
2923 /* No more data pending in inet_wait_for_connect() */
2926 if (skb_add_data(syn_data, from, len))
2930 /* Queue a data-only packet after the regular SYN for retransmission */
2931 data = pskb_copy(syn_data, sk->sk_allocation);
2934 TCP_SKB_CB(data)->seq++;
2935 TCP_SKB_CB(data)->tcp_flags &= ~TCPHDR_SYN;
2936 TCP_SKB_CB(data)->tcp_flags = (TCPHDR_ACK|TCPHDR_PSH);
2937 tcp_connect_queue_skb(sk, data);
2938 fo->copied = data->len;
2940 if (tcp_transmit_skb(sk, syn_data, 0, sk->sk_allocation) == 0) {
2941 tp->syn_data = (fo->copied > 0);
2942 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFASTOPENACTIVE);
2948 /* Send a regular SYN with Fast Open cookie request option */
2949 if (fo->cookie.len > 0)
2951 err = tcp_transmit_skb(sk, syn, 1, sk->sk_allocation);
2953 tp->syn_fastopen = 0;
2954 kfree_skb(syn_data);
2956 fo->cookie.len = -1; /* Exclude Fast Open option for SYN retries */
2960 /* Build a SYN and send it off. */
2961 int tcp_connect(struct sock *sk)
2963 struct tcp_sock *tp = tcp_sk(sk);
2964 struct sk_buff *buff;
2967 tcp_connect_init(sk);
2969 if (unlikely(tp->repair)) {
2970 tcp_finish_connect(sk, NULL);
2974 buff = alloc_skb_fclone(MAX_TCP_HEADER + 15, sk->sk_allocation);
2975 if (unlikely(buff == NULL))
2978 /* Reserve space for headers. */
2979 skb_reserve(buff, MAX_TCP_HEADER);
2981 tcp_init_nondata_skb(buff, tp->write_seq++, TCPHDR_SYN);
2982 tp->retrans_stamp = TCP_SKB_CB(buff)->when = tcp_time_stamp;
2983 tcp_connect_queue_skb(sk, buff);
2984 TCP_ECN_send_syn(sk, buff);
2986 /* Send off SYN; include data in Fast Open. */
2987 err = tp->fastopen_req ? tcp_send_syn_data(sk, buff) :
2988 tcp_transmit_skb(sk, buff, 1, sk->sk_allocation);
2989 if (err == -ECONNREFUSED)
2992 /* We change tp->snd_nxt after the tcp_transmit_skb() call
2993 * in order to make this packet get counted in tcpOutSegs.
2995 tp->snd_nxt = tp->write_seq;
2996 tp->pushed_seq = tp->write_seq;
2997 TCP_INC_STATS(sock_net(sk), TCP_MIB_ACTIVEOPENS);
2999 /* Timer for repeating the SYN until an answer. */
3000 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
3001 inet_csk(sk)->icsk_rto, TCP_RTO_MAX);
3004 EXPORT_SYMBOL(tcp_connect);
3006 /* Send out a delayed ack, the caller does the policy checking
3007 * to see if we should even be here. See tcp_input.c:tcp_ack_snd_check()
3010 void tcp_send_delayed_ack(struct sock *sk)
3012 struct inet_connection_sock *icsk = inet_csk(sk);
3013 int ato = icsk->icsk_ack.ato;
3014 unsigned long timeout;
3016 if (ato > TCP_DELACK_MIN) {
3017 const struct tcp_sock *tp = tcp_sk(sk);
3018 int max_ato = HZ / 2;
3020 if (icsk->icsk_ack.pingpong ||
3021 (icsk->icsk_ack.pending & ICSK_ACK_PUSHED))
3022 max_ato = TCP_DELACK_MAX;
3024 /* Slow path, intersegment interval is "high". */
3026 /* If some rtt estimate is known, use it to bound delayed ack.
3027 * Do not use inet_csk(sk)->icsk_rto here, use results of rtt
3028 * measurements directly.
3031 int rtt = max(tp->srtt >> 3, TCP_DELACK_MIN);
3037 ato = min(ato, max_ato);
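/* Illustrative example (assuming HZ=1000): in pingpong mode max_ato
 * starts at TCP_DELACK_MAX (200 ms); with srtt >> 3 = 30 ms the rtt
 * estimate above is max(30, TCP_DELACK_MIN = 40) = 40 ms and bounds the
 * ack timeout, so the delayed ack fires no later than ~40 ms out.
 */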
3040 /* Stay within the limit we were given */
3041 timeout = jiffies + ato;
3043 /* Use new timeout only if there wasn't an older one earlier. */
3044 if (icsk->icsk_ack.pending & ICSK_ACK_TIMER) {
3045 /* If delack timer was blocked or is about to expire,
3046 * send ACK now.
3048 if (icsk->icsk_ack.blocked ||
3049 time_before_eq(icsk->icsk_ack.timeout, jiffies + (ato >> 2))) {
3054 if (!time_before(timeout, icsk->icsk_ack.timeout))
3055 timeout = icsk->icsk_ack.timeout;
3057 icsk->icsk_ack.pending |= ICSK_ACK_SCHED | ICSK_ACK_TIMER;
3058 icsk->icsk_ack.timeout = timeout;
3059 sk_reset_timer(sk, &icsk->icsk_delack_timer, timeout);
3062 /* This routine sends an ack and also updates the window. */
3063 void tcp_send_ack(struct sock *sk)
3065 struct sk_buff *buff;
3067 /* If we have been reset, we may not send again. */
3068 if (sk->sk_state == TCP_CLOSE)
3071 /* We are not putting this on the write queue, so
3072 * tcp_transmit_skb() will set the ownership to this
3073 * sock.
3075 buff = alloc_skb(MAX_TCP_HEADER, sk_gfp_atomic(sk, GFP_ATOMIC));
3077 inet_csk_schedule_ack(sk);
3078 inet_csk(sk)->icsk_ack.ato = TCP_ATO_MIN;
3079 inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
3080 TCP_DELACK_MAX, TCP_RTO_MAX);
3084 /* Reserve space for headers and prepare control bits. */
3085 skb_reserve(buff, MAX_TCP_HEADER);
3086 tcp_init_nondata_skb(buff, tcp_acceptable_seq(sk), TCPHDR_ACK);
3088 /* Send it off, this clears delayed acks for us. */
3089 TCP_SKB_CB(buff)->when = tcp_time_stamp;
3090 tcp_transmit_skb(sk, buff, 0, sk_gfp_atomic(sk, GFP_ATOMIC));
3093 /* This routine sends a packet with an out of date sequence
3094 * number. It assumes the other end will try to ack it.
3096 * Question: what should we do while in urgent mode?
3097 * 4.4BSD forces sending single byte of data. We cannot send
3098 * out of window data, because we have SND.NXT==SND.MAX...
3100 * Current solution: to send TWO zero-length segments in urgent mode:
3101 * one is with SEG.SEQ=SND.UNA to deliver urgent pointer, another is
3102 * out-of-date with SND.UNA-1 to probe window.
3104 static int tcp_xmit_probe_skb(struct sock *sk, int urgent)
3106 struct tcp_sock *tp = tcp_sk(sk);
3107 struct sk_buff *skb;
3109 /* We don't queue it, tcp_transmit_skb() sets ownership. */
3110 skb = alloc_skb(MAX_TCP_HEADER, sk_gfp_atomic(sk, GFP_ATOMIC));
3114 /* Reserve space for headers and set control bits. */
3115 skb_reserve(skb, MAX_TCP_HEADER);
3116 /* Use a previous sequence. This should cause the other
3117 * end to send an ack. Don't queue or clone SKB, just
3118 * send it.
3120 tcp_init_nondata_skb(skb, tp->snd_una - !urgent, TCPHDR_ACK);
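/* Note on the sequence chosen above: with urgent != 0 the probe uses
 * SEG.SEQ = SND.UNA so the urgent pointer remains valid; with
 * urgent == 0 it uses SND.UNA - 1, an already-acknowledged byte, which
 * forces the peer to reply with an ACK carrying its current window.
 */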
3121 TCP_SKB_CB(skb)->when = tcp_time_stamp;
3122 return tcp_transmit_skb(sk, skb, 0, GFP_ATOMIC);
3125 void tcp_send_window_probe(struct sock *sk)
3127 if (sk->sk_state == TCP_ESTABLISHED) {
3128 tcp_sk(sk)->snd_wl1 = tcp_sk(sk)->rcv_nxt - 1;
3129 tcp_xmit_probe_skb(sk, 0);
3133 /* Initiate keepalive or window probe from timer. */
3134 int tcp_write_wakeup(struct sock *sk)
3136 struct tcp_sock *tp = tcp_sk(sk);
3137 struct sk_buff *skb;
3139 if (sk->sk_state == TCP_CLOSE)
3142 if ((skb = tcp_send_head(sk)) != NULL &&
3143 before(TCP_SKB_CB(skb)->seq, tcp_wnd_end(tp))) {
3145 unsigned int mss = tcp_current_mss(sk);
3146 unsigned int seg_size = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq;
3148 if (before(tp->pushed_seq, TCP_SKB_CB(skb)->end_seq))
3149 tp->pushed_seq = TCP_SKB_CB(skb)->end_seq;
3151 /* We are probing the opening of a window
3152 * but the window size is != 0
3153 * must have been a result of SWS avoidance (sender)
3155 if (seg_size < TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq ||
3157 seg_size = min(seg_size, mss);
3158 TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH;
3159 if (tcp_fragment(sk, skb, seg_size, mss))
3161 } else if (!tcp_skb_pcount(skb))
3162 tcp_set_skb_tso_segs(sk, skb, mss);
3164 TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH;
3165 TCP_SKB_CB(skb)->when = tcp_time_stamp;
3166 err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
3168 tcp_event_new_data_sent(sk, skb);
3171 if (between(tp->snd_up, tp->snd_una + 1, tp->snd_una + 0xFFFF))
3172 tcp_xmit_probe_skb(sk, 1);
3173 return tcp_xmit_probe_skb(sk, 0);
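/* This implements the two-segment strategy described above
 * tcp_xmit_probe_skb(): if the urgent pointer lies within 64 KB past
 * SND.UNA, one probe at SND.UNA delivers it, and the out-of-date probe
 * at SND.UNA - 1 solicits the window update.
 */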
3177 /* A window probe timeout has occurred. If the window is not closed,
3178 * send a partial packet, else a zero-window probe.
3180 void tcp_send_probe0(struct sock *sk)
3182 struct inet_connection_sock *icsk = inet_csk(sk);
3183 struct tcp_sock *tp = tcp_sk(sk);
3186 err = tcp_write_wakeup(sk);
3188 if (tp->packets_out || !tcp_send_head(sk)) {
3189 /* Cancel probe timer, if it is not required. */
3190 icsk->icsk_probes_out = 0;
3191 icsk->icsk_backoff = 0;
3196 if (icsk->icsk_backoff < sysctl_tcp_retries2)
3197 icsk->icsk_backoff++;
3198 icsk->icsk_probes_out++;
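/* Illustrative backoff (figures assumed): with icsk_rto = 200 ms the
 * probe intervals are 200, 400, 800, ... ms, doubling with each
 * unanswered probe and capped at TCP_RTO_MAX (120 s) by the min()
 * below.
 */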
3199 inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
3200 min(icsk->icsk_rto << icsk->icsk_backoff, TCP_RTO_MAX),
3203 /* If packet was not sent due to local congestion,
3204 * do not back off and do not remember icsk_probes_out.
3205 * Let local senders fight for local resources.
3207 * Still use the accumulated backoff, though.
3209 if (!icsk->icsk_probes_out)
3210 icsk->icsk_probes_out = 1;
3211 inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
3212 min(icsk->icsk_rto << icsk->icsk_backoff,
3213 TCP_RESOURCE_PROBE_INTERVAL),