/* SCTP kernel implementation
 * (C) Copyright IBM Corp. 2001, 2004
 * Copyright (c) 1999-2000 Cisco, Inc.
 * Copyright (c) 1999-2001 Motorola, Inc.
 * Copyright (c) 2001-2003 Intel Corp.
 *
 * This file is part of the SCTP kernel implementation
 *
 * These functions implement the sctp_outq class.  The outqueue handles
 * bundling and queueing of outgoing SCTP chunks.
 *
 * This SCTP implementation is free software;
 * you can redistribute it and/or modify it under the terms of
 * the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This SCTP implementation is distributed in the hope that it
 * will be useful, but WITHOUT ANY WARRANTY; without even the implied
 *                 ************************
 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with GNU CC; see the file COPYING.  If not, write to
 * the Free Software Foundation, 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 *
 * Please send any bug reports or fixes you make to the
 * email address(es):
 *    lksctp developers <lksctp-developers@lists.sourceforge.net>
 *
 * Or submit a bug report through the following website:
 *    http://www.sf.net/projects/lksctp
 *
 * Written or modified by:
 *    La Monte H.P. Yarroll <piggy@acm.org>
 *    Karl Knutson          <karl@athena.chicago.il.us>
 *    Perry Melange         <pmelange@null.cc.uic.edu>
 *    Xingang Guo           <xingang.guo@intel.com>
 *    Hui Huang             <hui.huang@nokia.com>
 *    Sridhar Samudrala     <sri@us.ibm.com>
 *    Jon Grimm             <jgrimm@us.ibm.com>
 *
 * Any bugs reported given to us we will try to fix... any fixes shared will
 * be incorporated into the next SCTP release.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/list.h>   /* For struct list_head */
#include <linux/socket.h>
#include <linux/ip.h>
#include <linux/slab.h>
#include <net/sock.h>	  /* For skb_set_owner_w */

#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>
/* Declare internal functions here.  */
static int sctp_acked(struct sctp_sackhdr *sack, __u32 tsn);
static void sctp_check_transmitted(struct sctp_outq *q,
				   struct list_head *transmitted_queue,
				   struct sctp_transport *transport,
				   union sctp_addr *saddr,
				   struct sctp_sackhdr *sack,
				   __u32 *highest_new_tsn);

static void sctp_mark_missing(struct sctp_outq *q,
			      struct list_head *transmitted_queue,
			      struct sctp_transport *transport,
			      __u32 highest_new_tsn,
			      int count_of_newacks);

static void sctp_generate_fwdtsn(struct sctp_outq *q, __u32 sack_ctsn);

static int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout);
/* Add data to the front of the queue. */
static inline void sctp_outq_head_data(struct sctp_outq *q,
				       struct sctp_chunk *ch)
{
	list_add(&ch->list, &q->out_chunk_list);
	q->out_qlen += ch->skb->len;
}

/* Take data from the front of the queue. */
static inline struct sctp_chunk *sctp_outq_dequeue_data(struct sctp_outq *q)
{
	struct sctp_chunk *ch = NULL;

	if (!list_empty(&q->out_chunk_list)) {
		struct list_head *entry = q->out_chunk_list.next;

		ch = list_entry(entry, struct sctp_chunk, list);
		/* Unlink the chunk and keep the byte count in sync. */
		list_del_init(entry);
		q->out_qlen -= ch->skb->len;
	}
	return ch;
}

/* Add data chunk to the end of the queue. */
static inline void sctp_outq_tail_data(struct sctp_outq *q,
				       struct sctp_chunk *ch)
{
	list_add_tail(&ch->list, &q->out_chunk_list);
	q->out_qlen += ch->skb->len;
}
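/* Example (illustrative numbers): q->out_qlen tracks the total bytes of
 * queued DATA, so after queueing two chunks whose skbs are 100 and 36
 * bytes long, q->out_qlen is 136.  The three helpers above keep this
 * counter and out_chunk_list in sync on every head/tail insert and
 * dequeue.
 */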
/*
 * SFR-CACC algorithm:
 * D) If count_of_newacks is greater than or equal to 2
 * and t was not sent to the current primary then the
 * sender MUST NOT increment missing report count for t.
 */
static inline int sctp_cacc_skip_3_1_d(struct sctp_transport *primary,
				       struct sctp_transport *transport,
				       int count_of_newacks)
{
	if (count_of_newacks >= 2 && transport != primary)
		return 1;
	return 0;
}

/*
 * SFR-CACC algorithm:
 * F) If count_of_newacks is less than 2, let d be the
 * destination to which t was sent. If cacc_saw_newack
 * is 0 for destination d, then the sender MUST NOT
 * increment missing report count for t.
 */
static inline int sctp_cacc_skip_3_1_f(struct sctp_transport *transport,
				       int count_of_newacks)
{
	if (count_of_newacks < 2 &&
	    (transport && !transport->cacc.cacc_saw_newack))
		return 1;
	return 0;
}

/*
 * SFR-CACC algorithm:
 * 3.1) If CYCLING_CHANGEOVER is 0, the sender SHOULD
 * execute steps C, D, F.
 *
 * C has been implemented in sctp_outq_sack
 */
static inline int sctp_cacc_skip_3_1(struct sctp_transport *primary,
				     struct sctp_transport *transport,
				     int count_of_newacks)
{
	if (!primary->cacc.cycling_changeover) {
		if (sctp_cacc_skip_3_1_d(primary, transport, count_of_newacks))
			return 1;
		if (sctp_cacc_skip_3_1_f(transport, count_of_newacks))
			return 1;
		return 0;
	}
	return 0;
}

/*
 * SFR-CACC algorithm:
 * 3.2) Else if CYCLING_CHANGEOVER is 1, and t is less
 * than next_tsn_at_change of the current primary, then
 * the sender MUST NOT increment missing report count
 * for t.
 */
static inline int sctp_cacc_skip_3_2(struct sctp_transport *primary, __u32 tsn)
{
	if (primary->cacc.cycling_changeover &&
	    TSN_lt(tsn, primary->cacc.next_tsn_at_change))
		return 1;
	return 0;
}

/*
 * SFR-CACC algorithm:
 * 3) If the missing report count for TSN t is to be
 * incremented according to [RFC2960] and
 * [SCTP_STEWART-2002], and CHANGEOVER_ACTIVE is set,
 * then the sender MUST further execute steps 3.1 and
 * 3.2 to determine if the missing report count for
 * TSN t SHOULD NOT be incremented.
 *
 * 3.3) If 3.1 and 3.2 do not dictate that the missing
 * report count for t should not be incremented, then
 * the sender SHOULD increment missing report count for
 * t (according to [RFC2960] and [SCTP_STEWART_2002]).
 */
static inline int sctp_cacc_skip(struct sctp_transport *primary,
				 struct sctp_transport *transport,
				 int count_of_newacks,
				 __u32 tsn)
{
	if (primary->cacc.changeover_active &&
	    (sctp_cacc_skip_3_1(primary, transport, count_of_newacks) ||
	     sctp_cacc_skip_3_2(primary, tsn)))
		return 1;
	return 0;
}
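/* Putting the skip rules together, a sketch of the decision for a TSN t
 * sent on transport 'trans' (values here are illustrative):
 *
 *   changeover_active = 1, cycling_changeover = 0, count_of_newacks = 2,
 *   trans != primary  =>  step D applies, sctp_cacc_skip() returns 1,
 *   and sctp_mark_missing() will not bump t's TSN.Missing.Report.
 *
 * With changeover_active = 0 the whole block is bypassed and missing
 * reports are counted exactly as in plain RFC 2960 processing.
 */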
/* Initialize an existing sctp_outq.  This does the boring stuff.
 * You still need to define handlers if you really want to DO
 * something with this structure...
 */
void sctp_outq_init(struct sctp_association *asoc, struct sctp_outq *q)
{
	memset(q, 0, sizeof(struct sctp_outq));

	q->asoc = asoc;
	INIT_LIST_HEAD(&q->out_chunk_list);
	INIT_LIST_HEAD(&q->control_chunk_list);
	INIT_LIST_HEAD(&q->retransmit);
	INIT_LIST_HEAD(&q->sacked);
	INIT_LIST_HEAD(&q->abandoned);

	q->empty = 1;
}
/* Free the outqueue structure and any related pending chunks.
 */
static void __sctp_outq_teardown(struct sctp_outq *q)
{
	struct sctp_transport *transport;
	struct list_head *lchunk, *temp;
	struct sctp_chunk *chunk, *tmp;

	/* Throw away unacknowledged chunks. */
	list_for_each_entry(transport, &q->asoc->peer.transport_addr_list,
			transports) {
		while ((lchunk = sctp_list_dequeue(&transport->transmitted)) != NULL) {
			chunk = list_entry(lchunk, struct sctp_chunk,
					   transmitted_list);
			/* Mark as part of a failed message. */
			sctp_chunk_fail(chunk, q->error);
			sctp_chunk_free(chunk);
		}
	}

	/* Throw away chunks that have been gap ACKed. */
	list_for_each_safe(lchunk, temp, &q->sacked) {
		list_del_init(lchunk);
		chunk = list_entry(lchunk, struct sctp_chunk,
				   transmitted_list);
		sctp_chunk_fail(chunk, q->error);
		sctp_chunk_free(chunk);
	}

	/* Throw away any chunks in the retransmit queue. */
	list_for_each_safe(lchunk, temp, &q->retransmit) {
		list_del_init(lchunk);
		chunk = list_entry(lchunk, struct sctp_chunk,
				   transmitted_list);
		sctp_chunk_fail(chunk, q->error);
		sctp_chunk_free(chunk);
	}

	/* Throw away any chunks that are in the abandoned queue. */
	list_for_each_safe(lchunk, temp, &q->abandoned) {
		list_del_init(lchunk);
		chunk = list_entry(lchunk, struct sctp_chunk,
				   transmitted_list);
		sctp_chunk_fail(chunk, q->error);
		sctp_chunk_free(chunk);
	}

	/* Throw away any leftover data chunks. */
	while ((chunk = sctp_outq_dequeue_data(q)) != NULL) {

		/* Mark as send failure. */
		sctp_chunk_fail(chunk, q->error);
		sctp_chunk_free(chunk);
	}

	/* Throw away any leftover control chunks. */
	list_for_each_entry_safe(chunk, tmp, &q->control_chunk_list, list) {
		list_del_init(&chunk->list);
		sctp_chunk_free(chunk);
	}
}

void sctp_outq_teardown(struct sctp_outq *q)
{
	__sctp_outq_teardown(q);
	sctp_outq_init(q->asoc, q);
}

/* Free the outqueue structure and any related pending chunks. */
void sctp_outq_free(struct sctp_outq *q)
{
	/* Throw away leftover chunks. */
	__sctp_outq_teardown(q);
}
/* Put a new chunk in an sctp_outq.  */
int sctp_outq_tail(struct sctp_outq *q, struct sctp_chunk *chunk)
{
	struct net *net = sock_net(q->asoc->base.sk);
	int error = 0;

	pr_debug("%s: outq:%p, chunk:%p[%s]\n", __func__, q, chunk,
		 chunk && chunk->chunk_hdr ?
		 sctp_cname(SCTP_ST_CHUNK(chunk->chunk_hdr->type)) :
		 "illegal chunk");

	/* If it is data, queue it up, otherwise, send it
	 * immediately.
	 */
	if (sctp_chunk_is_data(chunk)) {
		/* Is it OK to queue data chunks?  */
		/* From 9. Termination of Association
		 *
		 * When either endpoint performs a shutdown, the
		 * association on each peer will stop accepting new
		 * data from its user and only deliver data in queue
		 * at the time of sending or receiving the SHUTDOWN
		 * chunk.
		 */
		switch (q->asoc->state) {
		case SCTP_STATE_CLOSED:
		case SCTP_STATE_SHUTDOWN_PENDING:
		case SCTP_STATE_SHUTDOWN_SENT:
		case SCTP_STATE_SHUTDOWN_RECEIVED:
		case SCTP_STATE_SHUTDOWN_ACK_SENT:
			/* Cannot send after transport endpoint shutdown */
			error = -ESHUTDOWN;
			break;

		default:
			pr_debug("%s: outqueueing: outq:%p, chunk:%p[%s]\n",
				 __func__, q, chunk, chunk && chunk->chunk_hdr ?
				 sctp_cname(SCTP_ST_CHUNK(chunk->chunk_hdr->type)) :
				 "illegal chunk");

			sctp_outq_tail_data(q, chunk);
			if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED)
				SCTP_INC_STATS(net, SCTP_MIB_OUTUNORDERCHUNKS);
			else
				SCTP_INC_STATS(net, SCTP_MIB_OUTORDERCHUNKS);
			q->empty = 0;
			break;
		}
	} else {
		list_add_tail(&chunk->list, &q->control_chunk_list);
		SCTP_INC_STATS(net, SCTP_MIB_OUTCTRLCHUNKS);
	}

	if (error < 0)
		return error;

	if (!q->cork)
		error = sctp_outq_flush(q, 0);

	return error;
}
/* Insert a chunk into the sorted list based on the TSNs.  The retransmit list
 * and the abandoned list are in ascending order.
 */
static void sctp_insert_list(struct list_head *head, struct list_head *new)
{
	struct list_head *pos;
	struct sctp_chunk *nchunk, *lchunk;
	__u32 ntsn, ltsn;
	int done = 0;

	nchunk = list_entry(new, struct sctp_chunk, transmitted_list);
	ntsn = ntohl(nchunk->subh.data_hdr->tsn);

	list_for_each(pos, head) {
		lchunk = list_entry(pos, struct sctp_chunk, transmitted_list);
		ltsn = ntohl(lchunk->subh.data_hdr->tsn);
		if (TSN_lt(ntsn, ltsn)) {
			list_add(new, pos->prev);
			done = 1;
			break;
		}
	}
	if (!done)
		list_add_tail(new, head);
}
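/* Example: inserting TSN 104 into a retransmit list holding 102, 103, 106
 * places it before 106, leaving 102-103-104-106.  The TSN_lt() comparison
 * keeps this ordering correct across 32-bit TSN wraparound.
 */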
/* Mark all the eligible packets on a transport for retransmission.  */
void sctp_retransmit_mark(struct sctp_outq *q,
			  struct sctp_transport *transport,
			  __u8 reason)
{
	struct list_head *lchunk, *ltemp;
	struct sctp_chunk *chunk;

	/* Walk through the specified transmitted queue.  */
	list_for_each_safe(lchunk, ltemp, &transport->transmitted) {
		chunk = list_entry(lchunk, struct sctp_chunk,
				   transmitted_list);

		/* If the chunk is abandoned, move it to abandoned list. */
		if (sctp_chunk_abandoned(chunk)) {
			list_del_init(lchunk);
			sctp_insert_list(&q->abandoned, lchunk);

			/* If this chunk has not been previously acked,
			 * stop considering it as 'outstanding'.  Our peer
			 * will most likely never see it since it will
			 * not be retransmitted
			 */
			if (!chunk->tsn_gap_acked) {
				if (chunk->transport)
					chunk->transport->flight_size -=
							sctp_data_size(chunk);
				q->outstanding_bytes -= sctp_data_size(chunk);
				q->asoc->peer.rwnd += sctp_data_size(chunk);
			}
			continue;
		}

		/* If we are doing retransmission due to a timeout or pmtu
		 * discovery, only the chunks that are not yet acked should
		 * be added to the retransmit queue.
		 */
		if ((reason == SCTP_RTXR_FAST_RTX &&
		     (chunk->fast_retransmit == SCTP_NEED_FRTX)) ||
		    (reason != SCTP_RTXR_FAST_RTX && !chunk->tsn_gap_acked)) {
			/* RFC 2960 6.2.1 Processing a Received SACK
			 *
			 * C) Any time a DATA chunk is marked for
			 * retransmission (via either T3-rtx timer expiration
			 * (Section 6.3.3) or via fast retransmit
			 * (Section 7.2.4)), add the data size of those
			 * chunks to the rwnd.
			 */
			q->asoc->peer.rwnd += sctp_data_size(chunk);
			q->outstanding_bytes -= sctp_data_size(chunk);
			if (chunk->transport)
				transport->flight_size -= sctp_data_size(chunk);

			/* sctpimpguide-05 Section 2.8.2
			 * M5) If a T3-rtx timer expires, the
			 * 'TSN.Missing.Report' of all affected TSNs is set
			 * to 0.
			 */
			chunk->tsn_missing_report = 0;

			/* If a chunk that is being used for RTT measurement
			 * has to be retransmitted, we cannot use this chunk
			 * anymore for RTT measurements. Reset rto_pending so
			 * that a new RTT measurement is started when a new
			 * data chunk is sent.
			 */
			if (chunk->rtt_in_progress) {
				chunk->rtt_in_progress = 0;
				transport->rto_pending = 0;
			}

			/* Move the chunk to the retransmit queue. The chunks
			 * on the retransmit queue are always kept in order.
			 */
			list_del_init(lchunk);
			sctp_insert_list(&q->retransmit, lchunk);
		}
	}

	pr_debug("%s: transport:%p, reason:%d, cwnd:%d, ssthresh:%d, "
		 "flight_size:%d, pba:%d\n", __func__, transport, reason,
		 transport->cwnd, transport->ssthresh, transport->flight_size,
		 transport->partial_bytes_acked);
}
/* Mark all the eligible packets on a transport for retransmission and force
 * one packet out.
 */
void sctp_retransmit(struct sctp_outq *q, struct sctp_transport *transport,
		     sctp_retransmit_reason_t reason)
{
	struct net *net = sock_net(q->asoc->base.sk);
	int error = 0;

	switch (reason) {
	case SCTP_RTXR_T3_RTX:
		SCTP_INC_STATS(net, SCTP_MIB_T3_RETRANSMITS);
		sctp_transport_lower_cwnd(transport, SCTP_LOWER_CWND_T3_RTX);
		/* Update the retran path if the T3-rtx timer has expired for
		 * the current retran path.
		 */
		if (transport == transport->asoc->peer.retran_path)
			sctp_assoc_update_retran_path(transport->asoc);
		transport->asoc->rtx_data_chunks +=
			transport->asoc->unack_data;
		break;
	case SCTP_RTXR_FAST_RTX:
		SCTP_INC_STATS(net, SCTP_MIB_FAST_RETRANSMITS);
		sctp_transport_lower_cwnd(transport, SCTP_LOWER_CWND_FAST_RTX);
		q->fast_rtx = 1;
		break;
	case SCTP_RTXR_PMTUD:
		SCTP_INC_STATS(net, SCTP_MIB_PMTUD_RETRANSMITS);
		break;
	case SCTP_RTXR_T1_RTX:
		SCTP_INC_STATS(net, SCTP_MIB_T1_RETRANSMITS);
		transport->asoc->init_retries++;
		break;
	default:
		BUG();
	}

	sctp_retransmit_mark(q, transport, reason);

	/* PR-SCTP A5) Any time the T3-rtx timer expires, on any destination,
	 * the sender SHOULD try to advance the "Advanced.Peer.Ack.Point" by
	 * following the procedures outlined in C1 - C5.
	 */
	if (reason == SCTP_RTXR_T3_RTX)
		sctp_generate_fwdtsn(q, q->asoc->ctsn_ack_point);

	/* Flush the queues only on timeout, since fast_rtx is only
	 * triggered during sack processing and the queue
	 * will be flushed at the end.
	 */
	if (reason != SCTP_RTXR_FAST_RTX)
		error = sctp_outq_flush(q, /* rtx_timeout */ 1);

	if (error)
		q->asoc->base.sk->sk_err = -error;
}
/*
 * Transmit DATA chunks on the retransmit queue.  Upon return from
 * sctp_outq_flush_rtx() the packet 'pkt' may contain chunks which
 * need to be transmitted by the caller.
 * We assume that pkt->transport has already been set.
 *
 * The return value is a normal kernel error return value.
 */
static int sctp_outq_flush_rtx(struct sctp_outq *q, struct sctp_packet *pkt,
			       int rtx_timeout, int *start_timer)
{
	struct list_head *lqueue;
	struct sctp_transport *transport = pkt->transport;
	sctp_xmit_t status;
	struct sctp_chunk *chunk, *chunk1;
	int fast_rtx;
	int error = 0;
	int timer = 0;
	int done = 0;

	lqueue = &q->retransmit;
	fast_rtx = q->fast_rtx;

	/* This loop handles time-out retransmissions, fast retransmissions,
	 * and retransmissions due to opening of window.
	 *
	 * RFC 2960 6.3.3 Handle T3-rtx Expiration
	 *
	 * E3) Determine how many of the earliest (i.e., lowest TSN)
	 * outstanding DATA chunks for the address for which the
	 * T3-rtx has expired will fit into a single packet, subject
	 * to the MTU constraint for the path corresponding to the
	 * destination transport address to which the retransmission
	 * is being sent (this may be different from the address for
	 * which the timer expires [see Section 6.4]). Call this value
	 * K. Bundle and retransmit those K DATA chunks in a single
	 * packet to the destination endpoint.
	 *
	 * [Just to be painfully clear, if we are retransmitting
	 * because a timeout just happened, we should send only ONE
	 * packet of retransmitted data.]
	 *
	 * For fast retransmissions we also send only ONE packet.  However,
	 * if we are just flushing the queue due to open window, we'll
	 * try to send as much as possible.
	 */
	list_for_each_entry_safe(chunk, chunk1, lqueue, transmitted_list) {
		/* If the chunk is abandoned, move it to abandoned list. */
		if (sctp_chunk_abandoned(chunk)) {
			list_del_init(&chunk->transmitted_list);
			sctp_insert_list(&q->abandoned,
					 &chunk->transmitted_list);
			continue;
		}

		/* Make sure that Gap Acked TSNs are not retransmitted.  A
		 * simple approach is just to move such TSNs out of the
		 * way and into a 'transmitted' queue and skip to the
		 * next chunk.
		 */
		if (chunk->tsn_gap_acked) {
			list_move_tail(&chunk->transmitted_list,
				       &transport->transmitted);
			continue;
		}

		/* If we are doing fast retransmit, ignore non-fast_retransmit
		 * chunks
		 */
		if (fast_rtx && !chunk->fast_retransmit)
			continue;

redo:
		/* Attempt to append this chunk to the packet. */
		status = sctp_packet_append_chunk(pkt, chunk);

		switch (status) {
		case SCTP_XMIT_PMTU_FULL:
			if (!pkt->has_data && !pkt->has_cookie_echo) {
				/* If this packet did not contain DATA then
				 * retransmission did not happen, so do it
				 * again.  We'll ignore the error here since
				 * control chunks are already freed so there
				 * is nothing we can do.
				 */
				sctp_packet_transmit(pkt);
				goto redo;
			}

			/* Send this packet.  */
			error = sctp_packet_transmit(pkt);

			/* If we are retransmitting, we should only
			 * send a single packet.
			 * Otherwise, try appending this chunk again.
			 */
			if (rtx_timeout || fast_rtx)
				done = 1;
			else
				goto redo;

			/* Bundle next chunk in the next round.  */
			break;

		case SCTP_XMIT_RWND_FULL:
			/* Send this packet. */
			error = sctp_packet_transmit(pkt);

			/* Stop sending DATA as there is no more room
			 * at the receiver.
			 */
			done = 1;
			break;

		case SCTP_XMIT_NAGLE_DELAY:
			/* Send this packet. */
			error = sctp_packet_transmit(pkt);

			/* Stop sending DATA because of nagle delay. */
			done = 1;
			break;

		default:
			/* The append was successful, so add this chunk to
			 * the transmitted list.
			 */
			list_move_tail(&chunk->transmitted_list,
				       &transport->transmitted);

			/* Mark the chunk as ineligible for fast retransmit
			 * after it is retransmitted.
			 */
			if (chunk->fast_retransmit == SCTP_NEED_FRTX)
				chunk->fast_retransmit = SCTP_DONT_FRTX;

			q->empty = 0;
			q->asoc->stats.rtxchunks++;
			break;
		}

		/* Set the timer if there were no errors */
		if (!error && !timer)
			timer = 1;

		if (done)
			break;
	}

	/* If we are here due to a retransmit timeout or a fast
	 * retransmit and if there are any chunks left in the retransmit
	 * queue that could not fit in the PMTU sized packet, they need
	 * to be marked as ineligible for a subsequent fast retransmit.
	 */
	if (rtx_timeout || fast_rtx) {
		list_for_each_entry(chunk1, lqueue, transmitted_list) {
			if (chunk1->fast_retransmit == SCTP_NEED_FRTX)
				chunk1->fast_retransmit = SCTP_DONT_FRTX;
		}
	}

	*start_timer = timer;

	/* Clear fast retransmit hint */
	if (fast_rtx)
		q->fast_rtx = 0;

	return error;
}
/* Uncork the outqueue: flush any chunks that were queued up while the
 * queue was corked.
 */
int sctp_outq_uncork(struct sctp_outq *q)
{
	if (q->cork)
		q->cork = 0;

	return sctp_outq_flush(q, 0);
}
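/* Illustrative usage of the cork/uncork pattern (not code from this file):
 *
 *	q->cork = 1;
 *	sctp_outq_tail(q, chunk1);	(queued only, since cork is set)
 *	sctp_outq_tail(q, chunk2);
 *	error = sctp_outq_uncork(q);	(one flush, chunks get bundled)
 *
 * In the stack itself corking is normally driven by the state-machine
 * side-effect processing rather than open-coded like this.
 */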
/*
 * Try to flush an outqueue.
 *
 * Description: Send everything in q which we legally can, subject to
 * congestion limitations.
 *
 * Note: This function can be called from multiple contexts so appropriate
 * locking concerns must be made.  Today we use the sock lock to protect
 * this function.
 */
static int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout)
{
	struct sctp_packet *packet;
	struct sctp_packet singleton;
	struct sctp_association *asoc = q->asoc;
	__u16 sport = asoc->base.bind_addr.port;
	__u16 dport = asoc->peer.port;
	__u32 vtag = asoc->peer.i.init_tag;
	struct sctp_transport *transport = NULL;
	struct sctp_transport *new_transport;
	struct sctp_chunk *chunk, *tmp;
	sctp_xmit_t status;
	int error = 0;
	int start_timer = 0;
	int one_packet = 0;

	/* These transports have chunks to send. */
	struct list_head transport_list;
	struct list_head *ltransport;

	INIT_LIST_HEAD(&transport_list);
	packet = NULL;

	/*
	 * 6.10 Bundling
	 *   ...
	 *   When bundling control chunks with DATA chunks, an
	 *   endpoint MUST place control chunks first in the outbound
	 *   SCTP packet.  The transmitter MUST transmit DATA chunks
	 *   within a SCTP packet in increasing order of TSN.
	 *   ...
	 */

	list_for_each_entry_safe(chunk, tmp, &q->control_chunk_list, list) {
		/* RFC 5061, 5.3
		 * F1) This means that until such time as the ASCONF
		 * containing the add is acknowledged, the sender MUST
		 * NOT use the new IP address as a source for ANY SCTP
		 * packet except on carrying an ASCONF Chunk.
		 */
		if (asoc->src_out_of_asoc_ok &&
		    chunk->chunk_hdr->type != SCTP_CID_ASCONF)
			continue;

		list_del_init(&chunk->list);

		/* Pick the right transport to use. */
		new_transport = chunk->transport;

		if (!new_transport) {
			/*
			 * If we have a prior transport pointer, see if
			 * the destination address of the chunk
			 * matches the destination address of the
			 * current transport.  If not a match, then
			 * try to look up the transport with a given
			 * destination address.  We do this because
			 * after processing ASCONFs, we may have new
			 * transports created.
			 */
			if (transport &&
			    sctp_cmp_addr_exact(&chunk->dest,
						&transport->ipaddr))
				new_transport = transport;
			else
				new_transport = sctp_assoc_lookup_paddr(asoc,
								&chunk->dest);

			/* if we still don't have a new transport, then
			 * use the current active path.
			 */
			if (!new_transport)
				new_transport = asoc->peer.active_path;
		} else if ((new_transport->state == SCTP_INACTIVE) ||
			   (new_transport->state == SCTP_UNCONFIRMED) ||
			   (new_transport->state == SCTP_PF)) {
			/* If the chunk is Heartbeat or Heartbeat Ack,
			 * send it to chunk->transport, even if it's
			 * inactive.
			 *
			 * 3.3.6 Heartbeat Acknowledgement:
			 * ...
			 * A HEARTBEAT ACK is always sent to the source IP
			 * address of the IP datagram containing the
			 * HEARTBEAT chunk to which this ack is responding.
			 * ...
			 *
			 * ASCONF_ACKs also must be sent to the source.
			 */
			if (chunk->chunk_hdr->type != SCTP_CID_HEARTBEAT &&
			    chunk->chunk_hdr->type != SCTP_CID_HEARTBEAT_ACK &&
			    chunk->chunk_hdr->type != SCTP_CID_ASCONF_ACK)
				new_transport = asoc->peer.active_path;
		}

		/* Are we switching transports?
		 * Take care of transport locks.
		 */
		if (new_transport != transport) {
			transport = new_transport;
			if (list_empty(&transport->send_ready)) {
				list_add_tail(&transport->send_ready,
					      &transport_list);
			}
			packet = &transport->packet;
			sctp_packet_config(packet, vtag,
					   asoc->peer.ecn_capable);
		}

		switch (chunk->chunk_hdr->type) {
		/*
		 * 6.10 Bundling
		 *   ...
		 *   An endpoint MUST NOT bundle INIT, INIT ACK or SHUTDOWN
		 *   COMPLETE with any other chunks.  [Send them immediately.]
		 */
		case SCTP_CID_INIT:
		case SCTP_CID_INIT_ACK:
		case SCTP_CID_SHUTDOWN_COMPLETE:
			sctp_packet_init(&singleton, transport, sport, dport);
			sctp_packet_config(&singleton, vtag, 0);
			sctp_packet_append_chunk(&singleton, chunk);
			error = sctp_packet_transmit(&singleton);
			if (error < 0)
				return error;
			break;

		case SCTP_CID_ABORT:
			if (sctp_test_T_bit(chunk)) {
				packet->vtag = asoc->c.my_vtag;
			}
		/* The following chunks are "response" chunks, i.e.
		 * they are generated in response to something we
		 * received.  If we are sending these, then we can
		 * send only 1 packet containing these chunks.
		 */
		case SCTP_CID_HEARTBEAT_ACK:
		case SCTP_CID_SHUTDOWN_ACK:
		case SCTP_CID_COOKIE_ACK:
		case SCTP_CID_COOKIE_ECHO:
		case SCTP_CID_ERROR:
		case SCTP_CID_ECN_CWR:
		case SCTP_CID_ASCONF_ACK:
			one_packet = 1;
			/* Fall through */

		case SCTP_CID_SACK:
		case SCTP_CID_HEARTBEAT:
		case SCTP_CID_SHUTDOWN:
		case SCTP_CID_ECN_ECNE:
		case SCTP_CID_ASCONF:
		case SCTP_CID_FWD_TSN:
			status = sctp_packet_transmit_chunk(packet, chunk,
							    one_packet);
			if (status != SCTP_XMIT_OK) {
				/* put the chunk back */
				list_add(&chunk->list, &q->control_chunk_list);
			} else {
				asoc->stats.octrlchunks++;
				/* PR-SCTP C5) If a FORWARD TSN is sent, the
				 * sender MUST assure that at least one T3-rtx
				 * timer is running.
				 */
				if (chunk->chunk_hdr->type == SCTP_CID_FWD_TSN)
					sctp_transport_reset_timers(transport);
			}
			break;

		default:
			/* We built a chunk with an illegal type! */
			BUG();
		}
	}

	if (q->asoc->src_out_of_asoc_ok)
		goto sctp_flush_out;

	/* Is it OK to send data chunks?  */
	switch (asoc->state) {
	case SCTP_STATE_COOKIE_ECHOED:
		/* Only allow bundling when this packet has a COOKIE-ECHO
		 * chunk.
		 */
		if (!packet || !packet->has_cookie_echo)
			break;

		/* fallthru */
	case SCTP_STATE_ESTABLISHED:
	case SCTP_STATE_SHUTDOWN_PENDING:
	case SCTP_STATE_SHUTDOWN_RECEIVED:
		/*
		 * RFC 2960 6.1  Transmission of DATA Chunks
		 *
		 * C) When the time comes for the sender to transmit,
		 * before sending new DATA chunks, the sender MUST
		 * first transmit any outstanding DATA chunks which
		 * are marked for retransmission (limited by the
		 * current cwnd).
		 */
		if (!list_empty(&q->retransmit)) {
			if (asoc->peer.retran_path->state == SCTP_UNCONFIRMED)
				goto sctp_flush_out;
			if (transport == asoc->peer.retran_path)
				goto retran;

			/* Switch transports & prepare the packet.  */
			transport = asoc->peer.retran_path;

			if (list_empty(&transport->send_ready)) {
				list_add_tail(&transport->send_ready,
					      &transport_list);
			}

			packet = &transport->packet;
			sctp_packet_config(packet, vtag,
					   asoc->peer.ecn_capable);
		retran:
			error = sctp_outq_flush_rtx(q, packet,
						    rtx_timeout, &start_timer);

			if (start_timer)
				sctp_transport_reset_timers(transport);

			/* This can happen on COOKIE-ECHO resend.  Only
			 * one chunk can get bundled with a COOKIE-ECHO.
			 */
			if (packet->has_cookie_echo)
				goto sctp_flush_out;

			/* Don't send new data if there is still data
			 * waiting to retransmit.
			 */
			if (!list_empty(&q->retransmit))
				goto sctp_flush_out;
		}

		/* Apply Max.Burst limitation to the current transport in
		 * case it will be used for new data.  We are going to
		 * reset it before we return, but we want to apply the limit
		 * to the currently queued data.
		 */
		if (transport)
			sctp_transport_burst_limited(transport);

		/* Finally, transmit new packets.  */
		while ((chunk = sctp_outq_dequeue_data(q)) != NULL) {
			/* RFC 2960 6.5 Every DATA chunk MUST carry a valid
			 * stream identifier.
			 */
			if (chunk->sinfo.sinfo_stream >=
			    asoc->c.sinit_num_ostreams) {

				/* Mark as failed send. */
				sctp_chunk_fail(chunk, SCTP_ERROR_INV_STRM);
				sctp_chunk_free(chunk);
				continue;
			}

			/* Has this chunk expired? */
			if (sctp_chunk_abandoned(chunk)) {
				sctp_chunk_fail(chunk, 0);
				sctp_chunk_free(chunk);
				continue;
			}

			/* If there is a specified transport, use it.
			 * Otherwise, we want to use the active path.
			 */
			new_transport = chunk->transport;
			if (!new_transport ||
			    ((new_transport->state == SCTP_INACTIVE) ||
			     (new_transport->state == SCTP_UNCONFIRMED) ||
			     (new_transport->state == SCTP_PF)))
				new_transport = asoc->peer.active_path;
			if (new_transport->state == SCTP_UNCONFIRMED)
				continue;

			/* Change packets if necessary.  */
			if (new_transport != transport) {
				transport = new_transport;

				/* Schedule to have this transport's
				 * packet flushed.
				 */
				if (list_empty(&transport->send_ready)) {
					list_add_tail(&transport->send_ready,
						      &transport_list);
				}

				packet = &transport->packet;
				sctp_packet_config(packet, vtag,
						   asoc->peer.ecn_capable);
				/* We've switched transports, so apply the
				 * Burst limit to the new transport.
				 */
				sctp_transport_burst_limited(transport);
			}

			pr_debug("%s: outq:%p, chunk:%p[%s], tx-tsn:0x%x skb->head:%p "
				 "skb->users:%d\n",
				 __func__, q, chunk, chunk && chunk->chunk_hdr ?
				 sctp_cname(SCTP_ST_CHUNK(chunk->chunk_hdr->type)) :
				 "illegal chunk", ntohl(chunk->subh.data_hdr->tsn),
				 chunk->skb ? chunk->skb->head : NULL, chunk->skb ?
				 atomic_read(&chunk->skb->users) : -1);

			/* Add the chunk to the packet.  */
			status = sctp_packet_transmit_chunk(packet, chunk, 0);

			switch (status) {
			case SCTP_XMIT_PMTU_FULL:
			case SCTP_XMIT_RWND_FULL:
			case SCTP_XMIT_NAGLE_DELAY:
				/* We could not append this chunk, so put
				 * the chunk back on the output queue.
				 */
				pr_debug("%s: could not transmit tsn:0x%x, status:%d\n",
					 __func__, ntohl(chunk->subh.data_hdr->tsn),
					 status);

				sctp_outq_head_data(q, chunk);
				goto sctp_flush_out;

			case SCTP_XMIT_OK:
				/* The sender is in the SHUTDOWN-PENDING state,
				 * The sender MAY set the I-bit in the DATA
				 * chunk header.
				 */
				if (asoc->state == SCTP_STATE_SHUTDOWN_PENDING)
					chunk->chunk_hdr->flags |= SCTP_DATA_SACK_IMM;
				if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED)
					asoc->stats.ouodchunks++;
				else
					asoc->stats.oodchunks++;

				break;

			default:
				BUG();
			}

			/* BUG: We assume that the sctp_packet_transmit()
			 * call below will succeed all the time and add the
			 * chunk to the transmitted list and restart the
			 * timers.
			 * It is possible that the call can fail under OOM
			 * conditions.
			 *
			 * Is this really a problem?  Won't this behave
			 * like a lost TSN?
			 */
			list_add_tail(&chunk->transmitted_list,
				      &transport->transmitted);

			sctp_transport_reset_timers(transport);

			q->empty = 0;

			/* Only let one DATA chunk get bundled with a
			 * COOKIE-ECHO chunk.
			 */
			if (packet->has_cookie_echo)
				goto sctp_flush_out;
		}
		break;

	default:
		/* Do nothing. */
		break;
	}

sctp_flush_out:

	/* Before returning, examine all the transports touched in
	 * this call.  Right now, we bluntly force clear all the
	 * transports.  Things might change after we implement Nagle.
	 * But such an examination is still required.
	 *
	 * --xguo
	 */
	while ((ltransport = sctp_list_dequeue(&transport_list)) != NULL) {
		struct sctp_transport *t = list_entry(ltransport,
						      struct sctp_transport,
						      send_ready);
		packet = &t->packet;
		if (!sctp_packet_empty(packet))
			error = sctp_packet_transmit(packet);

		/* Clear the burst limited state, if any */
		sctp_transport_burst_reset(t);
	}

	return error;
}
/* Update unack_data based on the incoming SACK chunk */
static void sctp_sack_update_unack_data(struct sctp_association *assoc,
					struct sctp_sackhdr *sack)
{
	sctp_sack_variable_t *frags;
	__u16 unack_data;
	int i;

	unack_data = assoc->next_tsn - assoc->ctsn_ack_point - 1;

	frags = sack->variable;
	for (i = 0; i < ntohs(sack->num_gap_ack_blocks); i++) {
		unack_data -= ((ntohs(frags[i].gab.end) -
				ntohs(frags[i].gab.start) + 1));
	}

	assoc->unack_data = unack_data;
}
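/* Worked example: with next_tsn = 110 and ctsn_ack_point = 100, nine TSNs
 * (101-109) are unacknowledged.  A single gap ack block of start = 3,
 * end = 5 covers TSNs 103-105 (offsets are relative to the cumulative
 * ack), so three chunks are subtracted and unack_data ends up at 6.
 */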
/* This is where we REALLY process a SACK.
 *
 * Process the SACK against the outqueue.  Mostly, this just frees
 * things off the transmitted queue.
 */
int sctp_outq_sack(struct sctp_outq *q, struct sctp_chunk *chunk)
{
	struct sctp_association *asoc = q->asoc;
	struct sctp_sackhdr *sack = chunk->subh.sack_hdr;
	struct sctp_transport *transport;
	struct sctp_chunk *tchunk = NULL;
	struct list_head *lchunk, *transport_list, *temp;
	sctp_sack_variable_t *frags = sack->variable;
	__u32 sack_ctsn, ctsn, tsn;
	__u32 highest_tsn, highest_new_tsn;
	__u32 sack_a_rwnd;
	unsigned int outstanding;
	struct sctp_transport *primary = asoc->peer.primary_path;
	int count_of_newacks = 0;
	int gap_ack_blocks;
	u8 accum_moved = 0;

	/* Grab the association's destination address list. */
	transport_list = &asoc->peer.transport_addr_list;

	sack_ctsn = ntohl(sack->cum_tsn_ack);
	gap_ack_blocks = ntohs(sack->num_gap_ack_blocks);
	asoc->stats.gapcnt += gap_ack_blocks;
	/*
	 * SFR-CACC algorithm:
	 * On receipt of a SACK the sender SHOULD execute the
	 * following statements.
	 *
	 * 1) If the cumulative ack in the SACK passes next tsn_at_change
	 * on the current primary, the CHANGEOVER_ACTIVE flag SHOULD be
	 * cleared. The CYCLING_CHANGEOVER flag SHOULD also be cleared for
	 * all destinations.
	 * 2) If the SACK contains gap acks and the flag CHANGEOVER_ACTIVE
	 * is set the receiver of the SACK MUST take the following actions:
	 *
	 * A) Initialize the cacc_saw_newack to 0 for all destination
	 * addresses.
	 *
	 * Only bother if changeover_active is set. Otherwise, this is
	 * totally suboptimal to do on every SACK.
	 */
	if (primary->cacc.changeover_active) {
		u8 clear_cycling = 0;

		if (TSN_lte(primary->cacc.next_tsn_at_change, sack_ctsn)) {
			primary->cacc.changeover_active = 0;
			clear_cycling = 1;
		}

		if (clear_cycling || gap_ack_blocks) {
			list_for_each_entry(transport, transport_list,
					transports) {
				if (clear_cycling)
					transport->cacc.cycling_changeover = 0;
				if (gap_ack_blocks)
					transport->cacc.cacc_saw_newack = 0;
			}
		}
	}

	/* Get the highest TSN in the sack. */
	highest_tsn = sack_ctsn;
	if (gap_ack_blocks)
		highest_tsn += ntohs(frags[gap_ack_blocks - 1].gab.end);

	if (TSN_lt(asoc->highest_sacked, highest_tsn))
		asoc->highest_sacked = highest_tsn;

	highest_new_tsn = sack_ctsn;

	/* Run through the retransmit queue.  Credit bytes received
	 * and free those chunks that we can.
	 */
	sctp_check_transmitted(q, &q->retransmit, NULL, NULL, sack, &highest_new_tsn);

	/* Run through the transmitted queue.
	 * Credit bytes received and free those chunks which we can.
	 *
	 * This is a MASSIVE candidate for optimization.
	 */
	list_for_each_entry(transport, transport_list, transports) {
		sctp_check_transmitted(q, &transport->transmitted,
				       transport, &chunk->source, sack,
				       &highest_new_tsn);
		/*
		 * SFR-CACC algorithm:
		 * C) Let count_of_newacks be the number of
		 * destinations for which cacc_saw_newack is set.
		 */
		if (transport->cacc.cacc_saw_newack)
			count_of_newacks++;
	}

	/* Move the Cumulative TSN Ack Point if appropriate.  */
	if (TSN_lt(asoc->ctsn_ack_point, sack_ctsn)) {
		asoc->ctsn_ack_point = sack_ctsn;
		accum_moved = 1;
	}

	if (gap_ack_blocks) {

		if (asoc->fast_recovery && accum_moved)
			highest_new_tsn = highest_tsn;

		list_for_each_entry(transport, transport_list, transports)
			sctp_mark_missing(q, &transport->transmitted, transport,
					  highest_new_tsn, count_of_newacks);
	}

	/* Update unack_data field in the assoc. */
	sctp_sack_update_unack_data(asoc, sack);

	ctsn = asoc->ctsn_ack_point;

	/* Throw away stuff rotting on the sack queue.  */
	list_for_each_safe(lchunk, temp, &q->sacked) {
		tchunk = list_entry(lchunk, struct sctp_chunk,
				    transmitted_list);
		tsn = ntohl(tchunk->subh.data_hdr->tsn);
		if (TSN_lte(tsn, ctsn)) {
			list_del_init(&tchunk->transmitted_list);
			sctp_chunk_free(tchunk);
		}
	}

	/* ii) Set rwnd equal to the newly received a_rwnd minus the
	 *     number of bytes still outstanding after processing the
	 *     Cumulative TSN Ack and the Gap Ack Blocks.
	 */

	sack_a_rwnd = ntohl(sack->a_rwnd);
	outstanding = q->outstanding_bytes;

	if (outstanding < sack_a_rwnd)
		sack_a_rwnd -= outstanding;
	else
		sack_a_rwnd = 0;

	asoc->peer.rwnd = sack_a_rwnd;

	sctp_generate_fwdtsn(q, sack_ctsn);

	pr_debug("%s: sack cumulative tsn ack:0x%x\n", __func__, sack_ctsn);
	pr_debug("%s: cumulative tsn ack of assoc:%p is 0x%x, "
		 "advertised peer ack point:0x%x\n", __func__, asoc, ctsn,
		 asoc->adv_peer_ack_point);

	/* See if all chunks are acked.
	 * Make sure the empty queue handler will get run later.
	 */
	q->empty = (list_empty(&q->out_chunk_list) &&
		    list_empty(&q->retransmit));
	if (!q->empty)
		goto finish;

	list_for_each_entry(transport, transport_list, transports) {
		q->empty = q->empty && list_empty(&transport->transmitted);
		if (!q->empty)
			goto finish;
	}

	pr_debug("%s: sack queue is empty\n", __func__);
finish:
	return q->empty;
}
/* Is the outqueue empty?  */
int sctp_outq_is_empty(const struct sctp_outq *q)
{
	return q->empty;
}

/********************************************************************
 * 2nd Level Abstractions
 ********************************************************************/
/* Go through a transport's transmitted list or the association's retransmit
 * list and move chunks that are acked by the Cumulative TSN Ack to q->sacked.
 * The retransmit list will not have an associated transport.
 *
 * I added coherent debug information output.	--xguo
 *
 * Instead of printing 'sacked' or 'kept' for each TSN on the
 * transmitted_queue, we print a range: SACKED: TSN1-TSN2, TSN3, TSN4-TSN5.
 * KEPT TSN6-TSN7, etc.
 */
static void sctp_check_transmitted(struct sctp_outq *q,
				   struct list_head *transmitted_queue,
				   struct sctp_transport *transport,
				   union sctp_addr *saddr,
				   struct sctp_sackhdr *sack,
				   __u32 *highest_new_tsn_in_sack)
{
	struct list_head *lchunk;
	struct sctp_chunk *tchunk;
	struct list_head tlist;
	__u32 tsn;
	__u32 sack_ctsn;
	__u32 rtt;
	__u8 restart_timer = 0;
	int bytes_acked = 0;
	int migrate_bytes = 0;
	bool forward_progress = false;

	sack_ctsn = ntohl(sack->cum_tsn_ack);

	INIT_LIST_HEAD(&tlist);

	/* The while loop will skip empty transmitted queues. */
	while (NULL != (lchunk = sctp_list_dequeue(transmitted_queue))) {
		tchunk = list_entry(lchunk, struct sctp_chunk,
				    transmitted_list);

		if (sctp_chunk_abandoned(tchunk)) {
			/* Move the chunk to abandoned list. */
			sctp_insert_list(&q->abandoned, lchunk);

			/* If this chunk has not been acked, stop
			 * considering it as 'outstanding'.
			 */
			if (!tchunk->tsn_gap_acked) {
				if (tchunk->transport)
					tchunk->transport->flight_size -=
							sctp_data_size(tchunk);
				q->outstanding_bytes -= sctp_data_size(tchunk);
			}
			continue;
		}

		tsn = ntohl(tchunk->subh.data_hdr->tsn);
		if (sctp_acked(sack, tsn)) {
			/* If this queue is the retransmit queue, the
			 * retransmit timer has already reclaimed
			 * the outstanding bytes for this chunk, so only
			 * count bytes associated with a transport.
			 */
			if (transport) {
				/* If this chunk is being used for RTT
				 * measurement, calculate the RTT and update
				 * the RTO using this value.
				 *
				 * 6.3.1 C5) Karn's algorithm: RTT measurements
				 * MUST NOT be made using packets that were
				 * retransmitted (and thus for which it is
				 * ambiguous whether the reply was for the
				 * first instance of the packet or a later
				 * instance).
				 */
				if (!tchunk->tsn_gap_acked &&
				    tchunk->rtt_in_progress) {
					tchunk->rtt_in_progress = 0;
					rtt = jiffies - tchunk->sent_at;
					sctp_transport_update_rto(transport,
								  rtt);
				}
			}

			/* If the chunk hasn't been marked as ACKED,
			 * mark it and account bytes_acked if the
			 * chunk had a valid transport (it will not
			 * have a transport if ASCONF had deleted it
			 * while DATA was outstanding).
			 */
			if (!tchunk->tsn_gap_acked) {
				tchunk->tsn_gap_acked = 1;
				*highest_new_tsn_in_sack = tsn;
				bytes_acked += sctp_data_size(tchunk);
				if (!tchunk->transport)
					migrate_bytes += sctp_data_size(tchunk);
				forward_progress = true;
			}

			if (TSN_lte(tsn, sack_ctsn)) {
				/* RFC 2960  6.3.2 Retransmission Timer Rules
				 *
				 * R3) Whenever a SACK is received
				 * that acknowledges the DATA chunk
				 * with the earliest outstanding TSN
				 * for that address, restart T3-rtx
				 * timer for that address with its
				 * current RTO.
				 */
				restart_timer = 1;
				forward_progress = true;

				if (!tchunk->tsn_gap_acked) {
					/*
					 * SFR-CACC algorithm:
					 * 2) If the SACK contains gap acks
					 * and the flag CHANGEOVER_ACTIVE is
					 * set the receiver of the SACK MUST
					 * take the following action:
					 *
					 * B) For each TSN t being acked that
					 * has not been acked in any SACK so
					 * far, set cacc_saw_newack to 1 for
					 * the destination that the TSN was
					 * sent to.
					 */
					if (transport &&
					    sack->num_gap_ack_blocks &&
					    q->asoc->peer.primary_path->cacc.
					    changeover_active)
						transport->cacc.cacc_saw_newack
							= 1;
				}

				list_add_tail(&tchunk->transmitted_list,
					      &q->sacked);
			} else {
				/* RFC2960 7.2.4, sctpimpguide-05 2.8.2
				 * M2) Each time a SACK arrives reporting
				 * 'Stray DATA chunk(s)' record the highest TSN
				 * reported as newly acknowledged, call this
				 * value 'HighestTSNinSack'. A newly
				 * acknowledged DATA chunk is one not
				 * previously acknowledged in a SACK.
				 *
				 * When the SCTP sender of data receives a SACK
				 * chunk that acknowledges, for the first time,
				 * the receipt of a DATA chunk, all the still
				 * unacknowledged DATA chunks whose TSN is
				 * older than that newly acknowledged DATA
				 * chunk, are qualified as 'Stray DATA chunks'.
				 */
				list_add_tail(lchunk, &tlist);
			}
		} else {
			if (tchunk->tsn_gap_acked) {
				pr_debug("%s: receiver reneged on data TSN:0x%x\n",
					 __func__, tsn);

				tchunk->tsn_gap_acked = 0;

				if (tchunk->transport)
					bytes_acked -= sctp_data_size(tchunk);

				/* RFC 2960 6.3.2 Retransmission Timer Rules
				 *
				 * R4) Whenever a SACK is received missing a
				 * TSN that was previously acknowledged via a
				 * Gap Ack Block, start T3-rtx for the
				 * destination address to which the DATA
				 * chunk was originally
				 * transmitted if it is not already running.
				 */
				restart_timer = 1;
			}

			list_add_tail(lchunk, &tlist);
		}
	}

	if (transport) {
		if (bytes_acked) {
			struct sctp_association *asoc = transport->asoc;

			/* We may have counted DATA that was migrated
			 * to this transport due to DEL-IP operation.
			 * Subtract those bytes, since they were never
			 * sent on this transport and shouldn't be
			 * credited to this transport.
			 */
			bytes_acked -= migrate_bytes;

			/* 8.2. When an outstanding TSN is acknowledged,
			 * the endpoint shall clear the error counter of
			 * the destination transport address to which the
			 * DATA chunk was last sent.
			 * The association's overall error counter is
			 * also cleared.
			 */
			transport->error_count = 0;
			transport->asoc->overall_error_count = 0;
			forward_progress = true;

			/*
			 * While in SHUTDOWN PENDING, we may have started
			 * the T5 shutdown guard timer after reaching the
			 * retransmission limit. Stop that timer as soon
			 * as the receiver acknowledged any data.
			 */
			if (asoc->state == SCTP_STATE_SHUTDOWN_PENDING &&
			    del_timer(&asoc->timers
				[SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD]))
					sctp_association_put(asoc);

			/* Mark the destination transport address as
			 * active if it is not so marked.
			 */
			if ((transport->state == SCTP_INACTIVE ||
			     transport->state == SCTP_UNCONFIRMED) &&
			    sctp_cmp_addr_exact(&transport->ipaddr, saddr)) {
				sctp_assoc_control_transport(
					transport->asoc,
					transport,
					SCTP_TRANSPORT_UP,
					SCTP_RECEIVED_SACK);
			}

			sctp_transport_raise_cwnd(transport, sack_ctsn,
						  bytes_acked);

			transport->flight_size -= bytes_acked;
			if (transport->flight_size == 0)
				transport->partial_bytes_acked = 0;
			q->outstanding_bytes -= bytes_acked + migrate_bytes;
		} else {
			/* RFC 2960 6.1, sctpimpguide-06 2.15.2
			 * When a sender is doing zero window probing, it
			 * should not timeout the association if it continues
			 * to receive new packets from the receiver. The
			 * reason is that the receiver MAY keep its window
			 * closed for an indefinite time.
			 * A sender is doing zero window probing when the
			 * receiver's advertised window is zero, and there is
			 * only one data chunk in flight to the receiver.
			 *
			 * Allow the association to timeout while in SHUTDOWN
			 * PENDING or SHUTDOWN RECEIVED in case the receiver
			 * stays in zero window mode forever.
			 */
			if (!q->asoc->peer.rwnd &&
			    !list_empty(&tlist) &&
			    (sack_ctsn+2 == q->asoc->next_tsn) &&
			    q->asoc->state < SCTP_STATE_SHUTDOWN_PENDING) {
				pr_debug("%s: sack received for zero window "
					 "probe:%u\n", __func__, sack_ctsn);

				q->asoc->overall_error_count = 0;
				transport->error_count = 0;
			}
		}

		/* RFC 2960 6.3.2 Retransmission Timer Rules
		 *
		 * R2) Whenever all outstanding data sent to an address have
		 * been acknowledged, turn off the T3-rtx timer of that
		 * address.
		 */
		if (!transport->flight_size) {
			if (del_timer(&transport->T3_rtx_timer))
				sctp_transport_put(transport);
		} else if (restart_timer) {
			if (!mod_timer(&transport->T3_rtx_timer,
				       jiffies + transport->rto))
				sctp_transport_hold(transport);
		}

		if (forward_progress) {
			if (transport->dst)
				dst_confirm(transport->dst);
		}
	}

	list_splice(&tlist, transmitted_queue);
}
/* Mark chunks as missing and consequently may get retransmitted. */
static void sctp_mark_missing(struct sctp_outq *q,
			      struct list_head *transmitted_queue,
			      struct sctp_transport *transport,
			      __u32 highest_new_tsn_in_sack,
			      int count_of_newacks)
{
	struct sctp_chunk *chunk;
	__u32 tsn;
	char do_fast_retransmit = 0;
	struct sctp_association *asoc = q->asoc;
	struct sctp_transport *primary = asoc->peer.primary_path;

	list_for_each_entry(chunk, transmitted_queue, transmitted_list) {

		tsn = ntohl(chunk->subh.data_hdr->tsn);

		/* RFC 2960 7.2.4, sctpimpguide-05 2.8.2 M3) Examine all
		 * 'Unacknowledged TSN's', if the TSN number of an
		 * 'Unacknowledged TSN' is smaller than the 'HighestTSNinSack'
		 * value, increment the 'TSN.Missing.Report' count on that
		 * chunk if it has NOT been fast retransmitted or marked for
		 * fast retransmit already.
		 */
		if (chunk->fast_retransmit == SCTP_CAN_FRTX &&
		    !chunk->tsn_gap_acked &&
		    TSN_lt(tsn, highest_new_tsn_in_sack)) {

			/* SFR-CACC may require us to skip marking
			 * this chunk as missing.
			 */
			if (!transport || !sctp_cacc_skip(primary,
						chunk->transport,
						count_of_newacks, tsn)) {
				chunk->tsn_missing_report++;

				pr_debug("%s: tsn:0x%x missing counter:%d\n",
					 __func__, tsn, chunk->tsn_missing_report);
			}
		}
		/*
		 * M4) If any DATA chunk is found to have a
		 * 'TSN.Missing.Report'
		 * value larger than or equal to 3, mark that chunk for
		 * retransmission and start the fast retransmit procedure.
		 */

		if (chunk->tsn_missing_report >= 3) {
			chunk->fast_retransmit = SCTP_NEED_FRTX;
			do_fast_retransmit = 1;
		}
	}

	if (transport) {
		if (do_fast_retransmit)
			sctp_retransmit(q, transport, SCTP_RTXR_FAST_RTX);

		pr_debug("%s: transport:%p, cwnd:%d, ssthresh:%d, "
			 "flight_size:%d, pba:%d\n", __func__, transport,
			 transport->cwnd, transport->ssthresh,
			 transport->flight_size, transport->partial_bytes_acked);
	}
}
/* Is the given TSN acked by this packet?  */
static int sctp_acked(struct sctp_sackhdr *sack, __u32 tsn)
{
	int i;
	sctp_sack_variable_t *frags;
	__u16 gap;
	__u32 ctsn = ntohl(sack->cum_tsn_ack);

	if (TSN_lte(tsn, ctsn))
		goto pass;

	/* 3.3.4 Selective Acknowledgement (SACK) (3):
	 *
	 * Gap Ack Blocks:
	 *  These fields contain the Gap Ack Blocks. They are repeated
	 *  for each Gap Ack Block up to the number of Gap Ack Blocks
	 *  defined in the Number of Gap Ack Blocks field. All DATA
	 *  chunks with TSNs greater than or equal to (Cumulative TSN
	 *  Ack + Gap Ack Block Start) and less than or equal to
	 *  (Cumulative TSN Ack + Gap Ack Block End) of each Gap Ack
	 *  Block are assumed to have been received correctly.
	 */

	frags = sack->variable;
	gap = tsn - ctsn;
	for (i = 0; i < ntohs(sack->num_gap_ack_blocks); ++i) {
		if (TSN_lte(ntohs(frags[i].gab.start), gap) &&
		    TSN_lte(gap, ntohs(frags[i].gab.end)))
			goto pass;
	}

	return 0;
pass:
	return 1;
}
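/* Worked example: with cum_tsn_ack = 100, a gap ack block of start = 2,
 * end = 4 covers TSNs 102-104.  For tsn = 103 the offset is gap = 3 and
 * 2 <= 3 <= 4 holds, so sctp_acked() returns 1; tsn = 101 (gap = 1)
 * falls outside the block and is not acked by this SACK.
 */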
static inline int sctp_get_skip_pos(struct sctp_fwdtsn_skip *skiplist,
				    int nskips, __be16 stream)
{
	int i;

	for (i = 0; i < nskips; i++) {
		if (skiplist[i].stream == stream)
			return i;
	}
	return i;
}
/* Create and add a fwdtsn chunk to the outq's control queue if needed. */
static void sctp_generate_fwdtsn(struct sctp_outq *q, __u32 ctsn)
{
	struct sctp_association *asoc = q->asoc;
	struct sctp_chunk *ftsn_chunk = NULL;
	struct sctp_fwdtsn_skip ftsn_skip_arr[10];
	int nskips = 0;
	int skip_pos = 0;
	__u32 tsn;
	struct sctp_chunk *chunk;
	struct list_head *lchunk, *temp;

	if (!asoc->peer.prsctp_capable)
		return;

	/* PR-SCTP C1) Let SackCumAck be the Cumulative TSN ACK carried in the
	 * received SACK.
	 *
	 * If (Advanced.Peer.Ack.Point < SackCumAck), then update
	 * Advanced.Peer.Ack.Point to be equal to SackCumAck.
	 */
	if (TSN_lt(asoc->adv_peer_ack_point, ctsn))
		asoc->adv_peer_ack_point = ctsn;

	/* PR-SCTP C2) Try to further advance the "Advanced.Peer.Ack.Point"
	 * locally, that is, to move "Advanced.Peer.Ack.Point" up as long as
	 * the chunk next in the out-queue space is marked as "abandoned" as
	 * shown in the following example:
	 *
	 * Assuming that a SACK arrived with the Cumulative TSN ACK 102
	 * and the Advanced.Peer.Ack.Point is updated to this value:
	 *
	 *   out-queue at the end of  ==>   out-queue after Adv.Ack.Point
	 *   normal SACK processing           local advancement
	 *                ...                           ...
	 *   Adv.Ack.Pt-> 102 acked                     102 acked
	 *                103 abandoned                 103 abandoned
	 *                104 abandoned     Adv.Ack.P-> 104 abandoned
	 *                105                           105
	 *                106 acked                     106 acked
	 *                ...                           ...
	 *
	 * In this example, the data sender successfully advanced the
	 * "Advanced.Peer.Ack.Point" from 102 to 104 locally.
	 */
	list_for_each_safe(lchunk, temp, &q->abandoned) {
		chunk = list_entry(lchunk, struct sctp_chunk,
				   transmitted_list);
		tsn = ntohl(chunk->subh.data_hdr->tsn);

		/* Remove any chunks in the abandoned queue that are acked by
		 * the ctsn.
		 */
		if (TSN_lte(tsn, ctsn)) {
			list_del_init(lchunk);
			sctp_chunk_free(chunk);
		} else {
			if (TSN_lte(tsn, asoc->adv_peer_ack_point+1)) {
				asoc->adv_peer_ack_point = tsn;
				if (chunk->chunk_hdr->flags &
					 SCTP_DATA_UNORDERED)
					continue;
				skip_pos = sctp_get_skip_pos(&ftsn_skip_arr[0],
						nskips,
						chunk->subh.data_hdr->stream);
				ftsn_skip_arr[skip_pos].stream =
					chunk->subh.data_hdr->stream;
				ftsn_skip_arr[skip_pos].ssn =
					 chunk->subh.data_hdr->ssn;
				if (skip_pos == nskips)
					nskips++;
				if (nskips == 10)
					break;
			} else
				break;
		}
	}

	/* PR-SCTP C3) If, after step C1 and C2, the "Advanced.Peer.Ack.Point"
	 * is greater than the Cumulative TSN ACK carried in the received
	 * SACK, the data sender MUST send the data receiver a FORWARD TSN
	 * chunk containing the latest value of the
	 * "Advanced.Peer.Ack.Point".
	 *
	 * C4) For each "abandoned" TSN the sender of the FORWARD TSN SHOULD
	 * list each stream and sequence number in the forwarded TSN. This
	 * information will enable the receiver to easily find any
	 * stranded TSN's waiting on stream reorder queues. Each stream
	 * SHOULD only be reported once; this means that if multiple
	 * abandoned messages occur in the same stream then only the
	 * highest abandoned stream sequence number is reported. If the
	 * total size of the FORWARD TSN does NOT fit in a single MTU then
	 * the sender of the FORWARD TSN SHOULD lower the
	 * Advanced.Peer.Ack.Point to the last TSN that will fit in a
	 * single MTU.
	 */
	if (asoc->adv_peer_ack_point > ctsn)
		ftsn_chunk = sctp_make_fwdtsn(asoc, asoc->adv_peer_ack_point,
					      nskips, &ftsn_skip_arr[0]);

	if (ftsn_chunk) {
		list_add_tail(&ftsn_chunk->list, &q->control_chunk_list);
		SCTP_INC_STATS(sock_net(asoc->base.sk), SCTP_MIB_OUTCTRLCHUNKS);
	}
}