net/sctp/sm_sideeffect.c
1 /* SCTP kernel implementation
2  * (C) Copyright IBM Corp. 2001, 2004
3  * Copyright (c) 1999 Cisco, Inc.
4  * Copyright (c) 1999-2001 Motorola, Inc.
5  *
6  * This file is part of the SCTP kernel implementation
7  *
8  * These functions work with the state functions in sctp_sm_statefuns.c
9  * to implement the state operations.  These functions implement the
10  * steps which require modifying existing data structures.
11  *
12  * This SCTP implementation is free software;
13  * you can redistribute it and/or modify it under the terms of
14  * the GNU General Public License as published by
15  * the Free Software Foundation; either version 2, or (at your option)
16  * any later version.
17  *
18  * This SCTP implementation is distributed in the hope that it
19  * will be useful, but WITHOUT ANY WARRANTY; without even the implied
20  *                 ************************
21  * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
22  * See the GNU General Public License for more details.
23  *
24  * You should have received a copy of the GNU General Public License
25  * along with GNU CC; see the file COPYING.  If not, write to
26  * the Free Software Foundation, 59 Temple Place - Suite 330,
27  * Boston, MA 02111-1307, USA.
28  *
29  * Please send any bug reports or fixes you make to the
30  * email address(es):
31  *    lksctp developers <lksctp-developers@lists.sourceforge.net>
32  *
33  * Or submit a bug report through the following website:
34  *    http://www.sf.net/projects/lksctp
35  *
36  * Written or modified by:
37  *    La Monte H.P. Yarroll <piggy@acm.org>
38  *    Karl Knutson          <karl@athena.chicago.il.us>
39  *    Jon Grimm             <jgrimm@austin.ibm.com>
40  *    Hui Huang             <hui.huang@nokia.com>
41  *    Dajiang Zhang         <dajiang.zhang@nokia.com>
42  *    Daisy Chang           <daisyc@us.ibm.com>
43  *    Sridhar Samudrala     <sri@us.ibm.com>
44  *    Ardelle Fan           <ardelle.fan@intel.com>
45  *
46  * Any bugs reported given to us we will try to fix... any fixes shared will
47  * be incorporated into the next SCTP release.
48  */
49
50 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
51
52 #include <linux/skbuff.h>
53 #include <linux/types.h>
54 #include <linux/socket.h>
55 #include <linux/ip.h>
56 #include <linux/gfp.h>
57 #include <net/sock.h>
58 #include <net/sctp/sctp.h>
59 #include <net/sctp/sm.h>
60
61 static int sctp_cmd_interpreter(sctp_event_t event_type,
62                                 sctp_subtype_t subtype,
63                                 sctp_state_t state,
64                                 struct sctp_endpoint *ep,
65                                 struct sctp_association *asoc,
66                                 void *event_arg,
67                                 sctp_disposition_t status,
68                                 sctp_cmd_seq_t *commands,
69                                 gfp_t gfp);
70 static int sctp_side_effects(sctp_event_t event_type, sctp_subtype_t subtype,
71                              sctp_state_t state,
72                              struct sctp_endpoint *ep,
73                              struct sctp_association *asoc,
74                              void *event_arg,
75                              sctp_disposition_t status,
76                              sctp_cmd_seq_t *commands,
77                              gfp_t gfp);
78
79 static void sctp_cmd_hb_timer_update(sctp_cmd_seq_t *cmds,
80                                      struct sctp_transport *t);
81 /********************************************************************
82  * Helper functions
83  ********************************************************************/
84
85 /* A helper function for delayed processing of INET ECN CE bit. */
86 static void sctp_do_ecn_ce_work(struct sctp_association *asoc,
87                                 __u32 lowest_tsn)
88 {
89         /* Save the TSN away for comparison when we receive CWR */
90
91         asoc->last_ecne_tsn = lowest_tsn;
92         asoc->need_ecne = 1;
93 }
94
95 /* Helper function for delayed processing of SCTP ECNE chunk.  */
96 /* RFC 2960 Appendix A
97  *
98  * RFC 2481 details a specific bit for a sender to send in
99  * the header of its next outbound TCP segment to indicate to
100  * its peer that it has reduced its congestion window.  This
101  * is termed the CWR bit.  For SCTP the same indication is made
102  * by including the CWR chunk.  This chunk contains one data
103  * element, i.e. the TSN number that was sent in the ECNE chunk.
104  * This element represents the lowest TSN number in the datagram
105  * that was originally marked with the CE bit.
106  */
107 static struct sctp_chunk *sctp_do_ecn_ecne_work(struct sctp_association *asoc,
108                                            __u32 lowest_tsn,
109                                            struct sctp_chunk *chunk)
110 {
111         struct sctp_chunk *repl;
112
113         /* Our previously transmitted packet ran into some congestion
114          * so we should take action by reducing cwnd and ssthresh
115          * and then ACK our peer that we've done so by
116          * sending a CWR.
117          */
118
119         /* First, try to determine if we want to actually lower
120          * our cwnd variables.  Only lower them if the ECNE looks more
121          * recent than the last response.
122          */
123         if (TSN_lt(asoc->last_cwr_tsn, lowest_tsn)) {
124                 struct sctp_transport *transport;
125
126                 /* Find which transport's congestion variables
127                  * need to be adjusted.
128                  */
129                 transport = sctp_assoc_lookup_tsn(asoc, lowest_tsn);
130
131                 /* Update the congestion variables. */
132                 if (transport)
133                         sctp_transport_lower_cwnd(transport,
134                                                   SCTP_LOWER_CWND_ECNE);
135                 asoc->last_cwr_tsn = lowest_tsn;
136         }
137
138         /* Always try to quiet the other end.  In case of lost CWR,
139          * resend last_cwr_tsn.
140          */
141         repl = sctp_make_cwr(asoc, asoc->last_cwr_tsn, chunk);
142
143         /* If we run out of memory, it will look like a lost CWR.  We'll
144          * get back in sync eventually.
145          */
146         return repl;
147 }
148
149 /* Helper function to do delayed processing of ECN CWR chunk.  */
150 static void sctp_do_ecn_cwr_work(struct sctp_association *asoc,
151                                  __u32 lowest_tsn)
152 {
153         /* Turn off ECNE getting auto-prepended to every outgoing
154          * packet
155          */
156         asoc->need_ecne = 0;
157 }
158
159 /* Generate SACK if necessary.  We call this at the end of a packet.  */
160 static int sctp_gen_sack(struct sctp_association *asoc, int force,
161                          sctp_cmd_seq_t *commands)
162 {
163         __u32 ctsn, max_tsn_seen;
164         struct sctp_chunk *sack;
165         struct sctp_transport *trans = asoc->peer.last_data_from;
166         int error = 0;
167
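        /* An explicit force, or delayed SACK being disabled either for
         * the association or for the transport the last DATA chunk was
         * received from, means the SACK must not be delayed.
         */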
168         if (force ||
169             (!trans && (asoc->param_flags & SPP_SACKDELAY_DISABLE)) ||
170             (trans && (trans->param_flags & SPP_SACKDELAY_DISABLE)))
171                 asoc->peer.sack_needed = 1;
172
173         ctsn = sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map);
174         max_tsn_seen = sctp_tsnmap_get_max_tsn_seen(&asoc->peer.tsn_map);
175
176         /* From 12.2 Parameters necessary per association (i.e. the TCB):
177          *
178          * Ack State : This flag indicates if the next received packet
179          *           : is to be responded to with a SACK. ...
180          *           : When DATA chunks are out of order, SACK's
181          *           : are not delayed (see Section 6).
182          *
183          * [This is actually not mentioned in Section 6, but we
184          * implement it here anyway. --piggy]
185          */
186         if (max_tsn_seen != ctsn)
187                 asoc->peer.sack_needed = 1;
188
189         /* From 6.2  Acknowledgement on Reception of DATA Chunks:
190          *
191          * Section 4.2 of [RFC2581] SHOULD be followed. Specifically,
192          * an acknowledgement SHOULD be generated for at least every
193          * second packet (not every second DATA chunk) received, and
194          * SHOULD be generated within 200 ms of the arrival of any
195          * unacknowledged DATA chunk. ...
196          */
197         if (!asoc->peer.sack_needed) {
198                 asoc->peer.sack_cnt++;
199
200                 /* Set the SACK delay timeout based on the
201                  * SACK delay for the last transport
202                  * data was received from, or the default
203                  * for the association.
204                  */
205                 if (trans) {
206                         /* We will need a SACK for the next packet.  */
207                         if (asoc->peer.sack_cnt >= trans->sackfreq - 1)
208                                 asoc->peer.sack_needed = 1;
209
210                         asoc->timeouts[SCTP_EVENT_TIMEOUT_SACK] =
211                                 trans->sackdelay;
212                 } else {
213                         /* We will need a SACK for the next packet.  */
214                         if (asoc->peer.sack_cnt >= asoc->sackfreq - 1)
215                                 asoc->peer.sack_needed = 1;
216
217                         asoc->timeouts[SCTP_EVENT_TIMEOUT_SACK] =
218                                 asoc->sackdelay;
219                 }
220
221                 /* Restart the SACK timer. */
222                 sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART,
223                                 SCTP_TO(SCTP_EVENT_TIMEOUT_SACK));
224         } else {
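                /* A SACK is due now: refresh the advertised receive
                 * window from the current rwnd, build the SACK, and stop
                 * the delayed-SACK timer since we acknowledge immediately.
                 */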
225                 asoc->a_rwnd = asoc->rwnd;
226                 sack = sctp_make_sack(asoc);
227                 if (!sack)
228                         goto nomem;
229
230                 asoc->peer.sack_needed = 0;
231                 asoc->peer.sack_cnt = 0;
232
233                 sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(sack));
234
235                 /* Stop the SACK timer.  */
236                 sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
237                                 SCTP_TO(SCTP_EVENT_TIMEOUT_SACK));
238         }
239
240         return error;
241 nomem:
242         error = -ENOMEM;
243         return error;
244 }
245
246 /* When the T3-RTX timer expires, it calls this function to create the
247  * relevant state machine event.
248  */
249 void sctp_generate_t3_rtx_event(unsigned long peer)
250 {
251         int error;
252         struct sctp_transport *transport = (struct sctp_transport *) peer;
253         struct sctp_association *asoc = transport->asoc;
254         struct net *net = sock_net(asoc->base.sk);
255
256         /* Check whether a task is in the sock.  */
257
258         sctp_bh_lock_sock(asoc->base.sk);
259         if (sock_owned_by_user(asoc->base.sk)) {
260                 SCTP_DEBUG_PRINTK("%s:Sock is busy.\n", __func__);
261
262                 /* Try again later.  */
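                /* mod_timer() returns 0 when the timer was not already
                 * pending; in that case take an extra reference on the
                 * transport so the re-armed timer keeps it alive until it
                 * fires again.
                 */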
263                 if (!mod_timer(&transport->T3_rtx_timer, jiffies + (HZ/20)))
264                         sctp_transport_hold(transport);
265                 goto out_unlock;
266         }
267
268         /* Is this transport really dead and just waiting around for
269          * the timer to let go of the reference?
270          */
271         if (transport->dead)
272                 goto out_unlock;
273
274         /* Run through the state machine.  */
275         error = sctp_do_sm(net, SCTP_EVENT_T_TIMEOUT,
276                            SCTP_ST_TIMEOUT(SCTP_EVENT_TIMEOUT_T3_RTX),
277                            asoc->state,
278                            asoc->ep, asoc,
279                            transport, GFP_ATOMIC);
280
281         if (error)
282                 asoc->base.sk->sk_err = -error;
283
284 out_unlock:
285         sctp_bh_unlock_sock(asoc->base.sk);
286         sctp_transport_put(transport);
287 }
288
289 /* This is an interface for producing timeout events.  It works
290  * for timeouts which use the association as their parameter.
291  */
292 static void sctp_generate_timeout_event(struct sctp_association *asoc,
293                                         sctp_event_timeout_t timeout_type)
294 {
295         struct net *net = sock_net(asoc->base.sk);
296         int error = 0;
297
298         sctp_bh_lock_sock(asoc->base.sk);
299         if (sock_owned_by_user(asoc->base.sk)) {
300                 SCTP_DEBUG_PRINTK("%s:Sock is busy: timer %d\n",
301                                   __func__,
302                                   timeout_type);
303
304                 /* Try again later.  */
305                 if (!mod_timer(&asoc->timers[timeout_type], jiffies + (HZ/20)))
306                         sctp_association_hold(asoc);
307                 goto out_unlock;
308         }
309
310         /* Is this association really dead and just waiting around for
311          * the timer to let go of the reference?
312          */
313         if (asoc->base.dead)
314                 goto out_unlock;
315
316         /* Run through the state machine.  */
317         error = sctp_do_sm(net, SCTP_EVENT_T_TIMEOUT,
318                            SCTP_ST_TIMEOUT(timeout_type),
319                            asoc->state, asoc->ep, asoc,
320                            (void *)timeout_type, GFP_ATOMIC);
321
322         if (error)
323                 asoc->base.sk->sk_err = -error;
324
325 out_unlock:
326         sctp_bh_unlock_sock(asoc->base.sk);
327         sctp_association_put(asoc);
328 }
329
330 static void sctp_generate_t1_cookie_event(unsigned long data)
331 {
332         struct sctp_association *asoc = (struct sctp_association *) data;
333         sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_T1_COOKIE);
334 }
335
336 static void sctp_generate_t1_init_event(unsigned long data)
337 {
338         struct sctp_association *asoc = (struct sctp_association *) data;
339         sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_T1_INIT);
340 }
341
342 static void sctp_generate_t2_shutdown_event(unsigned long data)
343 {
344         struct sctp_association *asoc = (struct sctp_association *) data;
345         sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_T2_SHUTDOWN);
346 }
347
348 static void sctp_generate_t4_rto_event(unsigned long data)
349 {
350         struct sctp_association *asoc = (struct sctp_association *) data;
351         sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_T4_RTO);
352 }
353
354 static void sctp_generate_t5_shutdown_guard_event(unsigned long data)
355 {
356         struct sctp_association *asoc = (struct sctp_association *)data;
357         sctp_generate_timeout_event(asoc,
358                                     SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD);
359
360 } /* sctp_generate_t5_shutdown_guard_event() */
361
362 static void sctp_generate_autoclose_event(unsigned long data)
363 {
364         struct sctp_association *asoc = (struct sctp_association *) data;
365         sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_AUTOCLOSE);
366 }
367
368 /* Generate a heart beat event.  If the sock is busy, reschedule.   Make
369  * sure that the transport is still valid.
370  */
371 void sctp_generate_heartbeat_event(unsigned long data)
372 {
373         int error = 0;
374         struct sctp_transport *transport = (struct sctp_transport *) data;
375         struct sctp_association *asoc = transport->asoc;
376         struct net *net = sock_net(asoc->base.sk);
377
378         sctp_bh_lock_sock(asoc->base.sk);
379         if (sock_owned_by_user(asoc->base.sk)) {
380                 SCTP_DEBUG_PRINTK("%s:Sock is busy.\n", __func__);
381
382                 /* Try again later.  */
383                 if (!mod_timer(&transport->hb_timer, jiffies + (HZ/20)))
384                         sctp_transport_hold(transport);
385                 goto out_unlock;
386         }
387
388         /* Is this structure just waiting around for us to actually
389          * get destroyed?
390          */
391         if (transport->dead)
392                 goto out_unlock;
393
394         error = sctp_do_sm(net, SCTP_EVENT_T_TIMEOUT,
395                            SCTP_ST_TIMEOUT(SCTP_EVENT_TIMEOUT_HEARTBEAT),
396                            asoc->state, asoc->ep, asoc,
397                            transport, GFP_ATOMIC);
398
399         if (error)
400                 asoc->base.sk->sk_err = -error;
401
402 out_unlock:
403         sctp_bh_unlock_sock(asoc->base.sk);
404         sctp_transport_put(transport);
405 }
406
407 /* Handle the timeout of the ICMP protocol unreachable timer.  Trigger
408  * the correct state machine transition that will close the association.
409  */
410 void sctp_generate_proto_unreach_event(unsigned long data)
411 {
412         struct sctp_transport *transport = (struct sctp_transport *) data;
413         struct sctp_association *asoc = transport->asoc;
414         struct net *net = sock_net(asoc->base.sk);
415
416         sctp_bh_lock_sock(asoc->base.sk);
417         if (sock_owned_by_user(asoc->base.sk)) {
418                 SCTP_DEBUG_PRINTK("%s:Sock is busy.\n", __func__);
419
420                 /* Try again later.  */
421                 if (!mod_timer(&transport->proto_unreach_timer,
422                                 jiffies + (HZ/20)))
423                         sctp_association_hold(asoc);
424                 goto out_unlock;
425         }
426
427         /* Is this structure just waiting around for us to actually
428          * get destroyed?
429          */
430         if (asoc->base.dead)
431                 goto out_unlock;
432
433         sctp_do_sm(net, SCTP_EVENT_T_OTHER,
434                    SCTP_ST_OTHER(SCTP_EVENT_ICMP_PROTO_UNREACH),
435                    asoc->state, asoc->ep, asoc, transport, GFP_ATOMIC);
436
437 out_unlock:
438         sctp_bh_unlock_sock(asoc->base.sk);
439         sctp_association_put(asoc);
440 }
441
442
443 /* Inject a SACK Timeout event into the state machine.  */
444 static void sctp_generate_sack_event(unsigned long data)
445 {
446         struct sctp_association *asoc = (struct sctp_association *) data;
447         sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_SACK);
448 }
449
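/* Table of timeout event generators, indexed by sctp_event_timeout_t.
 * The NULL slots are timeouts with no per-association generator here:
 * T3-RTX and HEARTBEAT expirations are handled per-transport by
 * sctp_generate_t3_rtx_event() and sctp_generate_heartbeat_event().
 */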
450 sctp_timer_event_t *sctp_timer_events[SCTP_NUM_TIMEOUT_TYPES] = {
451         NULL,
452         sctp_generate_t1_cookie_event,
453         sctp_generate_t1_init_event,
454         sctp_generate_t2_shutdown_event,
455         NULL,
456         sctp_generate_t4_rto_event,
457         sctp_generate_t5_shutdown_guard_event,
458         NULL,
459         sctp_generate_sack_event,
460         sctp_generate_autoclose_event,
461 };
462
463
464 /* RFC 2960 8.2 Path Failure Detection
465  *
466  * When its peer endpoint is multi-homed, an endpoint should keep an
467  * error counter for each of the destination transport addresses of the
468  * peer endpoint.
469  *
470  * Each time the T3-rtx timer expires on any address, or when a
471  * HEARTBEAT sent to an idle address is not acknowledged within an RTO,
472  * the error counter of that destination address will be incremented.
473  * When the value in the error counter exceeds the protocol parameter
474  * 'Path.Max.Retrans' of that destination address, the endpoint should
475  * mark the destination transport address as inactive, and a
476  * notification SHOULD be sent to the upper layer.
477  *
478  */
479 static void sctp_do_8_2_transport_strike(sctp_cmd_seq_t *commands,
480                                          struct sctp_association *asoc,
481                                          struct sctp_transport *transport,
482                                          int is_hb)
483 {
484         /* The check for association's overall error counter exceeding the
485          * threshold is done in the state function.
486          */
487         /* We are here due to a timer expiration.  If the timer was
488          * not a HEARTBEAT, then normal error tracking is done.
489          * If the timer was a heartbeat, we only increment error counts
490          * when we already have an outstanding HEARTBEAT that has not
491          * been acknowledged.
492          * Additionally, some transport states inhibit error increments.
493          */
494         if (!is_hb) {
495                 asoc->overall_error_count++;
496                 if (transport->state != SCTP_INACTIVE)
497                         transport->error_count++;
498         } else if (transport->hb_sent) {
499                 if (transport->state != SCTP_UNCONFIRMED)
500                         asoc->overall_error_count++;
501                 if (transport->state != SCTP_INACTIVE)
502                         transport->error_count++;
503         }
504
505         /* If the transport error count is greater than the pf_retrans
506          * threshold, and less than pathmaxrxt, then mark this transport
507          * as Partially Failed, see SCTP Quick Failover Draft, section 5.1,
508          * point 1
509          */
510         if ((transport->state != SCTP_PF) &&
511            (asoc->pf_retrans < transport->pathmaxrxt) &&
512            (transport->error_count > asoc->pf_retrans)) {
513
514                 sctp_assoc_control_transport(asoc, transport,
515                                              SCTP_TRANSPORT_PF,
516                                              0);
517
518                 /* Update the hb timer to resend a heartbeat every rto */
519                 sctp_cmd_hb_timer_update(commands, transport);
520         }
521
522         if (transport->state != SCTP_INACTIVE &&
523             (transport->error_count > transport->pathmaxrxt)) {
524                 SCTP_DEBUG_PRINTK_IPADDR("transport_strike:association %p",
525                                          " transport IP: port:%d failed.\n",
526                                          asoc,
527                                          (&transport->ipaddr),
528                                          ntohs(transport->ipaddr.v4.sin_port));
529                 sctp_assoc_control_transport(asoc, transport,
530                                              SCTP_TRANSPORT_DOWN,
531                                              SCTP_FAILED_THRESHOLD);
532         }
533
534         /* E2) For the destination address for which the timer
535          * expires, set RTO <- RTO * 2 ("back off the timer").  The
536          * maximum value discussed in rule C7 above (RTO.max) may be
537          * used to provide an upper bound to this doubling operation.
538          *
539          * Special Case:  the first HB doesn't trigger exponential backoff.
540          * The first unacknowledged HB triggers it.  We do this with a flag
541          * that indicates that we have an outstanding HB.
542          */
543         if (!is_hb || transport->hb_sent) {
544                 transport->rto = min((transport->rto * 2), transport->asoc->rto_max);
545                 sctp_max_rto(asoc, transport);
546         }
547 }
548
549 /* Worker routine to handle INIT command failure.  */
550 static void sctp_cmd_init_failed(sctp_cmd_seq_t *commands,
551                                  struct sctp_association *asoc,
552                                  unsigned int error)
553 {
554         struct sctp_ulpevent *event;
555
556         event = sctp_ulpevent_make_assoc_change(asoc, 0, SCTP_CANT_STR_ASSOC,
557                                                 (__u16)error, 0, 0, NULL,
558                                                 GFP_ATOMIC);
559
560         if (event)
561                 sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP,
562                                 SCTP_ULPEVENT(event));
563
564         sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
565                         SCTP_STATE(SCTP_STATE_CLOSED));
566
567         /* SEND_FAILED sent later when cleaning up the association. */
568         asoc->outqueue.error = error;
569         sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL());
570 }
571
572 /* Worker routine to handle SCTP_CMD_ASSOC_FAILED.  */
573 static void sctp_cmd_assoc_failed(sctp_cmd_seq_t *commands,
574                                   struct sctp_association *asoc,
575                                   sctp_event_t event_type,
576                                   sctp_subtype_t subtype,
577                                   struct sctp_chunk *chunk,
578                                   unsigned int error)
579 {
580         struct sctp_ulpevent *event;
581         struct sctp_chunk *abort;
582         /* Cancel any partial delivery in progress. */
583         sctp_ulpq_abort_pd(&asoc->ulpq, GFP_ATOMIC);
584
585         if (event_type == SCTP_EVENT_T_CHUNK && subtype.chunk == SCTP_CID_ABORT)
586                 event = sctp_ulpevent_make_assoc_change(asoc, 0, SCTP_COMM_LOST,
587                                                 (__u16)error, 0, 0, chunk,
588                                                 GFP_ATOMIC);
589         else
590                 event = sctp_ulpevent_make_assoc_change(asoc, 0, SCTP_COMM_LOST,
591                                                 (__u16)error, 0, 0, NULL,
592                                                 GFP_ATOMIC);
593         if (event)
594                 sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP,
595                                 SCTP_ULPEVENT(event));
596
597         if (asoc->overall_error_count >= asoc->max_retrans) {
598                 abort = sctp_make_violation_max_retrans(asoc, chunk);
599                 if (abort)
600                         sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
601                                         SCTP_CHUNK(abort));
602         }
603
604         sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
605                         SCTP_STATE(SCTP_STATE_CLOSED));
606
607         /* SEND_FAILED sent later when cleaning up the association. */
608         asoc->outqueue.error = error;
609         sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL());
610 }
611
612 /* Process an init chunk (may be real INIT/INIT-ACK or an embedded INIT
613  * inside the cookie).  In reality, this is only used for INIT-ACK processing
614  * since all other cases use "temporary" associations and can do all
615  * their work in statefuns directly.
616  */
617 static int sctp_cmd_process_init(sctp_cmd_seq_t *commands,
618                                  struct sctp_association *asoc,
619                                  struct sctp_chunk *chunk,
620                                  sctp_init_chunk_t *peer_init,
621                                  gfp_t gfp)
622 {
623         int error;
624
625         /* We only process the init as a sideeffect in a single
626          * case.   This is when we process the INIT-ACK.   If we
627          * fail during INIT processing (due to malloc problems),
628          * just return the error and stop processing the stack.
629          */
630         if (!sctp_process_init(asoc, chunk, sctp_source(chunk), peer_init, gfp))
631                 error = -ENOMEM;
632         else
633                 error = 0;
634
635         return error;
636 }
637
638 /* Helper function to break out starting up of heartbeat timers.  */
639 static void sctp_cmd_hb_timers_start(sctp_cmd_seq_t *cmds,
640                                      struct sctp_association *asoc)
641 {
642         struct sctp_transport *t;
643
644         /* Start a heartbeat timer for each transport on the association.
645          * Hold a reference on the transport to make sure none of
646          * the needed data structures go away.
647          */
648         list_for_each_entry(t, &asoc->peer.transport_addr_list, transports) {
649
650                 if (!mod_timer(&t->hb_timer, sctp_transport_timeout(t)))
651                         sctp_transport_hold(t);
652         }
653 }
654
655 static void sctp_cmd_hb_timers_stop(sctp_cmd_seq_t *cmds,
656                                     struct sctp_association *asoc)
657 {
658         struct sctp_transport *t;
659
660         /* Stop all heartbeat timers. */
661
662         list_for_each_entry(t, &asoc->peer.transport_addr_list,
663                         transports) {
664                 if (del_timer(&t->hb_timer))
665                         sctp_transport_put(t);
666         }
667 }
668
669 /* Helper function to stop any pending T3-RTX timers */
670 static void sctp_cmd_t3_rtx_timers_stop(sctp_cmd_seq_t *cmds,
671                                         struct sctp_association *asoc)
672 {
673         struct sctp_transport *t;
674
675         list_for_each_entry(t, &asoc->peer.transport_addr_list,
676                         transports) {
677                 if (del_timer(&t->T3_rtx_timer))
678                         sctp_transport_put(t);
679         }
680 }
681
682
683 /* Helper function to update the heartbeat timer. */
684 static void sctp_cmd_hb_timer_update(sctp_cmd_seq_t *cmds,
685                                      struct sctp_transport *t)
686 {
687         /* Update the heartbeat timer.  */
688         if (!mod_timer(&t->hb_timer, sctp_transport_timeout(t)))
689                 sctp_transport_hold(t);
690 }
691
692 /* Helper function to handle the reception of a HEARTBEAT ACK.  */
693 static void sctp_cmd_transport_on(sctp_cmd_seq_t *cmds,
694                                   struct sctp_association *asoc,
695                                   struct sctp_transport *t,
696                                   struct sctp_chunk *chunk)
697 {
698         sctp_sender_hb_info_t *hbinfo;
699         int was_unconfirmed = 0;
700
701         /* 8.3 Upon the receipt of the HEARTBEAT ACK, the sender of the
702          * HEARTBEAT should clear the error counter of the destination
703          * transport address to which the HEARTBEAT was sent.
704          */
705         t->error_count = 0;
706
707         /*
708          * Although RFC4960 specifies that the overall error count must
709          * be cleared when a HEARTBEAT ACK is received, we make an
710          * exception while in SHUTDOWN PENDING. If the peer keeps its
711          * window shut forever, we may never be able to transmit our
712          * outstanding data and rely on the retransmission limit being reached
713          * to shut down the association.
714          */
715         if (t->asoc->state != SCTP_STATE_SHUTDOWN_PENDING)
716                 t->asoc->overall_error_count = 0;
717
718         /* Clear the hb_sent flag to signal that we had a good
719          * acknowledgement.
720          */
721         t->hb_sent = 0;
722
723         /* Mark the destination transport address as active if it is not so
724          * marked.
725          */
726         if ((t->state == SCTP_INACTIVE) || (t->state == SCTP_UNCONFIRMED)) {
727                 was_unconfirmed = 1;
728                 sctp_assoc_control_transport(asoc, t, SCTP_TRANSPORT_UP,
729                                              SCTP_HEARTBEAT_SUCCESS);
730         }
731
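        /* A successful HEARTBEAT ACK also clears the Partially Failed
         * state set in sctp_do_8_2_transport_strike(), bringing the
         * transport back up.
         */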
732         if (t->state == SCTP_PF)
733                 sctp_assoc_control_transport(asoc, t, SCTP_TRANSPORT_UP,
734                                              SCTP_HEARTBEAT_SUCCESS);
735
736         /* The receiver of the HEARTBEAT ACK should also perform an
737          * RTT measurement for that destination transport address
738          * using the time value carried in the HEARTBEAT ACK chunk.
739          * If the transport's rto_pending variable has been cleared,
740          * it was most likely due to a retransmit.  However, we want
741          * to re-enable it to properly update the rto.
742          */
743         if (t->rto_pending == 0)
744                 t->rto_pending = 1;
745
746         hbinfo = (sctp_sender_hb_info_t *) chunk->skb->data;
747         sctp_transport_update_rto(t, (jiffies - hbinfo->sent_at));
748
749         /* Update the heartbeat timer.  */
750         if (!mod_timer(&t->hb_timer, sctp_transport_timeout(t)))
751                 sctp_transport_hold(t);
752
753         if (was_unconfirmed && asoc->peer.transport_count == 1)
754                 sctp_transport_immediate_rtx(t);
755 }
756
757
758 /* Helper function to process the SACK command.  */
759 static int sctp_cmd_process_sack(sctp_cmd_seq_t *cmds,
760                                  struct sctp_association *asoc,
761                                  struct sctp_chunk *chunk)
762 {
763         int err = 0;
764
765         if (sctp_outq_sack(&asoc->outqueue, chunk)) {
766                 struct net *net = sock_net(asoc->base.sk);
767
768                 /* There are no more TSNs awaiting SACK.  */
769                 err = sctp_do_sm(net, SCTP_EVENT_T_OTHER,
770                                  SCTP_ST_OTHER(SCTP_EVENT_NO_PENDING_TSN),
771                                  asoc->state, asoc->ep, asoc, NULL,
772                                  GFP_ATOMIC);
773         }
774
775         return err;
776 }
777
778 /* Helper function to set the timeout value for T2-SHUTDOWN timer and to set
779  * the transport for a shutdown chunk.
780  */
781 static void sctp_cmd_setup_t2(sctp_cmd_seq_t *cmds,
782                               struct sctp_association *asoc,
783                               struct sctp_chunk *chunk)
784 {
785         struct sctp_transport *t;
786
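        /* Use the transport the chunk is already bound to if it has one;
         * otherwise pick an alternate transport, avoiding the one the
         * last SHUTDOWN was sent to.
         */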
787         if (chunk->transport)
788                 t = chunk->transport;
789         else {
790                 t = sctp_assoc_choose_alter_transport(asoc,
791                                               asoc->shutdown_last_sent_to);
792                 chunk->transport = t;
793         }
794         asoc->shutdown_last_sent_to = t;
795         asoc->timeouts[SCTP_EVENT_TIMEOUT_T2_SHUTDOWN] = t->rto;
796 }
797
798 /* Helper function to change the state of an association. */
799 static void sctp_cmd_new_state(sctp_cmd_seq_t *cmds,
800                                struct sctp_association *asoc,
801                                sctp_state_t state)
802 {
803         struct sock *sk = asoc->base.sk;
804
805         asoc->state = state;
806
807         SCTP_DEBUG_PRINTK("sctp_cmd_new_state: asoc %p[%s]\n",
808                           asoc, sctp_state_tbl[state]);
809
810         if (sctp_style(sk, TCP)) {
811                 /* Change the sk->sk_state of a TCP-style socket that has
812                  * successfully completed a connect() call.
813                  */
814                 if (sctp_state(asoc, ESTABLISHED) && sctp_sstate(sk, CLOSED))
815                         sk->sk_state = SCTP_SS_ESTABLISHED;
816
817                 /* Set the RCV_SHUTDOWN flag when a SHUTDOWN is received. */
818                 if (sctp_state(asoc, SHUTDOWN_RECEIVED) &&
819                     sctp_sstate(sk, ESTABLISHED))
820                         sk->sk_shutdown |= RCV_SHUTDOWN;
821         }
822
823         if (sctp_state(asoc, COOKIE_WAIT)) {
824                 /* Reset init timeouts since they may have been
825                  * increased due to timer expirations.
826                  */
827                 asoc->timeouts[SCTP_EVENT_TIMEOUT_T1_INIT] =
828                                                 asoc->rto_initial;
829                 asoc->timeouts[SCTP_EVENT_TIMEOUT_T1_COOKIE] =
830                                                 asoc->rto_initial;
831         }
832
833         if (sctp_state(asoc, ESTABLISHED) ||
834             sctp_state(asoc, CLOSED) ||
835             sctp_state(asoc, SHUTDOWN_RECEIVED)) {
836                 /* Wake up any processes waiting in the asoc's wait queue in
837                  * sctp_wait_for_connect() or sctp_wait_for_sndbuf().
838                  */
839                 if (waitqueue_active(&asoc->wait))
840                         wake_up_interruptible(&asoc->wait);
841
842                 /* Wake up any processes waiting in the sk's sleep queue of
843                  * a TCP-style or UDP-style peeled-off socket in
844                  * sctp_wait_for_accept() or sctp_wait_for_packet().
845                  * For a UDP-style socket, the waiters are woken up by the
846                  * notifications.
847                  */
848                 if (!sctp_style(sk, UDP))
849                         sk->sk_state_change(sk);
850         }
851 }
852
853 /* Helper function to delete an association. */
854 static void sctp_cmd_delete_tcb(sctp_cmd_seq_t *cmds,
855                                 struct sctp_association *asoc)
856 {
857         struct sock *sk = asoc->base.sk;
858
859         /* If it is a non-temporary association belonging to a TCP-style
860          * listening socket that is not closed, do not free it so that accept()
861          * can pick it up later.
862          */
863         if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING) &&
864             (!asoc->temp) && (sk->sk_shutdown != SHUTDOWN_MASK))
865                 return;
866
867         BUG_ON(asoc->peer.primary_path == NULL);
868         sctp_unhash_established(asoc);
869         sctp_association_free(asoc);
870 }
871
872 /*
873  * ADDIP Section 4.1 ASCONF Chunk Procedures
874  * A4) Start a T-4 RTO timer, using the RTO value of the selected
875  * destination address (we use active path instead of primary path just
876  * because primary path may be inactive).
877  */
878 static void sctp_cmd_setup_t4(sctp_cmd_seq_t *cmds,
879                                 struct sctp_association *asoc,
880                                 struct sctp_chunk *chunk)
881 {
882         struct sctp_transport *t;
883
884         t = sctp_assoc_choose_alter_transport(asoc, chunk->transport);
885         asoc->timeouts[SCTP_EVENT_TIMEOUT_T4_RTO] = t->rto;
886         chunk->transport = t;
887 }
888
889 /* Process an incoming Operation Error Chunk. */
890 static void sctp_cmd_process_operr(sctp_cmd_seq_t *cmds,
891                                    struct sctp_association *asoc,
892                                    struct sctp_chunk *chunk)
893 {
894         struct sctp_errhdr *err_hdr;
895         struct sctp_ulpevent *ev;
896
897         while (chunk->chunk_end > chunk->skb->data) {
898                 err_hdr = (struct sctp_errhdr *)(chunk->skb->data);
899
900                 ev = sctp_ulpevent_make_remote_error(asoc, chunk, 0,
901                                                      GFP_ATOMIC);
902                 if (!ev)
903                         return;
904
905                 sctp_ulpq_tail_event(&asoc->ulpq, ev);
906
907                 switch (err_hdr->cause) {
908                 case SCTP_ERROR_UNKNOWN_CHUNK:
909                 {
910                         sctp_chunkhdr_t *unk_chunk_hdr;
911
912                         unk_chunk_hdr = (sctp_chunkhdr_t *)err_hdr->variable;
913                         switch (unk_chunk_hdr->type) {
914                         /* ADDIP 4.1 A9) If the peer responds to an ASCONF with
915                          * an ERROR chunk reporting that it did not recognize
916                          * the ASCONF chunk type, the sender of the ASCONF MUST
917                          * NOT send any further ASCONF chunks and MUST stop its
918                          * T-4 timer.
919                          */
920                         case SCTP_CID_ASCONF:
921                                 if (asoc->peer.asconf_capable == 0)
922                                         break;
923
924                                 asoc->peer.asconf_capable = 0;
925                                 sctp_add_cmd_sf(cmds, SCTP_CMD_TIMER_STOP,
926                                         SCTP_TO(SCTP_EVENT_TIMEOUT_T4_RTO));
927                                 break;
928                         default:
929                                 break;
930                         }
931                         break;
932                 }
933                 default:
934                         break;
935                 }
936         }
937 }
938
939 /* Process variable FWDTSN chunk information. */
940 static void sctp_cmd_process_fwdtsn(struct sctp_ulpq *ulpq,
941                                     struct sctp_chunk *chunk)
942 {
943         struct sctp_fwdtsn_skip *skip;
944         /* Walk through all the skipped SSNs */
945         sctp_walk_fwdtsn(skip, chunk) {
946                 sctp_ulpq_skip(ulpq, ntohs(skip->stream), ntohs(skip->ssn));
947         }
948 }
949
950 /* Helper function to remove the association's non-primary peer
951  * transports.
952  */
953 static void sctp_cmd_del_non_primary(struct sctp_association *asoc)
954 {
955         struct sctp_transport *t;
956         struct list_head *pos;
957         struct list_head *temp;
958
959         list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) {
960                 t = list_entry(pos, struct sctp_transport, transports);
961                 if (!sctp_cmp_addr_exact(&t->ipaddr,
962                                          &asoc->peer.primary_addr)) {
963                         sctp_assoc_del_peer(asoc, &t->ipaddr);
964                 }
965         }
966 }
967
968 /* Helper function to set sk_err on a 1-1 style socket. */
969 static void sctp_cmd_set_sk_err(struct sctp_association *asoc, int error)
970 {
971         struct sock *sk = asoc->base.sk;
972
973         if (!sctp_style(sk, UDP))
974                 sk->sk_err = error;
975 }
976
977 /* Helper function to generate an association change event */
978 static void sctp_cmd_assoc_change(sctp_cmd_seq_t *commands,
979                                  struct sctp_association *asoc,
980                                  u8 state)
981 {
982         struct sctp_ulpevent *ev;
983
984         ev = sctp_ulpevent_make_assoc_change(asoc, 0, state, 0,
985                                             asoc->c.sinit_num_ostreams,
986                                             asoc->c.sinit_max_instreams,
987                                             NULL, GFP_ATOMIC);
988         if (ev)
989                 sctp_ulpq_tail_event(&asoc->ulpq, ev);
990 }
991
992 /* Helper function to generate an adaptation indication event */
993 static void sctp_cmd_adaptation_ind(sctp_cmd_seq_t *commands,
994                                     struct sctp_association *asoc)
995 {
996         struct sctp_ulpevent *ev;
997
998         ev = sctp_ulpevent_make_adaptation_indication(asoc, GFP_ATOMIC);
999
1000         if (ev)
1001                 sctp_ulpq_tail_event(&asoc->ulpq, ev);
1002 }
1003
1004
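/* Helper function to adjust the T1 (INIT/COOKIE-ECHO) timeout after an
 * expiration.  Count the error and, once the current transport has
 * already been tried in this retransmission cycle, double the timeout
 * (bounded by max_init_timeo) and start a new cycle.
 */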
1005 static void sctp_cmd_t1_timer_update(struct sctp_association *asoc,
1006                                     sctp_event_timeout_t timer,
1007                                     char *name)
1008 {
1009         struct sctp_transport *t;
1010
1011         t = asoc->init_last_sent_to;
1012         asoc->init_err_counter++;
1013
1014         if (t->init_sent_count > (asoc->init_cycle + 1)) {
1015                 asoc->timeouts[timer] *= 2;
1016                 if (asoc->timeouts[timer] > asoc->max_init_timeo) {
1017                         asoc->timeouts[timer] = asoc->max_init_timeo;
1018                 }
1019                 asoc->init_cycle++;
1020                 SCTP_DEBUG_PRINTK(
1021                         "T1 %s Timeout adjustment"
1022                         " init_err_counter: %d"
1023                         " cycle: %d"
1024                         " timeout: %ld\n",
1025                         name,
1026                         asoc->init_err_counter,
1027                         asoc->init_cycle,
1028                         asoc->timeouts[timer]);
1029         }
1030
1031 }
1032
1033 /* Send the whole message, chunk by chunk, to the outqueue.
1034  * This way the whole message is queued up and bundling is
1035  * encouraged for small fragments.
1036  */
1037 static int sctp_cmd_send_msg(struct sctp_association *asoc,
1038                                 struct sctp_datamsg *msg)
1039 {
1040         struct sctp_chunk *chunk;
1041         int error = 0;
1042
1043         list_for_each_entry(chunk, &msg->chunks, frag_list) {
1044                 error = sctp_outq_tail(&asoc->outqueue, chunk);
1045                 if (error)
1046                         break;
1047         }
1048
1049         return error;
1050 }
1051
1052
1053 /* Send the next ASCONF packet currently stored in the association.
1054  * This happens after the ASCONF_ACK was successfully processed.
1055  */
1056 static void sctp_cmd_send_asconf(struct sctp_association *asoc)
1057 {
1058         struct net *net = sock_net(asoc->base.sk);
1059
1060         /* Send the next asconf chunk from the addip chunk
1061          * queue.
1062          */
1063         if (!list_empty(&asoc->addip_chunk_list)) {
1064                 struct list_head *entry = asoc->addip_chunk_list.next;
1065                 struct sctp_chunk *asconf = list_entry(entry,
1066                                                 struct sctp_chunk, list);
1067                 list_del_init(entry);
1068
1069                 /* Hold the chunk until an ASCONF_ACK is received. */
1070                 sctp_chunk_hold(asconf);
1071                 if (sctp_primitive_ASCONF(net, asoc, asconf))
1072                         sctp_chunk_free(asconf);
1073                 else
1074                         asoc->addip_last_asconf = asconf;
1075         }
1076 }
1077
1078
1079 /* These three macros allow us to pull the debugging code out of the
1080  * main flow of sctp_do_sm() to keep attention focused on the real
1081  * functionality there.
1082  */
1083 #define DEBUG_PRE \
1084         SCTP_DEBUG_PRINTK("sctp_do_sm prefn: " \
1085                           "ep %p, %s, %s, asoc %p[%s], %s\n", \
1086                           ep, sctp_evttype_tbl[event_type], \
1087                           (*debug_fn)(subtype), asoc, \
1088                           sctp_state_tbl[state], state_fn->name)
1089
1090 #define DEBUG_POST \
1091         SCTP_DEBUG_PRINTK("sctp_do_sm postfn: " \
1092                           "asoc %p, status: %s\n", \
1093                           asoc, sctp_status_tbl[status])
1094
1095 #define DEBUG_POST_SFX \
1096         SCTP_DEBUG_PRINTK("sctp_do_sm post sfx: error %d, asoc %p[%s]\n", \
1097                           error, asoc, \
1098                           sctp_state_tbl[(asoc && sctp_id2assoc(ep->base.sk, \
1099                           sctp_assoc2id(asoc)))?asoc->state:SCTP_STATE_CLOSED])
1100
1101 /*
1102  * This is the master state machine processing function.
1103  *
1104  * If you want to understand all of lksctp, this is a
1105  * good place to start.
1106  */
1107 int sctp_do_sm(struct net *net, sctp_event_t event_type, sctp_subtype_t subtype,
1108                sctp_state_t state,
1109                struct sctp_endpoint *ep,
1110                struct sctp_association *asoc,
1111                void *event_arg,
1112                gfp_t gfp)
1113 {
1114         sctp_cmd_seq_t commands;
1115         const sctp_sm_table_entry_t *state_fn;
1116         sctp_disposition_t status;
1117         int error = 0;
1118         typedef const char *(printfn_t)(sctp_subtype_t);
1119
1120         static printfn_t *table[] = {
1121                 NULL, sctp_cname, sctp_tname, sctp_oname, sctp_pname,
1122         };
1123         printfn_t *debug_fn  __attribute__ ((unused)) = table[event_type];
1124
1125         /* Look up the state function, run it, and then process the
1126          * side effects.  These three steps are the heart of lksctp.
1127          */
1128         state_fn = sctp_sm_lookup_event(net, event_type, state, subtype);
1129
1130         sctp_init_cmd_seq(&commands);
1131
1132         DEBUG_PRE;
1133         status = (*state_fn->fn)(net, ep, asoc, subtype, event_arg, &commands);
1134         DEBUG_POST;
1135
1136         error = sctp_side_effects(event_type, subtype, state,
1137                                   ep, asoc, event_arg, status,
1138                                   &commands, gfp);
1139         DEBUG_POST_SFX;
1140
1141         return error;
1142 }
1143
1144 #undef DEBUG_PRE
1145 #undef DEBUG_POST
1146
1147 /*****************************************************************
1148  * This is the master state function side effect processing function.
1149  *****************************************************************/
1150 static int sctp_side_effects(sctp_event_t event_type, sctp_subtype_t subtype,
1151                              sctp_state_t state,
1152                              struct sctp_endpoint *ep,
1153                              struct sctp_association *asoc,
1154                              void *event_arg,
1155                              sctp_disposition_t status,
1156                              sctp_cmd_seq_t *commands,
1157                              gfp_t gfp)
1158 {
1159         int error;
1160
1161         /* FIXME - Most of the dispositions left today would be categorized
1162          * as "exceptional" dispositions.  For those dispositions, it
1163          * may not be proper to run through any of the commands at all.
1164          * For example, the command interpreter might be run only with
1165          * disposition SCTP_DISPOSITION_CONSUME.
1166          */
1167         if (0 != (error = sctp_cmd_interpreter(event_type, subtype, state,
1168                                                ep, asoc,
1169                                                event_arg, status,
1170                                                commands, gfp)))
1171                 goto bail;
1172
1173         switch (status) {
1174         case SCTP_DISPOSITION_DISCARD:
1175                 SCTP_DEBUG_PRINTK("Ignored sctp protocol event - state %d, "
1176                                   "event_type %d, event_id %d\n",
1177                                   state, event_type, subtype.chunk);
1178                 break;
1179
1180         case SCTP_DISPOSITION_NOMEM:
1181                 /* We ran out of memory, so we need to discard this
1182                  * packet.
1183                  */
1184                 /* BUG--we should now recover some memory, probably by
1185                  * reneging...
1186                  */
1187                 error = -ENOMEM;
1188                 break;
1189
1190         case SCTP_DISPOSITION_DELETE_TCB:
1191                 /* This should now be a command. */
1192                 break;
1193
1194         case SCTP_DISPOSITION_CONSUME:
1195         case SCTP_DISPOSITION_ABORT:
1196                 /*
1197                  * We should no longer have much work to do here as the
1198                  * real work has been done as explicit commands above.
1199                  */
1200                 break;
1201
1202         case SCTP_DISPOSITION_VIOLATION:
1203                 net_err_ratelimited("protocol violation state %d chunkid %d\n",
1204                                     state, subtype.chunk);
1205                 break;
1206
1207         case SCTP_DISPOSITION_NOT_IMPL:
1208                 pr_warn("unimplemented feature in state %d, event_type %d, event_id %d\n",
1209                         state, event_type, subtype.chunk);
1210                 break;
1211
1212         case SCTP_DISPOSITION_BUG:
1213                 pr_err("bug in state %d, event_type %d, event_id %d\n",
1214                        state, event_type, subtype.chunk);
1215                 BUG();
1216                 break;
1217
1218         default:
1219                 pr_err("impossible disposition %d in state %d, event_type %d, event_id %d\n",
1220                        status, state, event_type, subtype.chunk);
1221                 BUG();
1222                 break;
1223         }
1224
1225 bail:
1226         return error;
1227 }
1228
1229 /********************************************************************
1230  * 2nd Level Abstractions
1231  ********************************************************************/
1232
1233 /* This is the side-effect interpreter.  */
1234 static int sctp_cmd_interpreter(sctp_event_t event_type,
1235                                 sctp_subtype_t subtype,
1236                                 sctp_state_t state,
1237                                 struct sctp_endpoint *ep,
1238                                 struct sctp_association *asoc,
1239                                 void *event_arg,
1240                                 sctp_disposition_t status,
1241                                 sctp_cmd_seq_t *commands,
1242                                 gfp_t gfp)
1243 {
1244         int error = 0;
1245         int force;
1246         sctp_cmd_t *cmd;
1247         struct sctp_chunk *new_obj;
1248         struct sctp_chunk *chunk = NULL;
1249         struct sctp_packet *packet;
1250         struct timer_list *timer;
1251         unsigned long timeout;
1252         struct sctp_transport *t;
1253         struct sctp_sackhdr sackh;
1254         int local_cork = 0;
1255
1256         if (SCTP_EVENT_T_TIMEOUT != event_type)
1257                 chunk = event_arg;
1258
1259         /* Note:  This whole file is a huge candidate for rework.
1260          * For example, each command could either have its own handler, so
1261          * the loop would look like:
1262          *     while (cmds)
1263          *         cmd->handle(x, y, z)
1264          * --jgrimm
1265          */
1266         while (NULL != (cmd = sctp_next_cmd(commands))) {
1267                 switch (cmd->verb) {
1268                 case SCTP_CMD_NOP:
1269                         /* Do nothing. */
1270                         break;
1271
1272                 case SCTP_CMD_NEW_ASOC:
1273                         /* Register a new association.  */
1274                         if (local_cork) {
1275                                 sctp_outq_uncork(&asoc->outqueue);
1276                                 local_cork = 0;
1277                         }
1278
1279                         /* Register with the endpoint.  */
1280                         asoc = cmd->obj.asoc;
1281                         BUG_ON(asoc->peer.primary_path == NULL);
1282                         sctp_endpoint_add_asoc(ep, asoc);
1283                         sctp_hash_established(asoc);
1284                         break;
1285
1286                 case SCTP_CMD_UPDATE_ASSOC:
1287                         sctp_assoc_update(asoc, cmd->obj.asoc);
1288                         break;
1289
1290                 case SCTP_CMD_PURGE_OUTQUEUE:
1291                         sctp_outq_teardown(&asoc->outqueue);
1292                         break;
1293
1294                 case SCTP_CMD_DELETE_TCB:
1295                         if (local_cork) {
1296                                 sctp_outq_uncork(&asoc->outqueue);
1297                                 local_cork = 0;
1298                         }
1299                         /* Delete the current association.  */
1300                         sctp_cmd_delete_tcb(commands, asoc);
1301                         asoc = NULL;
1302                         break;
1303
1304                 case SCTP_CMD_NEW_STATE:
1305                         /* Enter a new state.  */
1306                         sctp_cmd_new_state(commands, asoc, cmd->obj.state);
1307                         break;
1308
1309                 case SCTP_CMD_REPORT_TSN:
1310                         /* Record the arrival of a TSN.  */
1311                         error = sctp_tsnmap_mark(&asoc->peer.tsn_map,
1312                                                  cmd->obj.u32, NULL);
1313                         break;
1314
1315                 case SCTP_CMD_REPORT_FWDTSN:
1316                         /* Move the Cumulative TSN Ack ahead. */
1317                         sctp_tsnmap_skip(&asoc->peer.tsn_map, cmd->obj.u32);
1318
1319                         /* purge the fragmentation queue */
1320                         sctp_ulpq_reasm_flushtsn(&asoc->ulpq, cmd->obj.u32);
1321
1322                         /* Abort any in progress partial delivery. */
1323                         sctp_ulpq_abort_pd(&asoc->ulpq, GFP_ATOMIC);
1324                         break;
1325
1326                 case SCTP_CMD_PROCESS_FWDTSN:
1327                         sctp_cmd_process_fwdtsn(&asoc->ulpq, cmd->obj.chunk);
1328                         break;
1329
1330                 case SCTP_CMD_GEN_SACK:
1331                         /* Generate a Selective ACK.
1332                          * The argument tells us whether to just count
1333                          * the packet and MAYBE generate a SACK, or
1334                          * force a SACK out.
1335                          */
1336                         force = cmd->obj.i32;
1337                         error = sctp_gen_sack(asoc, force, commands);
1338                         break;
1339
1340                 case SCTP_CMD_PROCESS_SACK:
1341                         /* Process an inbound SACK.  */
1342                         error = sctp_cmd_process_sack(commands, asoc,
1343                                                       cmd->obj.chunk);
1344                         break;
1345
1346                 case SCTP_CMD_GEN_INIT_ACK:
1347                         /* Generate an INIT ACK chunk.  */
1348                         new_obj = sctp_make_init_ack(asoc, chunk, GFP_ATOMIC,
1349                                                      0);
1350                         if (!new_obj)
1351                                 goto nomem;
1352
1353                         sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
1354                                         SCTP_CHUNK(new_obj));
1355                         break;
1356
1357                 case SCTP_CMD_PEER_INIT:
1358                         /* Process a unified INIT from the peer.
1359                          * Note: Only used during INIT-ACK processing.  If
1360                          * there is an error, just return to the outer
1361                          * layer, which will bail.
1362                          */
1363                         error = sctp_cmd_process_init(commands, asoc, chunk,
1364                                                       cmd->obj.init, gfp);
1365                         break;
1366
1367                 case SCTP_CMD_GEN_COOKIE_ECHO:
1368                         /* Generate a COOKIE ECHO chunk.  */
1369                         new_obj = sctp_make_cookie_echo(asoc, chunk);
1370                         if (!new_obj) {
1371                                 if (cmd->obj.chunk)
1372                                         sctp_chunk_free(cmd->obj.chunk);
1373                                 goto nomem;
1374                         }
1375                         sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
1376                                         SCTP_CHUNK(new_obj));
1377
1378                         /* If there is an ERROR chunk to be sent along with
1379                          * the COOKIE_ECHO, send it, too.
1380                          */
1381                         if (cmd->obj.chunk)
1382                                 sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
1383                                                 SCTP_CHUNK(cmd->obj.chunk));
1384
1385                         if (new_obj->transport) {
1386                                 new_obj->transport->init_sent_count++;
1387                                 asoc->init_last_sent_to = new_obj->transport;
1388                         }
1389
1390                         /* FIXME - Eventually come up with a cleaner way to
1391                          * enable COOKIE-ECHO + DATA bundling during
1392                          * multihoming stale cookie scenarios.  The following
1393                          * command plays with asoc->peer.retran_path to
1394                          * avoid sending the COOKIE-ECHO and DATA on
1395                          * different paths, which could result in the
1396                          * association being ABORTed if the DATA chunk
1397                          * is processed first by the server.  Checking the
1398                          * init error counter simply causes this command
1399                          * to be executed only during failed attempts at
1400                          * association establishment.
1401                          */
1402                         if ((asoc->peer.retran_path !=
1403                              asoc->peer.primary_path) &&
1404                             (asoc->init_err_counter > 0)) {
1405                                 sctp_add_cmd_sf(commands,
1406                                                 SCTP_CMD_FORCE_PRIM_RETRAN,
1407                                                 SCTP_NULL());
1408                         }
1409
1410                         break;
1411
1412                 case SCTP_CMD_GEN_SHUTDOWN:
1413                         /* Generate SHUTDOWN when in SHUTDOWN_SENT state.
1414                          * Reset error counts.
1415                          */
1416                         asoc->overall_error_count = 0;
1417
1418                         /* Generate a SHUTDOWN chunk.  */
1419                         new_obj = sctp_make_shutdown(asoc, chunk);
1420                         if (!new_obj)
1421                                 goto nomem;
1422                         sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
1423                                         SCTP_CHUNK(new_obj));
1424                         break;
1425
1426                 case SCTP_CMD_CHUNK_ULP:
1427                         /* Send a chunk to the sockets layer.  */
1428                         SCTP_DEBUG_PRINTK("sm_sideff: %s %p, %s %p.\n",
1429                                           "chunk_up:", cmd->obj.chunk,
1430                                           "ulpq:", &asoc->ulpq);
1431                         sctp_ulpq_tail_data(&asoc->ulpq, cmd->obj.chunk,
1432                                             GFP_ATOMIC);
1433                         break;
1434
1435                 case SCTP_CMD_EVENT_ULP:
1436                         /* Send a notification to the sockets layer.  */
1437                         SCTP_DEBUG_PRINTK("sm_sideff: %s %p, %s %p.\n",
1438                                           "event_up:", cmd->obj.ulpevent,
1439                                           "ulpq:", &asoc->ulpq);
1440                         sctp_ulpq_tail_event(&asoc->ulpq, cmd->obj.ulpevent);
1441                         break;
1442
1443                 case SCTP_CMD_REPLY:
1444                         /* If the caller has not already corked, cork the queue. */
1445                         if (!asoc->outqueue.cork) {
1446                                 sctp_outq_cork(&asoc->outqueue);
1447                                 local_cork = 1;
1448                         }
1449                         /* Send a chunk to our peer.  */
1450                         error = sctp_outq_tail(&asoc->outqueue, cmd->obj.chunk);
1451                         break;
1452
1453                 case SCTP_CMD_SEND_PKT:
1454                         /* Send a full packet to our peer.  */
1455                         packet = cmd->obj.packet;
1456                         sctp_packet_transmit(packet);
1457                         sctp_ootb_pkt_free(packet);
1458                         break;
1459
1460                 case SCTP_CMD_T1_RETRAN:
1461                         /* Mark a transport for retransmission (T1-rtx). */
1462                         sctp_retransmit(&asoc->outqueue, cmd->obj.transport,
1463                                         SCTP_RTXR_T1_RTX);
1464                         break;
1465
1466                 case SCTP_CMD_RETRAN:
1467                         /* Mark a transport for retransmission (T3-rtx). */
1468                         sctp_retransmit(&asoc->outqueue, cmd->obj.transport,
1469                                         SCTP_RTXR_T3_RTX);
1470                         break;
1471
1472                 case SCTP_CMD_ECN_CE:
1473                         /* Do delayed CE processing.   */
1474                         sctp_do_ecn_ce_work(asoc, cmd->obj.u32);
1475                         break;
1476
1477                 case SCTP_CMD_ECN_ECNE:
1478                         /* Do delayed ECNE processing. */
1479                         new_obj = sctp_do_ecn_ecne_work(asoc, cmd->obj.u32,
1480                                                         chunk);
1481                         if (new_obj)
1482                                 sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
1483                                                 SCTP_CHUNK(new_obj));
1484                         break;
1485
1486                 case SCTP_CMD_ECN_CWR:
1487                         /* Do delayed CWR processing.  */
1488                         sctp_do_ecn_cwr_work(asoc, cmd->obj.u32);
1489                         break;
1490
1491                 case SCTP_CMD_SETUP_T2:
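                             /* Set up the transport and timeout to use for the
                              * T2-SHUTDOWN timer.
                              */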
1492                         sctp_cmd_setup_t2(commands, asoc, cmd->obj.chunk);
1493                         break;
1494
1495                 case SCTP_CMD_TIMER_START_ONCE:
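                             /* Start a timer only if it is not already running. */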
1496                         timer = &asoc->timers[cmd->obj.to];
1497
1498                         if (timer_pending(timer))
1499                                 break;
1500                         /* fall through */
1501
1502                 case SCTP_CMD_TIMER_START:
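                             /* Arm the timer; it takes a reference on the
                              * association until it fires or is stopped.
                              */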
1503                         timer = &asoc->timers[cmd->obj.to];
1504                         timeout = asoc->timeouts[cmd->obj.to];
1505                         BUG_ON(!timeout);
1506
1507                         timer->expires = jiffies + timeout;
1508                         sctp_association_hold(asoc);
1509                         add_timer(timer);
1510                         break;
1511
1512                 case SCTP_CMD_TIMER_RESTART:
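                             /* Push the expiry out a full timeout; only take a
                              * new reference if the timer was not pending.
                              */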
1513                         timer = &asoc->timers[cmd->obj.to];
1514                         timeout = asoc->timeouts[cmd->obj.to];
1515                         if (!mod_timer(timer, jiffies + timeout))
1516                                 sctp_association_hold(asoc);
1517                         break;
1518
1519                 case SCTP_CMD_TIMER_STOP:
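                             /* Stop the timer and drop its reference if it
                              * was still pending.
                              */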
1520                         timer = &asoc->timers[cmd->obj.to];
1521                         if (del_timer(timer))
1522                                 sctp_association_put(asoc);
1523                         break;
1524
1525                 case SCTP_CMD_INIT_CHOOSE_TRANSPORT:
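                             /* Choose an alternate transport for the INIT and
                              * make it the new primary path.
                              */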
1526                         chunk = cmd->obj.chunk;
1527                         t = sctp_assoc_choose_alter_transport(asoc,
1528                                                 asoc->init_last_sent_to);
1529                         asoc->init_last_sent_to = t;
1530                         chunk->transport = t;
1531                         t->init_sent_count++;
1532                         /* Set the new transport as primary */
1533                         sctp_assoc_set_primary(asoc, t);
1534                         break;
1535
1536                 case SCTP_CMD_INIT_RESTART:
1537                         /* Do the needed accounting and updates
1538                          * associated with restarting the T1-INIT
1539                          * timer.  Only double the timeout if
1540                          * all transports have been tried at the current
1541                          * timeout.
1542                          */
1543                         sctp_cmd_t1_timer_update(asoc,
1544                                                 SCTP_EVENT_TIMEOUT_T1_INIT,
1545                                                 "INIT");
1546
1547                         sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART,
1548                                         SCTP_TO(SCTP_EVENT_TIMEOUT_T1_INIT));
1549                         break;
1550
1551                 case SCTP_CMD_COOKIEECHO_RESTART:
1552                         /* Do the needed accounting and updates
1553                          * associated with restarting the T1-COOKIE
1554                          * timer.  Only double the timeout if
1555                          * all transports have been tried at the current
1556                          * timeout.
1557                          */
1558                         sctp_cmd_t1_timer_update(asoc,
1559                                                 SCTP_EVENT_TIMEOUT_T1_COOKIE,
1560                                                 "COOKIE");
1561
1562                         /* If we've sent any data bundled with
1563                          * COOKIE-ECHO, we need to resend it.
1564                          */
1565                         list_for_each_entry(t, &asoc->peer.transport_addr_list,
1566                                         transports) {
1567                                 sctp_retransmit_mark(&asoc->outqueue, t,
1568                                             SCTP_RTXR_T1_RTX);
1569                         }
1570
1571                         sctp_add_cmd_sf(commands,
1572                                         SCTP_CMD_TIMER_RESTART,
1573                                         SCTP_TO(SCTP_EVENT_TIMEOUT_T1_COOKIE));
1574                         break;
1575
1576                 case SCTP_CMD_INIT_FAILED:
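                             /* Association setup failed; report it to the ULP
                              * and dispose of the association.
                              */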
1577                         sctp_cmd_init_failed(commands, asoc, cmd->obj.err);
1578                         break;
1579
1580                 case SCTP_CMD_ASSOC_FAILED:
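                             /* The association has failed; report it to the
                              * ULP and tear the association down.
                              */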
1581                         sctp_cmd_assoc_failed(commands, asoc, event_type,
1582                                               subtype, chunk, cmd->obj.err);
1583                         break;
1584
1585                 case SCTP_CMD_INIT_COUNTER_INC:
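                             /* Count another failed initialization attempt. */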
1586                         asoc->init_err_counter++;
1587                         break;
1588
1589                 case SCTP_CMD_INIT_COUNTER_RESET:
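                             /* Clear the initialization error accounting on
                              * the association and on every transport.
                              */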
1590                         asoc->init_err_counter = 0;
1591                         asoc->init_cycle = 0;
1592                         list_for_each_entry(t, &asoc->peer.transport_addr_list,
1593                                             transports) {
1594                                 t->init_sent_count = 0;
1595                         }
1596                         break;
1597
1598                 case SCTP_CMD_REPORT_DUP:
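                             /* Record a duplicate TSN for the next SACK. */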
1599                         sctp_tsnmap_mark_dup(&asoc->peer.tsn_map,
1600                                              cmd->obj.u32);
1601                         break;
1602
1603                 case SCTP_CMD_REPORT_BAD_TAG:
1604                         SCTP_DEBUG_PRINTK("vtag mismatch!\n");
1605                         break;
1606
1607                 case SCTP_CMD_STRIKE:
1608                         /* Mark one strike against a transport.  */
1609                         sctp_do_8_2_transport_strike(commands, asoc,
1610                                                     cmd->obj.transport, 0);
1611                         break;
1612
1613                 case SCTP_CMD_TRANSPORT_IDLE:
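                             /* The transport has been idle; lower its cwnd. */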
1614                         t = cmd->obj.transport;
1615                         sctp_transport_lower_cwnd(t, SCTP_LOWER_CWND_INACTIVE);
1616                         break;
1617
1618                 case SCTP_CMD_TRANSPORT_HB_SENT:
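                             /* A HEARTBEAT went out; count an error strike and
                              * note that a HEARTBEAT is outstanding.
                              */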
1619                         t = cmd->obj.transport;
1620                         sctp_do_8_2_transport_strike(commands, asoc,
1621                                                      t, 1);
1622                         t->hb_sent = 1;
1623                         break;
1624
1625                 case SCTP_CMD_TRANSPORT_ON:
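                             /* A HEARTBEAT ACK proved the transport reachable;
                              * mark it active again.
                              */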
1626                         t = cmd->obj.transport;
1627                         sctp_cmd_transport_on(commands, asoc, t, chunk);
1628                         break;
1629
1630                 case SCTP_CMD_HB_TIMERS_START:
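                             /* Start a heartbeat timer on every transport. */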
1631                         sctp_cmd_hb_timers_start(commands, asoc);
1632                         break;
1633
1634                 case SCTP_CMD_HB_TIMER_UPDATE:
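                             /* Re-arm the heartbeat timer of one transport. */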
1635                         t = cmd->obj.transport;
1636                         sctp_cmd_hb_timer_update(commands, t);
1637                         break;
1638
1639                 case SCTP_CMD_HB_TIMERS_STOP:
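                             /* Stop the heartbeat timer on every transport. */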
1640                         sctp_cmd_hb_timers_stop(commands, asoc);
1641                         break;
1642
1643                 case SCTP_CMD_REPORT_ERROR:
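                             /* Latch an error; it stops the command loop and
                              * is returned to the caller.
                              */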
1644                         error = cmd->obj.error;
1645                         break;
1646
1647                 case SCTP_CMD_PROCESS_CTSN:
1648                         /* Dummy up a SACK for processing. */
1649                         sackh.cum_tsn_ack = cmd->obj.be32;
1650                         sackh.a_rwnd = asoc->peer.rwnd +
1651                                         asoc->outqueue.outstanding_bytes;
1652                         sackh.num_gap_ack_blocks = 0;
1653                         sackh.num_dup_tsns = 0;
1654                         chunk->subh.sack_hdr = &sackh;
1655                         sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_SACK,
1656                                         SCTP_CHUNK(chunk));
1657                         break;
1658
1659                 case SCTP_CMD_DISCARD_PACKET:
1660                         /* We need to discard the whole packet.
1661                          * Uncork the queue since there might be
1662                          * responses pending.
1663                          */
1664                         chunk->pdiscard = 1;
1665                         if (asoc) {
1666                                 sctp_outq_uncork(&asoc->outqueue);
1667                                 local_cork = 0;
1668                         }
1669                         break;
1670
1671                 case SCTP_CMD_RTO_PENDING:
1672                         t = cmd->obj.transport;
1673                         t->rto_pending = 1;
1674                         break;
1675
1676                 case SCTP_CMD_PART_DELIVER:
1677                         sctp_ulpq_partial_delivery(&asoc->ulpq, GFP_ATOMIC);
1678                         break;
1679
1680                 case SCTP_CMD_RENEGE:
1681                         sctp_ulpq_renege(&asoc->ulpq, cmd->obj.chunk,
1682                                          GFP_ATOMIC);
1683                         break;
1684
1685                 case SCTP_CMD_SETUP_T4:
1686                         sctp_cmd_setup_t4(commands, asoc, cmd->obj.chunk);
1687                         break;
1688
1689                 case SCTP_CMD_PROCESS_OPERR:
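                             /* Handle an inbound Operation Error chunk. */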
1690                         sctp_cmd_process_operr(commands, asoc, chunk);
1691                         break;
1692                 case SCTP_CMD_CLEAR_INIT_TAG:
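                             /* Forget the peer's initiate tag. */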
1693                         asoc->peer.i.init_tag = 0;
1694                         break;
1695                 case SCTP_CMD_DEL_NON_PRIMARY:
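                             /* Release every transport except the primary path. */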
1696                         sctp_cmd_del_non_primary(asoc);
1697                         break;
1698                 case SCTP_CMD_T3_RTX_TIMERS_STOP:
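                             /* Stop the T3-rtx timer on every transport. */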
1699                         sctp_cmd_t3_rtx_timers_stop(commands, asoc);
1700                         break;
1701                 case SCTP_CMD_FORCE_PRIM_RETRAN:
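                             /* Flush the outqueue with retransmissions forced
                              * onto the primary path for the duration.
                              */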
1702                         t = asoc->peer.retran_path;
1703                         asoc->peer.retran_path = asoc->peer.primary_path;
1704                         error = sctp_outq_uncork(&asoc->outqueue);
1705                         local_cork = 0;
1706                         asoc->peer.retran_path = t;
1707                         break;
1708                 case SCTP_CMD_SET_SK_ERR:
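                             /* Record an error on the owning socket. */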
1709                         sctp_cmd_set_sk_err(asoc, cmd->obj.error);
1710                         break;
1711                 case SCTP_CMD_ASSOC_CHANGE:
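                             /* Deliver an SCTP_ASSOC_CHANGE event to the ULP. */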
1712                         sctp_cmd_assoc_change(commands, asoc,
1713                                               cmd->obj.u8);
1714                         break;
1715                 case SCTP_CMD_ADAPTATION_IND:
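                             /* Deliver an adaptation layer indication to the ULP. */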
1716                         sctp_cmd_adaptation_ind(commands, asoc);
1717                         break;
1718
1719                 case SCTP_CMD_ASSOC_SHKEY:
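                             /* Compute the association's active SCTP-AUTH key. */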
1720                         error = sctp_auth_asoc_init_active_key(asoc,
1721                                                 GFP_ATOMIC);
1722                         break;
1723                 case SCTP_CMD_UPDATE_INITTAG:
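                             /* Adopt a new initiate tag from the peer. */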
1724                         asoc->peer.i.init_tag = cmd->obj.u32;
1725                         break;
1726                 case SCTP_CMD_SEND_MSG:
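                             /* Queue the chunks of a message, corking first if
                              * the caller has not already done so.
                              */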
1727                         if (!asoc->outqueue.cork) {
1728                                 sctp_outq_cork(&asoc->outqueue);
1729                                 local_cork = 1;
1730                         }
1731                         error = sctp_cmd_send_msg(asoc, cmd->obj.msg);
1732                         break;
1733                 case SCTP_CMD_SEND_NEXT_ASCONF:
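                             /* Send the next queued ASCONF chunk, if any. */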
1734                         sctp_cmd_send_asconf(asoc);
1735                         break;
1736                 case SCTP_CMD_PURGE_ASCONF_QUEUE:
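                             /* Drop all ASCONF chunks queued on the association. */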
1737                         sctp_asconf_queue_teardown(asoc);
1738                         break;
1739
1740                 case SCTP_CMD_SET_ASOC:
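                             /* Switch the association that later commands act on. */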
1741                         asoc = cmd->obj.asoc;
1742                         break;
1743
1744                 default:
1745                         pr_warn("Impossible command: %u\n",
1746                                 cmd->verb);
1747                         break;
1748                 }
1749
1750                 if (error)
1751                         break;
1752         }
1753
1754 out:
1755         /* If this is in response to a received chunk, wait until
1756          * we are done with the packet to open the queue so that we don't
1757          * send multiple packets in response to a single request.
1758          */
1759         if (asoc && SCTP_EVENT_T_CHUNK == event_type && chunk) {
1760                 if (chunk->end_of_packet || chunk->singleton)
1761                         error = sctp_outq_uncork(&asoc->outqueue);
1762         } else if (local_cork)
1763                 error = sctp_outq_uncork(&asoc->outqueue);
1764         return error;
1765 nomem:
1766         error = -ENOMEM;
1767         goto out;
1768 }
1769