[ICSK] compat: Introduce inet_csk_compat_[gs]etsockopt
1 /*
2  *  net/dccp/proto.c
3  *
4  *  An implementation of the DCCP protocol
5  *  Arnaldo Carvalho de Melo <acme@conectiva.com.br>
6  *
7  *      This program is free software; you can redistribute it and/or modify it
8  *      under the terms of the GNU General Public License version 2 as
9  *      published by the Free Software Foundation.
10  */
11
12 #include <linux/config.h>
13 #include <linux/dccp.h>
14 #include <linux/module.h>
15 #include <linux/types.h>
16 #include <linux/sched.h>
17 #include <linux/kernel.h>
18 #include <linux/skbuff.h>
19 #include <linux/netdevice.h>
20 #include <linux/in.h>
21 #include <linux/if_arp.h>
22 #include <linux/init.h>
23 #include <linux/random.h>
24 #include <net/checksum.h>
25
26 #include <net/inet_sock.h>
27 #include <net/sock.h>
28 #include <net/xfrm.h>
29
30 #include <asm/semaphore.h>
31 #include <linux/spinlock.h>
32 #include <linux/timer.h>
33 #include <linux/delay.h>
34 #include <linux/poll.h>
35
36 #include "ccid.h"
37 #include "dccp.h"
38 #include "feat.h"
39
40 DEFINE_SNMP_STAT(struct dccp_mib, dccp_statistics) __read_mostly;
41
42 EXPORT_SYMBOL_GPL(dccp_statistics);
43
44 atomic_t dccp_orphan_count = ATOMIC_INIT(0);
45
46 EXPORT_SYMBOL_GPL(dccp_orphan_count);
47
48 struct inet_hashinfo __cacheline_aligned dccp_hashinfo = {
49         .lhash_lock     = RW_LOCK_UNLOCKED,
50         .lhash_users    = ATOMIC_INIT(0),
51         .lhash_wait = __WAIT_QUEUE_HEAD_INITIALIZER(dccp_hashinfo.lhash_wait),
52 };
53
54 EXPORT_SYMBOL_GPL(dccp_hashinfo);
55
56 void dccp_set_state(struct sock *sk, const int state)
57 {
58         const int oldstate = sk->sk_state;
59
60         dccp_pr_debug("%s(%p) %-10.10s -> %s\n",
61                       dccp_role(sk), sk,
62                       dccp_state_name(oldstate), dccp_state_name(state));
63         WARN_ON(state == oldstate);
64
65         switch (state) {
66         case DCCP_OPEN:
67                 if (oldstate != DCCP_OPEN)
68                         DCCP_INC_STATS(DCCP_MIB_CURRESTAB);
69                 break;
70
71         case DCCP_CLOSED:
72                 if (oldstate == DCCP_CLOSING || oldstate == DCCP_OPEN)
73                         DCCP_INC_STATS(DCCP_MIB_ESTABRESETS);
74
75                 sk->sk_prot->unhash(sk);
76                 if (inet_csk(sk)->icsk_bind_hash != NULL &&
77                     !(sk->sk_userlocks & SOCK_BINDPORT_LOCK))
78                         inet_put_port(&dccp_hashinfo, sk);
79                 /* fall through */
80         default:
81                 if (oldstate == DCCP_OPEN)
82                         DCCP_DEC_STATS(DCCP_MIB_CURRESTAB);
83         }
84
85         /* Change state AFTER socket is unhashed to avoid closed
86          * socket sitting in hash tables.
87          */
88         sk->sk_state = state;
89 }
90
91 EXPORT_SYMBOL_GPL(dccp_set_state);
92
93 void dccp_done(struct sock *sk)
94 {
95         dccp_set_state(sk, DCCP_CLOSED);
96         dccp_clear_xmit_timers(sk);
97
98         sk->sk_shutdown = SHUTDOWN_MASK;
99
100         if (!sock_flag(sk, SOCK_DEAD))
101                 sk->sk_state_change(sk);
102         else
103                 inet_csk_destroy_sock(sk);
104 }
105
106 EXPORT_SYMBOL_GPL(dccp_done);
107
108 const char *dccp_packet_name(const int type)
109 {
110         static const char *dccp_packet_names[] = {
111                 [DCCP_PKT_REQUEST]  = "REQUEST",
112                 [DCCP_PKT_RESPONSE] = "RESPONSE",
113                 [DCCP_PKT_DATA]     = "DATA",
114                 [DCCP_PKT_ACK]      = "ACK",
115                 [DCCP_PKT_DATAACK]  = "DATAACK",
116                 [DCCP_PKT_CLOSEREQ] = "CLOSEREQ",
117                 [DCCP_PKT_CLOSE]    = "CLOSE",
118                 [DCCP_PKT_RESET]    = "RESET",
119                 [DCCP_PKT_SYNC]     = "SYNC",
120                 [DCCP_PKT_SYNCACK]  = "SYNCACK",
121         };
122
123         if (type >= DCCP_NR_PKT_TYPES)
124                 return "INVALID";
125         else
126                 return dccp_packet_names[type];
127 }
128
129 EXPORT_SYMBOL_GPL(dccp_packet_name);
130
131 const char *dccp_state_name(const int state)
132 {
133         static char *dccp_state_names[] = {
134         [DCCP_OPEN]       = "OPEN",
135         [DCCP_REQUESTING] = "REQUESTING",
136         [DCCP_PARTOPEN]   = "PARTOPEN",
137         [DCCP_LISTEN]     = "LISTEN",
138         [DCCP_RESPOND]    = "RESPOND",
139         [DCCP_CLOSING]    = "CLOSING",
140         [DCCP_TIME_WAIT]  = "TIME_WAIT",
141         [DCCP_CLOSED]     = "CLOSED",
142         };
143
144         if (state >= DCCP_MAX_STATES)
145                 return "INVALID STATE!";
146         else
147                 return dccp_state_names[state];
148 }
149
150 EXPORT_SYMBOL_GPL(dccp_state_name);
151
152 void dccp_hash(struct sock *sk)
153 {
154         inet_hash(&dccp_hashinfo, sk);
155 }
156
157 EXPORT_SYMBOL_GPL(dccp_hash);
158
159 void dccp_unhash(struct sock *sk)
160 {
161         inet_unhash(&dccp_hashinfo, sk);
162 }
163
164 EXPORT_SYMBOL_GPL(dccp_unhash);
165
166 int dccp_init_sock(struct sock *sk, const __u8 ctl_sock_initialized)
167 {
168         struct dccp_sock *dp = dccp_sk(sk);
169         struct inet_connection_sock *icsk = inet_csk(sk);
170
171         dccp_options_init(&dp->dccps_options);
172         do_gettimeofday(&dp->dccps_epoch);
173
174         /*
175          * FIXME: We're hardcoding the CCID, and doing this at this point makes
176          * the listening (master) sock get CCID control blocks, which is not
177          * necessary, but for now, to not mess with the test userspace apps,
178          * let's leave it here; later the real solution is to do this in a
179          * setsockopt(CCIDs-I-want/accept). -acme
180          */
181         if (likely(ctl_sock_initialized)) {
182                 int rc = dccp_feat_init(sk);
183
184                 if (rc)
185                         return rc;
186
187                 if (dp->dccps_options.dccpo_send_ack_vector) {
188                         dp->dccps_hc_rx_ackvec = dccp_ackvec_alloc(GFP_KERNEL);
189                         if (dp->dccps_hc_rx_ackvec == NULL)
190                                 return -ENOMEM;
191                 }
192                 dp->dccps_hc_rx_ccid =
193                                 ccid_hc_rx_new(dp->dccps_options.dccpo_rx_ccid,
194                                                sk, GFP_KERNEL);
195                 dp->dccps_hc_tx_ccid =
196                                 ccid_hc_tx_new(dp->dccps_options.dccpo_tx_ccid,
197                                                sk, GFP_KERNEL);
198                 if (unlikely(dp->dccps_hc_rx_ccid == NULL ||
199                              dp->dccps_hc_tx_ccid == NULL)) {
200                         ccid_hc_rx_delete(dp->dccps_hc_rx_ccid, sk);
201                         ccid_hc_tx_delete(dp->dccps_hc_tx_ccid, sk);
202                         if (dp->dccps_options.dccpo_send_ack_vector) {
203                                 dccp_ackvec_free(dp->dccps_hc_rx_ackvec);
204                                 dp->dccps_hc_rx_ackvec = NULL;
205                         }
206                         dp->dccps_hc_rx_ccid = dp->dccps_hc_tx_ccid = NULL;
207                         return -ENOMEM;
208                 }
209         } else {
210                 /* control socket doesn't need feat nego */
211                 INIT_LIST_HEAD(&dp->dccps_options.dccpo_pending);
212                 INIT_LIST_HEAD(&dp->dccps_options.dccpo_conf);
213         }
214
215         dccp_init_xmit_timers(sk);
216         icsk->icsk_rto          = DCCP_TIMEOUT_INIT;
217         sk->sk_state            = DCCP_CLOSED;
218         sk->sk_write_space      = dccp_write_space;
219         icsk->icsk_sync_mss     = dccp_sync_mss;
220         dp->dccps_mss_cache     = 536;
221         dp->dccps_role          = DCCP_ROLE_UNDEFINED;
222         dp->dccps_service       = DCCP_SERVICE_INVALID_VALUE;
223         dp->dccps_l_ack_ratio   = dp->dccps_r_ack_ratio = 1;
224
225         return 0;
226 }
227
228 EXPORT_SYMBOL_GPL(dccp_init_sock);
229
230 int dccp_destroy_sock(struct sock *sk)
231 {
232         struct dccp_sock *dp = dccp_sk(sk);
233
234         /*
235          * DCCP doesn't use sk_write_queue, just sk_send_head
236          * for retransmissions
237          */
238         if (sk->sk_send_head != NULL) {
239                 kfree_skb(sk->sk_send_head);
240                 sk->sk_send_head = NULL;
241         }
242
243         /* Clean up a referenced DCCP bind bucket. */
244         if (inet_csk(sk)->icsk_bind_hash != NULL)
245                 inet_put_port(&dccp_hashinfo, sk);
246
247         kfree(dp->dccps_service_list);
248         dp->dccps_service_list = NULL;
249
250         if (dp->dccps_options.dccpo_send_ack_vector) {
251                 dccp_ackvec_free(dp->dccps_hc_rx_ackvec);
252                 dp->dccps_hc_rx_ackvec = NULL;
253         }
254         ccid_hc_rx_delete(dp->dccps_hc_rx_ccid, sk);
255         ccid_hc_tx_delete(dp->dccps_hc_tx_ccid, sk);
256         dp->dccps_hc_rx_ccid = dp->dccps_hc_tx_ccid = NULL;
257
258         /* clean up feature negotiation state */
259         dccp_feat_clean(sk);
260
261         return 0;
262 }
263
264 EXPORT_SYMBOL_GPL(dccp_destroy_sock);
265
266 static inline int dccp_listen_start(struct sock *sk)
267 {
268         struct dccp_sock *dp = dccp_sk(sk);
269
270         dp->dccps_role = DCCP_ROLE_LISTEN;
271         /*
272          * Apps need to use setsockopt(DCCP_SOCKOPT_SERVICE)
273          * before calling listen()
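             *
             * (Illustrative sketch only; the value 42 and the fd variable are
             *  arbitrary examples, not anything mandated here:
             *
             *      __be32 service = htonl(42);
             *      setsockopt(fd, SOL_DCCP, DCCP_SOCKOPT_SERVICE,
             *                 &service, sizeof(service));
             *
             *  Without such a call the check below fails with -EPROTO.)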
274          */
275         if (dccp_service_not_initialized(sk))
276                 return -EPROTO;
277         return inet_csk_listen_start(sk, TCP_SYNQ_HSIZE);
278 }
279
280 int dccp_disconnect(struct sock *sk, int flags)
281 {
282         struct inet_connection_sock *icsk = inet_csk(sk);
283         struct inet_sock *inet = inet_sk(sk);
284         int err = 0;
285         const int old_state = sk->sk_state;
286
287         if (old_state != DCCP_CLOSED)
288                 dccp_set_state(sk, DCCP_CLOSED);
289
290         /* ABORT function of RFC793 */
291         if (old_state == DCCP_LISTEN) {
292                 inet_csk_listen_stop(sk);
293         /* FIXME: do the active reset thing */
294         } else if (old_state == DCCP_REQUESTING)
295                 sk->sk_err = ECONNRESET;
296
297         dccp_clear_xmit_timers(sk);
298         __skb_queue_purge(&sk->sk_receive_queue);
299         if (sk->sk_send_head != NULL) {
300                 __kfree_skb(sk->sk_send_head);
301                 sk->sk_send_head = NULL;
302         }
303
304         inet->dport = 0;
305
306         if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK))
307                 inet_reset_saddr(sk);
308
309         sk->sk_shutdown = 0;
310         sock_reset_flag(sk, SOCK_DONE);
311
312         icsk->icsk_backoff = 0;
313         inet_csk_delack_init(sk);
314         __sk_dst_reset(sk);
315
316         BUG_TRAP(!inet->num || icsk->icsk_bind_hash);
317
318         sk->sk_error_report(sk);
319         return err;
320 }
321
322 EXPORT_SYMBOL_GPL(dccp_disconnect);
323
324 /*
325  *      Wait for a DCCP event.
326  *
327  *      Note that we don't need to lock the socket, as the upper poll layers
328  *      take care of normal races (between the test and the event) and we don't
329  *      go look at any of the socket buffers directly.
330  */
331 unsigned int dccp_poll(struct file *file, struct socket *sock,
332                        poll_table *wait)
333 {
334         unsigned int mask;
335         struct sock *sk = sock->sk;
336
337         poll_wait(file, sk->sk_sleep, wait);
338         if (sk->sk_state == DCCP_LISTEN)
339                 return inet_csk_listen_poll(sk);
340
341         /* Socket is not locked. We are protected from async events
342            by the poll logic, and correct handling of state changes
343            made by other threads is impossible in any case.
344          */
345
346         mask = 0;
347         if (sk->sk_err)
348                 mask = POLLERR;
349
350         if (sk->sk_shutdown == SHUTDOWN_MASK || sk->sk_state == DCCP_CLOSED)
351                 mask |= POLLHUP;
352         if (sk->sk_shutdown & RCV_SHUTDOWN)
353                 mask |= POLLIN | POLLRDNORM;
354
355         /* Connected? */
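            /* The DCCPF_* values are the (1 << DCCP_*) state masks, so this
             * matches every state except REQUESTING and RESPOND. */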
356         if ((1 << sk->sk_state) & ~(DCCPF_REQUESTING | DCCPF_RESPOND)) {
357                 if (atomic_read(&sk->sk_rmem_alloc) > 0)
358                         mask |= POLLIN | POLLRDNORM;
359
360                 if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
361                         if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk)) {
362                                 mask |= POLLOUT | POLLWRNORM;
363                         } else {  /* send SIGIO later */
364                                 set_bit(SOCK_ASYNC_NOSPACE,
365                                         &sk->sk_socket->flags);
366                                 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
367
368                                 /* Race breaker. If space is freed after
369                                  * wspace test but before the flags are set,
370                                  * IO signal will be lost.
371                                  */
372                                 if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk))
373                                         mask |= POLLOUT | POLLWRNORM;
374                         }
375                 }
376         }
377         return mask;
378 }
379
380 EXPORT_SYMBOL_GPL(dccp_poll);
381
382 int dccp_ioctl(struct sock *sk, int cmd, unsigned long arg)
383 {
384         dccp_pr_debug("entry\n");
385         return -ENOIOCTLCMD;
386 }
387
388 EXPORT_SYMBOL_GPL(dccp_ioctl);
389
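    /*
     * The optval blob handled below starts with one __be32 service code;
     * any remaining bytes are further __be32 codes (at most
     * DCCP_SERVICE_LIST_MAX_LEN entries in total), which are copied into a
     * freshly allocated dccp_service_list that replaces the previous one.
     * DCCP_SERVICE_INVALID_VALUE is rejected, both as the primary code and
     * inside the list.
     */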
390 static int dccp_setsockopt_service(struct sock *sk, const __be32 service,
391                                    char __user *optval, int optlen)
392 {
393         struct dccp_sock *dp = dccp_sk(sk);
394         struct dccp_service_list *sl = NULL;
395
396         if (service == DCCP_SERVICE_INVALID_VALUE || 
397             optlen > DCCP_SERVICE_LIST_MAX_LEN * sizeof(u32))
398                 return -EINVAL;
399
400         if (optlen > sizeof(service)) {
401                 sl = kmalloc(optlen, GFP_KERNEL);
402                 if (sl == NULL)
403                         return -ENOMEM;
404
405                 sl->dccpsl_nr = optlen / sizeof(u32) - 1;
406                 if (copy_from_user(sl->dccpsl_list,
407                                    optval + sizeof(service),
408                                    optlen - sizeof(service)) ||
409                     dccp_list_has_service(sl, DCCP_SERVICE_INVALID_VALUE)) {
410                         kfree(sl);
411                         return -EFAULT;
412                 }
413         }
414
415         lock_sock(sk);
416         dp->dccps_service = service;
417
418         kfree(dp->dccps_service_list);
419
420         dp->dccps_service_list = sl;
421         release_sock(sk);
422         return 0;
423 }
424
425 /* Byte 1 is the feature; the rest is the preference list. */
426 static int dccp_setsockopt_change(struct sock *sk, int type,
427                                   struct dccp_so_feat __user *optval)
428 {
429         struct dccp_so_feat opt;
430         u8 *val;
431         int rc;
432
433         if (copy_from_user(&opt, optval, sizeof(opt)))
434                 return -EFAULT;
435
436         val = kmalloc(opt.dccpsf_len, GFP_KERNEL);
437         if (!val)
438                 return -ENOMEM;
439
440         if (copy_from_user(val, opt.dccpsf_val, opt.dccpsf_len)) {
441                 rc = -EFAULT;
442                 goto out_free_val;
443         }
444
445         rc = dccp_feat_change(sk, type, opt.dccpsf_feat, val, opt.dccpsf_len,
446                               GFP_KERNEL);
447         if (rc)
448                 goto out_free_val;
449
450 out:
451         return rc;
452
453 out_free_val:
454         kfree(val);
455         goto out;
456 }
457
458 static int do_dccp_setsockopt(struct sock *sk, int level, int optname,
459                 char __user *optval, int optlen)
460 {
461         struct dccp_sock *dp;
462         int err;
463         int val;
464
465         if (optlen < sizeof(int))
466                 return -EINVAL;
467
468         if (get_user(val, (int __user *)optval))
469                 return -EFAULT;
470
471         if (optname == DCCP_SOCKOPT_SERVICE)
472                 return dccp_setsockopt_service(sk, val, optval, optlen);
473
474         lock_sock(sk);
475         dp = dccp_sk(sk);
476         err = 0;
477
478         switch (optname) {
479         case DCCP_SOCKOPT_PACKET_SIZE:
480                 dp->dccps_packet_size = val;
481                 break;
482
483         case DCCP_SOCKOPT_CHANGE_L:
484                 if (optlen != sizeof(struct dccp_so_feat))
485                         err = -EINVAL;
486                 else
487                         err = dccp_setsockopt_change(sk, DCCPO_CHANGE_L,
488                                                      (struct dccp_so_feat *)
489                                                      optval);
490                 break;
491
492         case DCCP_SOCKOPT_CHANGE_R:
493                 if (optlen != sizeof(struct dccp_so_feat))
494                         err = -EINVAL;
495                 else
496                         err = dccp_setsockopt_change(sk, DCCPO_CHANGE_R,
497                                                      (struct dccp_so_feat *)
498                                                      optval);
499                 break;
500
501         default:
502                 err = -ENOPROTOOPT;
503                 break;
504         }
505         
506         release_sock(sk);
507         return err;
508 }
509
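    /*
     * Only SOL_DCCP options are handled here; any other level is passed
     * through to the address-family code via icsk_af_ops->setsockopt, or,
     * for 32-bit compat callers, via inet_csk_compat_setsockopt() below.
     */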
510 int dccp_setsockopt(struct sock *sk, int level, int optname,
511                     char __user *optval, int optlen)
512 {
513         if (level != SOL_DCCP)
514                 return inet_csk(sk)->icsk_af_ops->setsockopt(sk, level,
515                                                              optname, optval,
516                                                              optlen);
517         return do_dccp_setsockopt(sk, level, optname, optval, optlen);
518 }
519 EXPORT_SYMBOL_GPL(dccp_setsockopt);
520
521 #ifdef CONFIG_COMPAT
522 int compat_dccp_setsockopt(struct sock *sk, int level, int optname,
523                     char __user *optval, int optlen)
524 {
525         if (level != SOL_DCCP)
526                 return inet_csk_compat_setsockopt(sk, level, optname,
527                                                   optval, optlen);
528
529         return do_dccp_setsockopt(sk, level, optname, optval, optlen);
530 }
531 EXPORT_SYMBOL_GPL(compat_dccp_setsockopt);
532 #endif
533
534 static int dccp_getsockopt_service(struct sock *sk, int len,
535                                    __be32 __user *optval,
536                                    int __user *optlen)
537 {
538         const struct dccp_sock *dp = dccp_sk(sk);
539         const struct dccp_service_list *sl;
540         int err = -ENOENT, slen = 0, total_len = sizeof(u32);
541
542         lock_sock(sk);
543         if (dccp_service_not_initialized(sk))
544                 goto out;
545
546         if ((sl = dp->dccps_service_list) != NULL) {
547                 slen = sl->dccpsl_nr * sizeof(u32);
548                 total_len += slen;
549         }
550
551         err = -EINVAL;
552         if (total_len > len)
553                 goto out;
554
555         err = 0;
556         if (put_user(total_len, optlen) ||
557             put_user(dp->dccps_service, optval) ||
558             (sl != NULL && copy_to_user(optval + 1, sl->dccpsl_list, slen)))
559                 err = -EFAULT;
560 out:
561         release_sock(sk);
562         return err;
563 }
564
565 static int do_dccp_getsockopt(struct sock *sk, int level, int optname,
566                     char __user *optval, int __user *optlen)
567 {
568         struct dccp_sock *dp;
569         int val, len;
570
571         if (get_user(len, optlen))
572                 return -EFAULT;
573
574         if (len < sizeof(int))
575                 return -EINVAL;
576
577         dp = dccp_sk(sk);
578
579         switch (optname) {
580         case DCCP_SOCKOPT_PACKET_SIZE:
581                 val = dp->dccps_packet_size;
582                 len = sizeof(dp->dccps_packet_size);
583                 break;
584         case DCCP_SOCKOPT_SERVICE:
585                 return dccp_getsockopt_service(sk, len,
586                                                (__be32 __user *)optval, optlen);
587         case 128 ... 191:
588                 return ccid_hc_rx_getsockopt(dp->dccps_hc_rx_ccid, sk, optname,
589                                              len, (u32 __user *)optval, optlen);
590         case 192 ... 255:
591                 return ccid_hc_tx_getsockopt(dp->dccps_hc_tx_ccid, sk, optname,
592                                              len, (u32 __user *)optval, optlen);
593         default:
594                 return -ENOPROTOOPT;
595         }
596
597         if (put_user(len, optlen) || copy_to_user(optval, &val, len))
598                 return -EFAULT;
599
600         return 0;
601 }
602
603 int dccp_getsockopt(struct sock *sk, int level, int optname,
604                     char __user *optval, int __user *optlen)
605 {
606         if (level != SOL_DCCP)
607                 return inet_csk(sk)->icsk_af_ops->getsockopt(sk, level,
608                                                              optname, optval,
609                                                              optlen);
610         return do_dccp_getsockopt(sk, level, optname, optval, optlen);
611 }
612 EXPORT_SYMBOL_GPL(dccp_getsockopt);
613
614 #ifdef CONFIG_COMPAT
615 int compat_dccp_getsockopt(struct sock *sk, int level, int optname,
616                     char __user *optval, int __user *optlen)
617 {
618         if (level != SOL_DCCP)
619                 return inet_csk_compat_getsockopt(sk, level, optname,
620                                                   optval, optlen);
621         return do_dccp_getsockopt(sk, level, optname, optval, optlen);
622 }
623 EXPORT_SYMBOL_GPL(compat_dccp_getsockopt);
624 #endif
625
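    /*
     * DCCP preserves message boundaries: each sendmsg() call becomes at
     * most one packet, and payloads larger than the cached MSS are
     * rejected with -EMSGSIZE instead of being segmented.
     */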
626 int dccp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
627                  size_t len)
628 {
629         const struct dccp_sock *dp = dccp_sk(sk);
630         const int flags = msg->msg_flags;
631         const int noblock = flags & MSG_DONTWAIT;
632         struct sk_buff *skb;
633         int rc, size;
634         long timeo;
635
636         if (len > dp->dccps_mss_cache)
637                 return -EMSGSIZE;
638
639         lock_sock(sk);
640         timeo = sock_sndtimeo(sk, noblock);
641
642         /*
643          * We have to use sk_stream_wait_connect here to set sk_write_pending,
644          * so that the trick in dccp_rcv_request_sent_state_process works.
645          */
646         /* Wait for a connection to finish. */
647         if ((1 << sk->sk_state) & ~(DCCPF_OPEN | DCCPF_PARTOPEN | DCCPF_CLOSING))
648                 if ((rc = sk_stream_wait_connect(sk, &timeo)) != 0)
649                         goto out_release;
650
651         size = sk->sk_prot->max_header + len;
652         release_sock(sk);
653         skb = sock_alloc_send_skb(sk, size, noblock, &rc);
654         lock_sock(sk);
655         if (skb == NULL)
656                 goto out_release;
657
658         skb_reserve(skb, sk->sk_prot->max_header);
659         rc = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
660         if (rc != 0)
661                 goto out_discard;
662
663         rc = dccp_write_xmit(sk, skb, &timeo);
664         /*
665          * XXX we don't use sk_write_queue, so just discard the packet.
666          *     The current plan, however, is to _use_ sk_write_queue with
667          *     an algorithm similar to tcp_sendmsg, where the main difference
668          *     is that in DCCP we have to respect packet boundaries, so
669          *     no coalescing of skbs.
670          *
671          *     This bug was _quickly_ found & fixed by just looking at an OSTRA
672          *     generated callgraph 8) -acme
673          */
674 out_release:
675         release_sock(sk);
676         return rc ? : len;
677 out_discard:
678         kfree_skb(skb);
679         goto out_release;
680 }
681
682 EXPORT_SYMBOL_GPL(dccp_sendmsg);
683
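    /*
     * Reads consume at most one queued packet per call: non-data packets
     * are skipped (RESET/CLOSE end the read with a return value of 0),
     * and when the user buffer is shorter than the packet the remainder
     * is dropped and MSG_TRUNC is set.
     */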
684 int dccp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
685                  size_t len, int nonblock, int flags, int *addr_len)
686 {
687         const struct dccp_hdr *dh;
688         long timeo;
689
690         lock_sock(sk);
691
692         if (sk->sk_state == DCCP_LISTEN) {
693                 len = -ENOTCONN;
694                 goto out;
695         }
696
697         timeo = sock_rcvtimeo(sk, nonblock);
698
699         do {
700                 struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);
701
702                 if (skb == NULL)
703                         goto verify_sock_status;
704
705                 dh = dccp_hdr(skb);
706
707                 if (dh->dccph_type == DCCP_PKT_DATA ||
708                     dh->dccph_type == DCCP_PKT_DATAACK)
709                         goto found_ok_skb;
710
711                 if (dh->dccph_type == DCCP_PKT_RESET ||
712                     dh->dccph_type == DCCP_PKT_CLOSE) {
713                         dccp_pr_debug("found fin ok!\n");
714                         len = 0;
715                         goto found_fin_ok;
716                 }
717                 dccp_pr_debug("packet_type=%s\n",
718                               dccp_packet_name(dh->dccph_type));
719                 sk_eat_skb(sk, skb);
720 verify_sock_status:
721                 if (sock_flag(sk, SOCK_DONE)) {
722                         len = 0;
723                         break;
724                 }
725
726                 if (sk->sk_err) {
727                         len = sock_error(sk);
728                         break;
729                 }
730
731                 if (sk->sk_shutdown & RCV_SHUTDOWN) {
732                         len = 0;
733                         break;
734                 }
735
736                 if (sk->sk_state == DCCP_CLOSED) {
737                         if (!sock_flag(sk, SOCK_DONE)) {
738                                 /* This occurs when user tries to read
739                                 /* This occurs when the user tries to read
740                                  * from a socket that was never connected.
741                                 len = -ENOTCONN;
742                                 break;
743                         }
744                         len = 0;
745                         break;
746                 }
747
748                 if (!timeo) {
749                         len = -EAGAIN;
750                         break;
751                 }
752
753                 if (signal_pending(current)) {
754                         len = sock_intr_errno(timeo);
755                         break;
756                 }
757
758                 sk_wait_data(sk, &timeo);
759                 continue;
760         found_ok_skb:
761                 if (len > skb->len)
762                         len = skb->len;
763                 else if (len < skb->len)
764                         msg->msg_flags |= MSG_TRUNC;
765
766                 if (skb_copy_datagram_iovec(skb, 0, msg->msg_iov, len)) {
767                         /* Exception. Bailout! */
768                         len = -EFAULT;
769                         break;
770                 }
771         found_fin_ok:
772                 if (!(flags & MSG_PEEK))
773                         sk_eat_skb(sk, skb);
774                 break;
775         } while (1);
776 out:
777         release_sock(sk);
778         return len;
779 }
780
781 EXPORT_SYMBOL_GPL(dccp_recvmsg);
782
783 int inet_dccp_listen(struct socket *sock, int backlog)
784 {
785         struct sock *sk = sock->sk;
786         unsigned char old_state;
787         int err;
788
789         lock_sock(sk);
790
791         err = -EINVAL;
792         if (sock->state != SS_UNCONNECTED || sock->type != SOCK_DCCP)
793                 goto out;
794
795         old_state = sk->sk_state;
796         if (!((1 << old_state) & (DCCPF_CLOSED | DCCPF_LISTEN)))
797                 goto out;
798
799         /* Really, if the socket is already in listen state
800          * we can only allow the backlog to be adjusted.
801          */
802         if (old_state != DCCP_LISTEN) {
803                 /*
804                  * FIXME: here it probably should be sk->sk_prot->listen_start
805                  * see tcp_listen_start
806                  */
807                 err = dccp_listen_start(sk);
808                 if (err)
809                         goto out;
810         }
811         sk->sk_max_ack_backlog = backlog;
812         err = 0;
813
814 out:
815         release_sock(sk);
816         return err;
817 }
818
819 EXPORT_SYMBOL_GPL(inet_dccp_listen);
820
821 static const unsigned char dccp_new_state[] = {
822         /* current state:   new state:      action:     */
823         [0]               = DCCP_CLOSED,
824         [DCCP_OPEN]       = DCCP_CLOSING | DCCP_ACTION_FIN,
825         [DCCP_REQUESTING] = DCCP_CLOSED,
826         [DCCP_PARTOPEN]   = DCCP_CLOSING | DCCP_ACTION_FIN,
827         [DCCP_LISTEN]     = DCCP_CLOSED,
828         [DCCP_RESPOND]    = DCCP_CLOSED,
829         [DCCP_CLOSING]    = DCCP_CLOSED,
830         [DCCP_TIME_WAIT]  = DCCP_CLOSED,
831         [DCCP_CLOSED]     = DCCP_CLOSED,
832 };
833
834 static int dccp_close_state(struct sock *sk)
835 {
836         const int next = dccp_new_state[sk->sk_state];
837         const int ns = next & DCCP_STATE_MASK;
838
839         if (ns != sk->sk_state)
840                 dccp_set_state(sk, ns);
841
842         return next & DCCP_ACTION_FIN;
843 }
844
845 void dccp_close(struct sock *sk, long timeout)
846 {
847         struct sk_buff *skb;
848
849         lock_sock(sk);
850
851         sk->sk_shutdown = SHUTDOWN_MASK;
852
853         if (sk->sk_state == DCCP_LISTEN) {
854                 dccp_set_state(sk, DCCP_CLOSED);
855
856                 /* Special case. */
857                 inet_csk_listen_stop(sk);
858
859                 goto adjudge_to_death;
860         }
861
862         /*
863          * We need to flush the recv. buffs.  We do this only on the
864          * descriptor close, not protocol-sourced closes, because the
865          * reader process may not have drained the data yet!
866          */
867         /* FIXME: check for unread data */
868         while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
869                 __kfree_skb(skb);
870         }
871
872         if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) {
873                 /* Check zero linger _after_ checking for unread data. */
874                 sk->sk_prot->disconnect(sk, 0);
875         } else if (dccp_close_state(sk)) {
876                 dccp_send_close(sk, 1);
877         }
878
879         sk_stream_wait_close(sk, timeout);
880
881 adjudge_to_death:
882         /*
883          * It is the last release_sock in its life. It will remove backlog.
884          */
885         release_sock(sk);
886         /*
887          * Now socket is owned by kernel and we acquire BH lock
888          * to finish close. No need to check for user refs.
889          */
890         local_bh_disable();
891         bh_lock_sock(sk);
892         BUG_TRAP(!sock_owned_by_user(sk));
893
894         sock_hold(sk);
895         sock_orphan(sk);
896
897         /*
898          * The last release_sock may have processed the CLOSE or RESET
899          * packet, moving the sock to the CLOSED state; if not, we have to fire
900          * the CLOSE/CLOSEREQ retransmission timer, see "8.3. Termination"
901          * in draft-ietf-dccp-spec-11. -acme
902          */
903         if (sk->sk_state == DCCP_CLOSING) {
904                 /* FIXME: should start at 2 * RTT */
905                 /* Timer for repeating the CLOSE/CLOSEREQ until an answer. */
906                 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
907                                           inet_csk(sk)->icsk_rto,
908                                           DCCP_RTO_MAX);
909 #if 0
910                 /* Yeah, we should use sk->sk_prot->orphan_count, etc */
911                 dccp_set_state(sk, DCCP_CLOSED);
912 #endif
913         }
914
915         atomic_inc(sk->sk_prot->orphan_count);
916         if (sk->sk_state == DCCP_CLOSED)
917                 inet_csk_destroy_sock(sk);
918
919         /* Otherwise, socket is reprieved until protocol close. */
920
921         bh_unlock_sock(sk);
922         local_bh_enable();
923         sock_put(sk);
924 }
925
926 EXPORT_SYMBOL_GPL(dccp_close);
927
928 void dccp_shutdown(struct sock *sk, int how)
929 {
930         dccp_pr_debug("entry\n");
931 }
932
933 EXPORT_SYMBOL_GPL(dccp_shutdown);
934
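    /*
     * dccp_statistics[] holds two per-cpu copies of the MIB counters, in
     * the usual DEFINE_SNMP_STAT arrangement (one copy bumped from softirq
     * context, the other from process context).
     */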
935 static int __init dccp_mib_init(void)
936 {
937         int rc = -ENOMEM;
938
939         dccp_statistics[0] = alloc_percpu(struct dccp_mib);
940         if (dccp_statistics[0] == NULL)
941                 goto out;
942
943         dccp_statistics[1] = alloc_percpu(struct dccp_mib);
944         if (dccp_statistics[1] == NULL)
945                 goto out_free_one;
946
947         rc = 0;
948 out:
949         return rc;
950 out_free_one:
951         free_percpu(dccp_statistics[0]);
952         dccp_statistics[0] = NULL;
953         goto out;
954
955 }
956
957 static void dccp_mib_exit(void)
958 {
959         free_percpu(dccp_statistics[0]);
960         free_percpu(dccp_statistics[1]);
961         dccp_statistics[0] = dccp_statistics[1] = NULL;
962 }
963
964 static int thash_entries;
965 module_param(thash_entries, int, 0444);
966 MODULE_PARM_DESC(thash_entries, "Number of ehash buckets");
967
968 #ifdef CONFIG_IP_DCCP_DEBUG
969 int dccp_debug;
970 module_param(dccp_debug, int, 0444);
971 MODULE_PARM_DESC(dccp_debug, "Enable debug messages");
972
973 EXPORT_SYMBOL_GPL(dccp_debug);
974 #endif
975
976 static int __init dccp_init(void)
977 {
978         unsigned long goal;
979         int ehash_order, bhash_order, i;
980         int rc = -ENOBUFS;
981
982         dccp_hashinfo.bind_bucket_cachep =
983                 kmem_cache_create("dccp_bind_bucket",
984                                   sizeof(struct inet_bind_bucket), 0,
985                                   SLAB_HWCACHE_ALIGN, NULL, NULL);
986         if (!dccp_hashinfo.bind_bucket_cachep)
987                 goto out;
988
989         /*
990          * Size and allocate the main established and bind bucket
991          * hash tables.
992          *
993          * The methodology is similar to that of the buffer cache.
994          */
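            /*
             * The goal below is measured in pages of ehash: roughly one
             * table page per 2MB of memory on machines with at least 128K
             * pages (512MB with 4KB pages), one per 8MB otherwise, unless
             * thash_entries overrides it.
             */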
995         if (num_physpages >= (128 * 1024))
996                 goal = num_physpages >> (21 - PAGE_SHIFT);
997         else
998                 goal = num_physpages >> (23 - PAGE_SHIFT);
999
1000         if (thash_entries)
1001                 goal = (thash_entries *
1002                         sizeof(struct inet_ehash_bucket)) >> PAGE_SHIFT;
1003         for (ehash_order = 0; (1UL << ehash_order) < goal; ehash_order++)
1004                 ;
1005         do {
1006                 dccp_hashinfo.ehash_size = (1UL << ehash_order) * PAGE_SIZE /
1007                                         sizeof(struct inet_ehash_bucket);
1008                 dccp_hashinfo.ehash_size >>= 1;
1009                 while (dccp_hashinfo.ehash_size &
1010                        (dccp_hashinfo.ehash_size - 1))
1011                         dccp_hashinfo.ehash_size--;
1012                 dccp_hashinfo.ehash = (struct inet_ehash_bucket *)
1013                         __get_free_pages(GFP_ATOMIC, ehash_order);
1014         } while (!dccp_hashinfo.ehash && --ehash_order > 0);
1015
1016         if (!dccp_hashinfo.ehash) {
1017                 printk(KERN_CRIT "Failed to allocate DCCP "
1018                                  "established hash table\n");
1019                 goto out_free_bind_bucket_cachep;
1020         }
1021
1022         for (i = 0; i < (dccp_hashinfo.ehash_size << 1); i++) {
1023                 rwlock_init(&dccp_hashinfo.ehash[i].lock);
1024                 INIT_HLIST_HEAD(&dccp_hashinfo.ehash[i].chain);
1025         }
1026
1027         bhash_order = ehash_order;
1028
1029         do {
1030                 dccp_hashinfo.bhash_size = (1UL << bhash_order) * PAGE_SIZE /
1031                                         sizeof(struct inet_bind_hashbucket);
1032                 if ((dccp_hashinfo.bhash_size > (64 * 1024)) &&
1033                     bhash_order > 0)
1034                         continue;
1035                 dccp_hashinfo.bhash = (struct inet_bind_hashbucket *)
1036                         __get_free_pages(GFP_ATOMIC, bhash_order);
1037         } while (!dccp_hashinfo.bhash && --bhash_order >= 0);
1038
1039         if (!dccp_hashinfo.bhash) {
1040                 printk(KERN_CRIT "Failed to allocate DCCP bind hash table\n");
1041                 goto out_free_dccp_ehash;
1042         }
1043
1044         for (i = 0; i < dccp_hashinfo.bhash_size; i++) {
1045                 spin_lock_init(&dccp_hashinfo.bhash[i].lock);
1046                 INIT_HLIST_HEAD(&dccp_hashinfo.bhash[i].chain);
1047         }
1048
1049         rc = dccp_mib_init();
1050         if (rc)
1051                 goto out_free_dccp_bhash;
1052
1053         rc = dccp_ackvec_init();
1054         if (rc)
1055                 goto out_free_dccp_mib;
1056
1057         rc = dccp_sysctl_init();
1058         if (rc)
1059                 goto out_ackvec_exit;
1060 out:
1061         return rc;
1062 out_ackvec_exit:
1063         dccp_ackvec_exit();
1064 out_free_dccp_mib:
1065         dccp_mib_exit();
1066 out_free_dccp_bhash:
1067         free_pages((unsigned long)dccp_hashinfo.bhash, bhash_order);
1068         dccp_hashinfo.bhash = NULL;
1069 out_free_dccp_ehash:
1070         free_pages((unsigned long)dccp_hashinfo.ehash, ehash_order);
1071         dccp_hashinfo.ehash = NULL;
1072 out_free_bind_bucket_cachep:
1073         kmem_cache_destroy(dccp_hashinfo.bind_bucket_cachep);
1074         dccp_hashinfo.bind_bucket_cachep = NULL;
1075         goto out;
1076 }
1077
1078 static void __exit dccp_fini(void)
1079 {
1080         dccp_mib_exit();
1081         free_pages((unsigned long)dccp_hashinfo.bhash,
1082                    get_order(dccp_hashinfo.bhash_size *
1083                              sizeof(struct inet_bind_hashbucket)));
1084         free_pages((unsigned long)dccp_hashinfo.ehash,
1085                    get_order(dccp_hashinfo.ehash_size *
1086                              sizeof(struct inet_ehash_bucket)));
1087         kmem_cache_destroy(dccp_hashinfo.bind_bucket_cachep);
1088         dccp_ackvec_exit();
1089         dccp_sysctl_exit();
1090 }
1091
1092 module_init(dccp_init);
1093 module_exit(dccp_fini);
1094
1095 MODULE_LICENSE("GPL");
1096 MODULE_AUTHOR("Arnaldo Carvalho de Melo <acme@conectiva.com.br>");
1097 MODULE_DESCRIPTION("DCCP - Datagram Congestion Controlled Protocol");