l2tp: purge session reorder queue on delete
net/l2tp/l2tp_core.c
1 /*
2  * L2TP core.
3  *
4  * Copyright (c) 2008,2009,2010 Katalix Systems Ltd
5  *
6  * This file contains some code of the original L2TPv2 pppol2tp
7  * driver, which has the following copyright:
8  *
9  * Authors:     Martijn van Oosterhout <kleptog@svana.org>
10  *              James Chapman (jchapman@katalix.com)
11  * Contributors:
12  *              Michal Ostrowski <mostrows@speakeasy.net>
13  *              Arnaldo Carvalho de Melo <acme@xconectiva.com.br>
14  *              David S. Miller (davem@redhat.com)
15  *
16  * This program is free software; you can redistribute it and/or modify
17  * it under the terms of the GNU General Public License version 2 as
18  * published by the Free Software Foundation.
19  */
20
21 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
22
23 #include <linux/module.h>
24 #include <linux/string.h>
25 #include <linux/list.h>
26 #include <linux/rculist.h>
27 #include <linux/uaccess.h>
28
29 #include <linux/kernel.h>
30 #include <linux/spinlock.h>
31 #include <linux/kthread.h>
32 #include <linux/sched.h>
33 #include <linux/slab.h>
34 #include <linux/errno.h>
35 #include <linux/jiffies.h>
36
37 #include <linux/netdevice.h>
38 #include <linux/net.h>
39 #include <linux/inetdevice.h>
40 #include <linux/skbuff.h>
41 #include <linux/init.h>
42 #include <linux/in.h>
43 #include <linux/ip.h>
44 #include <linux/udp.h>
45 #include <linux/l2tp.h>
46 #include <linux/hash.h>
47 #include <linux/sort.h>
48 #include <linux/file.h>
49 #include <linux/nsproxy.h>
50 #include <net/net_namespace.h>
51 #include <net/netns/generic.h>
52 #include <net/dst.h>
53 #include <net/ip.h>
54 #include <net/udp.h>
55 #include <net/inet_common.h>
56 #include <net/xfrm.h>
57 #include <net/protocol.h>
58 #include <net/inet6_connection_sock.h>
59 #include <net/inet_ecn.h>
60 #include <net/ip6_route.h>
61 #include <net/ip6_checksum.h>
62
63 #include <asm/byteorder.h>
64 #include <linux/atomic.h>
65
66 #include "l2tp_core.h"
67
68 #define L2TP_DRV_VERSION        "V2.0"
69
70 /* L2TP header constants */
71 #define L2TP_HDRFLAG_T     0x8000
72 #define L2TP_HDRFLAG_L     0x4000
73 #define L2TP_HDRFLAG_S     0x0800
74 #define L2TP_HDRFLAG_O     0x0200
75 #define L2TP_HDRFLAG_P     0x0100
76
77 #define L2TP_HDR_VER_MASK  0x000F
78 #define L2TP_HDR_VER_2     0x0002
79 #define L2TP_HDR_VER_3     0x0003
80
81 /* L2TPv3 default L2-specific sublayer */
82 #define L2TP_SLFLAG_S      0x40000000
83 #define L2TP_SL_SEQ_MASK   0x00ffffff
84
85 #define L2TP_HDR_SIZE_SEQ               10
86 #define L2TP_HDR_SIZE_NOSEQ             6
87
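/* Illustrative only: the receive path below (l2tp_udp_recv_core()) reads the
 * first 16 bits of the header and tests these constants, roughly as
 *
 *	hdrflags = ntohs(*(__be16 *) ptr);
 *	version  = hdrflags & L2TP_HDR_VER_MASK;
 *	if (hdrflags & L2TP_HDRFLAG_T)
 *		goto error;	(control frames are passed up to userspace)
 *
 * L2TP_HDR_SIZE_SEQ/NOSEQ are the L2TPv2 header sizes with and without the
 * optional Ns/Nr sequence number fields.
 */
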
88 /* Default trace flags */
89 #define L2TP_DEFAULT_DEBUG_FLAGS        0
90
91 /* Private data stored for received packets in the skb.
92  */
93 struct l2tp_skb_cb {
94         u32                     ns;
95         u16                     has_seq;
96         u16                     length;
97         unsigned long           expires;
98 };
99
100 #define L2TP_SKB_CB(skb)        ((struct l2tp_skb_cb *) &skb->cb[sizeof(struct inet_skb_parm)])
101
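/* Note that this per-packet L2TP state lives in skb->cb[], immediately after
 * the struct inet_skb_parm area used by the IP layer itself.  The receive
 * path reads and writes it through the accessor, e.g. L2TP_SKB_CB(skb)->ns
 * and L2TP_SKB_CB(skb)->expires.
 */
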
102 static atomic_t l2tp_tunnel_count;
103 static atomic_t l2tp_session_count;
104 static struct workqueue_struct *l2tp_wq;
105
106 /* per-net private data for this module */
107 static unsigned int l2tp_net_id;
108 struct l2tp_net {
109         struct list_head l2tp_tunnel_list;
110         spinlock_t l2tp_tunnel_list_lock;
111         struct hlist_head l2tp_session_hlist[L2TP_HASH_SIZE_2];
112         spinlock_t l2tp_session_hlist_lock;
113 };
114
115 static void l2tp_session_set_header_len(struct l2tp_session *session, int version);
116 static void l2tp_tunnel_free(struct l2tp_tunnel *tunnel);
117
118 static inline struct l2tp_net *l2tp_pernet(struct net *net)
119 {
120         BUG_ON(!net);
121
122         return net_generic(net, l2tp_net_id);
123 }
124
125 /* Tunnel reference counts. Incremented per session that is added to
126  * the tunnel.
127  */
128 static inline void l2tp_tunnel_inc_refcount_1(struct l2tp_tunnel *tunnel)
129 {
130         atomic_inc(&tunnel->ref_count);
131 }
132
133 static inline void l2tp_tunnel_dec_refcount_1(struct l2tp_tunnel *tunnel)
134 {
135         if (atomic_dec_and_test(&tunnel->ref_count))
136                 l2tp_tunnel_free(tunnel);
137 }
138 #ifdef L2TP_REFCNT_DEBUG
139 #define l2tp_tunnel_inc_refcount(_t)                                    \
140 do {                                                                    \
141         pr_debug("l2tp_tunnel_inc_refcount: %s:%d %s: cnt=%d\n",        \
142                  __func__, __LINE__, (_t)->name,                        \
143                  atomic_read(&_t->ref_count));                          \
144         l2tp_tunnel_inc_refcount_1(_t);                                 \
145 } while (0)
146 #define l2tp_tunnel_dec_refcount(_t)                                    \
147 do {                                                                    \
148         pr_debug("l2tp_tunnel_dec_refcount: %s:%d %s: cnt=%d\n",        \
149                  __func__, __LINE__, (_t)->name,                        \
150                  atomic_read(&_t->ref_count));                          \
151         l2tp_tunnel_dec_refcount_1(_t);                                 \
152 } while (0)
153 #else
154 #define l2tp_tunnel_inc_refcount(t) l2tp_tunnel_inc_refcount_1(t)
155 #define l2tp_tunnel_dec_refcount(t) l2tp_tunnel_dec_refcount_1(t)
156 #endif
157
158 /* Session hash global list for L2TPv3.
159  * The session_id SHOULD be random according to RFC3931, but several
160  * L2TP implementations use incrementing session_ids.  So we do a real
161  * hash on the session_id, rather than a simple bitmask.
162  */
163 static inline struct hlist_head *
164 l2tp_session_id_hash_2(struct l2tp_net *pn, u32 session_id)
165 {
166         return &pn->l2tp_session_hlist[hash_32(session_id, L2TP_HASH_BITS_2)];
167
168 }
169
170 /* Lookup the tunnel socket, possibly involving the fs code if the socket is
171  * owned by userspace.  A struct sock returned from this function must be
172  * released using l2tp_tunnel_sock_put once you're done with it.
173  */
174 struct sock *l2tp_tunnel_sock_lookup(struct l2tp_tunnel *tunnel)
175 {
176         int err = 0;
177         struct socket *sock = NULL;
178         struct sock *sk = NULL;
179
180         if (!tunnel)
181                 goto out;
182
183         if (tunnel->fd >= 0) {
184                 /* Socket is owned by userspace, who might be in the process
185                  * of closing it.  Look the socket up using the fd to ensure
186                  * consistency.
187                  */
188                 sock = sockfd_lookup(tunnel->fd, &err);
189                 if (sock)
190                         sk = sock->sk;
191         } else {
192                 /* Socket is owned by kernelspace */
193                 sk = tunnel->sock;
194                 sock_hold(sk);
195         }
196
197 out:
198         return sk;
199 }
200 EXPORT_SYMBOL_GPL(l2tp_tunnel_sock_lookup);
201
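/* Typical usage, as in l2tp_tunnel_del_work() below: look the socket up, use
 * it, then drop the reference:
 *
 *	sk = l2tp_tunnel_sock_lookup(tunnel);
 *	if (sk) {
 *		...
 *		l2tp_tunnel_sock_put(sk);
 *	}
 */
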
202 /* Drop a reference to a tunnel socket obtained via l2tp_tunnel_sock_lookup() */
203 void l2tp_tunnel_sock_put(struct sock *sk)
204 {
205         struct l2tp_tunnel *tunnel = l2tp_sock_to_tunnel(sk);
206         if (tunnel) {
207                 if (tunnel->fd >= 0) {
208                         /* Socket is owned by userspace */
209                         sockfd_put(sk->sk_socket);
210                 }
211                 sock_put(sk);
212         }
213         sock_put(sk);
214 }
215 EXPORT_SYMBOL_GPL(l2tp_tunnel_sock_put);
216
217 /* Lookup a session by id in the global session list
218  */
219 static struct l2tp_session *l2tp_session_find_2(struct net *net, u32 session_id)
220 {
221         struct l2tp_net *pn = l2tp_pernet(net);
222         struct hlist_head *session_list =
223                 l2tp_session_id_hash_2(pn, session_id);
224         struct l2tp_session *session;
225
226         rcu_read_lock_bh();
227         hlist_for_each_entry_rcu(session, session_list, global_hlist) {
228                 if (session->session_id == session_id) {
229                         rcu_read_unlock_bh();
230                         return session;
231                 }
232         }
233         rcu_read_unlock_bh();
234
235         return NULL;
236 }
237
238 /* Session hash list.
239  * The session_id SHOULD be random according to RFC2661, but several
240  * L2TP implementations (Cisco and Microsoft) use incrementing
241  * session_ids.  So we do a real hash on the session_id, rather than a
242  * simple bitmask.
243  */
244 static inline struct hlist_head *
245 l2tp_session_id_hash(struct l2tp_tunnel *tunnel, u32 session_id)
246 {
247         return &tunnel->session_hlist[hash_32(session_id, L2TP_HASH_BITS)];
248 }
249
250 /* Lookup a session by id
251  */
252 struct l2tp_session *l2tp_session_find(struct net *net, struct l2tp_tunnel *tunnel, u32 session_id)
253 {
254         struct hlist_head *session_list;
255         struct l2tp_session *session;
256
257         /* In L2TPv3, session_ids are unique over all tunnels and we
258          * sometimes need to look them up before we know the
259          * tunnel.
260          */
261         if (tunnel == NULL)
262                 return l2tp_session_find_2(net, session_id);
263
264         session_list = l2tp_session_id_hash(tunnel, session_id);
265         read_lock_bh(&tunnel->hlist_lock);
266         hlist_for_each_entry(session, session_list, hlist) {
267                 if (session->session_id == session_id) {
268                         read_unlock_bh(&tunnel->hlist_lock);
269                         return session;
270                 }
271         }
272         read_unlock_bh(&tunnel->hlist_lock);
273
274         return NULL;
275 }
276 EXPORT_SYMBOL_GPL(l2tp_session_find);
277
278 struct l2tp_session *l2tp_session_find_nth(struct l2tp_tunnel *tunnel, int nth)
279 {
280         int hash;
281         struct l2tp_session *session;
282         int count = 0;
283
284         read_lock_bh(&tunnel->hlist_lock);
285         for (hash = 0; hash < L2TP_HASH_SIZE; hash++) {
286                 hlist_for_each_entry(session, &tunnel->session_hlist[hash], hlist) {
287                         if (++count > nth) {
288                                 read_unlock_bh(&tunnel->hlist_lock);
289                                 return session;
290                         }
291                 }
292         }
293
294         read_unlock_bh(&tunnel->hlist_lock);
295
296         return NULL;
297 }
298 EXPORT_SYMBOL_GPL(l2tp_session_find_nth);
299
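/* Illustrative only (callers live outside this file): dump code such as the
 * netlink and procfs interfaces typically walk a tunnel's sessions by index:
 *
 *	for (i = 0; (session = l2tp_session_find_nth(tunnel, i)) != NULL; i++)
 *		...
 */
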
300 /* Lookup a session by interface name.
301  * This is very inefficient but is only used by management interfaces.
302  */
303 struct l2tp_session *l2tp_session_find_by_ifname(struct net *net, char *ifname)
304 {
305         struct l2tp_net *pn = l2tp_pernet(net);
306         int hash;
307         struct l2tp_session *session;
308
309         rcu_read_lock_bh();
310         for (hash = 0; hash < L2TP_HASH_SIZE_2; hash++) {
311                 hlist_for_each_entry_rcu(session, &pn->l2tp_session_hlist[hash], global_hlist) {
312                         if (!strcmp(session->ifname, ifname)) {
313                                 rcu_read_unlock_bh();
314                                 return session;
315                         }
316                 }
317         }
318
319         rcu_read_unlock_bh();
320
321         return NULL;
322 }
323 EXPORT_SYMBOL_GPL(l2tp_session_find_by_ifname);
324
325 /* Lookup a tunnel by id
326  */
327 struct l2tp_tunnel *l2tp_tunnel_find(struct net *net, u32 tunnel_id)
328 {
329         struct l2tp_tunnel *tunnel;
330         struct l2tp_net *pn = l2tp_pernet(net);
331
332         rcu_read_lock_bh();
333         list_for_each_entry_rcu(tunnel, &pn->l2tp_tunnel_list, list) {
334                 if (tunnel->tunnel_id == tunnel_id) {
335                         rcu_read_unlock_bh();
336                         return tunnel;
337                 }
338         }
339         rcu_read_unlock_bh();
340
341         return NULL;
342 }
343 EXPORT_SYMBOL_GPL(l2tp_tunnel_find);
344
345 struct l2tp_tunnel *l2tp_tunnel_find_nth(struct net *net, int nth)
346 {
347         struct l2tp_net *pn = l2tp_pernet(net);
348         struct l2tp_tunnel *tunnel;
349         int count = 0;
350
351         rcu_read_lock_bh();
352         list_for_each_entry_rcu(tunnel, &pn->l2tp_tunnel_list, list) {
353                 if (++count > nth) {
354                         rcu_read_unlock_bh();
355                         return tunnel;
356                 }
357         }
358
359         rcu_read_unlock_bh();
360
361         return NULL;
362 }
363 EXPORT_SYMBOL_GPL(l2tp_tunnel_find_nth);
364
365 /*****************************************************************************
366  * Receive data handling
367  *****************************************************************************/
368
369 /* Queue a skb in order. We come here only if the skb has an L2TP sequence
370  * number.
371  */
372 static void l2tp_recv_queue_skb(struct l2tp_session *session, struct sk_buff *skb)
373 {
374         struct sk_buff *skbp;
375         struct sk_buff *tmp;
376         u32 ns = L2TP_SKB_CB(skb)->ns;
377         struct l2tp_stats *sstats;
378
379         spin_lock_bh(&session->reorder_q.lock);
380         sstats = &session->stats;
381         skb_queue_walk_safe(&session->reorder_q, skbp, tmp) {
382                 if (L2TP_SKB_CB(skbp)->ns > ns) {
383                         __skb_queue_before(&session->reorder_q, skbp, skb);
384                         l2tp_dbg(session, L2TP_MSG_SEQ,
385                                  "%s: pkt %u, inserted before %u, reorder_q len=%d\n",
386                                  session->name, ns, L2TP_SKB_CB(skbp)->ns,
387                                  skb_queue_len(&session->reorder_q));
388                         u64_stats_update_begin(&sstats->syncp);
389                         sstats->rx_oos_packets++;
390                         u64_stats_update_end(&sstats->syncp);
391                         goto out;
392                 }
393         }
394
395         __skb_queue_tail(&session->reorder_q, skb);
396
397 out:
398         spin_unlock_bh(&session->reorder_q.lock);
399 }
400
401 /* Dequeue a single skb.
402  */
403 static void l2tp_recv_dequeue_skb(struct l2tp_session *session, struct sk_buff *skb)
404 {
405         struct l2tp_tunnel *tunnel = session->tunnel;
406         int length = L2TP_SKB_CB(skb)->length;
407         struct l2tp_stats *tstats, *sstats;
408
409         /* We're about to requeue the skb, so return resources
410          * to its current owner (a socket receive buffer).
411          */
412         skb_orphan(skb);
413
414         tstats = &tunnel->stats;
415         u64_stats_update_begin(&tstats->syncp);
416         sstats = &session->stats;
417         u64_stats_update_begin(&sstats->syncp);
418         tstats->rx_packets++;
419         tstats->rx_bytes += length;
420         sstats->rx_packets++;
421         sstats->rx_bytes += length;
422         u64_stats_update_end(&tstats->syncp);
423         u64_stats_update_end(&sstats->syncp);
424
425         if (L2TP_SKB_CB(skb)->has_seq) {
426                 /* Bump our Nr */
427                 session->nr++;
428                 if (tunnel->version == L2TP_HDR_VER_2)
429                         session->nr &= 0xffff;
430                 else
431                         session->nr &= 0xffffff;
432
433                 l2tp_dbg(session, L2TP_MSG_SEQ, "%s: updated nr to %u\n",
434                          session->name, session->nr);
435         }
436
437         /* call private receive handler */
438         if (session->recv_skb != NULL)
439                 (*session->recv_skb)(session, skb, L2TP_SKB_CB(skb)->length);
440         else
441                 kfree_skb(skb);
442
443         if (session->deref)
444                 (*session->deref)(session);
445 }
446
447 /* Dequeue skbs from the session's reorder_q, subject to packet order.
448  * Skbs that have been in the queue for too long are simply discarded.
449  */
450 static void l2tp_recv_dequeue(struct l2tp_session *session)
451 {
452         struct sk_buff *skb;
453         struct sk_buff *tmp;
454         struct l2tp_stats *sstats;
455
456         /* If the pkt at the head of the queue has the nr that we
457          * expect to send up next, dequeue it and any other
458          * in-sequence packets behind it.
459          */
460 start:
461         spin_lock_bh(&session->reorder_q.lock);
462         sstats = &session->stats;
463         skb_queue_walk_safe(&session->reorder_q, skb, tmp) {
464                 if (time_after(jiffies, L2TP_SKB_CB(skb)->expires)) {
465                         u64_stats_update_begin(&sstats->syncp);
466                         sstats->rx_seq_discards++;
467                         sstats->rx_errors++;
468                         u64_stats_update_end(&sstats->syncp);
469                         l2tp_dbg(session, L2TP_MSG_SEQ,
470                                  "%s: oos pkt %u len %d discarded (too old), waiting for %u, reorder_q_len=%d\n",
471                                  session->name, L2TP_SKB_CB(skb)->ns,
472                                  L2TP_SKB_CB(skb)->length, session->nr,
473                                  skb_queue_len(&session->reorder_q));
474                         session->reorder_skip = 1;
475                         __skb_unlink(skb, &session->reorder_q);
476                         kfree_skb(skb);
477                         if (session->deref)
478                                 (*session->deref)(session);
479                         continue;
480                 }
481
482                 if (L2TP_SKB_CB(skb)->has_seq) {
483                         if (session->reorder_skip) {
484                                 l2tp_dbg(session, L2TP_MSG_SEQ,
485                                          "%s: advancing nr to next pkt: %u -> %u",
486                                          session->name, session->nr,
487                                          L2TP_SKB_CB(skb)->ns);
488                                 session->reorder_skip = 0;
489                                 session->nr = L2TP_SKB_CB(skb)->ns;
490                         }
491                         if (L2TP_SKB_CB(skb)->ns != session->nr) {
492                                 l2tp_dbg(session, L2TP_MSG_SEQ,
493                                          "%s: holding oos pkt %u len %d, waiting for %u, reorder_q_len=%d\n",
494                                          session->name, L2TP_SKB_CB(skb)->ns,
495                                          L2TP_SKB_CB(skb)->length, session->nr,
496                                          skb_queue_len(&session->reorder_q));
497                                 goto out;
498                         }
499                 }
500                 __skb_unlink(skb, &session->reorder_q);
501
502                 /* Process the skb. We release the queue lock while we
503                  * do so to let other contexts process the queue.
504                  */
505                 spin_unlock_bh(&session->reorder_q.lock);
506                 l2tp_recv_dequeue_skb(session, skb);
507                 goto start;
508         }
509
510 out:
511         spin_unlock_bh(&session->reorder_q.lock);
512 }
513
514 static inline int l2tp_verify_udp_checksum(struct sock *sk,
515                                            struct sk_buff *skb)
516 {
517         struct udphdr *uh = udp_hdr(skb);
518         u16 ulen = ntohs(uh->len);
519         __wsum psum;
520
521         if (sk->sk_no_check || skb_csum_unnecessary(skb))
522                 return 0;
523
524 #if IS_ENABLED(CONFIG_IPV6)
525         if (sk->sk_family == PF_INET6) {
526                 if (!uh->check) {
527                         LIMIT_NETDEBUG(KERN_INFO "L2TP: IPv6: checksum is 0\n");
528                         return 1;
529                 }
530                 if ((skb->ip_summed == CHECKSUM_COMPLETE) &&
531                     !csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
532                                      &ipv6_hdr(skb)->daddr, ulen,
533                                      IPPROTO_UDP, skb->csum)) {
534                         skb->ip_summed = CHECKSUM_UNNECESSARY;
535                         return 0;
536                 }
537                 skb->csum = ~csum_unfold(csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
538                                                          &ipv6_hdr(skb)->daddr,
539                                                          skb->len, IPPROTO_UDP,
540                                                          0));
541         } else
542 #endif
543         {
544                 struct inet_sock *inet;
545                 if (!uh->check)
546                         return 0;
547                 inet = inet_sk(sk);
548                 psum = csum_tcpudp_nofold(inet->inet_saddr, inet->inet_daddr,
549                                           ulen, IPPROTO_UDP, 0);
550
551                 if ((skb->ip_summed == CHECKSUM_COMPLETE) &&
552                     !csum_fold(csum_add(psum, skb->csum)))
553                         return 0;
554                 skb->csum = psum;
555         }
556
557         return __skb_checksum_complete(skb);
558 }
559
560 /* Do receive processing of L2TP data frames. We handle both L2TPv2
561  * and L2TPv3 data frames here.
562  *
563  * L2TPv2 Data Message Header
564  *
565  *  0                   1                   2                   3
566  *  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
567  * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
568  * |T|L|x|x|S|x|O|P|x|x|x|x|  Ver  |          Length (opt)         |
569  * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
570  * |           Tunnel ID           |           Session ID          |
571  * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
572  * |             Ns (opt)          |             Nr (opt)          |
573  * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
574  * |      Offset Size (opt)        |    Offset pad... (opt)
575  * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
576  *
577  * Data frames are marked by T=0. All other fields are the same as
578  * those in L2TP control frames.
579  *
580  * L2TPv3 Data Message Header
581  *
582  * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
583  * |                      L2TP Session Header                      |
584  * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
585  * |                      L2-Specific Sublayer                     |
586  * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
587  * |                        Tunnel Payload                      ...
588  * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
589  *
590  * L2TPv3 Session Header Over IP
591  *
592  *  0                   1                   2                   3
593  *  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
594  * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
595  * |                           Session ID                          |
596  * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
597  * |               Cookie (optional, maximum 64 bits)...
598  * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
599  *                                                                 |
600  * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
601  *
602  * L2TPv3 L2-Specific Sublayer Format
603  *
604  *  0                   1                   2                   3
605  *  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
606  * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
607  * |x|S|x|x|x|x|x|x|              Sequence Number                  |
608  * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
609  *
610  * Cookie value, sublayer format and offset (pad) are negotiated with
611  * the peer when the session is set up. Unlike L2TPv2, we do not need
612  * to parse the packet header to determine if optional fields are
613  * present.
614  *
615  * Caller must already have parsed the frame and determined that it is
616  * a data (not control) frame before coming here. Fields up to the
617  * session-id have already been parsed and ptr points to the data
618  * after the session-id.
619  */
620 void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
621                       unsigned char *ptr, unsigned char *optr, u16 hdrflags,
622                       int length, int (*payload_hook)(struct sk_buff *skb))
623 {
624         struct l2tp_tunnel *tunnel = session->tunnel;
625         int offset;
626         u32 ns, nr;
627         struct l2tp_stats *sstats = &session->stats;
628
629         /* The ref count is increased since we now hold a pointer to
630          * the session. Take care to decrement the refcnt when exiting
631          * this function from now on...
632          */
633         l2tp_session_inc_refcount(session);
634         if (session->ref)
635                 (*session->ref)(session);
636
637         /* Parse and check optional cookie */
638         if (session->peer_cookie_len > 0) {
639                 if (memcmp(ptr, &session->peer_cookie[0], session->peer_cookie_len)) {
640                         l2tp_info(tunnel, L2TP_MSG_DATA,
641                                   "%s: cookie mismatch (%u/%u). Discarding.\n",
642                                   tunnel->name, tunnel->tunnel_id,
643                                   session->session_id);
644                         u64_stats_update_begin(&sstats->syncp);
645                         sstats->rx_cookie_discards++;
646                         u64_stats_update_end(&sstats->syncp);
647                         goto discard;
648                 }
649                 ptr += session->peer_cookie_len;
650         }
651
652         /* Handle the optional sequence numbers. Sequence numbers are
653          * in different places for L2TPv2 and L2TPv3.
654          *
655          * If we are the LAC, enable/disable sequence numbers under
656          * the control of the LNS.  If no sequence numbers present but
657          * we were expecting them, discard frame.
658          */
659         ns = nr = 0;
660         L2TP_SKB_CB(skb)->has_seq = 0;
661         if (tunnel->version == L2TP_HDR_VER_2) {
662                 if (hdrflags & L2TP_HDRFLAG_S) {
663                         ns = ntohs(*(__be16 *) ptr);
664                         ptr += 2;
665                         nr = ntohs(*(__be16 *) ptr);
666                         ptr += 2;
667
668                         /* Store L2TP info in the skb */
669                         L2TP_SKB_CB(skb)->ns = ns;
670                         L2TP_SKB_CB(skb)->has_seq = 1;
671
672                         l2tp_dbg(session, L2TP_MSG_SEQ,
673                                  "%s: recv data ns=%u, nr=%u, session nr=%u\n",
674                                  session->name, ns, nr, session->nr);
675                 }
676         } else if (session->l2specific_type == L2TP_L2SPECTYPE_DEFAULT) {
677                 u32 l2h = ntohl(*(__be32 *) ptr);
678
679                 if (l2h & 0x40000000) {
680                         ns = l2h & 0x00ffffff;
681
682                         /* Store L2TP info in the skb */
683                         L2TP_SKB_CB(skb)->ns = ns;
684                         L2TP_SKB_CB(skb)->has_seq = 1;
685
686                         l2tp_dbg(session, L2TP_MSG_SEQ,
687                                  "%s: recv data ns=%u, session nr=%u\n",
688                                  session->name, ns, session->nr);
689                 }
690         }
691
692         /* Advance past L2-specific header, if present */
693         ptr += session->l2specific_len;
694
695         if (L2TP_SKB_CB(skb)->has_seq) {
696                 /* Received a packet with sequence numbers. If we're the LAC
697                  * and not currently sending sequence numbers, the LNS has
698                  * requested that we enable them, so do so.
699                  */
700                 if ((!session->lns_mode) && (!session->send_seq)) {
701                         l2tp_info(session, L2TP_MSG_SEQ,
702                                   "%s: requested to enable seq numbers by LNS\n",
703                                   session->name);
704                         session->send_seq = -1;
705                         l2tp_session_set_header_len(session, tunnel->version);
706                 }
707         } else {
708                 /* No sequence numbers.
709                  * If user has configured mandatory sequence numbers, discard.
710                  */
711                 if (session->recv_seq) {
712                         l2tp_warn(session, L2TP_MSG_SEQ,
713                                   "%s: recv data has no seq numbers when required. Discarding.\n",
714                                   session->name);
715                         u64_stats_update_begin(&sstats->syncp);
716                         sstats->rx_seq_discards++;
717                         u64_stats_update_end(&sstats->syncp);
718                         goto discard;
719                 }
720
721                 /* If we're the LAC and we're sending sequence numbers, the
722                  * LNS has requested that we no longer send sequence numbers.
723                  * If we're the LNS and we're sending sequence numbers, the
724                  * LAC is broken. Discard the frame.
725                  */
726                 if ((!session->lns_mode) && (session->send_seq)) {
727                         l2tp_info(session, L2TP_MSG_SEQ,
728                                   "%s: requested to disable seq numbers by LNS\n",
729                                   session->name);
730                         session->send_seq = 0;
731                         l2tp_session_set_header_len(session, tunnel->version);
732                 } else if (session->send_seq) {
733                         l2tp_warn(session, L2TP_MSG_SEQ,
734                                   "%s: recv data has no seq numbers when required. Discarding.\n",
735                                   session->name);
736                         u64_stats_update_begin(&sstats->syncp);
737                         sstats->rx_seq_discards++;
738                         u64_stats_update_end(&sstats->syncp);
739                         goto discard;
740                 }
741         }
742
743         /* Session data offset is handled differently for L2TPv2 and
744          * L2TPv3. For L2TPv2, there is an optional 16-bit value in
745          * the header. For L2TPv3, the offset is negotiated using AVPs
746          * in the session setup control protocol.
747          */
748         if (tunnel->version == L2TP_HDR_VER_2) {
749                 /* If offset bit set, skip it. */
750                 if (hdrflags & L2TP_HDRFLAG_O) {
751                         offset = ntohs(*(__be16 *)ptr);
752                         ptr += 2 + offset;
753                 }
754         } else
755                 ptr += session->offset;
756
757         offset = ptr - optr;
758         if (!pskb_may_pull(skb, offset))
759                 goto discard;
760
761         __skb_pull(skb, offset);
762
763         /* If caller wants to process the payload before we queue the
764          * packet, do so now.
765          */
766         if (payload_hook)
767                 if ((*payload_hook)(skb))
768                         goto discard;
769
770         /* Prepare skb for adding to the session's reorder_q.  Hold
771          * packets for max reorder_timeout or 1 second if not
772          * reordering.
773          */
774         L2TP_SKB_CB(skb)->length = length;
775         L2TP_SKB_CB(skb)->expires = jiffies +
776                 (session->reorder_timeout ? session->reorder_timeout : HZ);
777
778         /* Add packet to the session's receive queue. Reordering is done here, if
779          * enabled. Saved L2TP protocol info is stored in skb->cb[].
780          */
781         if (L2TP_SKB_CB(skb)->has_seq) {
782                 if (session->reorder_timeout != 0) {
783                         /* Packet reordering enabled. Add skb to session's
784                          * reorder queue, in order of ns.
785                          */
786                         l2tp_recv_queue_skb(session, skb);
787                 } else {
788                         /* Packet reordering disabled. Discard out-of-sequence
789                          * packets
790                          */
791                         if (L2TP_SKB_CB(skb)->ns != session->nr) {
792                                 u64_stats_update_begin(&sstats->syncp);
793                                 sstats->rx_seq_discards++;
794                                 u64_stats_update_end(&sstats->syncp);
795                                 l2tp_dbg(session, L2TP_MSG_SEQ,
796                                          "%s: oos pkt %u len %d discarded, waiting for %u, reorder_q_len=%d\n",
797                                          session->name, L2TP_SKB_CB(skb)->ns,
798                                          L2TP_SKB_CB(skb)->length, session->nr,
799                                          skb_queue_len(&session->reorder_q));
800                                 goto discard;
801                         }
802                         skb_queue_tail(&session->reorder_q, skb);
803                 }
804         } else {
805                 /* No sequence numbers. Add the skb to the tail of the
806                  * reorder queue. This ensures that it will be
807                  * delivered after all previous sequenced skbs.
808                  */
809                 skb_queue_tail(&session->reorder_q, skb);
810         }
811
812         /* Try to dequeue as many skbs from reorder_q as we can. */
813         l2tp_recv_dequeue(session);
814
815         l2tp_session_dec_refcount(session);
816
817         return;
818
819 discard:
820         u64_stats_update_begin(&sstats->syncp);
821         sstats->rx_errors++;
822         u64_stats_update_end(&sstats->syncp);
823         kfree_skb(skb);
824
825         if (session->deref)
826                 (*session->deref)(session);
827
828         l2tp_session_dec_refcount(session);
829 }
830 EXPORT_SYMBOL(l2tp_recv_common);
831
832 /* Drop skbs from the session's reorder_q
833  */
834 int l2tp_session_queue_purge(struct l2tp_session *session)
835 {
836         struct sk_buff *skb = NULL;
837         BUG_ON(!session);
838         BUG_ON(session->magic != L2TP_SESSION_MAGIC);
839         while ((skb = skb_dequeue(&session->reorder_q))) {
840                 u64_stats_update_begin(&session->stats.syncp);
841                 session->stats.rx_errors++;
842                 u64_stats_update_end(&session->stats.syncp);
843                 kfree_skb(skb);
844                 if (session->deref)
845                         (*session->deref)(session);
846         }
847         return 0;
846 }
847 EXPORT_SYMBOL_GPL(l2tp_session_queue_purge);
848
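/* l2tp_session_queue_purge() is called on session teardown, e.g. from
 * l2tp_tunnel_closeall() below, so that any skbs still sitting in the
 * reorder queue are freed and their session references dropped.
 */
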
849 /* Internal UDP receive frame. Do the real work of receiving an L2TP data frame
850  * here. The skb is not on a list when we get here.
851  * Returns 0 if the packet was a data packet and was successfully passed on.
852  * Returns 1 if the packet was not a good data packet and could not be
853  * forwarded.  All such packets are passed up to userspace to deal with.
854  */
855 static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb,
856                               int (*payload_hook)(struct sk_buff *skb))
857 {
858         struct l2tp_session *session = NULL;
859         unsigned char *ptr, *optr;
860         u16 hdrflags;
861         u32 tunnel_id, session_id;
862         u16 version;
863         int length;
864         struct l2tp_stats *tstats;
865
866         if (tunnel->sock && l2tp_verify_udp_checksum(tunnel->sock, skb))
867                 goto discard_bad_csum;
868
869         /* UDP always verifies the packet length. */
870         __skb_pull(skb, sizeof(struct udphdr));
871
872         /* Short packet? */
873         if (!pskb_may_pull(skb, L2TP_HDR_SIZE_SEQ)) {
874                 l2tp_info(tunnel, L2TP_MSG_DATA,
875                           "%s: recv short packet (len=%d)\n",
876                           tunnel->name, skb->len);
877                 goto error;
878         }
879
880         /* Trace packet contents, if enabled */
881         if (tunnel->debug & L2TP_MSG_DATA) {
882                 length = min(32u, skb->len);
883                 if (!pskb_may_pull(skb, length))
884                         goto error;
885
886                 pr_debug("%s: recv\n", tunnel->name);
887                 print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, skb->data, length);
888         }
889
890         /* Point to L2TP header */
891         optr = ptr = skb->data;
892
893         /* Get L2TP header flags */
894         hdrflags = ntohs(*(__be16 *) ptr);
895
896         /* Check protocol version */
897         version = hdrflags & L2TP_HDR_VER_MASK;
898         if (version != tunnel->version) {
899                 l2tp_info(tunnel, L2TP_MSG_DATA,
900                           "%s: recv protocol version mismatch: got %d expected %d\n",
901                           tunnel->name, version, tunnel->version);
902                 goto error;
903         }
904
905         /* Get length of L2TP packet */
906         length = skb->len;
907
908         /* If this is a control packet, it is handled by userspace. */
909         if (hdrflags & L2TP_HDRFLAG_T) {
910                 l2tp_dbg(tunnel, L2TP_MSG_DATA,
911                          "%s: recv control packet, len=%d\n",
912                          tunnel->name, length);
913                 goto error;
914         }
915
916         /* Skip flags */
917         ptr += 2;
918
919         if (tunnel->version == L2TP_HDR_VER_2) {
920                 /* If length is present, skip it */
921                 if (hdrflags & L2TP_HDRFLAG_L)
922                         ptr += 2;
923
924                 /* Extract tunnel and session ID */
925                 tunnel_id = ntohs(*(__be16 *) ptr);
926                 ptr += 2;
927                 session_id = ntohs(*(__be16 *) ptr);
928                 ptr += 2;
929         } else {
930                 ptr += 2;       /* skip reserved bits */
931                 tunnel_id = tunnel->tunnel_id;
932                 session_id = ntohl(*(__be32 *) ptr);
933                 ptr += 4;
934         }
935
936         /* Find the session context */
937         session = l2tp_session_find(tunnel->l2tp_net, tunnel, session_id);
938         if (!session || !session->recv_skb) {
939                 /* Not found? Pass to userspace to deal with */
940                 l2tp_info(tunnel, L2TP_MSG_DATA,
941                           "%s: no session found (%u/%u). Passing up.\n",
942                           tunnel->name, tunnel_id, session_id);
943                 goto error;
944         }
945
946         l2tp_recv_common(session, skb, ptr, optr, hdrflags, length, payload_hook);
947
948         return 0;
949
950 discard_bad_csum:
951         LIMIT_NETDEBUG("%s: UDP: bad checksum\n", tunnel->name);
952         UDP_INC_STATS_USER(tunnel->l2tp_net, UDP_MIB_INERRORS, 0);
953         tstats = &tunnel->stats;
954         u64_stats_update_begin(&tstats->syncp);
955         tstats->rx_errors++;
956         u64_stats_update_end(&tstats->syncp);
957         kfree_skb(skb);
958
959         return 0;
960
961 error:
962         /* Put UDP header back */
963         __skb_push(skb, sizeof(struct udphdr));
964
965         return 1;
966 }
967
968 /* UDP encapsulation receive handler. See net/ipv4/udp.c.
969  * Return codes:
970  * 0 : success.
971  * <0: error
972  * >0: skb should be passed up to userspace as UDP.
973  */
974 int l2tp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
975 {
976         struct l2tp_tunnel *tunnel;
977
978         tunnel = l2tp_sock_to_tunnel(sk);
979         if (tunnel == NULL)
980                 goto pass_up;
981
982         l2tp_dbg(tunnel, L2TP_MSG_DATA, "%s: received %d bytes\n",
983                  tunnel->name, skb->len);
984
985         if (l2tp_udp_recv_core(tunnel, skb, tunnel->recv_payload_hook))
986                 goto pass_up_put;
987
988         sock_put(sk);
989         return 0;
990
991 pass_up_put:
992         sock_put(sk);
993 pass_up:
994         return 1;
995 }
996 EXPORT_SYMBOL_GPL(l2tp_udp_encap_recv);
997
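/* Illustrative only: for UDP encapsulation, tunnel setup code (not shown in
 * this file) registers the handler above on the tunnel socket, roughly as
 *
 *	udp_sk(sk)->encap_type = UDP_ENCAP_L2TPINUDP;
 *	udp_sk(sk)->encap_rcv  = l2tp_udp_encap_recv;
 *
 * l2tp_tunnel_destruct() clears these hooks again when the socket is
 * destroyed.
 */
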
998 /************************************************************************
999  * Transmit handling
1000  ***********************************************************************/
1001
1002 /* Build an L2TP header for the session into the buffer provided.
1003  */
1004 static int l2tp_build_l2tpv2_header(struct l2tp_session *session, void *buf)
1005 {
1006         struct l2tp_tunnel *tunnel = session->tunnel;
1007         __be16 *bufp = buf;
1008         __be16 *optr = buf;
1009         u16 flags = L2TP_HDR_VER_2;
1010         u32 tunnel_id = tunnel->peer_tunnel_id;
1011         u32 session_id = session->peer_session_id;
1012
1013         if (session->send_seq)
1014                 flags |= L2TP_HDRFLAG_S;
1015
1016         /* Setup L2TP header. */
1017         *bufp++ = htons(flags);
1018         *bufp++ = htons(tunnel_id);
1019         *bufp++ = htons(session_id);
1020         if (session->send_seq) {
1021                 *bufp++ = htons(session->ns);
1022                 *bufp++ = 0;
1023                 session->ns++;
1024                 session->ns &= 0xffff;
1025                 l2tp_dbg(session, L2TP_MSG_SEQ, "%s: updated ns to %u\n",
1026                          session->name, session->ns);
1027         }
1028
1029         return bufp - optr;
1030 }
1031
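/* For reference, the L2TPv2 data header emitted above is 10 bytes when
 * sequence numbers are enabled (L2TP_HDR_SIZE_SEQ):
 *
 *	flags/ver (2) | tunnel id (2) | session id (2) | Ns (2) | Nr (2)
 *
 * and only the first three 16-bit words (L2TP_HDR_SIZE_NOSEQ, 6 bytes) when
 * the S bit is clear.
 */
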
1032 static int l2tp_build_l2tpv3_header(struct l2tp_session *session, void *buf)
1033 {
1034         struct l2tp_tunnel *tunnel = session->tunnel;
1035         char *bufp = buf;
1036         char *optr = bufp;
1037
1038         /* Setup L2TP header. The header differs slightly for UDP and
1039          * IP encapsulations. For UDP, there are 4 bytes of flags.
1040          */
1041         if (tunnel->encap == L2TP_ENCAPTYPE_UDP) {
1042                 u16 flags = L2TP_HDR_VER_3;
1043                 *((__be16 *) bufp) = htons(flags);
1044                 bufp += 2;
1045                 *((__be16 *) bufp) = 0;
1046                 bufp += 2;
1047         }
1048
1049         *((__be32 *) bufp) = htonl(session->peer_session_id);
1050         bufp += 4;
1051         if (session->cookie_len) {
1052                 memcpy(bufp, &session->cookie[0], session->cookie_len);
1053                 bufp += session->cookie_len;
1054         }
1055         if (session->l2specific_len) {
1056                 if (session->l2specific_type == L2TP_L2SPECTYPE_DEFAULT) {
1057                         u32 l2h = 0;
1058                         if (session->send_seq) {
1059                                 l2h = 0x40000000 | session->ns;
1060                                 session->ns++;
1061                                 session->ns &= 0xffffff;
1062                                 l2tp_dbg(session, L2TP_MSG_SEQ,
1063                                          "%s: updated ns to %u\n",
1064                                          session->name, session->ns);
1065                         }
1066
1067                         *((__be32 *) bufp) = htonl(l2h);
1068                 }
1069                 bufp += session->l2specific_len;
1070         }
1071         if (session->offset)
1072                 bufp += session->offset;
1073
1074         return bufp - optr;
1075 }
1076
1077 static int l2tp_xmit_core(struct l2tp_session *session, struct sk_buff *skb,
1078                           struct flowi *fl, size_t data_len)
1079 {
1080         struct l2tp_tunnel *tunnel = session->tunnel;
1081         unsigned int len = skb->len;
1082         int error;
1083         struct l2tp_stats *tstats, *sstats;
1084
1085         /* Debug */
1086         if (session->send_seq)
1087                 l2tp_dbg(session, L2TP_MSG_DATA, "%s: send %Zd bytes, ns=%u\n",
1088                          session->name, data_len, session->ns - 1);
1089         else
1090                 l2tp_dbg(session, L2TP_MSG_DATA, "%s: send %Zd bytes\n",
1091                          session->name, data_len);
1092
1093         if (session->debug & L2TP_MSG_DATA) {
1094                 int uhlen = (tunnel->encap == L2TP_ENCAPTYPE_UDP) ? sizeof(struct udphdr) : 0;
1095                 unsigned char *datap = skb->data + uhlen;
1096
1097                 pr_debug("%s: xmit\n", session->name);
1098                 print_hex_dump_bytes("", DUMP_PREFIX_OFFSET,
1099                                      datap, min_t(size_t, 32, len - uhlen));
1100         }
1101
1102         /* Queue the packet to IP for output */
1103         skb->local_df = 1;
1104 #if IS_ENABLED(CONFIG_IPV6)
1105         if (skb->sk->sk_family == PF_INET6)
1106                 error = inet6_csk_xmit(skb, NULL);
1107         else
1108 #endif
1109                 error = ip_queue_xmit(skb, fl);
1110
1111         /* Update stats */
1112         tstats = &tunnel->stats;
1113         u64_stats_update_begin(&tstats->syncp);
1114         sstats = &session->stats;
1115         u64_stats_update_begin(&sstats->syncp);
1116         if (error >= 0) {
1117                 tstats->tx_packets++;
1118                 tstats->tx_bytes += len;
1119                 sstats->tx_packets++;
1120                 sstats->tx_bytes += len;
1121         } else {
1122                 tstats->tx_errors++;
1123                 sstats->tx_errors++;
1124         }
1125         u64_stats_update_end(&tstats->syncp);
1126         u64_stats_update_end(&sstats->syncp);
1127
1128         return 0;
1129 }
1130
1131 /* Automatically called when the skb is freed.
1132  */
1133 static void l2tp_sock_wfree(struct sk_buff *skb)
1134 {
1135         sock_put(skb->sk);
1136 }
1137
1138 /* For data skbs that we transmit, we associate with the tunnel socket
1139  * but don't do accounting.
1140  */
1141 static inline void l2tp_skb_set_owner_w(struct sk_buff *skb, struct sock *sk)
1142 {
1143         sock_hold(sk);
1144         skb->sk = sk;
1145         skb->destructor = l2tp_sock_wfree;
1146 }
1147
1148 #if IS_ENABLED(CONFIG_IPV6)
1149 static void l2tp_xmit_ipv6_csum(struct sock *sk, struct sk_buff *skb,
1150                                 int udp_len)
1151 {
1152         struct ipv6_pinfo *np = inet6_sk(sk);
1153         struct udphdr *uh = udp_hdr(skb);
1154
1155         if (!skb_dst(skb) || !skb_dst(skb)->dev ||
1156             !(skb_dst(skb)->dev->features & NETIF_F_IPV6_CSUM)) {
1157                 __wsum csum = skb_checksum(skb, 0, udp_len, 0);
1158                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1159                 uh->check = csum_ipv6_magic(&np->saddr, &np->daddr, udp_len,
1160                                             IPPROTO_UDP, csum);
1161                 if (uh->check == 0)
1162                         uh->check = CSUM_MANGLED_0;
1163         } else {
1164                 skb->ip_summed = CHECKSUM_PARTIAL;
1165                 skb->csum_start = skb_transport_header(skb) - skb->head;
1166                 skb->csum_offset = offsetof(struct udphdr, check);
1167                 uh->check = ~csum_ipv6_magic(&np->saddr, &np->daddr,
1168                                              udp_len, IPPROTO_UDP, 0);
1169         }
1170 }
1171 #endif
1172
1173 /* If caller requires the skb to have a ppp header, the header must be
1174  * inserted in the skb data before calling this function.
1175  */
1176 int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len)
1177 {
1178         int data_len = skb->len;
1179         struct l2tp_tunnel *tunnel = session->tunnel;
1180         struct sock *sk = tunnel->sock;
1181         struct flowi *fl;
1182         struct udphdr *uh;
1183         struct inet_sock *inet;
1184         __wsum csum;
1185         int headroom;
1186         int uhlen = (tunnel->encap == L2TP_ENCAPTYPE_UDP) ? sizeof(struct udphdr) : 0;
1187         int udp_len;
1188         int ret = NET_XMIT_SUCCESS;
1189
1190         /* Check that there's enough headroom in the skb to insert IP,
1191          * UDP and L2TP headers. If not enough, expand it to
1192          * make room. Adjust truesize.
1193          */
1194         headroom = NET_SKB_PAD + sizeof(struct iphdr) +
1195                 uhlen + hdr_len;
1196         if (skb_cow_head(skb, headroom)) {
1197                 kfree_skb(skb);
1198                 return NET_XMIT_DROP;
1199         }
1200
1201         skb_orphan(skb);
1202         /* Setup L2TP header */
1203         session->build_header(session, __skb_push(skb, hdr_len));
1204
1205         /* Reset skb netfilter state */
1206         memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
1207         IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED |
1208                               IPSKB_REROUTED);
1209         nf_reset(skb);
1210
1211         bh_lock_sock(sk);
1212         if (sock_owned_by_user(sk)) {
1213                 kfree_skb(skb);
1214                 ret = NET_XMIT_DROP;
1215                 goto out_unlock;
1216         }
1217
1218         /* Get routing info from the tunnel socket */
1219         skb_dst_drop(skb);
1220         skb_dst_set(skb, dst_clone(__sk_dst_check(sk, 0)));
1221
1222         inet = inet_sk(sk);
1223         fl = &inet->cork.fl;
1224         switch (tunnel->encap) {
1225         case L2TP_ENCAPTYPE_UDP:
1226                 /* Setup UDP header */
1227                 __skb_push(skb, sizeof(*uh));
1228                 skb_reset_transport_header(skb);
1229                 uh = udp_hdr(skb);
1230                 uh->source = inet->inet_sport;
1231                 uh->dest = inet->inet_dport;
1232                 udp_len = uhlen + hdr_len + data_len;
1233                 uh->len = htons(udp_len);
1234                 uh->check = 0;
1235
1236                 /* Calculate UDP checksum if configured to do so */
1237 #if IS_ENABLED(CONFIG_IPV6)
1238                 if (sk->sk_family == PF_INET6)
1239                         l2tp_xmit_ipv6_csum(sk, skb, udp_len);
1240                 else
1241 #endif
1242                 if (sk->sk_no_check == UDP_CSUM_NOXMIT)
1243                         skb->ip_summed = CHECKSUM_NONE;
1244                 else if ((skb_dst(skb) && skb_dst(skb)->dev) &&
1245                          (!(skb_dst(skb)->dev->features & NETIF_F_V4_CSUM))) {
1246                         skb->ip_summed = CHECKSUM_COMPLETE;
1247                         csum = skb_checksum(skb, 0, udp_len, 0);
1248                         uh->check = csum_tcpudp_magic(inet->inet_saddr,
1249                                                       inet->inet_daddr,
1250                                                       udp_len, IPPROTO_UDP, csum);
1251                         if (uh->check == 0)
1252                                 uh->check = CSUM_MANGLED_0;
1253                 } else {
1254                         skb->ip_summed = CHECKSUM_PARTIAL;
1255                         skb->csum_start = skb_transport_header(skb) - skb->head;
1256                         skb->csum_offset = offsetof(struct udphdr, check);
1257                         uh->check = ~csum_tcpudp_magic(inet->inet_saddr,
1258                                                        inet->inet_daddr,
1259                                                        udp_len, IPPROTO_UDP, 0);
1260                 }
1261                 break;
1262
1263         case L2TP_ENCAPTYPE_IP:
1264                 break;
1265         }
1266
1267         l2tp_skb_set_owner_w(skb, sk);
1268
1269         l2tp_xmit_core(session, skb, fl, data_len);
1270 out_unlock:
1271         bh_unlock_sock(sk);
1272
1273         return ret;
1274 }
1275 EXPORT_SYMBOL_GPL(l2tp_xmit_skb);
1276
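/* Illustrative only: pseudowire drivers built on this core (e.g. pppol2tp)
 * push their own sub-layer header first and then hand the skb to the core,
 * roughly as
 *
 *	l2tp_xmit_skb(session, skb, session->hdr_len);
 *
 * where session->hdr_len was computed by l2tp_session_set_header_len().
 */
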
1277 /*****************************************************************************
1278  * Tunnel and session create/destroy.
1279  *****************************************************************************/
1280
1281 /* Tunnel socket destruct hook.
1282  * The tunnel context is deleted only when all session sockets have been
1283  * closed.
1284  */
1285 static void l2tp_tunnel_destruct(struct sock *sk)
1286 {
1287         struct l2tp_tunnel *tunnel;
1288         struct l2tp_net *pn;
1289
1290         tunnel = sk->sk_user_data;
1291         if (tunnel == NULL)
1292                 goto end;
1293
1294         l2tp_info(tunnel, L2TP_MSG_CONTROL, "%s: closing...\n", tunnel->name);
1295
1296
1297         /* Disable udp encapsulation */
1298         switch (tunnel->encap) {
1299         case L2TP_ENCAPTYPE_UDP:
1300                 /* No longer an encapsulation socket. See net/ipv4/udp.c */
1301                 (udp_sk(sk))->encap_type = 0;
1302                 (udp_sk(sk))->encap_rcv = NULL;
1303                 (udp_sk(sk))->encap_destroy = NULL;
1304                 break;
1305         case L2TP_ENCAPTYPE_IP:
1306                 break;
1307         }
1308
1309         /* Remove hooks into tunnel socket */
1310         sk->sk_destruct = tunnel->old_sk_destruct;
1311         sk->sk_user_data = NULL;
1312         tunnel->sock = NULL;
1313
1314         /* Remove the tunnel struct from the tunnel list */
1315         pn = l2tp_pernet(tunnel->l2tp_net);
1316         spin_lock_bh(&pn->l2tp_tunnel_list_lock);
1317         list_del_rcu(&tunnel->list);
1318         spin_unlock_bh(&pn->l2tp_tunnel_list_lock);
1319         atomic_dec(&l2tp_tunnel_count);
1320
1321         l2tp_tunnel_closeall(tunnel);
1322         l2tp_tunnel_dec_refcount(tunnel);
1323
1324         /* Call the original destructor */
1325         if (sk->sk_destruct)
1326                 (*sk->sk_destruct)(sk);
1327 end:
1328         return;
1329 }
1330
1331 /* When the tunnel is closed, all the attached sessions need to go too.
1332  */
1333 void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel)
1334 {
1335         int hash;
1336         struct hlist_node *walk;
1337         struct hlist_node *tmp;
1338         struct l2tp_session *session;
1339
1340         BUG_ON(tunnel == NULL);
1341
1342         l2tp_info(tunnel, L2TP_MSG_CONTROL, "%s: closing all sessions...\n",
1343                   tunnel->name);
1344
1345         write_lock_bh(&tunnel->hlist_lock);
1346         for (hash = 0; hash < L2TP_HASH_SIZE; hash++) {
1347 again:
1348                 hlist_for_each_safe(walk, tmp, &tunnel->session_hlist[hash]) {
1349                         session = hlist_entry(walk, struct l2tp_session, hlist);
1350
1351                         l2tp_info(session, L2TP_MSG_CONTROL,
1352                                   "%s: closing session\n", session->name);
1353
1354                         hlist_del_init(&session->hlist);
1355
1356                         /* Since we should hold the sock lock while
1357                          * doing any unbinding, we need to release the
1358                          * lock we're holding before taking that lock.
1359                          * Hold a reference to the sock so it doesn't
1360                          * disappear as we're jumping between locks.
1361                          */
1362                         if (session->ref != NULL)
1363                                 (*session->ref)(session);
1364
1365                         write_unlock_bh(&tunnel->hlist_lock);
1366
1367                         if (tunnel->version != L2TP_HDR_VER_2) {
1368                                 struct l2tp_net *pn = l2tp_pernet(tunnel->l2tp_net);
1369
1370                                 spin_lock_bh(&pn->l2tp_session_hlist_lock);
1371                                 hlist_del_init_rcu(&session->global_hlist);
1372                                 spin_unlock_bh(&pn->l2tp_session_hlist_lock);
1373                                 synchronize_rcu();
1374                         }
1375
1376                         l2tp_session_queue_purge(session);
1377
1378                         if (session->session_close != NULL)
1379                                 (*session->session_close)(session);
1380
1381                         if (session->deref != NULL)
1382                                 (*session->deref)(session);
1383
1384                         l2tp_session_dec_refcount(session);
1385
1386                         write_lock_bh(&tunnel->hlist_lock);
1387
1388                         /* Now restart from the beginning of this hash
1389                          * chain.  We always remove a session from the
1390                          * list so we are guaranteed to make forward
1391                          * progress.
1392                          */
1393                         goto again;
1394                 }
1395         }
1396         write_unlock_bh(&tunnel->hlist_lock);
1397 }
1398 EXPORT_SYMBOL_GPL(l2tp_tunnel_closeall);
1399
1400 /* Tunnel socket destroy hook for UDP encapsulation */
1401 static void l2tp_udp_encap_destroy(struct sock *sk)
1402 {
1403         struct l2tp_tunnel *tunnel = l2tp_sock_to_tunnel(sk);
1404         if (tunnel) {
1405                 l2tp_tunnel_closeall(tunnel);
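                /* Balances the sock_hold() assumed to be taken by
                 * l2tp_sock_to_tunnel() above.
                 */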
1406                 sock_put(sk);
1407         }
1408 }
1409
1410 /* Really kill the tunnel.
1411  * Come here only when all sessions have been cleared from the tunnel.
1412  */
1413 static void l2tp_tunnel_free(struct l2tp_tunnel *tunnel)
1414 {
1415         BUG_ON(atomic_read(&tunnel->ref_count) != 0);
1416         BUG_ON(tunnel->sock != NULL);
1417         l2tp_info(tunnel, L2TP_MSG_CONTROL, "%s: free...\n", tunnel->name);
1418         kfree_rcu(tunnel, rcu);
1419 }
1420
1421 /* Workqueue tunnel deletion function */
1422 static void l2tp_tunnel_del_work(struct work_struct *work)
1423 {
1424         struct l2tp_tunnel *tunnel = NULL;
1425         struct socket *sock = NULL;
1426         struct sock *sk = NULL;
1427
1428         tunnel = container_of(work, struct l2tp_tunnel, del_work);
1429         sk = l2tp_tunnel_sock_lookup(tunnel);
1430         if (!sk)
1431                 return;
1432
1433         sock = sk->sk_socket;
1434
1435         /* If the tunnel socket was created by userspace, then go through the
1436          * inet layer to shut the socket down, and let userspace close it.
1437          * Otherwise, if we created the socket directly within the kernel, use
1438          * the sk API to release it here.
1439          * In either case the tunnel resources are freed in the socket
1440          * destructor when the tunnel socket goes away.
1441          */
1442         if (tunnel->fd >= 0) {
1443                 if (sock)
1444                         inet_shutdown(sock, 2);
1445         } else {
1446                 if (sock)
1447                         kernel_sock_shutdown(sock, SHUT_RDWR);
1448                 sk_release_kernel(sk);
1449         }
1450
1451         l2tp_tunnel_sock_put(sk);
1452 }
1453
1454 /* Create a socket for the tunnel, if one isn't set up by
1455  * userspace. This is used for static tunnels where there is no
1456  * managing L2TP daemon.
1457  *
1458  * Since we don't want these sockets to keep a namespace alive by
1459  * themselves, we drop the socket's namespace refcount after creation.
1460  * These sockets are freed when the namespace exits using the pernet
1461  * exit hook.
1462  */
1463 static int l2tp_tunnel_sock_create(struct net *net,
1464                                 u32 tunnel_id,
1465                                 u32 peer_tunnel_id,
1466                                 struct l2tp_tunnel_cfg *cfg,
1467                                 struct socket **sockp)
1468 {
1469         int err = -EINVAL;
1470         struct socket *sock = NULL;
1471         struct sockaddr_in udp_addr = {0};
1472         struct sockaddr_l2tpip ip_addr = {0};
1473 #if IS_ENABLED(CONFIG_IPV6)
1474         struct sockaddr_in6 udp6_addr = {0};
1475         struct sockaddr_l2tpip6 ip6_addr = {0};
1476 #endif
1477
1478         switch (cfg->encap) {
1479         case L2TP_ENCAPTYPE_UDP:
1480 #if IS_ENABLED(CONFIG_IPV6)
1481                 if (cfg->local_ip6 && cfg->peer_ip6) {
1482                         err = sock_create_kern(AF_INET6, SOCK_DGRAM, 0, &sock);
1483                         if (err < 0)
1484                                 goto out;
1485
1486                         sk_change_net(sock->sk, net);
1487
1488                         udp6_addr.sin6_family = AF_INET6;
1489                         memcpy(&udp6_addr.sin6_addr, cfg->local_ip6,
1490                                sizeof(udp6_addr.sin6_addr));
1491                         udp6_addr.sin6_port = htons(cfg->local_udp_port);
1492                         err = kernel_bind(sock, (struct sockaddr *) &udp6_addr,
1493                                           sizeof(udp6_addr));
1494                         if (err < 0)
1495                                 goto out;
1496
1497                         udp6_addr.sin6_family = AF_INET6;
1498                         memcpy(&udp6_addr.sin6_addr, cfg->peer_ip6,
1499                                sizeof(udp6_addr.sin6_addr));
1500                         udp6_addr.sin6_port = htons(cfg->peer_udp_port);
1501                         err = kernel_connect(sock,
1502                                              (struct sockaddr *) &udp6_addr,
1503                                              sizeof(udp6_addr), 0);
1504                         if (err < 0)
1505                                 goto out;
1506                 } else
1507 #endif
1508                 {
1509                         err = sock_create_kern(AF_INET, SOCK_DGRAM, 0, &sock);
1510                         if (err < 0)
1511                                 goto out;
1512
1513                         sk_change_net(sock->sk, net);
1514
1515                         udp_addr.sin_family = AF_INET;
1516                         udp_addr.sin_addr = cfg->local_ip;
1517                         udp_addr.sin_port = htons(cfg->local_udp_port);
1518                         err = kernel_bind(sock, (struct sockaddr *) &udp_addr,
1519                                           sizeof(udp_addr));
1520                         if (err < 0)
1521                                 goto out;
1522
1523                         udp_addr.sin_family = AF_INET;
1524                         udp_addr.sin_addr = cfg->peer_ip;
1525                         udp_addr.sin_port = htons(cfg->peer_udp_port);
1526                         err = kernel_connect(sock,
1527                                              (struct sockaddr *) &udp_addr,
1528                                              sizeof(udp_addr), 0);
1529                         if (err < 0)
1530                                 goto out;
1531                 }
1532
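                /* UDP_CSUM_NOXMIT: send datagrams without a UDP checksum;
                 * checksums on received packets are still verified by the
                 * UDP core.
                 */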
1533                 if (!cfg->use_udp_checksums)
1534                         sock->sk->sk_no_check = UDP_CSUM_NOXMIT;
1535
1536                 break;
1537
1538         case L2TP_ENCAPTYPE_IP:
1539 #if IS_ENABLED(CONFIG_IPV6)
1540                 if (cfg->local_ip6 && cfg->peer_ip6) {
1541                         err = sock_create_kern(AF_INET6, SOCK_DGRAM,
1542                                           IPPROTO_L2TP, &sock);
1543                         if (err < 0)
1544                                 goto out;
1545
1546                         sk_change_net(sock->sk, net);
1547
1548                         ip6_addr.l2tp_family = AF_INET6;
1549                         memcpy(&ip6_addr.l2tp_addr, cfg->local_ip6,
1550                                sizeof(ip6_addr.l2tp_addr));
1551                         ip6_addr.l2tp_conn_id = tunnel_id;
1552                         err = kernel_bind(sock, (struct sockaddr *) &ip6_addr,
1553                                           sizeof(ip6_addr));
1554                         if (err < 0)
1555                                 goto out;
1556
1557                         ip6_addr.l2tp_family = AF_INET6;
1558                         memcpy(&ip6_addr.l2tp_addr, cfg->peer_ip6,
1559                                sizeof(ip6_addr.l2tp_addr));
1560                         ip6_addr.l2tp_conn_id = peer_tunnel_id;
1561                         err = kernel_connect(sock,
1562                                              (struct sockaddr *) &ip6_addr,
1563                                              sizeof(ip6_addr), 0);
1564                         if (err < 0)
1565                                 goto out;
1566                 } else
1567 #endif
1568                 {
1569                         err = sock_create_kern(AF_INET, SOCK_DGRAM,
1570                                           IPPROTO_L2TP, &sock);
1571                         if (err < 0)
1572                                 goto out;
1573
1574                         sk_change_net(sock->sk, net);
1575
1576                         ip_addr.l2tp_family = AF_INET;
1577                         ip_addr.l2tp_addr = cfg->local_ip;
1578                         ip_addr.l2tp_conn_id = tunnel_id;
1579                         err = kernel_bind(sock, (struct sockaddr *) &ip_addr,
1580                                           sizeof(ip_addr));
1581                         if (err < 0)
1582                                 goto out;
1583
1584                         ip_addr.l2tp_family = AF_INET;
1585                         ip_addr.l2tp_addr = cfg->peer_ip;
1586                         ip_addr.l2tp_conn_id = peer_tunnel_id;
1587                         err = kernel_connect(sock, (struct sockaddr *) &ip_addr,
1588                                              sizeof(ip_addr), 0);
1589                         if (err < 0)
1590                                 goto out;
1591                 }
1592                 break;
1593
1594         default:
1595                 goto out;
1596         }
1597
1598 out:
1599         *sockp = sock;
1600         if ((err < 0) && sock) {
1601                 kernel_sock_shutdown(sock, SHUT_RDWR);
1602                 sk_release_kernel(sock->sk);
1603                 *sockp = NULL;
1604         }
1605
1606         return err;
1607 }
1608
1609 static struct lock_class_key l2tp_socket_class;
1610
1611 int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32 peer_tunnel_id, struct l2tp_tunnel_cfg *cfg, struct l2tp_tunnel **tunnelp)
1612 {
1613         struct l2tp_tunnel *tunnel = NULL;
1614         int err;
1615         struct socket *sock = NULL;
1616         struct sock *sk = NULL;
1617         struct l2tp_net *pn;
1618         enum l2tp_encap_type encap = L2TP_ENCAPTYPE_UDP;
1619
1620         /* Get the tunnel socket from the fd, which was opened by
1621          * the userspace L2TP daemon. If not specified, create a
1622          * kernel socket.
1623          */
1624         if (fd < 0) {
1625                 err = l2tp_tunnel_sock_create(net, tunnel_id, peer_tunnel_id,
1626                                 cfg, &sock);
1627                 if (err < 0)
1628                         goto err;
1629         } else {
1630                 sock = sockfd_lookup(fd, &err);
1631                 if (!sock) {
1632                         pr_err("tunl %u: sockfd_lookup(fd=%d) returned %d\n",
1633                                tunnel_id, fd, err);
1634                         err = -EBADF;
1635                         goto err;
1636                 }
1637
1638                 /* Reject namespace mismatches */
1639                 if (!net_eq(sock_net(sock->sk), net)) {
1640                         pr_err("tunl %u: netns mismatch\n", tunnel_id);
1641                         err = -EINVAL;
1642                         goto err;
1643                 }
1644         }
1645
1646         sk = sock->sk;
1647
1648         if (cfg != NULL)
1649                 encap = cfg->encap;
1650
1651         /* Quick sanity checks */
1652         switch (encap) {
1653         case L2TP_ENCAPTYPE_UDP:
1654                 err = -EPROTONOSUPPORT;
1655                 if (sk->sk_protocol != IPPROTO_UDP) {
1656                                pr_err("tunl %u: fd %d wrong protocol, got %d, expected %d\n",
1657                                tunnel_id, fd, sk->sk_protocol, IPPROTO_UDP);
1658                         goto err;
1659                 }
1660                 break;
1661         case L2TP_ENCAPTYPE_IP:
1662                 err = -EPROTONOSUPPORT;
1663                 if (sk->sk_protocol != IPPROTO_L2TP) {
1664                                pr_err("tunl %u: fd %d wrong protocol, got %d, expected %d\n",
1665                                tunnel_id, fd, sk->sk_protocol, IPPROTO_L2TP);
1666                         goto err;
1667                 }
1668                 break;
1669         }
1670
1671         /* Check if this socket has already been prepped */
1672         tunnel = (struct l2tp_tunnel *)sk->sk_user_data;
1673         if (tunnel != NULL) {
1674                 /* This socket has already been prepped */
1675                 err = -EBUSY;
1676                 goto err;
1677         }
1678
1679         tunnel = kzalloc(sizeof(struct l2tp_tunnel), GFP_KERNEL);
1680         if (tunnel == NULL) {
1681                 err = -ENOMEM;
1682                 goto err;
1683         }
1684
1685         tunnel->version = version;
1686         tunnel->tunnel_id = tunnel_id;
1687         tunnel->peer_tunnel_id = peer_tunnel_id;
1688         tunnel->debug = L2TP_DEFAULT_DEBUG_FLAGS;
1689
1690         tunnel->magic = L2TP_TUNNEL_MAGIC;
1691         sprintf(&tunnel->name[0], "tunl %u", tunnel_id);
1692         rwlock_init(&tunnel->hlist_lock);
1693
1694         /* The net we belong to */
1695         tunnel->l2tp_net = net;
1696         pn = l2tp_pernet(net);
1697
1698         if (cfg != NULL)
1699                 tunnel->debug = cfg->debug;
1700
1701         /* Record the tunnel's encapsulation type */
1702         tunnel->encap = encap;
1703         if (encap == L2TP_ENCAPTYPE_UDP) {
1704                 /* Mark socket as an encapsulation socket. See net/ipv4/udp.c */
1705                 udp_sk(sk)->encap_type = UDP_ENCAP_L2TPINUDP;
1706                 udp_sk(sk)->encap_rcv = l2tp_udp_encap_recv;
1707                 udp_sk(sk)->encap_destroy = l2tp_udp_encap_destroy;
1708 #if IS_ENABLED(CONFIG_IPV6)
1709                 if (sk->sk_family == PF_INET6)
1710                         udpv6_encap_enable();
1711                 else
1712 #endif
1713                 udp_encap_enable();
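
                /* From here on, the UDP receive path hands datagrams
                 * arriving on this socket to l2tp_udp_encap_recv()
                 * before normal UDP delivery.
                 */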
1714         }
1715
1716         sk->sk_user_data = tunnel;
1717
1718         /* Hook the tunnel socket destructor so that we can clean up
1719          * if the tunnel socket goes away.
1720          */
1721         tunnel->old_sk_destruct = sk->sk_destruct;
1722         sk->sk_destruct = &l2tp_tunnel_destruct;
1723         tunnel->sock = sk;
1724         tunnel->fd = fd;
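        /* Give the tunnel socket its own lockdep class so that lock
         * dependency tracking doesn't conflate it with ordinary sockets
         * of the same family.
         */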
1725         lockdep_set_class_and_name(&sk->sk_lock.slock, &l2tp_socket_class, "l2tp_sock");
1726
1727         sk->sk_allocation = GFP_ATOMIC;
1728
1729         /* Init delete workqueue struct */
1730         INIT_WORK(&tunnel->del_work, l2tp_tunnel_del_work);
1731
1732         /* Add tunnel to our list */
1733         INIT_LIST_HEAD(&tunnel->list);
1734         atomic_inc(&l2tp_tunnel_count);
1735
1736         /* Bump the reference count. The tunnel context is deleted
1737          * only when this drops to zero. Must be done before list insertion.
1738          */
1739         l2tp_tunnel_inc_refcount(tunnel);
1740         spin_lock_bh(&pn->l2tp_tunnel_list_lock);
1741         list_add_rcu(&tunnel->list, &pn->l2tp_tunnel_list);
1742         spin_unlock_bh(&pn->l2tp_tunnel_list_lock);
1743
1744         err = 0;
1745 err:
1746         if (tunnelp)
1747                 *tunnelp = tunnel;
1748
1749         /* If the tunnel's socket was created by the kernel, it doesn't
1750          * have a file.
1751          */
1752         if (sock && sock->file)
1753                 sockfd_put(sock);
1754
1755         return err;
1756 }
1757 EXPORT_SYMBOL_GPL(l2tp_tunnel_create);
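
/* Illustrative use only (not part of this driver): a hypothetical caller
 * setting up an unmanaged L2TPv3/UDP tunnel over a kernel-created socket
 * (fd == -1) might look roughly like the sketch below.  The tunnel IDs and
 * ports are placeholder values, and local_ip/peer_ip, omitted here for
 * brevity, would normally also be set for the kernel-socket path.
 *
 *	struct l2tp_tunnel *tunnel;
 *	struct l2tp_tunnel_cfg cfg = {
 *		.encap          = L2TP_ENCAPTYPE_UDP,
 *		.local_udp_port = 1701,
 *		.peer_udp_port  = 1701,
 *	};
 *	int err;
 *
 *	err = l2tp_tunnel_create(net, -1, L2TP_HDR_VER_3, 10, 20, &cfg, &tunnel);
 *	if (err < 0)
 *		return err;
 */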
1758
1759 /* This function is used by the netlink TUNNEL_DELETE command.
1760  */
1761 int l2tp_tunnel_delete(struct l2tp_tunnel *tunnel)
1762 {
1763         l2tp_tunnel_closeall(tunnel);
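        /* Defer the rest of the teardown to the workqueue.  The return
         * value is nonzero if the delete work was already pending.
         */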
1764         return !queue_work(l2tp_wq, &tunnel->del_work);
1765 }
1766 EXPORT_SYMBOL_GPL(l2tp_tunnel_delete);
1767
1768 /* Really kill the session.
1769  */
1770 void l2tp_session_free(struct l2tp_session *session)
1771 {
1772         struct l2tp_tunnel *tunnel;
1773
1774         BUG_ON(atomic_read(&session->ref_count) != 0);
1775
1776         tunnel = session->tunnel;
1777         if (tunnel != NULL) {
1778                 BUG_ON(tunnel->magic != L2TP_TUNNEL_MAGIC);
1779
1780                 /* Delete the session from the hash */
1781                 write_lock_bh(&tunnel->hlist_lock);
1782                 hlist_del_init(&session->hlist);
1783                 write_unlock_bh(&tunnel->hlist_lock);
1784
1785                 /* Unlink from the global hash if not L2TPv2 */
1786                 if (tunnel->version != L2TP_HDR_VER_2) {
1787                         struct l2tp_net *pn = l2tp_pernet(tunnel->l2tp_net);
1788
1789                         spin_lock_bh(&pn->l2tp_session_hlist_lock);
1790                         hlist_del_init_rcu(&session->global_hlist);
1791                         spin_unlock_bh(&pn->l2tp_session_hlist_lock);
1792                         synchronize_rcu();
1793                 }
1794
1795                 if (session->session_id != 0)
1796                         atomic_dec(&l2tp_session_count);
1797
1798                 sock_put(tunnel->sock);
1799
1800                 /* This will delete the tunnel context if this
1801                  * is the last session on the tunnel.
1802                  */
1803                 session->tunnel = NULL;
1804                 l2tp_tunnel_dec_refcount(tunnel);
1805         }
1806
1807         kfree(session);
1808
1809         return;
1810 }
1811 EXPORT_SYMBOL_GPL(l2tp_session_free);
1812
1813 /* This function is used by the netlink SESSION_DELETE command and by
1814    pseudowire modules.
1815  */
1816 int l2tp_session_delete(struct l2tp_session *session)
1817 {
1818         l2tp_session_queue_purge(session);
1819
1820         if (session->session_close != NULL)
1821                 (*session->session_close)(session);
1822
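        /* Drop the reference taken when the session was created; once the
         * count reaches zero the session is destroyed via
         * l2tp_session_free().
         */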
1823         l2tp_session_dec_refcount(session);
1824
1825         return 0;
1826 }
1827 EXPORT_SYMBOL_GPL(l2tp_session_delete);
1828
1829
1830 /* We come here whenever a session's send_seq, cookie_len or
1831  * l2specific_len parameters are set.
1832  */
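/* Worked examples (illustrative): an L2TPv2 session with send_seq set uses
 * hdr_len = 6 + 4 = 10 bytes (flags/version, tunnel ID and session ID, plus
 * Ns/Nr); an L2TPv3 session over UDP with an 8-byte cookie, the 4-byte
 * default L2-specific sublayer and no offset uses
 * hdr_len = 4 + 8 + 4 + 0 + 4 = 20 bytes (session ID, cookie, L2-specific
 * sublayer, offset, plus the flags/version and reserved space added for UDP
 * encapsulation).
 */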
1833 static void l2tp_session_set_header_len(struct l2tp_session *session, int version)
1834 {
1835         if (version == L2TP_HDR_VER_2) {
1836                 session->hdr_len = 6;
1837                 if (session->send_seq)
1838                         session->hdr_len += 4;
1839         } else {
1840                 session->hdr_len = 4 + session->cookie_len + session->l2specific_len + session->offset;
1841                 if (session->tunnel->encap == L2TP_ENCAPTYPE_UDP)
1842                         session->hdr_len += 4;
1843         }
1845 }
1846
1847 struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunnel, u32 session_id, u32 peer_session_id, struct l2tp_session_cfg *cfg)
1848 {
1849         struct l2tp_session *session;
1850
1851         session = kzalloc(sizeof(struct l2tp_session) + priv_size, GFP_KERNEL);
1852         if (session != NULL) {
1853                 session->magic = L2TP_SESSION_MAGIC;
1854                 session->tunnel = tunnel;
1855
1856                 session->session_id = session_id;
1857                 session->peer_session_id = peer_session_id;
1858                 session->nr = 0;
1859
1860                 sprintf(&session->name[0], "sess %u/%u",
1861                         tunnel->tunnel_id, session->session_id);
1862
1863                 skb_queue_head_init(&session->reorder_q);
1864
1865                 INIT_HLIST_NODE(&session->hlist);
1866                 INIT_HLIST_NODE(&session->global_hlist);
1867
1868                 /* Inherit debug options from tunnel */
1869                 session->debug = tunnel->debug;
1870
1871                 if (cfg) {
1872                         session->pwtype = cfg->pw_type;
1873                         session->debug = cfg->debug;
1874                         session->mtu = cfg->mtu;
1875                         session->mru = cfg->mru;
1876                         session->send_seq = cfg->send_seq;
1877                         session->recv_seq = cfg->recv_seq;
1878                         session->lns_mode = cfg->lns_mode;
1879                         session->reorder_timeout = cfg->reorder_timeout;
1880                         session->offset = cfg->offset;
1881                         session->l2specific_type = cfg->l2specific_type;
1882                         session->l2specific_len = cfg->l2specific_len;
1883                         session->cookie_len = cfg->cookie_len;
1884                         memcpy(&session->cookie[0], &cfg->cookie[0], cfg->cookie_len);
1885                         session->peer_cookie_len = cfg->peer_cookie_len;
1886                         memcpy(&session->peer_cookie[0], &cfg->peer_cookie[0], cfg->peer_cookie_len);
1887                 }
1888
1889                 if (tunnel->version == L2TP_HDR_VER_2)
1890                         session->build_header = l2tp_build_l2tpv2_header;
1891                 else
1892                         session->build_header = l2tp_build_l2tpv3_header;
1893
1894                 l2tp_session_set_header_len(session, tunnel->version);
1895
1896                 /* Bump the reference count. The session context is deleted
1897                  * only when this drops to zero.
1898                  */
1899                 l2tp_session_inc_refcount(session);
1900                 l2tp_tunnel_inc_refcount(tunnel);
1901
1902                 /* Ensure tunnel socket isn't deleted */
1903                 sock_hold(tunnel->sock);
1904
1905                 /* Add session to the tunnel's hash list */
1906                 write_lock_bh(&tunnel->hlist_lock);
1907                 hlist_add_head(&session->hlist,
1908                                l2tp_session_id_hash(tunnel, session_id));
1909                 write_unlock_bh(&tunnel->hlist_lock);
1910
1911                 /* And to the global session list if L2TPv3 */
1912                 if (tunnel->version != L2TP_HDR_VER_2) {
1913                         struct l2tp_net *pn = l2tp_pernet(tunnel->l2tp_net);
1914
1915                         spin_lock_bh(&pn->l2tp_session_hlist_lock);
1916                         hlist_add_head_rcu(&session->global_hlist,
1917                                            l2tp_session_id_hash_2(pn, session_id));
1918                         spin_unlock_bh(&pn->l2tp_session_hlist_lock);
1919                 }
1920
1921                 /* Ignore management session in session count value */
1922                 if (session->session_id != 0)
1923                         atomic_inc(&l2tp_session_count);
1924         }
1925
1926         return session;
1927 }
1928 EXPORT_SYMBOL_GPL(l2tp_session_create);
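
/* Illustrative use only (not part of this driver): a pseudowire module
 * typically allocates its per-session private data alongside the session
 * and fetches it with l2tp_session_priv().  The names below (my_pw_priv,
 * the session/peer IDs and scfg) are hypothetical.
 *
 *	struct my_pw_priv { unsigned int flags; };
 *	struct l2tp_session *session;
 *	struct my_pw_priv *priv;
 *
 *	session = l2tp_session_create(sizeof(*priv), tunnel, 100, 200, &scfg);
 *	if (session == NULL)
 *		return -ENOMEM;
 *	priv = l2tp_session_priv(session);
 */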
1929
1930 /*****************************************************************************
1931  * Init and cleanup
1932  *****************************************************************************/
1933
1934 static __net_init int l2tp_init_net(struct net *net)
1935 {
1936         struct l2tp_net *pn = net_generic(net, l2tp_net_id);
1937         int hash;
1938
1939         INIT_LIST_HEAD(&pn->l2tp_tunnel_list);
1940         spin_lock_init(&pn->l2tp_tunnel_list_lock);
1941
1942         for (hash = 0; hash < L2TP_HASH_SIZE_2; hash++)
1943                 INIT_HLIST_HEAD(&pn->l2tp_session_hlist[hash]);
1944
1945         spin_lock_init(&pn->l2tp_session_hlist_lock);
1946
1947         return 0;
1948 }
1949
1950 static __net_exit void l2tp_exit_net(struct net *net)
1951 {
1952         struct l2tp_net *pn = l2tp_pernet(net);
1953         struct l2tp_tunnel *tunnel = NULL;
1954
1955         rcu_read_lock_bh();
1956         list_for_each_entry_rcu(tunnel, &pn->l2tp_tunnel_list, list) {
1957                 (void)l2tp_tunnel_delete(tunnel);
1958         }
1959         rcu_read_unlock_bh();
1960 }
1961
1962 static struct pernet_operations l2tp_net_ops = {
1963         .init = l2tp_init_net,
1964         .exit = l2tp_exit_net,
1965         .id   = &l2tp_net_id,
1966         .size = sizeof(struct l2tp_net),
1967 };
1968
1969 static int __init l2tp_init(void)
1970 {
1971         int rc = 0;
1972
1973         rc = register_pernet_device(&l2tp_net_ops);
1974         if (rc)
1975                 goto out;
1976
1977         l2tp_wq = alloc_workqueue("l2tp", WQ_NON_REENTRANT | WQ_UNBOUND, 0);
1978         if (!l2tp_wq) {
1979                 pr_err("alloc_workqueue failed\n");
1980                 rc = -ENOMEM;
1981                 goto out;
1982         }
1983
1984         pr_info("L2TP core driver, %s\n", L2TP_DRV_VERSION);
1985
1986 out:
1987         return rc;
1988 }
1989
1990 static void __exit l2tp_exit(void)
1991 {
1992         unregister_pernet_device(&l2tp_net_ops);
1993         if (l2tp_wq) {
1994                 destroy_workqueue(l2tp_wq);
1995                 l2tp_wq = NULL;
1996         }
1997 }
1998
1999 module_init(l2tp_init);
2000 module_exit(l2tp_exit);
2001
2002 MODULE_AUTHOR("James Chapman <jchapman@katalix.com>");
2003 MODULE_DESCRIPTION("L2TP core");
2004 MODULE_LICENSE("GPL");
2005 MODULE_VERSION(L2TP_DRV_VERSION);
2006