/* net/ieee802154/6lowpan.c — 6LoWPAN (RFC 4944) adaptation layer for IEEE 802.15.4 */
1 /*
2  * Copyright 2011, Siemens AG
3  * written by Alexander Smirnov <alex.bluesman.smirnov@gmail.com>
4  */
5
6 /*
7  * Based on patches from Jon Smirl <jonsmirl@gmail.com>
8  * Copyright (c) 2011 Jon Smirl <jonsmirl@gmail.com>
9  *
10  * This program is free software; you can redistribute it and/or modify
11  * it under the terms of the GNU General Public License version 2
12  * as published by the Free Software Foundation.
13  *
14  * This program is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
17  * GNU General Public License for more details.
18  *
19  * You should have received a copy of the GNU General Public License along
20  * with this program; if not, write to the Free Software Foundation, Inc.,
21  * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
22  */
23
24 /* Jon's code is based on 6lowpan implementation for Contiki which is:
25  * Copyright (c) 2008, Swedish Institute of Computer Science.
26  * All rights reserved.
27  *
28  * Redistribution and use in source and binary forms, with or without
29  * modification, are permitted provided that the following conditions
30  * are met:
31  * 1. Redistributions of source code must retain the above copyright
32  *    notice, this list of conditions and the following disclaimer.
33  * 2. Redistributions in binary form must reproduce the above copyright
34  *    notice, this list of conditions and the following disclaimer in the
35  *    documentation and/or other materials provided with the distribution.
36  * 3. Neither the name of the Institute nor the names of its contributors
37  *    may be used to endorse or promote products derived from this software
38  *    without specific prior written permission.
39  *
40  * THIS SOFTWARE IS PROVIDED BY THE INSTITUTE AND CONTRIBUTORS ``AS IS'' AND
41  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
42  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
43  * ARE DISCLAIMED.  IN NO EVENT SHALL THE INSTITUTE OR CONTRIBUTORS BE LIABLE
44  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
45  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
46  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
47  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
48  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
49  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
50  * SUCH DAMAGE.
51  */
52
53 #include <linux/bitops.h>
54 #include <linux/if_arp.h>
55 #include <linux/module.h>
56 #include <linux/moduleparam.h>
57 #include <linux/netdevice.h>
58 #include <net/af_ieee802154.h>
59 #include <net/ieee802154.h>
60 #include <net/ieee802154_netdev.h>
61 #include <net/ipv6.h>
62
63 #include "6lowpan.h"
64
/* all registered 6lowpan virtual interfaces (struct lowpan_dev_record) */
static LIST_HEAD(lowpan_devices);
66
/* private device info */
struct lowpan_dev_info {
	struct net_device	*real_dev; /* real WPAN device ptr */
	struct mutex		dev_list_mtx; /* mutex for list ops */
	unsigned short		fragment_tag; /* tag for the next outgoing fragmented datagram */
};
73
/* links one 6lowpan virtual device into the global lowpan_devices list */
struct lowpan_dev_record {
	struct net_device *ldev; /* the 6lowpan (virtual) net_device */
	struct list_head list;
};
78
/* state for one in-progress datagram reassembly */
struct lowpan_fragment {
	struct sk_buff		*skb;		/* skb to be assembled */
	u16			length;		/* length to be assembled */
	u32			bytes_rcv;	/* bytes received */
	u16			tag;		/* current fragment tag */
	struct timer_list	timer;		/* assembling timer */
	struct list_head	list;		/* fragments list */
};

/* all reassemblies currently in progress, protected by flist_lock */
static LIST_HEAD(lowpan_fragments);
static DEFINE_SPINLOCK(flist_lock);
90
/* Fetch the 6lowpan private area embedded in @dev. */
static inline struct
lowpan_dev_info *lowpan_dev_info(const struct net_device *dev)
{
	struct lowpan_dev_info *info = netdev_priv(dev);

	return info;
}
96
97 static inline void lowpan_address_flip(u8 *src, u8 *dest)
98 {
99         int i;
100         for (i = 0; i < IEEE802154_ADDR_LEN; i++)
101                 (dest)[IEEE802154_ADDR_LEN - i - 1] = (src)[i];
102 }
103
/* Build the compressed 6LoWPAN + IEEE 802.15.4 MAC header for an outgoing
 * IPv6 packet; installed as lowpan_header_ops.create.  Non-IPv6 frames are
 * passed through untouched (returns 0).  Returns the result of
 * dev_hard_header() on the underlying real WPAN device.
 */
static int lowpan_header_create(struct sk_buff *skb,
			   struct net_device *dev,
			   unsigned short type, const void *_daddr,
			   const void *_saddr, unsigned int len)
{
	const u8 *saddr = _saddr;
	const u8 *daddr = _daddr;
	struct ieee802154_addr sa, da;

	/* TODO:
	 * if this package isn't ipv6 one, where should it be routed?
	 */
	if (type != ETH_P_IPV6)
		return 0;

	/* fall back to the device's own hardware address as source */
	if (!saddr)
		saddr = dev->dev_addr;

	raw_dump_inline(__func__, "saddr", (unsigned char *)saddr, 8);
	raw_dump_inline(__func__, "daddr", (unsigned char *)daddr, 8);

	/* IPHC-compress the IPv6 header in place */
	lowpan_header_compress(skb, dev, type, daddr, saddr, len);

	/*
	 * NOTE1: I'm still unsure about the fact that compression and WPAN
	 * header are created here and not later in the xmit. So wait for
	 * an opinion of net maintainers.
	 */
	/*
	 * NOTE2: to be absolutely correct, we must derive PANid information
	 * from MAC subif of the 'dev' and 'real_dev' network devices, but
	 * this isn't implemented in mainline yet, so currently we assign 0xff
	 */
	mac_cb(skb)->flags = IEEE802154_FC_TYPE_DATA;
	mac_cb(skb)->seq = ieee802154_mlme_ops(dev)->get_dsn(dev);

	/* prepare wpan address data */
	sa.addr_type = IEEE802154_ADDR_LONG;
	sa.pan_id = ieee802154_mlme_ops(dev)->get_pan_id(dev);

	memcpy(&(sa.hwaddr), saddr, 8);
	/* intra-PAN communications */
	da.pan_id = ieee802154_mlme_ops(dev)->get_pan_id(dev);

	/*
	 * if the destination address is the broadcast address, use the
	 * corresponding short address
	 */
	if (lowpan_is_addr_broadcast(daddr)) {
		da.addr_type = IEEE802154_ADDR_SHORT;
		da.short_addr = IEEE802154_ADDR_BROADCAST;
	} else {
		da.addr_type = IEEE802154_ADDR_LONG;
		memcpy(&(da.hwaddr), daddr, IEEE802154_ADDR_LEN);

		/* request acknowledgment */
		mac_cb(skb)->flags |= MAC_CB_FLAG_ACKREQ;
	}

	return dev_hard_header(skb, lowpan_dev_info(dev)->real_dev,
			type, (void *)&da, (void *)&sa, skb->len);
}
166
/* Deliver @skb to every 6lowpan interface stacked on the WPAN device it
 * arrived on (skb->dev).  Each matching interface receives its own copy
 * via netif_rx(); @skb itself is NOT consumed (callers free it).
 * Returns the last netif_rx() status, or -ENOMEM if a copy failed.
 * (@dev is currently unused.)
 */
static int lowpan_give_skb_to_devices(struct sk_buff *skb,
					struct net_device *dev)
{
	struct lowpan_dev_record *entry;
	struct sk_buff *skb_cp;
	int stat = NET_RX_SUCCESS;

	rcu_read_lock();
	list_for_each_entry_rcu(entry, &lowpan_devices, list)
		if (lowpan_dev_info(entry->ldev)->real_dev == skb->dev) {
			skb_cp = skb_copy(skb, GFP_ATOMIC);
			if (!skb_cp) {
				stat = -ENOMEM;
				break;
			}

			skb_cp->dev = entry->ldev;
			stat = netif_rx(skb_cp);
		}
	rcu_read_unlock();

	return stat;
}
190
/* Reassembly timeout handler: discard an incomplete fragment chain.
 * NOTE(review): this runs in timer (softirq) context and unlinks the
 * entry from lowpan_fragments WITHOUT taking flist_lock, racing with
 * process_data() and module cleanup.  Taking the lock here would in
 * turn deadlock against del_timer_sync() calls made while flist_lock
 * is held — the locking scheme needs a coordinated fix; confirm.
 */
static void lowpan_fragment_timer_expired(unsigned long entry_addr)
{
	struct lowpan_fragment *entry = (struct lowpan_fragment *)entry_addr;

	pr_debug("timer expired for frame with tag %d\n", entry->tag);

	list_del(&entry->list);
	dev_kfree_skb(entry->skb);
	kfree(entry);
}
201
/* Allocate and initialise a reassembly context for a datagram of total
 * length @len with fragment tag @tag, arm its timeout and queue it on
 * lowpan_fragments.  Called from process_data() with flist_lock held,
 * hence the GFP_ATOMIC allocations.
 * Returns the new context, or NULL on allocation failure.
 */
static struct lowpan_fragment *
lowpan_alloc_new_frame(struct sk_buff *skb, u16 len, u16 tag)
{
	struct lowpan_fragment *frame;

	frame = kzalloc(sizeof(struct lowpan_fragment),
			GFP_ATOMIC);
	if (!frame)
		goto frame_err;

	INIT_LIST_HEAD(&frame->list);

	frame->length = len;
	frame->tag = tag;

	/* allocate buffer for frame assembling */
	frame->skb = netdev_alloc_skb_ip_align(skb->dev, frame->length +
					       sizeof(struct ipv6hdr));

	if (!frame->skb)
		goto skb_err;

	frame->skb->priority = skb->priority;

	/* reserve headroom for uncompressed ipv6 header */
	skb_reserve(frame->skb, sizeof(struct ipv6hdr));
	skb_put(frame->skb, frame->length);

	/* copy the first control block to keep a
	 * trace of the link-layer addresses in case
	 * of a link-local compressed address
	 */
	memcpy(frame->skb->cb, skb->cb, sizeof(skb->cb));

	init_timer(&frame->timer);
	/* time out is the same as for ipv6 - 60 sec */
	frame->timer.expires = jiffies + LOWPAN_FRAG_TIMEOUT;
	frame->timer.data = (unsigned long)frame;
	frame->timer.function = lowpan_fragment_timer_expired;

	add_timer(&frame->timer);

	list_add_tail(&frame->list, &lowpan_fragments);

	return frame;

skb_err:
	kfree(frame);
frame_err:
	return NULL;
}
253
254 static int process_data(struct sk_buff *skb)
255 {
256         u8 iphc0, iphc1;
257         const struct ieee802154_addr *_saddr, *_daddr;
258
259         raw_dump_table(__func__, "raw skb data dump", skb->data, skb->len);
260         /* at least two bytes will be used for the encoding */
261         if (skb->len < 2)
262                 goto drop;
263
264         if (lowpan_fetch_skb_u8(skb, &iphc0))
265                 goto drop;
266
267         /* fragments assembling */
268         switch (iphc0 & LOWPAN_DISPATCH_MASK) {
269         case LOWPAN_DISPATCH_FRAG1:
270         case LOWPAN_DISPATCH_FRAGN:
271         {
272                 struct lowpan_fragment *frame;
273                 /* slen stores the rightmost 8 bits of the 11 bits length */
274                 u8 slen, offset = 0;
275                 u16 len, tag;
276                 bool found = false;
277
278                 if (lowpan_fetch_skb_u8(skb, &slen) || /* frame length */
279                     lowpan_fetch_skb_u16(skb, &tag))  /* fragment tag */
280                         goto drop;
281
282                 /* adds the 3 MSB to the 8 LSB to retrieve the 11 bits length */
283                 len = ((iphc0 & 7) << 8) | slen;
284
285                 if ((iphc0 & LOWPAN_DISPATCH_MASK) == LOWPAN_DISPATCH_FRAG1) {
286                         pr_debug("%s received a FRAG1 packet (tag: %d, "
287                                  "size of the entire IP packet: %d)",
288                                  __func__, tag, len);
289                 } else { /* FRAGN */
290                         if (lowpan_fetch_skb_u8(skb, &offset))
291                                 goto unlock_and_drop;
292                         pr_debug("%s received a FRAGN packet (tag: %d, "
293                                  "size of the entire IP packet: %d, "
294                                  "offset: %d)", __func__, tag, len, offset * 8);
295                 }
296
297                 /*
298                  * check if frame assembling with the same tag is
299                  * already in progress
300                  */
301                 spin_lock_bh(&flist_lock);
302
303                 list_for_each_entry(frame, &lowpan_fragments, list)
304                         if (frame->tag == tag) {
305                                 found = true;
306                                 break;
307                         }
308
309                 /* alloc new frame structure */
310                 if (!found) {
311                         pr_debug("%s first fragment received for tag %d, "
312                                  "begin packet reassembly", __func__, tag);
313                         frame = lowpan_alloc_new_frame(skb, len, tag);
314                         if (!frame)
315                                 goto unlock_and_drop;
316                 }
317
318                 /* if payload fits buffer, copy it */
319                 if (likely((offset * 8 + skb->len) <= frame->length))
320                         skb_copy_to_linear_data_offset(frame->skb, offset * 8,
321                                                         skb->data, skb->len);
322                 else
323                         goto unlock_and_drop;
324
325                 frame->bytes_rcv += skb->len;
326
327                 /* frame assembling complete */
328                 if ((frame->bytes_rcv == frame->length) &&
329                      frame->timer.expires > jiffies) {
330                         /* if timer haven't expired - first of all delete it */
331                         del_timer_sync(&frame->timer);
332                         list_del(&frame->list);
333                         spin_unlock_bh(&flist_lock);
334
335                         pr_debug("%s successfully reassembled fragment "
336                                  "(tag %d)", __func__, tag);
337
338                         dev_kfree_skb(skb);
339                         skb = frame->skb;
340                         kfree(frame);
341
342                         if (lowpan_fetch_skb_u8(skb, &iphc0))
343                                 goto drop;
344
345                         break;
346                 }
347                 spin_unlock_bh(&flist_lock);
348
349                 return kfree_skb(skb), 0;
350         }
351         default:
352                 break;
353         }
354
355         if (lowpan_fetch_skb_u8(skb, &iphc1))
356                 goto drop;
357
358         _saddr = &mac_cb(skb)->sa;
359         _daddr = &mac_cb(skb)->da;
360
361         return lowpan_process_data(skb, skb->dev, (u8 *)_saddr->hwaddr,
362                                 _saddr->addr_type, IEEE802154_ADDR_LEN,
363                                 (u8 *)_daddr->hwaddr, _daddr->addr_type,
364                                 IEEE802154_ADDR_LEN, iphc0, iphc1,
365                                 lowpan_give_skb_to_devices);
366
367 unlock_and_drop:
368         spin_unlock_bh(&flist_lock);
369 drop:
370         kfree_skb(skb);
371         return -EINVAL;
372 }
373
374 static int lowpan_set_address(struct net_device *dev, void *p)
375 {
376         struct sockaddr *sa = p;
377
378         if (netif_running(dev))
379                 return -EBUSY;
380
381         /* TODO: validate addr */
382         memcpy(dev->dev_addr, sa->sa_data, dev->addr_len);
383
384         return 0;
385 }
386
/* Build and transmit a single fragment: @mlen bytes of MAC header copied
 * from @skb, the fragment header from @head (FRAG1 or FRAGN sized per
 * @type), then @plen payload bytes taken from @skb's network header at
 * byte @offset.  Returns dev_queue_xmit() status or -ENOMEM.
 */
static int
lowpan_fragment_xmit(struct sk_buff *skb, u8 *head,
			int mlen, int plen, int offset, int type)
{
	struct sk_buff *frag;
	int hlen;

	hlen = (type == LOWPAN_DISPATCH_FRAG1) ?
			LOWPAN_FRAG1_HEAD_SIZE : LOWPAN_FRAGN_HEAD_SIZE;

	raw_dump_inline(__func__, "6lowpan fragment header", head, hlen);

	/* room for MAC header, fragment header, payload and MAC footer */
	frag = netdev_alloc_skb(skb->dev,
				hlen + mlen + plen + IEEE802154_MFR_SIZE);
	if (!frag)
		return -ENOMEM;

	frag->priority = skb->priority;

	/* copy header, MFR and payload */
	skb_put(frag, mlen);
	skb_copy_to_linear_data(frag, skb_mac_header(skb), mlen);

	skb_put(frag, hlen);
	skb_copy_to_linear_data_offset(frag, mlen, head, hlen);

	skb_put(frag, plen);
	skb_copy_to_linear_data_offset(frag, mlen + hlen,
				       skb_network_header(skb) + offset, plen);

	raw_dump_table(__func__, " raw fragment dump", frag->data, frag->len);

	return dev_queue_xmit(frag);
}
421
/* Split an oversized datagram into one FRAG1 plus a series of FRAGN
 * fragments (headers built in head[]) and transmit each one.  The
 * original @skb is left untouched; every fragment is a fresh copy.
 * Returns 0 on success or the first failing xmit status.
 * NOTE(review): fragment_tag++ is not serialised here — concurrent
 * transmissions could reuse a tag; confirm xmit-path serialisation.
 */
static int
lowpan_skb_fragmentation(struct sk_buff *skb, struct net_device *dev)
{
	int  err, header_length, payload_length, tag, offset = 0;
	u8 head[5];

	header_length = skb->mac_len;
	payload_length = skb->len - header_length;
	tag = lowpan_dev_info(dev)->fragment_tag++;

	/* first fragment header: dispatch + 11-bit datagram size + 16-bit tag */
	head[0] = LOWPAN_DISPATCH_FRAG1 | ((payload_length >> 8) & 0x7);
	head[1] = payload_length & 0xff;
	head[2] = tag >> 8;
	head[3] = tag & 0xff;

	err = lowpan_fragment_xmit(skb, head, header_length, LOWPAN_FRAG_SIZE,
				   0, LOWPAN_DISPATCH_FRAG1);

	if (err) {
		pr_debug("%s unable to send FRAG1 packet (tag: %d)",
			 __func__, tag);
		goto exit;
	}

	offset = LOWPAN_FRAG_SIZE;

	/* next fragment header */
	head[0] &= ~LOWPAN_DISPATCH_FRAG1;
	head[0] |= LOWPAN_DISPATCH_FRAGN;

	while (payload_length - offset > 0) {
		int len = LOWPAN_FRAG_SIZE;

		/* FRAGN carries the offset in units of 8 bytes */
		head[4] = offset / 8;

		if (payload_length - offset < len)
			len = payload_length - offset;

		err = lowpan_fragment_xmit(skb, head, header_length,
					   len, offset, LOWPAN_DISPATCH_FRAGN);
		if (err) {
			pr_debug("%s unable to send a subsequent FRAGN packet "
				 "(tag: %d, offset: %d", __func__, tag, offset);
			goto exit;
		}

		offset += len;
	}

exit:
	return err;
}
475
/* ndo_start_xmit for the 6lowpan virtual device: queue the frame on the
 * underlying WPAN device when it fits the 802.15.4 MTU, otherwise
 * fragment it.  On the fragmentation and error paths the original skb
 * is freed here (fragments are independent copies); on the direct path
 * dev_queue_xmit() consumes it.
 */
static netdev_tx_t lowpan_xmit(struct sk_buff *skb, struct net_device *dev)
{
	int err = -1;

	pr_debug("package xmit\n");

	skb->dev = lowpan_dev_info(dev)->real_dev;
	if (skb->dev == NULL) {
		pr_debug("ERROR: no real wpan device found\n");
		goto error;
	}

	/* Send directly if less than the MTU minus the 2 checksum bytes. */
	if (skb->len <= IEEE802154_MTU - IEEE802154_MFR_SIZE) {
		err = dev_queue_xmit(skb);
		goto out;
	}

	pr_debug("frame is too big, fragmentation is needed\n");
	err = lowpan_skb_fragmentation(skb, dev);
error:
	dev_kfree_skb(skb);
out:
	if (err)
		pr_debug("ERROR: xmit failed\n");

	/* non-negative dev_queue_xmit() codes are passed through unchanged */
	return (err < 0) ? NET_XMIT_DROP : err;
}
504
505 static struct wpan_phy *lowpan_get_phy(const struct net_device *dev)
506 {
507         struct net_device *real_dev = lowpan_dev_info(dev)->real_dev;
508         return ieee802154_mlme_ops(real_dev)->get_phy(real_dev);
509 }
510
511 static u16 lowpan_get_pan_id(const struct net_device *dev)
512 {
513         struct net_device *real_dev = lowpan_dev_info(dev)->real_dev;
514         return ieee802154_mlme_ops(real_dev)->get_pan_id(real_dev);
515 }
516
517 static u16 lowpan_get_short_addr(const struct net_device *dev)
518 {
519         struct net_device *real_dev = lowpan_dev_info(dev)->real_dev;
520         return ieee802154_mlme_ops(real_dev)->get_short_addr(real_dev);
521 }
522
523 static u8 lowpan_get_dsn(const struct net_device *dev)
524 {
525         struct net_device *real_dev = lowpan_dev_info(dev)->real_dev;
526         return ieee802154_mlme_ops(real_dev)->get_dsn(real_dev);
527 }
528
/* only header creation is hooked; RX parsing happens in lowpan_rcv() */
static struct header_ops lowpan_header_ops = {
	.create = lowpan_header_create,
};
532
/* net_device operations of the 6lowpan virtual interface */
static const struct net_device_ops lowpan_netdev_ops = {
	.ndo_start_xmit		= lowpan_xmit,
	.ndo_set_mac_address	= lowpan_set_address,
};
537
/* MLME ops: every query is forwarded to the underlying WPAN device */
static struct ieee802154_mlme_ops lowpan_mlme = {
	.get_pan_id = lowpan_get_pan_id,
	.get_phy = lowpan_get_phy,
	.get_short_addr = lowpan_get_short_addr,
	.get_dsn = lowpan_get_dsn,
};
544
/* rtnl_link_ops->setup: initialise the 6lowpan net_device before
 * registration (addresses, MTU, ops tables, destructor).
 */
static void lowpan_setup(struct net_device *dev)
{
	dev->addr_len		= IEEE802154_ADDR_LEN;
	memset(dev->broadcast, 0xff, IEEE802154_ADDR_LEN);
	dev->type		= ARPHRD_IEEE802154;
	/* Frame Control + Sequence Number + Address fields + Security Header */
	dev->hard_header_len	= 2 + 1 + 20 + 14;
	dev->needed_tailroom	= 2; /* FCS */
	dev->mtu		= 1281;
	dev->tx_queue_len	= 0;
	dev->flags		= IFF_BROADCAST | IFF_MULTICAST;
	dev->watchdog_timeo	= 0;

	dev->netdev_ops		= &lowpan_netdev_ops;
	dev->header_ops		= &lowpan_header_ops;
	dev->ml_priv		= &lowpan_mlme;
	dev->destructor		= free_netdev;	/* freed automatically on unregister */
}
563
564 static int lowpan_validate(struct nlattr *tb[], struct nlattr *data[])
565 {
566         if (tb[IFLA_ADDRESS]) {
567                 if (nla_len(tb[IFLA_ADDRESS]) != IEEE802154_ADDR_LEN)
568                         return -EINVAL;
569         }
570         return 0;
571 }
572
573 static int lowpan_rcv(struct sk_buff *skb, struct net_device *dev,
574         struct packet_type *pt, struct net_device *orig_dev)
575 {
576         struct sk_buff *local_skb;
577
578         if (!netif_running(dev))
579                 goto drop;
580
581         if (dev->type != ARPHRD_IEEE802154)
582                 goto drop;
583
584         /* check that it's our buffer */
585         if (skb->data[0] == LOWPAN_DISPATCH_IPV6) {
586                 /* Copy the packet so that the IPv6 header is
587                  * properly aligned.
588                  */
589                 local_skb = skb_copy_expand(skb, NET_SKB_PAD - 1,
590                                             skb_tailroom(skb), GFP_ATOMIC);
591                 if (!local_skb)
592                         goto drop;
593
594                 local_skb->protocol = htons(ETH_P_IPV6);
595                 local_skb->pkt_type = PACKET_HOST;
596
597                 /* Pull off the 1-byte of 6lowpan header. */
598                 skb_pull(local_skb, 1);
599
600                 lowpan_give_skb_to_devices(local_skb, NULL);
601
602                 kfree_skb(local_skb);
603                 kfree_skb(skb);
604         } else {
605                 switch (skb->data[0] & 0xe0) {
606                 case LOWPAN_DISPATCH_IPHC:      /* ipv6 datagram */
607                 case LOWPAN_DISPATCH_FRAG1:     /* first fragment header */
608                 case LOWPAN_DISPATCH_FRAGN:     /* next fragments headers */
609                         local_skb = skb_clone(skb, GFP_ATOMIC);
610                         if (!local_skb)
611                                 goto drop;
612                         process_data(local_skb);
613
614                         kfree_skb(skb);
615                         break;
616                 default:
617                         break;
618                 }
619         }
620
621         return NET_RX_SUCCESS;
622
623 drop:
624         kfree_skb(skb);
625         return NET_RX_DROP;
626 }
627
628 static int lowpan_newlink(struct net *src_net, struct net_device *dev,
629                           struct nlattr *tb[], struct nlattr *data[])
630 {
631         struct net_device *real_dev;
632         struct lowpan_dev_record *entry;
633
634         pr_debug("adding new link\n");
635
636         if (!tb[IFLA_LINK])
637                 return -EINVAL;
638         /* find and hold real wpan device */
639         real_dev = dev_get_by_index(src_net, nla_get_u32(tb[IFLA_LINK]));
640         if (!real_dev)
641                 return -ENODEV;
642         if (real_dev->type != ARPHRD_IEEE802154) {
643                 dev_put(real_dev);
644                 return -EINVAL;
645         }
646
647         lowpan_dev_info(dev)->real_dev = real_dev;
648         lowpan_dev_info(dev)->fragment_tag = 0;
649         mutex_init(&lowpan_dev_info(dev)->dev_list_mtx);
650
651         entry = kzalloc(sizeof(struct lowpan_dev_record), GFP_KERNEL);
652         if (!entry) {
653                 dev_put(real_dev);
654                 lowpan_dev_info(dev)->real_dev = NULL;
655                 return -ENOMEM;
656         }
657
658         entry->ldev = dev;
659
660         /* Set the lowpan harware address to the wpan hardware address. */
661         memcpy(dev->dev_addr, real_dev->dev_addr, IEEE802154_ADDR_LEN);
662
663         mutex_lock(&lowpan_dev_info(dev)->dev_list_mtx);
664         INIT_LIST_HEAD(&entry->list);
665         list_add_tail(&entry->list, &lowpan_devices);
666         mutex_unlock(&lowpan_dev_info(dev)->dev_list_mtx);
667
668         register_netdevice(dev);
669
670         return 0;
671 }
672
/* rtnl_link_ops->dellink: detach the 6lowpan interface — remove its
 * record(s) from lowpan_devices, queue the netdev for unregistration
 * and drop the reference on the real WPAN device taken in newlink.
 * Runs under RTNL (asserted below).
 */
static void lowpan_dellink(struct net_device *dev, struct list_head *head)
{
	struct lowpan_dev_info *lowpan_dev = lowpan_dev_info(dev);
	struct net_device *real_dev = lowpan_dev->real_dev;
	struct lowpan_dev_record *entry, *tmp;

	ASSERT_RTNL();

	mutex_lock(&lowpan_dev_info(dev)->dev_list_mtx);
	list_for_each_entry_safe(entry, tmp, &lowpan_devices, list) {
		if (entry->ldev == dev) {
			list_del(&entry->list);
			kfree(entry);
		}
	}
	mutex_unlock(&lowpan_dev_info(dev)->dev_list_mtx);

	mutex_destroy(&lowpan_dev_info(dev)->dev_list_mtx);

	unregister_netdevice_queue(dev, head);

	dev_put(real_dev);
}
696
/* rtnl_link interface: "ip link add link <wpan> ... type lowpan" */
static struct rtnl_link_ops lowpan_link_ops __read_mostly = {
	.kind		= "lowpan",
	.priv_size	= sizeof(struct lowpan_dev_info),
	.setup		= lowpan_setup,
	.newlink	= lowpan_newlink,
	.dellink	= lowpan_dellink,
	.validate	= lowpan_validate,
};
705
/* Register the "lowpan" rtnl link type.  Returns 0 or a negative errno. */
static inline int __init lowpan_netlink_init(void)
{
	return rtnl_link_register(&lowpan_link_ops);
}
710
/* Unregister the "lowpan" rtnl link type (tears down remaining links). */
static inline void lowpan_netlink_fini(void)
{
	rtnl_link_unregister(&lowpan_link_ops);
}
715
/* Netdev notifier: when an underlying WPAN device is unregistered,
 * tear down every 6lowpan interface stacked on it.
 * NOTE(review): lowpan_devices is walked without dev_list_mtx here —
 * presumably safe because netdev notifiers run under RTNL; confirm.
 */
static int lowpan_device_event(struct notifier_block *unused,
			       unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	LIST_HEAD(del_list);
	struct lowpan_dev_record *entry, *tmp;

	if (dev->type != ARPHRD_IEEE802154)
		goto out;

	if (event == NETDEV_UNREGISTER) {
		list_for_each_entry_safe(entry, tmp, &lowpan_devices, list) {
			if (lowpan_dev_info(entry->ldev)->real_dev == dev)
				lowpan_dellink(entry->ldev, &del_list);
		}

		/* batch-unregister everything queued by lowpan_dellink() */
		unregister_netdevice_many(&del_list);
	}

out:
	return NOTIFY_DONE;
}
738
/* notifier watching for WPAN device unregistration */
static struct notifier_block lowpan_dev_notifier = {
	.notifier_call = lowpan_device_event,
};
742
/* receive hook for all IEEE 802.15.4 frames */
static struct packet_type lowpan_packet_type = {
	.type = __constant_htons(ETH_P_IEEE802154),
	.func = lowpan_rcv,
};
747
/* Module init: register the rtnl link type, the 802.15.4 packet
 * handler and the netdev notifier; unwinds on notifier failure.
 */
static int __init lowpan_init_module(void)
{
	int err = 0;

	err = lowpan_netlink_init();
	if (err < 0)
		goto out;

	dev_add_pack(&lowpan_packet_type);

	err = register_netdevice_notifier(&lowpan_dev_notifier);
	if (err < 0) {
		/* roll back the earlier registrations */
		dev_remove_pack(&lowpan_packet_type);
		lowpan_netlink_fini();
	}
out:
	return err;
}
766
/* Module exit: unregister everything, then free any reassembly state
 * still pending.
 * NOTE(review): del_timer_sync() is called while holding flist_lock,
 * and the timer callback unlinks entries without that lock — see
 * lowpan_fragment_timer_expired(); the interaction looks racy and
 * should be confirmed/reworked.
 */
static void __exit lowpan_cleanup_module(void)
{
	struct lowpan_fragment *frame, *tframe;

	lowpan_netlink_fini();

	dev_remove_pack(&lowpan_packet_type);

	unregister_netdevice_notifier(&lowpan_dev_notifier);

	/* Now 6lowpan packet_type is removed, so no new fragments are
	 * expected on RX, therefore that's the time to clean incomplete
	 * fragments.
	 */
	spin_lock_bh(&flist_lock);
	list_for_each_entry_safe(frame, tframe, &lowpan_fragments, list) {
		del_timer_sync(&frame->timer);
		list_del(&frame->list);
		dev_kfree_skb(frame->skb);
		kfree(frame);
	}
	spin_unlock_bh(&flist_lock);
}
790
/* module plumbing: entry/exit points, license, "lowpan" rtnl alias */
module_init(lowpan_init_module);
module_exit(lowpan_cleanup_module);
MODULE_LICENSE("GPL");
MODULE_ALIAS_RTNL_LINK("lowpan");