Staging: batman-adv: use rcu callbacks when freeing batman_if
[firefly-linux-kernel-4.4.55.git] / drivers / staging / batman-adv / hard-interface.c
1 /*
2  * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
3  *
4  * Marek Lindner, Simon Wunderlich
5  *
6  * This program is free software; you can redistribute it and/or
7  * modify it under the terms of version 2 of the GNU General Public
8  * License as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13  * General Public License for more details.
14  *
15  * You should have received a copy of the GNU General Public License
16  * along with this program; if not, write to the Free Software
17  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18  * 02110-1301, USA
19  *
20  */
21
22 #include "main.h"
23 #include "hard-interface.h"
24 #include "soft-interface.h"
25 #include "send.h"
26 #include "translation-table.h"
27 #include "routing.h"
28 #include "bat_sysfs.h"
29 #include "originator.h"
30 #include "hash.h"
31
32 #include <linux/if_arp.h>
33
/* min of two values, evaluating each argument exactly once (the naive
 * ((x) < (y) ? (x) : (y)) form evaluates the smaller argument twice,
 * which is a hazard if a caller ever passes an expression with side
 * effects); uses a GNU statement expression, as kernel code does */
#define MIN(x, y) ({			\
	__typeof__(x) _min_x = (x);	\
	__typeof__(y) _min_y = (y);	\
	_min_x < _min_y ? _min_x : _min_y; })

/* serializes writers that add/remove entries on if_list; readers walk the
 * list under rcu_read_lock() instead, so this lock does NOT protect the
 * content of the entries themselves */
static DEFINE_SPINLOCK(if_list_lock);
38
39 static void hardif_free_rcu(struct rcu_head *rcu)
40 {
41         struct batman_if *batman_if;
42
43         batman_if = container_of(rcu, struct batman_if, rcu);
44         dev_put(batman_if->net_dev);
45         kref_put(&batman_if->refcount, hardif_free_ref);
46 }
47
48 struct batman_if *get_batman_if_by_netdev(struct net_device *net_dev)
49 {
50         struct batman_if *batman_if;
51
52         rcu_read_lock();
53         list_for_each_entry_rcu(batman_if, &if_list, list) {
54                 if (batman_if->net_dev == net_dev)
55                         goto out;
56         }
57
58         batman_if = NULL;
59
60 out:
61         if (batman_if)
62                 kref_get(&batman_if->refcount);
63
64         rcu_read_unlock();
65         return batman_if;
66 }
67
68 static int is_valid_iface(struct net_device *net_dev)
69 {
70         if (net_dev->flags & IFF_LOOPBACK)
71                 return 0;
72
73         if (net_dev->type != ARPHRD_ETHER)
74                 return 0;
75
76         if (net_dev->addr_len != ETH_ALEN)
77                 return 0;
78
79         /* no batman over batman */
80 #ifdef HAVE_NET_DEVICE_OPS
81         if (net_dev->netdev_ops->ndo_start_xmit == interface_tx)
82                 return 0;
83 #else
84         if (net_dev->hard_start_xmit == interface_tx)
85                 return 0;
86 #endif
87
88         /* Device is being bridged */
89         /* if (net_dev->priv_flags & IFF_BRIDGE_PORT)
90                 return 0; */
91
92         return 1;
93 }
94
95 static struct batman_if *get_active_batman_if(struct net_device *soft_iface)
96 {
97         struct batman_if *batman_if;
98
99         rcu_read_lock();
100         list_for_each_entry_rcu(batman_if, &if_list, list) {
101                 if (batman_if->soft_iface != soft_iface)
102                         continue;
103
104                 if (batman_if->if_status == IF_ACTIVE)
105                         goto out;
106         }
107
108         batman_if = NULL;
109
110 out:
111         if (batman_if)
112                 kref_get(&batman_if->refcount);
113
114         rcu_read_unlock();
115         return batman_if;
116 }
117
118 static void update_primary_addr(struct bat_priv *bat_priv)
119 {
120         struct vis_packet *vis_packet;
121
122         vis_packet = (struct vis_packet *)
123                                 bat_priv->my_vis_info->skb_packet->data;
124         memcpy(vis_packet->vis_orig,
125                bat_priv->primary_if->net_dev->dev_addr, ETH_ALEN);
126         memcpy(vis_packet->sender_orig,
127                bat_priv->primary_if->net_dev->dev_addr, ETH_ALEN);
128 }
129
130 static void set_primary_if(struct bat_priv *bat_priv,
131                            struct batman_if *batman_if)
132 {
133         struct batman_packet *batman_packet;
134         struct batman_if *old_if;
135
136         if (batman_if)
137                 kref_get(&batman_if->refcount);
138
139         old_if = bat_priv->primary_if;
140         bat_priv->primary_if = batman_if;
141
142         if (old_if)
143                 kref_put(&old_if->refcount, hardif_free_ref);
144
145         if (!bat_priv->primary_if)
146                 return;
147
148         batman_packet = (struct batman_packet *)(batman_if->packet_buff);
149         batman_packet->flags = PRIMARIES_FIRST_HOP;
150         batman_packet->ttl = TTL;
151
152         update_primary_addr(bat_priv);
153
154         /***
155          * hacky trick to make sure that we send the HNA information via
156          * our new primary interface
157          */
158         atomic_set(&bat_priv->hna_local_changed, 1);
159 }
160
161 static bool hardif_is_iface_up(struct batman_if *batman_if)
162 {
163         if (batman_if->net_dev->flags & IFF_UP)
164                 return true;
165
166         return false;
167 }
168
169 static void update_mac_addresses(struct batman_if *batman_if)
170 {
171         memcpy(((struct batman_packet *)(batman_if->packet_buff))->orig,
172                batman_if->net_dev->dev_addr, ETH_ALEN);
173         memcpy(((struct batman_packet *)(batman_if->packet_buff))->prev_sender,
174                batman_if->net_dev->dev_addr, ETH_ALEN);
175 }
176
177 static void check_known_mac_addr(struct net_device *net_dev)
178 {
179         struct batman_if *batman_if;
180
181         rcu_read_lock();
182         list_for_each_entry_rcu(batman_if, &if_list, list) {
183                 if ((batman_if->if_status != IF_ACTIVE) &&
184                     (batman_if->if_status != IF_TO_BE_ACTIVATED))
185                         continue;
186
187                 if (batman_if->net_dev == net_dev)
188                         continue;
189
190                 if (!compare_orig(batman_if->net_dev->dev_addr,
191                                   net_dev->dev_addr))
192                         continue;
193
194                 pr_warning("The newly added mac address (%pM) already exists "
195                            "on: %s\n", net_dev->dev_addr,
196                            batman_if->net_dev->name);
197                 pr_warning("It is strongly recommended to keep mac addresses "
198                            "unique to avoid problems!\n");
199         }
200         rcu_read_unlock();
201 }
202
203 int hardif_min_mtu(struct net_device *soft_iface)
204 {
205         struct bat_priv *bat_priv = netdev_priv(soft_iface);
206         struct batman_if *batman_if;
207         /* allow big frames if all devices are capable to do so
208          * (have MTU > 1500 + BAT_HEADER_LEN) */
209         int min_mtu = ETH_DATA_LEN;
210
211         if (atomic_read(&bat_priv->frag_enabled))
212                 goto out;
213
214         rcu_read_lock();
215         list_for_each_entry_rcu(batman_if, &if_list, list) {
216                 if ((batman_if->if_status != IF_ACTIVE) &&
217                     (batman_if->if_status != IF_TO_BE_ACTIVATED))
218                         continue;
219
220                 if (batman_if->soft_iface != soft_iface)
221                         continue;
222
223                 min_mtu = MIN(batman_if->net_dev->mtu - BAT_HEADER_LEN,
224                               min_mtu);
225         }
226         rcu_read_unlock();
227 out:
228         return min_mtu;
229 }
230
231 /* adjusts the MTU if a new interface with a smaller MTU appeared. */
232 void update_min_mtu(struct net_device *soft_iface)
233 {
234         int min_mtu;
235
236         min_mtu = hardif_min_mtu(soft_iface);
237         if (soft_iface->mtu != min_mtu)
238                 soft_iface->mtu = min_mtu;
239 }
240
241 static void hardif_activate_interface(struct batman_if *batman_if)
242 {
243         struct bat_priv *bat_priv;
244
245         if (batman_if->if_status != IF_INACTIVE)
246                 return;
247
248         bat_priv = netdev_priv(batman_if->soft_iface);
249
250         update_mac_addresses(batman_if);
251         batman_if->if_status = IF_TO_BE_ACTIVATED;
252
253         /**
254          * the first active interface becomes our primary interface or
255          * the next active interface after the old primay interface was removed
256          */
257         if (!bat_priv->primary_if)
258                 set_primary_if(bat_priv, batman_if);
259
260         bat_info(batman_if->soft_iface, "Interface activated: %s\n",
261                  batman_if->net_dev->name);
262
263         update_min_mtu(batman_if->soft_iface);
264         return;
265 }
266
267 static void hardif_deactivate_interface(struct batman_if *batman_if)
268 {
269         if ((batman_if->if_status != IF_ACTIVE) &&
270            (batman_if->if_status != IF_TO_BE_ACTIVATED))
271                 return;
272
273         batman_if->if_status = IF_INACTIVE;
274
275         bat_info(batman_if->soft_iface, "Interface deactivated: %s\n",
276                  batman_if->net_dev->name);
277
278         update_min_mtu(batman_if->soft_iface);
279 }
280
281 int hardif_enable_interface(struct batman_if *batman_if, char *iface_name)
282 {
283         struct bat_priv *bat_priv;
284         struct batman_packet *batman_packet;
285
286         if (batman_if->if_status != IF_NOT_IN_USE)
287                 goto out;
288
289         batman_if->soft_iface = dev_get_by_name(&init_net, iface_name);
290
291         if (!batman_if->soft_iface) {
292                 batman_if->soft_iface = softif_create(iface_name);
293
294                 if (!batman_if->soft_iface)
295                         goto err;
296
297                 /* dev_get_by_name() increases the reference counter for us */
298                 dev_hold(batman_if->soft_iface);
299         }
300
301         bat_priv = netdev_priv(batman_if->soft_iface);
302         batman_if->packet_len = BAT_PACKET_LEN;
303         batman_if->packet_buff = kmalloc(batman_if->packet_len, GFP_ATOMIC);
304
305         if (!batman_if->packet_buff) {
306                 bat_err(batman_if->soft_iface, "Can't add interface packet "
307                         "(%s): out of memory\n", batman_if->net_dev->name);
308                 goto err;
309         }
310
311         batman_packet = (struct batman_packet *)(batman_if->packet_buff);
312         batman_packet->packet_type = BAT_PACKET;
313         batman_packet->version = COMPAT_VERSION;
314         batman_packet->flags = 0;
315         batman_packet->ttl = 2;
316         batman_packet->tq = TQ_MAX_VALUE;
317         batman_packet->num_hna = 0;
318
319         batman_if->if_num = bat_priv->num_ifaces;
320         bat_priv->num_ifaces++;
321         batman_if->if_status = IF_INACTIVE;
322         orig_hash_add_if(batman_if, bat_priv->num_ifaces);
323
324         batman_if->batman_adv_ptype.type = __constant_htons(ETH_P_BATMAN);
325         batman_if->batman_adv_ptype.func = batman_skb_recv;
326         batman_if->batman_adv_ptype.dev = batman_if->net_dev;
327         kref_get(&batman_if->refcount);
328         dev_add_pack(&batman_if->batman_adv_ptype);
329
330         atomic_set(&batman_if->seqno, 1);
331         atomic_set(&batman_if->frag_seqno, 1);
332         bat_info(batman_if->soft_iface, "Adding interface: %s\n",
333                  batman_if->net_dev->name);
334
335         if (atomic_read(&bat_priv->frag_enabled) && batman_if->net_dev->mtu <
336                 ETH_DATA_LEN + BAT_HEADER_LEN)
337                 bat_info(batman_if->soft_iface,
338                         "The MTU of interface %s is too small (%i) to handle "
339                         "the transport of batman-adv packets. Packets going "
340                         "over this interface will be fragmented on layer2 "
341                         "which could impact the performance. Setting the MTU "
342                         "to %zi would solve the problem.\n",
343                         batman_if->net_dev->name, batman_if->net_dev->mtu,
344                         ETH_DATA_LEN + BAT_HEADER_LEN);
345
346         if (!atomic_read(&bat_priv->frag_enabled) && batman_if->net_dev->mtu <
347                 ETH_DATA_LEN + BAT_HEADER_LEN)
348                 bat_info(batman_if->soft_iface,
349                         "The MTU of interface %s is too small (%i) to handle "
350                         "the transport of batman-adv packets. If you experience"
351                         " problems getting traffic through try increasing the "
352                         "MTU to %zi.\n",
353                         batman_if->net_dev->name, batman_if->net_dev->mtu,
354                         ETH_DATA_LEN + BAT_HEADER_LEN);
355
356         if (hardif_is_iface_up(batman_if))
357                 hardif_activate_interface(batman_if);
358         else
359                 bat_err(batman_if->soft_iface, "Not using interface %s "
360                         "(retrying later): interface not active\n",
361                         batman_if->net_dev->name);
362
363         /* begin scheduling originator messages on that interface */
364         schedule_own_packet(batman_if);
365
366 out:
367         return 0;
368
369 err:
370         return -ENOMEM;
371 }
372
/**
 * hardif_disable_interface - detach a hard interface from its soft interface
 * @batman_if: the (previously enabled) interface to shut down
 *
 * Reverses hardif_enable_interface(): stops reception of batman frames,
 * drops the references taken during enable and, if this was the last slave
 * interface, destroys the soft interface as well.  Afterwards the interface
 * is back in IF_NOT_IN_USE and may be enabled again or removed.
 */
void hardif_disable_interface(struct batman_if *batman_if)
{
	struct bat_priv *bat_priv = netdev_priv(batman_if->soft_iface);

	if (batman_if->if_status == IF_ACTIVE)
		hardif_deactivate_interface(batman_if);

	/* nothing to tear down unless deactivation brought us to INACTIVE */
	if (batman_if->if_status != IF_INACTIVE)
		return;

	bat_info(batman_if->soft_iface, "Removing interface: %s\n",
		 batman_if->net_dev->name);
	/* stop delivery of batman ethertype frames and drop the reference
	 * the packet handler held on this batman_if */
	dev_remove_pack(&batman_if->batman_adv_ptype);
	kref_put(&batman_if->refcount, hardif_free_ref);

	bat_priv->num_ifaces--;
	orig_hash_del_if(batman_if, bat_priv->num_ifaces);

	/* if we were the primary interface, hand the role to another
	 * active interface (or NULL if none is left) */
	if (batman_if == bat_priv->primary_if) {
		struct batman_if *new_if;

		new_if = get_active_batman_if(batman_if->soft_iface);
		set_primary_if(bat_priv, new_if);

		/* set_primary_if() took its own reference */
		if (new_if)
			kref_put(&new_if->refcount, hardif_free_ref);
	}

	kfree(batman_if->packet_buff);
	batman_if->packet_buff = NULL;
	batman_if->if_status = IF_NOT_IN_USE;

	/* delete all references to this batman_if */
	purge_orig_ref(bat_priv);
	purge_outstanding_packets(bat_priv, batman_if);
	/* release the soft interface reference taken during enable */
	dev_put(batman_if->soft_iface);

	/* nobody uses this interface anymore */
	if (!bat_priv->num_ifaces)
		softif_destroy(batman_if->soft_iface);

	batman_if->soft_iface = NULL;
}
416
417 static struct batman_if *hardif_add_interface(struct net_device *net_dev)
418 {
419         struct batman_if *batman_if;
420         int ret;
421
422         ret = is_valid_iface(net_dev);
423         if (ret != 1)
424                 goto out;
425
426         dev_hold(net_dev);
427
428         batman_if = kmalloc(sizeof(struct batman_if), GFP_ATOMIC);
429         if (!batman_if) {
430                 pr_err("Can't add interface (%s): out of memory\n",
431                        net_dev->name);
432                 goto release_dev;
433         }
434
435         ret = sysfs_add_hardif(&batman_if->hardif_obj, net_dev);
436         if (ret)
437                 goto free_if;
438
439         batman_if->if_num = -1;
440         batman_if->net_dev = net_dev;
441         batman_if->soft_iface = NULL;
442         batman_if->if_status = IF_NOT_IN_USE;
443         INIT_LIST_HEAD(&batman_if->list);
444         kref_init(&batman_if->refcount);
445
446         check_known_mac_addr(batman_if->net_dev);
447
448         spin_lock(&if_list_lock);
449         list_add_tail_rcu(&batman_if->list, &if_list);
450         spin_unlock(&if_list_lock);
451
452         /* extra reference for return */
453         kref_get(&batman_if->refcount);
454         return batman_if;
455
456 free_if:
457         kfree(batman_if);
458 release_dev:
459         dev_put(net_dev);
460 out:
461         return NULL;
462 }
463
/* disable and schedule the destruction of a batman_if; the caller must
 * already have unlinked it from if_list */
static void hardif_remove_interface(struct batman_if *batman_if)
{
	/* first deactivate interface */
	if (batman_if->if_status != IF_NOT_IN_USE)
		hardif_disable_interface(batman_if);

	/* disabling can bail out early; never free an interface that is
	 * still in use */
	if (batman_if->if_status != IF_NOT_IN_USE)
		return;

	batman_if->if_status = IF_TO_BE_REMOVED;
	sysfs_del_hardif(&batman_if->hardif_obj);
	/* defer the final cleanup (hardif_free_rcu) until all RCU readers
	 * that may still see this entry have finished */
	call_rcu(&batman_if->rcu, hardif_free_rcu);
}
477
/* module teardown: unlink every hard interface from the global list and
 * destroy them all */
void hardif_remove_interfaces(void)
{
	struct batman_if *batman_if, *batman_if_tmp;
	struct list_head if_queue;

	INIT_LIST_HEAD(&if_queue);

	/* move all entries onto a private queue so the teardown below can
	 * run without holding if_list_lock */
	spin_lock(&if_list_lock);
	list_for_each_entry_safe(batman_if, batman_if_tmp, &if_list, list) {
		list_del_rcu(&batman_if->list);
		/* NOTE(review): the list node is re-linked immediately after
		 * list_del_rcu() without waiting for a grace period; an RCU
		 * reader still traversing if_list could follow the updated
		 * pointers into if_queue - verify this is safe here */
		list_add_tail(&batman_if->list, &if_queue);
	}
	spin_unlock(&if_list_lock);

	/* hardif_remove_interface() expects rtnl to be held */
	rtnl_lock();
	list_for_each_entry_safe(batman_if, batman_if_tmp, &if_queue, list) {
		hardif_remove_interface(batman_if);
	}
	rtnl_unlock();
}
498
499 static int hard_if_event(struct notifier_block *this,
500                          unsigned long event, void *ptr)
501 {
502         struct net_device *net_dev = (struct net_device *)ptr;
503         struct batman_if *batman_if = get_batman_if_by_netdev(net_dev);
504         struct bat_priv *bat_priv;
505
506         if (!batman_if && event == NETDEV_REGISTER)
507                 batman_if = hardif_add_interface(net_dev);
508
509         if (!batman_if)
510                 goto out;
511
512         switch (event) {
513         case NETDEV_UP:
514                 hardif_activate_interface(batman_if);
515                 break;
516         case NETDEV_GOING_DOWN:
517         case NETDEV_DOWN:
518                 hardif_deactivate_interface(batman_if);
519                 break;
520         case NETDEV_UNREGISTER:
521                 spin_lock(&if_list_lock);
522                 list_del_rcu(&batman_if->list);
523                 spin_unlock(&if_list_lock);
524
525                 hardif_remove_interface(batman_if);
526                 break;
527         case NETDEV_CHANGEMTU:
528                 if (batman_if->soft_iface)
529                         update_min_mtu(batman_if->soft_iface);
530                 break;
531         case NETDEV_CHANGEADDR:
532                 if (batman_if->if_status == IF_NOT_IN_USE)
533                         goto hardif_put;
534
535                 check_known_mac_addr(batman_if->net_dev);
536                 update_mac_addresses(batman_if);
537
538                 bat_priv = netdev_priv(batman_if->soft_iface);
539                 if (batman_if == bat_priv->primary_if)
540                         update_primary_addr(bat_priv);
541                 break;
542         default:
543                 break;
544         };
545
546 hardif_put:
547         kref_put(&batman_if->refcount, hardif_free_ref);
548 out:
549         return NOTIFY_DONE;
550 }
551
/* receive a packet with the batman ethertype coming on a hard
 * interface
 *
 * Validates the frame (shared skb, minimum length, sane mac header,
 * active interface, compatible protocol version) and dispatches it to
 * the handler matching its packet type.  The handlers own the skb on
 * success; on NET_RX_DROP it is freed here.  Always reports
 * NET_RX_SUCCESS to the stack once the frame passed validation. */
int batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
	struct packet_type *ptype, struct net_device *orig_dev)
{
	struct bat_priv *bat_priv;
	struct batman_packet *batman_packet;
	struct batman_if *batman_if;
	int ret;

	/* ptype is embedded in the batman_if registered in
	 * hardif_enable_interface() */
	batman_if = container_of(ptype, struct batman_if, batman_adv_ptype);
	skb = skb_share_check(skb, GFP_ATOMIC);

	/* skb was released by skb_share_check() */
	if (!skb)
		goto err_out;

	/* packet should hold at least type and version */
	if (unlikely(!pskb_may_pull(skb, 2)))
		goto err_free;

	/* expect a valid ethernet header here. */
	if (unlikely(skb->mac_len != sizeof(struct ethhdr)
				|| !skb_mac_header(skb)))
		goto err_free;

	/* the interface may have been disabled in the meantime */
	if (!batman_if->soft_iface)
		goto err_free;

	bat_priv = netdev_priv(batman_if->soft_iface);

	if (atomic_read(&bat_priv->mesh_state) != MESH_ACTIVE)
		goto err_free;

	/* discard frames on not active interfaces */
	if (batman_if->if_status != IF_ACTIVE)
		goto err_free;

	batman_packet = (struct batman_packet *)skb->data;

	if (batman_packet->version != COMPAT_VERSION) {
		bat_dbg(DBG_BATMAN, bat_priv,
			"Drop packet: incompatible batman version (%i)\n",
			batman_packet->version);
		goto err_free;
	}

	/* all receive handlers return whether they received or reused
	 * the supplied skb. if not, we have to free the skb. */

	switch (batman_packet->packet_type) {
		/* batman originator packet */
	case BAT_PACKET:
		ret = recv_bat_packet(skb, batman_if);
		break;

		/* batman icmp packet */
	case BAT_ICMP:
		ret = recv_icmp_packet(skb, batman_if);
		break;

		/* unicast packet */
	case BAT_UNICAST:
		ret = recv_unicast_packet(skb, batman_if);
		break;

		/* fragmented unicast packet */
	case BAT_UNICAST_FRAG:
		ret = recv_ucast_frag_packet(skb, batman_if);
		break;

		/* broadcast packet */
	case BAT_BCAST:
		ret = recv_bcast_packet(skb, batman_if);
		break;

		/* vis packet */
	case BAT_VIS:
		ret = recv_vis_packet(skb, batman_if);
		break;
	default:
		ret = NET_RX_DROP;
	}

	if (ret == NET_RX_DROP)
		kfree_skb(skb);

	/* return NET_RX_SUCCESS in any case as we
	 * most probably dropped the packet for
	 * routing-logical reasons. */

	return NET_RX_SUCCESS;

err_free:
	kfree_skb(skb);
err_out:
	return NET_RX_DROP;
}
650
/* registered with the networking core so hard_if_event() is called on
 * every net_device state change */
struct notifier_block hard_if_notifier = {
	.notifier_call = hard_if_event,
};