/* Copyright (C) 2009-2013 B.A.T.M.A.N. contributors:
 *
 * Marek Lindner, Simon Wunderlich
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
19 #include "distributed-arp-table.h"
20 #include "originator.h"
22 #include "translation-table.h"
24 #include "gateway_client.h"
25 #include "hard-interface.h"
26 #include "soft-interface.h"
27 #include "bridge_loop_avoidance.h"
28 #include "network-coding.h"
29 #include "fragmentation.h"
/* lockdep class key used to give the originator hash table's per-bucket
 * locks their own lock class (see batadv_hash_set_lock_class() below)
 */
static struct lock_class_key batadv_orig_hash_lock_class_key;

/* forward declaration: periodic work item that purges stale originators */
static void batadv_purge_orig(struct work_struct *work);
/* returns 1 if they are the same originator */
int batadv_compare_orig(const struct hlist_node *node, const void *data2)
	/* recover the orig_node embedding this hash node; its address is
	 * then compared against data2
	 * NOTE(review): the continuation of this statement (member name and
	 * closing paren) is missing from this excerpt — confirm upstream
	 */
	const void *data1 = container_of(node, struct batadv_orig_node,
	/* presumably an Ethernet (MAC) address comparison — confirm the
	 * batadv_compare_eth() contract in the shared headers
	 */
	return batadv_compare_eth(data1, data2);
/**
 * batadv_orig_node_vlan_get - get an orig_node_vlan object
 * @orig_node: the originator serving the VLAN
 * @vid: the VLAN identifier
 *
 * Returns the vlan object identified by vid and belonging to orig_node or NULL
 * if it does not exist. The returned object carries an extra reference the
 * caller must release.
 */
struct batadv_orig_node_vlan *
batadv_orig_node_vlan_get(struct batadv_orig_node *orig_node,
	struct batadv_orig_node_vlan *vlan = NULL, *tmp;

	/* walk the RCU-protected per-originator VLAN list */
	list_for_each_entry_rcu(tmp, &orig_node->vlan_list, list) {
		/* an entry whose refcount already dropped to zero is being
		 * freed and must not be handed out
		 * NOTE(review): the vid-match check and the loop/return tail
		 * are missing from this excerpt — confirm upstream
		 */
		if (!atomic_inc_not_zero(&tmp->refcount))
/**
 * batadv_orig_node_vlan_new - search and possibly create an orig_node_vlan
 *  object
 * @orig_node: the originator serving the VLAN
 * @vid: the VLAN identifier
 *
 * Returns NULL in case of failure or the vlan object identified by vid and
 * belonging to orig_node otherwise. The object is created and added to the list
 * if it does not exist.
 *
 * The object is returned with refcounter increased by 1.
 */
struct batadv_orig_node_vlan *
batadv_orig_node_vlan_new(struct batadv_orig_node *orig_node,
	struct batadv_orig_node_vlan *vlan;

	/* serialize lookup + insert so the same vid is never added twice */
	spin_lock_bh(&orig_node->vlan_list_lock);

	/* first look if an object for this vid already exists */
	vlan = batadv_orig_node_vlan_get(orig_node, vid);

	/* GFP_ATOMIC: we are under a spinlock and must not sleep
	 * NOTE(review): the early-exit on an existing vlan and the
	 * allocation-failure check are missing from this excerpt
	 */
	vlan = kzalloc(sizeof(*vlan), GFP_ATOMIC);

	/* refcount = 2: one reference for the list, one for the caller */
	atomic_set(&vlan->refcount, 2);

	list_add_rcu(&vlan->list, &orig_node->vlan_list);

	spin_unlock_bh(&orig_node->vlan_list_lock);
/**
 * batadv_orig_node_vlan_free_ref - decrement the refcounter and possibly free
 *  the originator-vlan object
 * @orig_vlan: the originator-vlan object to release
 */
void batadv_orig_node_vlan_free_ref(struct batadv_orig_node_vlan *orig_vlan)
	/* free only after an RCU grace period so that concurrent RCU list
	 * readers (see batadv_orig_node_vlan_get()) stay safe
	 */
	if (atomic_dec_and_test(&orig_vlan->refcount))
		kfree_rcu(orig_vlan, rcu);
/* Allocate the originator hash table and start the periodic purge worker.
 * NOTE(review): the early-return for an already-initialised hash, the
 * error-return on allocation failure and the final return statement are
 * missing from this excerpt — confirm upstream.
 */
int batadv_originator_init(struct batadv_priv *bat_priv)
	/* already initialised — nothing to do */
	if (bat_priv->orig_hash)

	bat_priv->orig_hash = batadv_hash_new(1024);

	if (!bat_priv->orig_hash)

	/* give the orig hash buckets their own lockdep class to avoid
	 * false-positive lock-order reports against other batadv hashes
	 */
	batadv_hash_set_lock_class(bat_priv->orig_hash,
				   &batadv_orig_hash_lock_class_key);

	/* schedule the first run of the periodic originator purge */
	INIT_DELAYED_WORK(&bat_priv->orig_work, batadv_purge_orig);
	queue_delayed_work(batadv_event_workqueue,
			   &bat_priv->orig_work,
			   msecs_to_jiffies(BATADV_ORIG_WORK_PERIOD));
/**
 * batadv_neigh_ifinfo_free_rcu - free the neigh_ifinfo object
 * @rcu: rcu pointer of the neigh_ifinfo object
 */
static void batadv_neigh_ifinfo_free_rcu(struct rcu_head *rcu)
	struct batadv_neigh_ifinfo *neigh_ifinfo;

	neigh_ifinfo = container_of(rcu, struct batadv_neigh_ifinfo, rcu);

	/* drop the reference taken on the outgoing interface in
	 * batadv_neigh_ifinfo_new(); BATADV_IF_DEFAULT appears to be a
	 * sentinel and holds no real reference — confirm against types.h
	 */
	if (neigh_ifinfo->if_outgoing != BATADV_IF_DEFAULT)
		batadv_hardif_free_ref_now(neigh_ifinfo->if_outgoing);
/**
 * batadv_neigh_ifinfo_free_now - decrement the refcounter and possibly free
 *  the neigh_ifinfo (without rcu callback)
 * @neigh_ifinfo: the neigh_ifinfo object to release
 */
batadv_neigh_ifinfo_free_ref_now(struct batadv_neigh_ifinfo *neigh_ifinfo)
	/* frees immediately instead of via call_rcu(); only safe where no
	 * grace period is needed, e.g. from inside another RCU callback
	 * (see batadv_neigh_node_free_rcu() below)
	 */
	if (atomic_dec_and_test(&neigh_ifinfo->refcount))
		batadv_neigh_ifinfo_free_rcu(&neigh_ifinfo->rcu);
/**
 * batadv_neigh_ifinfo_free_ref - decrement the refcounter and possibly free
 *  the neigh_ifinfo object
 * @neigh_ifinfo: the neigh_ifinfo object to release
 */
void batadv_neigh_ifinfo_free_ref(struct batadv_neigh_ifinfo *neigh_ifinfo)
	/* defer the actual free until after an RCU grace period */
	if (atomic_dec_and_test(&neigh_ifinfo->refcount))
		call_rcu(&neigh_ifinfo->rcu, batadv_neigh_ifinfo_free_rcu);
/**
 * batadv_neigh_node_free_rcu - free the neigh_node
 * @rcu: rcu pointer of the neigh_node
 */
static void batadv_neigh_node_free_rcu(struct rcu_head *rcu)
	struct hlist_node *node_tmp;
	struct batadv_neigh_node *neigh_node;
	struct batadv_neigh_ifinfo *neigh_ifinfo;

	neigh_node = container_of(rcu, struct batadv_neigh_node, rcu);

	/* release every per-outgoing-interface info object attached to this
	 * neighbour; _safe variant because entries are freed while iterating
	 */
	hlist_for_each_entry_safe(neigh_ifinfo, node_tmp,
				  &neigh_node->ifinfo_list, list) {
		batadv_neigh_ifinfo_free_ref_now(neigh_ifinfo);

	/* drop the reference held on the interface this neighbour was
	 * detected on
	 */
	batadv_hardif_free_ref_now(neigh_node->if_incoming);
/**
 * batadv_neigh_node_free_ref_now - decrement the neighbors refcounter
 *  and possibly free it (without rcu callback)
 * @neigh_node: neigh neighbor to free
 */
batadv_neigh_node_free_ref_now(struct batadv_neigh_node *neigh_node)
	/* immediate (non-deferred) free; only safe where no RCU grace
	 * period is required, e.g. from inside another RCU callback
	 * (see batadv_orig_node_free_rcu() below)
	 */
	if (atomic_dec_and_test(&neigh_node->refcount))
		batadv_neigh_node_free_rcu(&neigh_node->rcu);
/**
 * batadv_neigh_node_free_ref - decrement the neighbors refcounter
 *  and possibly free it
 * @neigh_node: neigh neighbor to free
 */
void batadv_neigh_node_free_ref(struct batadv_neigh_node *neigh_node)
	/* defer the actual free until after an RCU grace period */
	if (atomic_dec_and_test(&neigh_node->refcount))
		call_rcu(&neigh_node->rcu, batadv_neigh_node_free_rcu);
/* increases the refcounter of a found router */
struct batadv_neigh_node *
batadv_orig_node_get_router(struct batadv_orig_node *orig_node)
	struct batadv_neigh_node *router;

	/* NOTE(review): rcu_dereference() requires an enclosing
	 * rcu_read_lock()/unlock() pair; those lines are not visible in
	 * this excerpt — confirm upstream
	 */
	router = rcu_dereference(orig_node->router);

	/* a router whose refcount already hit zero is being freed and must
	 * be reported as absent rather than returned
	 */
	if (router && !atomic_inc_not_zero(&router->refcount))
/**
 * batadv_neigh_ifinfo_get - find the ifinfo from an neigh_node
 * @neigh: the neigh node to be queried
 * @if_outgoing: the interface for which the ifinfo should be acquired
 *
 * The object is returned with refcounter increased by 1.
 *
 * Returns the requested neigh_ifinfo or NULL if not found
 */
struct batadv_neigh_ifinfo *
batadv_neigh_ifinfo_get(struct batadv_neigh_node *neigh,
			struct batadv_hard_iface *if_outgoing)
	struct batadv_neigh_ifinfo *neigh_ifinfo = NULL,

	/* RCU walk of the per-neighbour ifinfo list, matching on the
	 * outgoing interface pointer
	 */
	hlist_for_each_entry_rcu(tmp_neigh_ifinfo, &neigh->ifinfo_list,
		if (tmp_neigh_ifinfo->if_outgoing != if_outgoing)

		/* skip an entry that is already being freed */
		if (!atomic_inc_not_zero(&tmp_neigh_ifinfo->refcount))

		neigh_ifinfo = tmp_neigh_ifinfo;
/**
 * batadv_neigh_ifinfo_new - search and possibly create an neigh_ifinfo object
 * @neigh: the neigh node to be queried
 * @if_outgoing: the interface for which the ifinfo should be acquired
 *
 * Returns NULL in case of failure or the neigh_ifinfo object for the
 * if_outgoing interface otherwise. The object is created and added to the list
 * if it does not exist.
 *
 * The object is returned with refcounter increased by 1.
 */
struct batadv_neigh_ifinfo *
batadv_neigh_ifinfo_new(struct batadv_neigh_node *neigh,
			struct batadv_hard_iface *if_outgoing)
	struct batadv_neigh_ifinfo *neigh_ifinfo;

	/* serialize lookup + insert on the per-neighbour ifinfo list */
	spin_lock_bh(&neigh->ifinfo_lock);

	neigh_ifinfo = batadv_neigh_ifinfo_get(neigh, if_outgoing);

	/* GFP_ATOMIC: allocating under a spinlock, must not sleep
	 * NOTE(review): the early-exit on an existing entry and the
	 * allocation-failure check are missing from this excerpt
	 */
	neigh_ifinfo = kzalloc(sizeof(*neigh_ifinfo), GFP_ATOMIC);

	/* hold a reference on the outgoing interface for the lifetime of
	 * this ifinfo; bail out if the interface is already being freed.
	 * A NULL if_outgoing takes no reference — presumably the "default"
	 * interface case, confirm against BATADV_IF_DEFAULT usage
	 */
	if (if_outgoing && !atomic_inc_not_zero(&if_outgoing->refcount)) {

	INIT_HLIST_NODE(&neigh_ifinfo->list);
	/* refcount = 2: one for the list, one for the caller */
	atomic_set(&neigh_ifinfo->refcount, 2);
	neigh_ifinfo->if_outgoing = if_outgoing;

	hlist_add_head_rcu(&neigh_ifinfo->list, &neigh->ifinfo_list);

	spin_unlock_bh(&neigh->ifinfo_lock);
/**
 * batadv_neigh_node_new - create and init a new neigh_node object
 * @hard_iface: the interface where the neighbour is connected to
 * @neigh_addr: the mac address of the neighbour interface
 * @orig_node: originator object representing the neighbour
 *
 * Allocates a new neigh_node object and initialises all the generic fields.
 * Returns the new object or NULL on failure.
 */
struct batadv_neigh_node *
batadv_neigh_node_new(struct batadv_hard_iface *hard_iface,
		      const uint8_t *neigh_addr,
		      struct batadv_orig_node *orig_node)
	struct batadv_neigh_node *neigh_node;

	/* NOTE(review): the allocation-failure check after this kzalloc is
	 * missing from this excerpt — confirm upstream
	 */
	neigh_node = kzalloc(sizeof(*neigh_node), GFP_ATOMIC);

	INIT_HLIST_NODE(&neigh_node->list);
	INIT_HLIST_HEAD(&neigh_node->ifinfo_list);
	spin_lock_init(&neigh_node->ifinfo_lock);

	memcpy(neigh_node->addr, neigh_addr, ETH_ALEN);
	neigh_node->if_incoming = hard_iface;
	neigh_node->orig_node = orig_node;

	/* extra reference for return */
	atomic_set(&neigh_node->refcount, 2);
/* RCU callback releasing an originator and everything it owns: its
 * neighbour list, network-coding state, fragment buffers and global
 * translation-table entries.
 */
static void batadv_orig_node_free_rcu(struct rcu_head *rcu)
	struct hlist_node *node_tmp;
	struct batadv_neigh_node *neigh_node;
	struct batadv_orig_node *orig_node;

	orig_node = container_of(rcu, struct batadv_orig_node, rcu);

	spin_lock_bh(&orig_node->neigh_list_lock);

	/* for all neighbors towards this originator ... */
	hlist_for_each_entry_safe(neigh_node, node_tmp,
				  &orig_node->neigh_list, list) {
		hlist_del_rcu(&neigh_node->list);
		/* immediate free is fine: we are inside an RCU callback */
		batadv_neigh_node_free_ref_now(neigh_node);

	spin_unlock_bh(&orig_node->neigh_list_lock);

	/* purge network-coding and fragmentation state tied to this node
	 * (NULL = match all entries, not just one interface/neighbour)
	 */
	batadv_nc_purge_orig(orig_node->bat_priv, orig_node, NULL);

	batadv_frag_purge_orig(orig_node, NULL);

	batadv_tt_global_del_orig(orig_node->bat_priv, orig_node, -1,
				  "originator timed out");

	/* let the routing algorithm release its private per-orig data */
	if (orig_node->bat_priv->bat_algo_ops->bat_orig_free)
		orig_node->bat_priv->bat_algo_ops->bat_orig_free(orig_node);

	kfree(orig_node->tt_buff);
/**
 * batadv_orig_node_free_ref - decrement the orig node refcounter and possibly
 *  schedule an rcu callback for freeing it
 * @orig_node: the orig node to free
 */
void batadv_orig_node_free_ref(struct batadv_orig_node *orig_node)
	/* defer destruction until after an RCU grace period */
	if (atomic_dec_and_test(&orig_node->refcount))
		call_rcu(&orig_node->rcu, batadv_orig_node_free_rcu);
/**
 * batadv_orig_node_free_ref_now - decrement the orig node refcounter and
 *  possibly free it (without rcu callback)
 * @orig_node: the orig node to free
 */
void batadv_orig_node_free_ref_now(struct batadv_orig_node *orig_node)
	/* immediate free; caller must guarantee no RCU grace period is
	 * needed (e.g. already inside an RCU callback)
	 */
	if (atomic_dec_and_test(&orig_node->refcount))
		batadv_orig_node_free_rcu(&orig_node->rcu);
/* Tear down the originator table on shutdown: stop the purge worker,
 * release every originator and destroy the hash.
 */
void batadv_originator_free(struct batadv_priv *bat_priv)
	struct batadv_hashtable *hash = bat_priv->orig_hash;
	struct hlist_node *node_tmp;
	struct hlist_head *head;
	spinlock_t *list_lock; /* spinlock to protect write access */
	struct batadv_orig_node *orig_node;

	/* stop the periodic purge before dismantling the hash it walks;
	 * _sync waits for a concurrently running purge to finish
	 */
	cancel_delayed_work_sync(&bat_priv->orig_work);

	bat_priv->orig_hash = NULL;

	/* drop the hash's reference on every originator, bucket by bucket */
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];
		list_lock = &hash->list_locks[i];

		spin_lock_bh(list_lock);
		hlist_for_each_entry_safe(orig_node, node_tmp,
			hlist_del_rcu(&orig_node->hash_entry);
			batadv_orig_node_free_ref(orig_node);
		spin_unlock_bh(list_lock);

	batadv_hash_destroy(hash);
/**
 * batadv_orig_node_new - creates a new orig_node
 * @bat_priv: the bat priv with all the soft interface information
 * @addr: the mac address of the originator
 *
 * Creates a new originator object and initialise all the generic fields.
 * The new object is not added to the originator list.
 * Returns the newly created object or NULL on failure.
 */
struct batadv_orig_node *batadv_orig_node_new(struct batadv_priv *bat_priv,
	struct batadv_orig_node *orig_node;
	struct batadv_orig_node_vlan *vlan;
	unsigned long reset_time;

	batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
		   "Creating new originator: %pM\n", addr);

	/* NOTE(review): the NULL check after this kzalloc is missing from
	 * this excerpt — confirm upstream
	 */
	orig_node = kzalloc(sizeof(*orig_node), GFP_ATOMIC);

	INIT_HLIST_HEAD(&orig_node->neigh_list);
	INIT_LIST_HEAD(&orig_node->vlan_list);
	spin_lock_init(&orig_node->bcast_seqno_lock);
	spin_lock_init(&orig_node->neigh_list_lock);
	spin_lock_init(&orig_node->tt_buff_lock);
	spin_lock_init(&orig_node->tt_lock);
	spin_lock_init(&orig_node->vlan_list_lock);

	batadv_nc_init_orig(orig_node);

	/* extra reference for return */
	atomic_set(&orig_node->refcount, 2);

	orig_node->tt_initialised = false;
	orig_node->bat_priv = bat_priv;
	memcpy(orig_node->orig, addr, ETH_ALEN);
	batadv_dat_init_orig_node_addr(orig_node);
	orig_node->router = NULL;
	atomic_set(&orig_node->last_ttvn, 0);
	orig_node->tt_buff = NULL;
	orig_node->tt_buff_len = 0;
	/* date the reset timestamps in the past so the node is not
	 * seqno-reset-protected right after creation
	 */
	reset_time = jiffies - 1 - msecs_to_jiffies(BATADV_RESET_PROTECTION_MS);
	orig_node->bcast_seqno_reset = reset_time;
	orig_node->batman_seqno_reset = reset_time;

	/* create a vlan object for the "untagged" LAN */
	vlan = batadv_orig_node_vlan_new(orig_node, BATADV_NO_FLAGS);

	/* batadv_orig_node_vlan_new() increases the refcounter.
	 * Immediately release vlan since it is not needed anymore in this
	 * context
	 */
	batadv_orig_node_vlan_free_ref(vlan);

	/* initialise the per-slot fragment reassembly buffers */
	for (i = 0; i < BATADV_FRAG_BUFFER_COUNT; i++) {
		INIT_HLIST_HEAD(&orig_node->fragments[i].head);
		spin_lock_init(&orig_node->fragments[i].lock);
		orig_node->fragments[i].size = 0;
/**
 * batadv_purge_orig_neighbors - purges neighbors from originator
 * @bat_priv: the bat priv with all the soft interface information
 * @orig_node: orig node which is to be checked
 *
 * Returns true if any neighbor was purged, false otherwise
 */
batadv_purge_orig_neighbors(struct batadv_priv *bat_priv,
			    struct batadv_orig_node *orig_node)
	struct hlist_node *node_tmp;
	struct batadv_neigh_node *neigh_node;
	bool neigh_purged = false;
	unsigned long last_seen;
	struct batadv_hard_iface *if_incoming;

	spin_lock_bh(&orig_node->neigh_list_lock);

	/* for all neighbors towards this originator ... */
	hlist_for_each_entry_safe(neigh_node, node_tmp,
				  &orig_node->neigh_list, list) {
		last_seen = neigh_node->last_seen;
		if_incoming = neigh_node->if_incoming;

		/* purge when the neighbour timed out OR its incoming
		 * interface is no longer usable
		 */
		if ((batadv_has_timed_out(last_seen, BATADV_PURGE_TIMEOUT)) ||
		    (if_incoming->if_status == BATADV_IF_INACTIVE) ||
		    (if_incoming->if_status == BATADV_IF_NOT_IN_USE) ||
		    (if_incoming->if_status == BATADV_IF_TO_BE_REMOVED)) {
			/* log the interface-based and the timeout-based
			 * causes with distinct messages
			 * NOTE(review): the `else` between the two
			 * batadv_dbg() calls is missing from this excerpt
			 */
			if ((if_incoming->if_status == BATADV_IF_INACTIVE) ||
			    (if_incoming->if_status == BATADV_IF_NOT_IN_USE) ||
			    (if_incoming->if_status == BATADV_IF_TO_BE_REMOVED))
				batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
					   "neighbor purge: originator %pM, neighbor: %pM, iface: %s\n",
					   orig_node->orig, neigh_node->addr,
					   if_incoming->net_dev->name);
				batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
					   "neighbor timeout: originator %pM, neighbor: %pM, last_seen: %u\n",
					   orig_node->orig, neigh_node->addr,
					   jiffies_to_msecs(last_seen));

			hlist_del_rcu(&neigh_node->list);
			batadv_neigh_node_free_ref(neigh_node);

	spin_unlock_bh(&orig_node->neigh_list_lock);
/**
 * batadv_find_best_neighbor - finds the best neighbor after purging
 * @bat_priv: the bat priv with all the soft interface information
 * @orig_node: orig node which is to be checked
 * @if_outgoing: the interface for which the metric should be compared
 *
 * Returns the current best neighbor, with refcount increased.
 */
static struct batadv_neigh_node *
batadv_find_best_neighbor(struct batadv_priv *bat_priv,
			  struct batadv_orig_node *orig_node,
			  struct batadv_hard_iface *if_outgoing)
	struct batadv_neigh_node *best = NULL, *neigh;
	struct batadv_algo_ops *bao = bat_priv->bat_algo_ops;

	/* linear scan, keeping the neighbour the routing algorithm ranks
	 * highest via its bat_neigh_cmp callback
	 */
	hlist_for_each_entry_rcu(neigh, &orig_node->neigh_list, list) {
		if (best && (bao->bat_neigh_cmp(neigh, if_outgoing,
						best, if_outgoing) <= 0))

		/* candidate is being freed — skip it */
		if (!atomic_inc_not_zero(&neigh->refcount))

		/* drop the reference on the previously best neighbour before
		 * replacing it (the `best = neigh;` line is missing from
		 * this excerpt — confirm upstream)
		 */
		batadv_neigh_node_free_ref(best);
/* Decide whether @orig_node should be removed: true when it timed out
 * entirely; otherwise purge its dead neighbours and re-elect its route.
 */
static bool batadv_purge_orig_node(struct batadv_priv *bat_priv,
				   struct batadv_orig_node *orig_node)
	struct batadv_neigh_node *best_neigh_node;

	/* originators get twice the neighbour purge timeout before being
	 * dropped completely
	 */
	if (batadv_has_timed_out(orig_node->last_seen,
				 2 * BATADV_PURGE_TIMEOUT)) {
		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "Originator timeout: originator %pM, last_seen %u\n",
			   jiffies_to_msecs(orig_node->last_seen));

	/* nothing was purged — the current route is still valid */
	if (!batadv_purge_orig_neighbors(bat_priv, orig_node))

	/* re-select the best remaining neighbour and update the route
	 * accordingly, then drop the reference find_best took for us
	 */
	best_neigh_node = batadv_find_best_neighbor(bat_priv, orig_node,
	batadv_update_route(bat_priv, orig_node, best_neigh_node);

	batadv_neigh_node_free_ref(best_neigh_node);
/* Walk the whole originator hash once, deleting timed-out originators and
 * expiring their gateway entries and fragment buffers.
 */
static void _batadv_purge_orig(struct batadv_priv *bat_priv)
	struct batadv_hashtable *hash = bat_priv->orig_hash;
	struct hlist_node *node_tmp;
	struct hlist_head *head;
	spinlock_t *list_lock; /* spinlock to protect write access */
	struct batadv_orig_node *orig_node;

	/* for all origins... */
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];
		list_lock = &hash->list_locks[i];

		spin_lock_bh(list_lock);
		hlist_for_each_entry_safe(orig_node, node_tmp,
			if (batadv_purge_orig_node(bat_priv, orig_node)) {
				/* remove any gateway entry before dropping
				 * the hash's reference on the node
				 */
				batadv_gw_node_delete(bat_priv, orig_node);
				hlist_del_rcu(&orig_node->hash_entry);
				batadv_orig_node_free_ref(orig_node);

			/* surviving node: expire stale fragment chains */
			batadv_frag_purge_orig(orig_node,
					       batadv_frag_check_entry);
		spin_unlock_bh(list_lock);

	/* re-run the gateway selection now that nodes may have vanished */
	batadv_gw_node_purge(bat_priv);
	batadv_gw_election(bat_priv);
/* Periodic work handler: purge the originator table, then re-arm itself so
 * the purge keeps running every BATADV_ORIG_WORK_PERIOD ms.
 */
static void batadv_purge_orig(struct work_struct *work)
	struct delayed_work *delayed_work;
	struct batadv_priv *bat_priv;

	/* recover the owning batadv_priv from the embedded work struct */
	delayed_work = container_of(work, struct delayed_work, work);
	bat_priv = container_of(delayed_work, struct batadv_priv, orig_work);
	_batadv_purge_orig(bat_priv);
	/* self-rearm; stopped via cancel_delayed_work_sync() in
	 * batadv_originator_free()
	 */
	queue_delayed_work(batadv_event_workqueue,
			   &bat_priv->orig_work,
			   msecs_to_jiffies(BATADV_ORIG_WORK_PERIOD));
/* Exported wrapper allowing other modules to trigger an immediate,
 * synchronous originator purge outside the periodic worker.
 */
void batadv_purge_orig_ref(struct batadv_priv *bat_priv)
	_batadv_purge_orig(bat_priv);
/* debugfs seq_file handler: print the originator table header and delegate
 * the per-originator listing to the active routing algorithm.
 */
int batadv_orig_seq_print_text(struct seq_file *seq, void *offset)
	struct net_device *net_dev = (struct net_device *)seq->private;
	struct batadv_priv *bat_priv = netdev_priv(net_dev);
	struct batadv_hard_iface *primary_if;

	/* NOTE(review): the NULL check on primary_if is missing from this
	 * excerpt — confirm upstream
	 */
	primary_if = batadv_seq_print_text_primary_if_get(seq);

	seq_printf(seq, "[B.A.T.M.A.N. adv %s, MainIF/MAC: %s/%pM (%s %s)]\n",
		   BATADV_SOURCE_VERSION, primary_if->net_dev->name,
		   primary_if->net_dev->dev_addr, net_dev->name,
		   bat_priv->bat_algo_ops->name);

	batadv_hardif_free_ref(primary_if);

	/* the actual table printout is algorithm specific */
	if (!bat_priv->bat_algo_ops->bat_orig_print) {
		"No printing function for this routing protocol\n");

	bat_priv->bat_algo_ops->bat_orig_print(bat_priv, seq);
/* Notify every originator that a new hard interface was added so that
 * per-interface state (bcast_own and friends) can be resized.
 */
int batadv_orig_hash_add_if(struct batadv_hard_iface *hard_iface,
	struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
	struct batadv_algo_ops *bao = bat_priv->bat_algo_ops;
	struct batadv_hashtable *hash = bat_priv->orig_hash;
	struct hlist_head *head;
	struct batadv_orig_node *orig_node;

	/* resize all orig nodes because orig_node->bcast_own(_sum) depend on
	 * the number of interfaces
	 */
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
			/* algorithm-specific per-orig resize hook; optional */
			if (bao->bat_orig_add_if)
				ret = bao->bat_orig_add_if(orig_node,
/* Notify every originator that a hard interface is going away: shrink
 * per-interface state and renumber the remaining interfaces.
 */
int batadv_orig_hash_del_if(struct batadv_hard_iface *hard_iface,
	struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
	struct batadv_hashtable *hash = bat_priv->orig_hash;
	struct hlist_head *head;
	struct batadv_hard_iface *hard_iface_tmp;
	struct batadv_orig_node *orig_node;
	struct batadv_algo_ops *bao = bat_priv->bat_algo_ops;

	/* resize all orig nodes because orig_node->bcast_own(_sum) depend on
	 * the number of interfaces
	 */
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
			/* algorithm-specific per-orig shrink hook; optional */
			if (bao->bat_orig_del_if)
				ret = bao->bat_orig_del_if(orig_node,

	/* renumber remaining batman interfaces _inside_ of orig_hash_lock */
	list_for_each_entry_rcu(hard_iface_tmp, &batadv_hardif_list, list) {
		if (hard_iface_tmp->if_status == BATADV_IF_NOT_IN_USE)

		/* the removed interface itself keeps its old number until
		 * it is marked unused below
		 */
		if (hard_iface == hard_iface_tmp)

		/* only interfaces of the same mesh (soft interface) share
		 * the numbering space
		 */
		if (hard_iface->soft_iface != hard_iface_tmp->soft_iface)

		/* close the gap left by the removed interface */
		if (hard_iface_tmp->if_num > hard_iface->if_num)
			hard_iface_tmp->if_num--;

	/* mark the removed interface as unnumbered */
	hard_iface->if_num = -1;