batman-adv: split tq information in neigh_node struct
net/batman-adv/originator.c
/* Copyright (C) 2009-2013 B.A.T.M.A.N. contributors:
 *
 * Marek Lindner, Simon Wunderlich
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "main.h"
#include "distributed-arp-table.h"
#include "originator.h"
#include "hash.h"
#include "translation-table.h"
#include "routing.h"
#include "gateway_client.h"
#include "hard-interface.h"
#include "soft-interface.h"
#include "bridge_loop_avoidance.h"
#include "network-coding.h"
#include "fragmentation.h"

/* hash class keys */
static struct lock_class_key batadv_orig_hash_lock_class_key;

static void batadv_purge_orig(struct work_struct *work);

/**
 * batadv_compare_orig - comparing function used in the originator hash table
 * @node: node in the local table
 * @data2: second object to compare the node to
 *
 * Returns 1 if they are the same originator
 */
int batadv_compare_orig(const struct hlist_node *node, const void *data2)
{
        const void *data1 = container_of(node, struct batadv_orig_node,
                                         hash_entry);

        return batadv_compare_eth(data1, data2);
}

/**
 * batadv_orig_node_vlan_get - get an orig_node_vlan object
 * @orig_node: the originator serving the VLAN
 * @vid: the VLAN identifier
 *
 * Returns the vlan object identified by vid and belonging to orig_node or NULL
 * if it does not exist.
 */
struct batadv_orig_node_vlan *
batadv_orig_node_vlan_get(struct batadv_orig_node *orig_node,
                          unsigned short vid)
{
        struct batadv_orig_node_vlan *vlan = NULL, *tmp;

        rcu_read_lock();
        list_for_each_entry_rcu(tmp, &orig_node->vlan_list, list) {
                if (tmp->vid != vid)
                        continue;

                if (!atomic_inc_not_zero(&tmp->refcount))
                        continue;

                vlan = tmp;

                break;
        }
        rcu_read_unlock();

        return vlan;
}

/**
 * batadv_orig_node_vlan_new - search and possibly create an orig_node_vlan
 *  object
 * @orig_node: the originator serving the VLAN
 * @vid: the VLAN identifier
 *
 * Returns NULL in case of failure or the vlan object identified by vid and
 * belonging to orig_node otherwise. The object is created and added to the
 * list if it does not exist.
 *
 * The object is returned with refcounter increased by 1.
 */
struct batadv_orig_node_vlan *
batadv_orig_node_vlan_new(struct batadv_orig_node *orig_node,
                          unsigned short vid)
{
        struct batadv_orig_node_vlan *vlan;

        spin_lock_bh(&orig_node->vlan_list_lock);

        /* first look if an object for this vid already exists */
        vlan = batadv_orig_node_vlan_get(orig_node, vid);
        if (vlan)
                goto out;

        vlan = kzalloc(sizeof(*vlan), GFP_ATOMIC);
        if (!vlan)
                goto out;

        atomic_set(&vlan->refcount, 2);
        vlan->vid = vid;

        list_add_rcu(&vlan->list, &orig_node->vlan_list);

out:
        spin_unlock_bh(&orig_node->vlan_list_lock);

        return vlan;
}

/**
 * batadv_orig_node_vlan_free_ref - decrement the refcounter and possibly free
 *  the originator-vlan object
 * @orig_vlan: the originator-vlan object to release
 */
void batadv_orig_node_vlan_free_ref(struct batadv_orig_node_vlan *orig_vlan)
{
        if (atomic_dec_and_test(&orig_vlan->refcount))
                kfree_rcu(orig_vlan, rcu);
}

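/**
 * batadv_originator_init - initialise the originator hash and start the
 *  periodic purge worker
 * @bat_priv: the bat priv with all the soft interface information
 *
 * Returns 0 on success or -ENOMEM if the originator hash could not be
 * allocated.
 */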
int batadv_originator_init(struct batadv_priv *bat_priv)
{
        if (bat_priv->orig_hash)
                return 0;

        bat_priv->orig_hash = batadv_hash_new(1024);

        if (!bat_priv->orig_hash)
                goto err;

        batadv_hash_set_lock_class(bat_priv->orig_hash,
                                   &batadv_orig_hash_lock_class_key);

        INIT_DELAYED_WORK(&bat_priv->orig_work, batadv_purge_orig);
        queue_delayed_work(batadv_event_workqueue,
                           &bat_priv->orig_work,
                           msecs_to_jiffies(BATADV_ORIG_WORK_PERIOD));

        return 0;

err:
        return -ENOMEM;
}

/**
 * batadv_neigh_ifinfo_free_rcu - free the neigh_ifinfo object
 * @rcu: rcu pointer of the neigh_ifinfo object
 */
static void batadv_neigh_ifinfo_free_rcu(struct rcu_head *rcu)
{
        struct batadv_neigh_ifinfo *neigh_ifinfo;

        neigh_ifinfo = container_of(rcu, struct batadv_neigh_ifinfo, rcu);

        if (neigh_ifinfo->if_outgoing != BATADV_IF_DEFAULT)
                batadv_hardif_free_ref_now(neigh_ifinfo->if_outgoing);

        kfree(neigh_ifinfo);
}

/**
 * batadv_neigh_ifinfo_free_ref_now - decrement the refcounter and possibly
 *  free the neigh_ifinfo (without rcu callback)
 * @neigh_ifinfo: the neigh_ifinfo object to release
 */
static void
batadv_neigh_ifinfo_free_ref_now(struct batadv_neigh_ifinfo *neigh_ifinfo)
{
        if (atomic_dec_and_test(&neigh_ifinfo->refcount))
                batadv_neigh_ifinfo_free_rcu(&neigh_ifinfo->rcu);
}

/**
 * batadv_neigh_ifinfo_free_ref - decrement the refcounter and possibly free
 *  the neigh_ifinfo
 * @neigh_ifinfo: the neigh_ifinfo object to release
 */
void batadv_neigh_ifinfo_free_ref(struct batadv_neigh_ifinfo *neigh_ifinfo)
{
        if (atomic_dec_and_test(&neigh_ifinfo->refcount))
                call_rcu(&neigh_ifinfo->rcu, batadv_neigh_ifinfo_free_rcu);
}

/**
 * batadv_neigh_node_free_rcu - free the neigh_node
 * @rcu: rcu pointer of the neigh_node
 */
static void batadv_neigh_node_free_rcu(struct rcu_head *rcu)
{
        struct hlist_node *node_tmp;
        struct batadv_neigh_node *neigh_node;
        struct batadv_neigh_ifinfo *neigh_ifinfo;

        neigh_node = container_of(rcu, struct batadv_neigh_node, rcu);

        hlist_for_each_entry_safe(neigh_ifinfo, node_tmp,
                                  &neigh_node->ifinfo_list, list) {
                batadv_neigh_ifinfo_free_ref_now(neigh_ifinfo);
        }
        batadv_hardif_free_ref_now(neigh_node->if_incoming);

        kfree(neigh_node);
}

/**
 * batadv_neigh_node_free_ref_now - decrement the neighbor's refcounter
 *  and possibly free it (without rcu callback)
 * @neigh_node: the neighbor to free
 */
static void
batadv_neigh_node_free_ref_now(struct batadv_neigh_node *neigh_node)
{
        if (atomic_dec_and_test(&neigh_node->refcount))
                batadv_neigh_node_free_rcu(&neigh_node->rcu);
}

/**
 * batadv_neigh_node_free_ref - decrement the neighbor's refcounter
 *  and possibly free it
 * @neigh_node: the neighbor to free
 */
void batadv_neigh_node_free_ref(struct batadv_neigh_node *neigh_node)
{
        if (atomic_dec_and_test(&neigh_node->refcount))
                call_rcu(&neigh_node->rcu, batadv_neigh_node_free_rcu);
}

/**
 * batadv_orig_node_get_router - get the currently selected router towards an
 *  originator
 * @orig_node: the orig node to query
 *
 * Returns the router chosen for this originator or NULL if none exists. The
 * refcounter of the returned router is increased by 1.
 */
struct batadv_neigh_node *
batadv_orig_node_get_router(struct batadv_orig_node *orig_node)
{
        struct batadv_neigh_node *router;

        rcu_read_lock();
        router = rcu_dereference(orig_node->router);

        if (router && !atomic_inc_not_zero(&router->refcount))
                router = NULL;

        rcu_read_unlock();
        return router;
}

/**
 * batadv_neigh_ifinfo_get - find the ifinfo from a neigh_node
 * @neigh: the neigh node to be queried
 * @if_outgoing: the interface for which the ifinfo should be acquired
 *
 * The object is returned with refcounter increased by 1.
 *
 * Returns the requested neigh_ifinfo or NULL if not found
 */
struct batadv_neigh_ifinfo *
batadv_neigh_ifinfo_get(struct batadv_neigh_node *neigh,
                        struct batadv_hard_iface *if_outgoing)
{
        struct batadv_neigh_ifinfo *neigh_ifinfo = NULL,
                                   *tmp_neigh_ifinfo;

        rcu_read_lock();
        hlist_for_each_entry_rcu(tmp_neigh_ifinfo, &neigh->ifinfo_list,
                                 list) {
                if (tmp_neigh_ifinfo->if_outgoing != if_outgoing)
                        continue;

                if (!atomic_inc_not_zero(&tmp_neigh_ifinfo->refcount))
                        continue;

                neigh_ifinfo = tmp_neigh_ifinfo;
                break;
        }
        rcu_read_unlock();

        return neigh_ifinfo;
}

/**
 * batadv_neigh_ifinfo_new - search and possibly create a neigh_ifinfo object
 * @neigh: the neigh node to be queried
 * @if_outgoing: the interface for which the ifinfo should be acquired
 *
 * Returns NULL in case of failure or the neigh_ifinfo object for the
 * if_outgoing interface otherwise. The object is created and added to the
 * list if it does not exist.
 *
 * The object is returned with refcounter increased by 1.
 */
struct batadv_neigh_ifinfo *
batadv_neigh_ifinfo_new(struct batadv_neigh_node *neigh,
                        struct batadv_hard_iface *if_outgoing)
{
        struct batadv_neigh_ifinfo *neigh_ifinfo;

        spin_lock_bh(&neigh->ifinfo_lock);

        neigh_ifinfo = batadv_neigh_ifinfo_get(neigh, if_outgoing);
        if (neigh_ifinfo)
                goto out;

        neigh_ifinfo = kzalloc(sizeof(*neigh_ifinfo), GFP_ATOMIC);
        if (!neigh_ifinfo)
                goto out;

        if (if_outgoing && !atomic_inc_not_zero(&if_outgoing->refcount)) {
                kfree(neigh_ifinfo);
                neigh_ifinfo = NULL;
                goto out;
        }

        INIT_HLIST_NODE(&neigh_ifinfo->list);
        atomic_set(&neigh_ifinfo->refcount, 2);
        neigh_ifinfo->if_outgoing = if_outgoing;

        hlist_add_head_rcu(&neigh_ifinfo->list, &neigh->ifinfo_list);

out:
        spin_unlock_bh(&neigh->ifinfo_lock);

        return neigh_ifinfo;
}

/**
 * batadv_neigh_node_new - create and init a new neigh_node object
 * @hard_iface: the interface where the neighbour is connected
 * @neigh_addr: the mac address of the neighbour interface
 * @orig_node: originator object representing the neighbour
 *
 * Allocates a new neigh_node object and initialises all the generic fields.
 * Returns the new object or NULL on failure.
 */
struct batadv_neigh_node *
batadv_neigh_node_new(struct batadv_hard_iface *hard_iface,
                      const uint8_t *neigh_addr,
                      struct batadv_orig_node *orig_node)
{
        struct batadv_neigh_node *neigh_node;

        neigh_node = kzalloc(sizeof(*neigh_node), GFP_ATOMIC);
        if (!neigh_node)
                goto out;

        INIT_HLIST_NODE(&neigh_node->list);
        INIT_HLIST_HEAD(&neigh_node->ifinfo_list);
        spin_lock_init(&neigh_node->ifinfo_lock);

        memcpy(neigh_node->addr, neigh_addr, ETH_ALEN);
        neigh_node->if_incoming = hard_iface;
        neigh_node->orig_node = orig_node;

        /* extra reference for return */
        atomic_set(&neigh_node->refcount, 2);

out:
        return neigh_node;
}

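/**
 * batadv_orig_node_free_rcu - free the orig_node
 * @rcu: rcu pointer of the orig_node
 *
 * Releases the neighbor list, pending fragments, network coding and
 * translation table data of this originator before freeing the object itself.
 */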
static void batadv_orig_node_free_rcu(struct rcu_head *rcu)
{
        struct hlist_node *node_tmp;
        struct batadv_neigh_node *neigh_node;
        struct batadv_orig_node *orig_node;

        orig_node = container_of(rcu, struct batadv_orig_node, rcu);

        spin_lock_bh(&orig_node->neigh_list_lock);

        /* for all neighbors towards this originator ... */
        hlist_for_each_entry_safe(neigh_node, node_tmp,
                                  &orig_node->neigh_list, list) {
                hlist_del_rcu(&neigh_node->list);
                batadv_neigh_node_free_ref_now(neigh_node);
        }

        spin_unlock_bh(&orig_node->neigh_list_lock);

        /* Free nc_nodes */
        batadv_nc_purge_orig(orig_node->bat_priv, orig_node, NULL);

        batadv_frag_purge_orig(orig_node, NULL);

        batadv_tt_global_del_orig(orig_node->bat_priv, orig_node, -1,
                                  "originator timed out");

        if (orig_node->bat_priv->bat_algo_ops->bat_orig_free)
                orig_node->bat_priv->bat_algo_ops->bat_orig_free(orig_node);

        kfree(orig_node->tt_buff);
        kfree(orig_node);
}

/**
 * batadv_orig_node_free_ref - decrement the orig node refcounter and possibly
 *  schedule an rcu callback for freeing it
 * @orig_node: the orig node to free
 */
void batadv_orig_node_free_ref(struct batadv_orig_node *orig_node)
{
        if (atomic_dec_and_test(&orig_node->refcount))
                call_rcu(&orig_node->rcu, batadv_orig_node_free_rcu);
}

/**
 * batadv_orig_node_free_ref_now - decrement the orig node refcounter and
 *  possibly free it (without rcu callback)
 * @orig_node: the orig node to free
 */
void batadv_orig_node_free_ref_now(struct batadv_orig_node *orig_node)
{
        if (atomic_dec_and_test(&orig_node->refcount))
                batadv_orig_node_free_rcu(&orig_node->rcu);
}

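/**
 * batadv_originator_free - remove and free all originators
 * @bat_priv: the bat priv with all the soft interface information
 *
 * Cancels the purge worker, releases every orig_node in the originator hash
 * and destroys the hash itself.
 */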
void batadv_originator_free(struct batadv_priv *bat_priv)
{
        struct batadv_hashtable *hash = bat_priv->orig_hash;
        struct hlist_node *node_tmp;
        struct hlist_head *head;
        spinlock_t *list_lock; /* spinlock to protect write access */
        struct batadv_orig_node *orig_node;
        uint32_t i;

        if (!hash)
                return;

        cancel_delayed_work_sync(&bat_priv->orig_work);

        bat_priv->orig_hash = NULL;

        for (i = 0; i < hash->size; i++) {
                head = &hash->table[i];
                list_lock = &hash->list_locks[i];

                spin_lock_bh(list_lock);
                hlist_for_each_entry_safe(orig_node, node_tmp,
                                          head, hash_entry) {
                        hlist_del_rcu(&orig_node->hash_entry);
                        batadv_orig_node_free_ref(orig_node);
                }
                spin_unlock_bh(list_lock);
        }

        batadv_hash_destroy(hash);
}

/**
 * batadv_orig_node_new - creates a new orig_node
 * @bat_priv: the bat priv with all the soft interface information
 * @addr: the mac address of the originator
 *
 * Creates a new originator object and initialises all the generic fields.
 * The new object is not added to the originator list.
 * Returns the newly created object or NULL on failure.
 */
struct batadv_orig_node *batadv_orig_node_new(struct batadv_priv *bat_priv,
                                              const uint8_t *addr)
{
        struct batadv_orig_node *orig_node;
        struct batadv_orig_node_vlan *vlan;
        unsigned long reset_time;
        int i;

        batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
                   "Creating new originator: %pM\n", addr);

        orig_node = kzalloc(sizeof(*orig_node), GFP_ATOMIC);
        if (!orig_node)
                return NULL;

        INIT_HLIST_HEAD(&orig_node->neigh_list);
        INIT_LIST_HEAD(&orig_node->vlan_list);
        spin_lock_init(&orig_node->bcast_seqno_lock);
        spin_lock_init(&orig_node->neigh_list_lock);
        spin_lock_init(&orig_node->tt_buff_lock);
        spin_lock_init(&orig_node->tt_lock);
        spin_lock_init(&orig_node->vlan_list_lock);

        batadv_nc_init_orig(orig_node);

        /* extra reference for return */
        atomic_set(&orig_node->refcount, 2);

        orig_node->tt_initialised = false;
        orig_node->bat_priv = bat_priv;
        memcpy(orig_node->orig, addr, ETH_ALEN);
        batadv_dat_init_orig_node_addr(orig_node);
        orig_node->router = NULL;
        atomic_set(&orig_node->last_ttvn, 0);
        orig_node->tt_buff = NULL;
        orig_node->tt_buff_len = 0;
        reset_time = jiffies - 1 - msecs_to_jiffies(BATADV_RESET_PROTECTION_MS);
        orig_node->bcast_seqno_reset = reset_time;
        orig_node->batman_seqno_reset = reset_time;

        /* create a vlan object for the "untagged" LAN */
        vlan = batadv_orig_node_vlan_new(orig_node, BATADV_NO_FLAGS);
        if (!vlan)
                goto free_orig_node;
        /* batadv_orig_node_vlan_new() increases the refcounter.
         * Immediately release vlan since it is not needed anymore in this
         * context
         */
        batadv_orig_node_vlan_free_ref(vlan);

        for (i = 0; i < BATADV_FRAG_BUFFER_COUNT; i++) {
                INIT_HLIST_HEAD(&orig_node->fragments[i].head);
                spin_lock_init(&orig_node->fragments[i].lock);
                orig_node->fragments[i].size = 0;
        }

        return orig_node;
free_orig_node:
        kfree(orig_node);
        return NULL;
}

/**
 * batadv_purge_orig_neighbors - purges neighbors from originator
 * @bat_priv: the bat priv with all the soft interface information
 * @orig_node: orig node which is to be checked
 *
 * Returns true if any neighbor was purged, false otherwise
 */
static bool
batadv_purge_orig_neighbors(struct batadv_priv *bat_priv,
                            struct batadv_orig_node *orig_node)
{
        struct hlist_node *node_tmp;
        struct batadv_neigh_node *neigh_node;
        bool neigh_purged = false;
        unsigned long last_seen;
        struct batadv_hard_iface *if_incoming;

        spin_lock_bh(&orig_node->neigh_list_lock);

        /* for all neighbors towards this originator ... */
        hlist_for_each_entry_safe(neigh_node, node_tmp,
                                  &orig_node->neigh_list, list) {
                last_seen = neigh_node->last_seen;
                if_incoming = neigh_node->if_incoming;

                if ((batadv_has_timed_out(last_seen, BATADV_PURGE_TIMEOUT)) ||
                    (if_incoming->if_status == BATADV_IF_INACTIVE) ||
                    (if_incoming->if_status == BATADV_IF_NOT_IN_USE) ||
                    (if_incoming->if_status == BATADV_IF_TO_BE_REMOVED)) {
                        if ((if_incoming->if_status == BATADV_IF_INACTIVE) ||
                            (if_incoming->if_status == BATADV_IF_NOT_IN_USE) ||
                            (if_incoming->if_status == BATADV_IF_TO_BE_REMOVED))
                                batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
                                           "neighbor purge: originator %pM, neighbor: %pM, iface: %s\n",
                                           orig_node->orig, neigh_node->addr,
                                           if_incoming->net_dev->name);
                        else
                                batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
                                           "neighbor timeout: originator %pM, neighbor: %pM, last_seen: %u\n",
                                           orig_node->orig, neigh_node->addr,
                                           jiffies_to_msecs(last_seen));

                        neigh_purged = true;

                        hlist_del_rcu(&neigh_node->list);
                        batadv_neigh_node_free_ref(neigh_node);
                }
        }

        spin_unlock_bh(&orig_node->neigh_list_lock);
        return neigh_purged;
}

/**
 * batadv_find_best_neighbor - finds the best neighbor after purging
 * @bat_priv: the bat priv with all the soft interface information
 * @orig_node: orig node which is to be checked
 * @if_outgoing: the interface for which the metric should be compared
 *
 * Returns the current best neighbor, with refcount increased.
 */
static struct batadv_neigh_node *
batadv_find_best_neighbor(struct batadv_priv *bat_priv,
                          struct batadv_orig_node *orig_node,
                          struct batadv_hard_iface *if_outgoing)
{
        struct batadv_neigh_node *best = NULL, *neigh;
        struct batadv_algo_ops *bao = bat_priv->bat_algo_ops;

        rcu_read_lock();
        hlist_for_each_entry_rcu(neigh, &orig_node->neigh_list, list) {
                if (best && (bao->bat_neigh_cmp(neigh, if_outgoing,
                                                best, if_outgoing) <= 0))
                        continue;

                if (!atomic_inc_not_zero(&neigh->refcount))
                        continue;

                if (best)
                        batadv_neigh_node_free_ref(best);

                best = neigh;
        }
        rcu_read_unlock();

        return best;
}

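/**
 * batadv_purge_orig_node - purge an orig_node and its neighbors if needed
 * @bat_priv: the bat priv with all the soft interface information
 * @orig_node: orig node which is to be checked
 *
 * Returns true if the originator timed out and has to be removed, false
 * otherwise. If only neighbors were purged, the best remaining neighbor is
 * elected as the new router.
 */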
static bool batadv_purge_orig_node(struct batadv_priv *bat_priv,
                                   struct batadv_orig_node *orig_node)
{
        struct batadv_neigh_node *best_neigh_node;

        if (batadv_has_timed_out(orig_node->last_seen,
                                 2 * BATADV_PURGE_TIMEOUT)) {
                batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
                           "Originator timeout: originator %pM, last_seen %u\n",
                           orig_node->orig,
                           jiffies_to_msecs(orig_node->last_seen));
                return true;
        }
        if (!batadv_purge_orig_neighbors(bat_priv, orig_node))
                return false;

        best_neigh_node = batadv_find_best_neighbor(bat_priv, orig_node,
                                                    BATADV_IF_DEFAULT);
        batadv_update_route(bat_priv, orig_node, best_neigh_node);
        if (best_neigh_node)
                batadv_neigh_node_free_ref(best_neigh_node);

        return false;
}

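/**
 * _batadv_purge_orig - purge obsolete originators from the originator hash
 * @bat_priv: the bat priv with all the soft interface information
 *
 * Removes timed out originators, purges stale fragment chains of the
 * remaining ones and finally re-runs the gateway election.
 */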
static void _batadv_purge_orig(struct batadv_priv *bat_priv)
{
        struct batadv_hashtable *hash = bat_priv->orig_hash;
        struct hlist_node *node_tmp;
        struct hlist_head *head;
        spinlock_t *list_lock; /* spinlock to protect write access */
        struct batadv_orig_node *orig_node;
        uint32_t i;

        if (!hash)
                return;

        /* for all origins... */
        for (i = 0; i < hash->size; i++) {
                head = &hash->table[i];
                list_lock = &hash->list_locks[i];

                spin_lock_bh(list_lock);
                hlist_for_each_entry_safe(orig_node, node_tmp,
                                          head, hash_entry) {
                        if (batadv_purge_orig_node(bat_priv, orig_node)) {
                                batadv_gw_node_delete(bat_priv, orig_node);
                                hlist_del_rcu(&orig_node->hash_entry);
                                batadv_orig_node_free_ref(orig_node);
                                continue;
                        }

                        batadv_frag_purge_orig(orig_node,
                                               batadv_frag_check_entry);
                }
                spin_unlock_bh(list_lock);
        }

        batadv_gw_node_purge(bat_priv);
        batadv_gw_election(bat_priv);
}

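/**
 * batadv_purge_orig - periodic originator purging worker
 * @work: work item of the delayed purge worker
 *
 * Purges the originator hash and re-schedules itself.
 */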
static void batadv_purge_orig(struct work_struct *work)
{
        struct delayed_work *delayed_work;
        struct batadv_priv *bat_priv;

        delayed_work = container_of(work, struct delayed_work, work);
        bat_priv = container_of(delayed_work, struct batadv_priv, orig_work);
        _batadv_purge_orig(bat_priv);
        queue_delayed_work(batadv_event_workqueue,
                           &bat_priv->orig_work,
                           msecs_to_jiffies(BATADV_ORIG_WORK_PERIOD));
}

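/**
 * batadv_purge_orig_ref - manually trigger an originator purge
 * @bat_priv: the bat priv with all the soft interface information
 */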
void batadv_purge_orig_ref(struct batadv_priv *bat_priv)
{
        _batadv_purge_orig(bat_priv);
}

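/**
 * batadv_orig_seq_print_text - print the originator table in a seq file
 * @seq: seq file to print on
 * @offset: not used
 *
 * Delegates the actual printing to the originator print function of the
 * active routing algorithm. Returns always 0.
 */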
int batadv_orig_seq_print_text(struct seq_file *seq, void *offset)
{
        struct net_device *net_dev = (struct net_device *)seq->private;
        struct batadv_priv *bat_priv = netdev_priv(net_dev);
        struct batadv_hard_iface *primary_if;

        primary_if = batadv_seq_print_text_primary_if_get(seq);
        if (!primary_if)
                return 0;

        seq_printf(seq, "[B.A.T.M.A.N. adv %s, MainIF/MAC: %s/%pM (%s %s)]\n",
                   BATADV_SOURCE_VERSION, primary_if->net_dev->name,
                   primary_if->net_dev->dev_addr, net_dev->name,
                   bat_priv->bat_algo_ops->name);

        batadv_hardif_free_ref(primary_if);

        if (!bat_priv->bat_algo_ops->bat_orig_print) {
                seq_puts(seq,
                         "No printing function for this routing protocol\n");
                return 0;
        }

        bat_priv->bat_algo_ops->bat_orig_print(bat_priv, seq);

        return 0;
}

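/**
 * batadv_orig_hash_add_if - resize all orig nodes when a new interface is
 *  added
 * @hard_iface: the newly added hard interface
 * @max_if_num: the new number of interfaces
 *
 * Returns 0 on success or -ENOMEM if resizing an orig_node failed.
 */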
int batadv_orig_hash_add_if(struct batadv_hard_iface *hard_iface,
                            int max_if_num)
{
        struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
        struct batadv_algo_ops *bao = bat_priv->bat_algo_ops;
        struct batadv_hashtable *hash = bat_priv->orig_hash;
        struct hlist_head *head;
        struct batadv_orig_node *orig_node;
        uint32_t i;
        int ret;

        /* resize all orig nodes because orig_node->bcast_own(_sum) depend on
         * if_num
         */
        for (i = 0; i < hash->size; i++) {
                head = &hash->table[i];

                rcu_read_lock();
                hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
                        ret = 0;
                        if (bao->bat_orig_add_if)
                                ret = bao->bat_orig_add_if(orig_node,
                                                           max_if_num);
                        if (ret == -ENOMEM)
                                goto err;
                }
                rcu_read_unlock();
        }

        return 0;

err:
        rcu_read_unlock();
        return -ENOMEM;
}

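/**
 * batadv_orig_hash_del_if - resize all orig nodes when an interface is
 *  removed
 * @hard_iface: the hard interface being removed
 * @max_if_num: the new number of interfaces
 *
 * Returns 0 on success or -ENOMEM if resizing an orig_node failed. On
 * success the remaining interfaces are renumbered to close the gap.
 */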
int batadv_orig_hash_del_if(struct batadv_hard_iface *hard_iface,
                            int max_if_num)
{
        struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
        struct batadv_hashtable *hash = bat_priv->orig_hash;
        struct hlist_head *head;
        struct batadv_hard_iface *hard_iface_tmp;
        struct batadv_orig_node *orig_node;
        struct batadv_algo_ops *bao = bat_priv->bat_algo_ops;
        uint32_t i;
        int ret;

        /* resize all orig nodes because orig_node->bcast_own(_sum) depend on
         * if_num
         */
        for (i = 0; i < hash->size; i++) {
                head = &hash->table[i];

                rcu_read_lock();
                hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
                        ret = 0;
                        if (bao->bat_orig_del_if)
                                ret = bao->bat_orig_del_if(orig_node,
                                                           max_if_num,
                                                           hard_iface->if_num);
                        if (ret == -ENOMEM)
                                goto err;
                }
                rcu_read_unlock();
        }

        /* renumber remaining batman interfaces _inside_ of orig_hash_lock */
        rcu_read_lock();
        list_for_each_entry_rcu(hard_iface_tmp, &batadv_hardif_list, list) {
                if (hard_iface_tmp->if_status == BATADV_IF_NOT_IN_USE)
                        continue;

                if (hard_iface == hard_iface_tmp)
                        continue;

                if (hard_iface->soft_iface != hard_iface_tmp->soft_iface)
                        continue;

                if (hard_iface_tmp->if_num > hard_iface->if_num)
                        hard_iface_tmp->if_num--;
        }
        rcu_read_unlock();

        hard_iface->if_num = -1;
        return 0;

err:
        rcu_read_unlock();
        return -ENOMEM;
}