batman-adv: Prefix originator static inline functions with batadv_
[firefly-linux-kernel-4.4.55.git] net/batman-adv/translation-table.c
1 /* Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
2  *
3  * Marek Lindner, Simon Wunderlich, Antonio Quartulli
4  *
5  * This program is free software; you can redistribute it and/or
6  * modify it under the terms of version 2 of the GNU General Public
7  * License as published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope that it will be useful, but
10  * WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12  * General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, write to the Free Software
16  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
17  * 02110-1301, USA
18  */
19
20 #include "main.h"
21 #include "translation-table.h"
22 #include "soft-interface.h"
23 #include "hard-interface.h"
24 #include "send.h"
25 #include "hash.h"
26 #include "originator.h"
27 #include "routing.h"
28 #include "bridge_loop_avoidance.h"
29
30 #include <linux/crc16.h>
31
32 static void send_roam_adv(struct bat_priv *bat_priv, uint8_t *client,
33                           struct orig_node *orig_node);
34 static void tt_purge(struct work_struct *work);
35 static void tt_global_del_orig_list(struct tt_global_entry *tt_global_entry);
36
37 /* returns 1 if the two MAC addresses are the same, 0 otherwise */
38 static int compare_tt(const struct hlist_node *node, const void *data2)
39 {
40         const void *data1 = container_of(node, struct tt_common_entry,
41                                          hash_entry);
42
43         return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0);
44 }
45
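/* (re)arm the delayed work that runs the translation table purge routine
 * (tt_purge) 5 seconds from now
 */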
46 static void tt_start_timer(struct bat_priv *bat_priv)
47 {
48         INIT_DELAYED_WORK(&bat_priv->tt_work, tt_purge);
49         queue_delayed_work(batadv_event_workqueue, &bat_priv->tt_work,
50                            msecs_to_jiffies(5000));
51 }
52
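/* look up a translation table entry by its MAC address in the given hash;
 * on success the entry's refcount is increased and the caller has to
 * release it again
 */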
53 static struct tt_common_entry *tt_hash_find(struct hashtable_t *hash,
54                                             const void *data)
55 {
56         struct hlist_head *head;
57         struct hlist_node *node;
58         struct tt_common_entry *tt_common_entry, *tt_common_entry_tmp = NULL;
59         uint32_t index;
60
61         if (!hash)
62                 return NULL;
63
64         index = batadv_choose_orig(data, hash->size);
65         head = &hash->table[index];
66
67         rcu_read_lock();
68         hlist_for_each_entry_rcu(tt_common_entry, node, head, hash_entry) {
69                 if (!compare_eth(tt_common_entry, data))
70                         continue;
71
72                 if (!atomic_inc_not_zero(&tt_common_entry->refcount))
73                         continue;
74
75                 tt_common_entry_tmp = tt_common_entry;
76                 break;
77         }
78         rcu_read_unlock();
79
80         return tt_common_entry_tmp;
81 }
82
83 static struct tt_local_entry *tt_local_hash_find(struct bat_priv *bat_priv,
84                                                  const void *data)
85 {
86         struct tt_common_entry *tt_common_entry;
87         struct tt_local_entry *tt_local_entry = NULL;
88
89         tt_common_entry = tt_hash_find(bat_priv->tt_local_hash, data);
90         if (tt_common_entry)
91                 tt_local_entry = container_of(tt_common_entry,
92                                               struct tt_local_entry, common);
93         return tt_local_entry;
94 }
95
96 static struct tt_global_entry *tt_global_hash_find(struct bat_priv *bat_priv,
97                                                    const void *data)
98 {
99         struct tt_common_entry *tt_common_entry;
100         struct tt_global_entry *tt_global_entry = NULL;
101
102         tt_common_entry = tt_hash_find(bat_priv->tt_global_hash, data);
103         if (tt_common_entry)
104                 tt_global_entry = container_of(tt_common_entry,
105                                                struct tt_global_entry, common);
106         return tt_global_entry;
107
108 }
109
110 static void tt_local_entry_free_ref(struct tt_local_entry *tt_local_entry)
111 {
112         if (atomic_dec_and_test(&tt_local_entry->common.refcount))
113                 kfree_rcu(tt_local_entry, common.rcu);
114 }
115
116 static void tt_global_entry_free_rcu(struct rcu_head *rcu)
117 {
118         struct tt_common_entry *tt_common_entry;
119         struct tt_global_entry *tt_global_entry;
120
121         tt_common_entry = container_of(rcu, struct tt_common_entry, rcu);
122         tt_global_entry = container_of(tt_common_entry, struct tt_global_entry,
123                                        common);
124
125         kfree(tt_global_entry);
126 }
127
128 static void tt_global_entry_free_ref(struct tt_global_entry *tt_global_entry)
129 {
130         if (atomic_dec_and_test(&tt_global_entry->common.refcount)) {
131                 tt_global_del_orig_list(tt_global_entry);
132                 call_rcu(&tt_global_entry->common.rcu,
133                          tt_global_entry_free_rcu);
134         }
135 }
136
137 static void tt_orig_list_entry_free_rcu(struct rcu_head *rcu)
138 {
139         struct tt_orig_list_entry *orig_entry;
140
141         orig_entry = container_of(rcu, struct tt_orig_list_entry, rcu);
142         atomic_dec(&orig_entry->orig_node->tt_size);
143         batadv_orig_node_free_ref(orig_entry->orig_node);
144         kfree(orig_entry);
145 }
146
147 static void tt_orig_list_entry_free_ref(struct tt_orig_list_entry *orig_entry)
148 {
149         call_rcu(&orig_entry->rcu, tt_orig_list_entry_free_rcu);
150 }
151
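/* queue a local translation table change (client added or deleted) so that
 * it gets appended to the OGMs sent next
 */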
152 static void tt_local_event(struct bat_priv *bat_priv, const uint8_t *addr,
153                            uint8_t flags)
154 {
155         struct tt_change_node *tt_change_node;
156
157         tt_change_node = kmalloc(sizeof(*tt_change_node), GFP_ATOMIC);
158
159         if (!tt_change_node)
160                 return;
161
162         tt_change_node->change.flags = flags;
163         memcpy(tt_change_node->change.addr, addr, ETH_ALEN);
164
165         spin_lock_bh(&bat_priv->tt_changes_list_lock);
166         /* track the change in the OGM interval list */
167         list_add_tail(&tt_change_node->list, &bat_priv->tt_changes_list);
168         atomic_inc(&bat_priv->tt_local_changes);
169         spin_unlock_bh(&bat_priv->tt_changes_list_lock);
170
171         atomic_set(&bat_priv->tt_ogm_append_cnt, 0);
172 }
173
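/* number of bytes needed to carry changes_num tt changes in a packet */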
174 int batadv_tt_len(int changes_num)
175 {
176         return changes_num * sizeof(struct tt_change);
177 }
178
179 static int tt_local_init(struct bat_priv *bat_priv)
180 {
181         if (bat_priv->tt_local_hash)
182                 return 0;
183
184         bat_priv->tt_local_hash = batadv_hash_new(1024);
185
186         if (!bat_priv->tt_local_hash)
187                 return -ENOMEM;
188
189         return 0;
190 }
191
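/* register a client MAC address in the local translation table; if the
 * address was previously announced as global by other originators (i.e. the
 * client roamed to us) roaming advertisements are sent to those originators
 */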
192 void batadv_tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
193                          int ifindex)
194 {
195         struct bat_priv *bat_priv = netdev_priv(soft_iface);
196         struct tt_local_entry *tt_local_entry = NULL;
197         struct tt_global_entry *tt_global_entry = NULL;
198         struct hlist_head *head;
199         struct hlist_node *node;
200         struct tt_orig_list_entry *orig_entry;
201         int hash_added;
202
203         tt_local_entry = tt_local_hash_find(bat_priv, addr);
204
205         if (tt_local_entry) {
206                 tt_local_entry->last_seen = jiffies;
207                 /* possibly unset the TT_CLIENT_PENDING flag */
208                 tt_local_entry->common.flags &= ~TT_CLIENT_PENDING;
209                 goto out;
210         }
211
212         tt_local_entry = kmalloc(sizeof(*tt_local_entry), GFP_ATOMIC);
213         if (!tt_local_entry)
214                 goto out;
215
216         bat_dbg(DBG_TT, bat_priv,
217                 "Creating new local tt entry: %pM (ttvn: %d)\n", addr,
218                 (uint8_t)atomic_read(&bat_priv->ttvn));
219
220         memcpy(tt_local_entry->common.addr, addr, ETH_ALEN);
221         tt_local_entry->common.flags = NO_FLAGS;
222         if (batadv_is_wifi_iface(ifindex))
223                 tt_local_entry->common.flags |= TT_CLIENT_WIFI;
224         atomic_set(&tt_local_entry->common.refcount, 2);
225         tt_local_entry->last_seen = jiffies;
226
227         /* the batman interface mac address should never be purged */
228         if (compare_eth(addr, soft_iface->dev_addr))
229                 tt_local_entry->common.flags |= TT_CLIENT_NOPURGE;
230
231         /* The local entry has to be marked as NEW to avoid sending it in
232          * a full table response going out before the next ttvn increment
233          * (consistency check)
234          */
235         tt_local_entry->common.flags |= TT_CLIENT_NEW;
236
237         hash_added = batadv_hash_add(bat_priv->tt_local_hash, compare_tt,
238                                      batadv_choose_orig,
239                                      &tt_local_entry->common,
240                                      &tt_local_entry->common.hash_entry);
241
242         if (unlikely(hash_added != 0)) {
243                 /* remove the reference for the hash */
244                 tt_local_entry_free_ref(tt_local_entry);
245                 goto out;
246         }
247
248         tt_local_event(bat_priv, addr, tt_local_entry->common.flags);
249
250         /* remove address from global hash if present */
251         tt_global_entry = tt_global_hash_find(bat_priv, addr);
252
253         /* Check whether this is a roaming event */
254         if (tt_global_entry) {
255                 /* These nodes are probably going to update their tt tables */
256                 head = &tt_global_entry->orig_list;
257                 rcu_read_lock();
258                 hlist_for_each_entry_rcu(orig_entry, node, head, list) {
259                         orig_entry->orig_node->tt_poss_change = true;
260
261                         send_roam_adv(bat_priv, tt_global_entry->common.addr,
262                                       orig_entry->orig_node);
263                 }
264                 rcu_read_unlock();
265                 /* The global entry has to be marked as ROAMING and
266                  * has to be kept for consistency purposes
267                  */
268                 tt_global_entry->common.flags |= TT_CLIENT_ROAM;
269                 tt_global_entry->roam_at = jiffies;
270         }
271 out:
272         if (tt_local_entry)
273                 tt_local_entry_free_ref(tt_local_entry);
274         if (tt_global_entry)
275                 tt_global_entry_free_ref(tt_global_entry);
276 }
277
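/* grow the OGM packet buffer to new_packet_len bytes while preserving the
 * first min_packet_len bytes; the old buffer is kept if the allocation fails
 */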
278 static void tt_realloc_packet_buff(unsigned char **packet_buff,
279                                    int *packet_buff_len, int min_packet_len,
280                                    int new_packet_len)
281 {
282         unsigned char *new_buff;
283
284         new_buff = kmalloc(new_packet_len, GFP_ATOMIC);
285
286         /* keep old buffer if kmalloc should fail */
287         if (new_buff) {
288                 memcpy(new_buff, *packet_buff, min_packet_len);
289                 kfree(*packet_buff);
290                 *packet_buff = new_buff;
291                 *packet_buff_len = new_packet_len;
292         }
293 }
294
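/* resize the OGM packet buffer so that all pending local tt changes fit;
 * if they would not fit into the primary interface MTU the buffer is kept
 * at min_packet_len and no change is packed
 */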
295 static void tt_prepare_packet_buff(struct bat_priv *bat_priv,
296                                    unsigned char **packet_buff,
297                                    int *packet_buff_len, int min_packet_len)
298 {
299         struct hard_iface *primary_if;
300         int req_len;
301
302         primary_if = batadv_primary_if_get_selected(bat_priv);
303
304         req_len = min_packet_len;
305         req_len += batadv_tt_len(atomic_read(&bat_priv->tt_local_changes));
306
307         /* if we have too many changes for one packet don't send any of them
308          * and wait for the tt table request, which will be fragmented
309          */
310         if ((!primary_if) || (req_len > primary_if->soft_iface->mtu))
311                 req_len = min_packet_len;
312
313         tt_realloc_packet_buff(packet_buff, packet_buff_len,
314                                min_packet_len, req_len);
315
316         if (primary_if)
317                 batadv_hardif_free_ref(primary_if);
318 }
319
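/* copy as many queued tt changes as fit into the OGM packet buffer, keep a
 * copy in bat_priv->tt_buff for answering later tt requests and return the
 * number of changes actually packed
 */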
320 static int tt_changes_fill_buff(struct bat_priv *bat_priv,
321                                 unsigned char **packet_buff,
322                                 int *packet_buff_len, int min_packet_len)
323 {
324         struct tt_change_node *entry, *safe;
325         int count = 0, tot_changes = 0, new_len;
326         unsigned char *tt_buff;
327
328         tt_prepare_packet_buff(bat_priv, packet_buff,
329                                packet_buff_len, min_packet_len);
330
331         new_len = *packet_buff_len - min_packet_len;
332         tt_buff = *packet_buff + min_packet_len;
333
334         if (new_len > 0)
335                 tot_changes = new_len / batadv_tt_len(1);
336
337         spin_lock_bh(&bat_priv->tt_changes_list_lock);
338         atomic_set(&bat_priv->tt_local_changes, 0);
339
340         list_for_each_entry_safe(entry, safe, &bat_priv->tt_changes_list,
341                                  list) {
342                 if (count < tot_changes) {
343                         memcpy(tt_buff + batadv_tt_len(count),
344                                &entry->change, sizeof(struct tt_change));
345                         count++;
346                 }
347                 list_del(&entry->list);
348                 kfree(entry);
349         }
350         spin_unlock_bh(&bat_priv->tt_changes_list_lock);
351
352         /* Keep the buffer for possible tt_request */
353         spin_lock_bh(&bat_priv->tt_buff_lock);
354         kfree(bat_priv->tt_buff);
355         bat_priv->tt_buff_len = 0;
356         bat_priv->tt_buff = NULL;
357         /* check whether this new OGM has no changes due to size problems */
358         if (new_len > 0) {
359                 /* if kmalloc() fails we will reply with the full table
360                  * instead of providing the diff
361                  */
362                 bat_priv->tt_buff = kmalloc(new_len, GFP_ATOMIC);
363                 if (bat_priv->tt_buff) {
364                         memcpy(bat_priv->tt_buff, tt_buff, new_len);
365                         bat_priv->tt_buff_len = new_len;
366                 }
367         }
368         spin_unlock_bh(&bat_priv->tt_buff_lock);
369
370         return count;
371 }
372
373 int batadv_tt_local_seq_print_text(struct seq_file *seq, void *offset)
374 {
375         struct net_device *net_dev = (struct net_device *)seq->private;
376         struct bat_priv *bat_priv = netdev_priv(net_dev);
377         struct hashtable_t *hash = bat_priv->tt_local_hash;
378         struct tt_common_entry *tt_common_entry;
379         struct hard_iface *primary_if;
380         struct hlist_node *node;
381         struct hlist_head *head;
382         uint32_t i;
383         int ret = 0;
384
385         primary_if = batadv_primary_if_get_selected(bat_priv);
386         if (!primary_if) {
387                 ret = seq_printf(seq,
388                                  "BATMAN mesh %s disabled - please specify interfaces to enable it\n",
389                                  net_dev->name);
390                 goto out;
391         }
392
393         if (primary_if->if_status != IF_ACTIVE) {
394                 ret = seq_printf(seq,
395                                  "BATMAN mesh %s disabled - primary interface not active\n",
396                                  net_dev->name);
397                 goto out;
398         }
399
400         seq_printf(seq,
401                    "Locally retrieved addresses (from %s) announced via TT (TTVN: %u):\n",
402                    net_dev->name, (uint8_t)atomic_read(&bat_priv->ttvn));
403
404         for (i = 0; i < hash->size; i++) {
405                 head = &hash->table[i];
406
407                 rcu_read_lock();
408                 hlist_for_each_entry_rcu(tt_common_entry, node,
409                                          head, hash_entry) {
410                         seq_printf(seq, " * %pM [%c%c%c%c%c]\n",
411                                    tt_common_entry->addr,
412                                    (tt_common_entry->flags &
413                                     TT_CLIENT_ROAM ? 'R' : '.'),
414                                    (tt_common_entry->flags &
415                                     TT_CLIENT_NOPURGE ? 'P' : '.'),
416                                    (tt_common_entry->flags &
417                                     TT_CLIENT_NEW ? 'N' : '.'),
418                                    (tt_common_entry->flags &
419                                     TT_CLIENT_PENDING ? 'X' : '.'),
420                                    (tt_common_entry->flags &
421                                     TT_CLIENT_WIFI ? 'W' : '.'));
422                 }
423                 rcu_read_unlock();
424         }
425 out:
426         if (primary_if)
427                 batadv_hardif_free_ref(primary_if);
428         return ret;
429 }
430
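/* mark a local tt entry as pending removal and announce the deletion
 * (together with the given flags) as a tt change event
 */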
431 static void tt_local_set_pending(struct bat_priv *bat_priv,
432                                  struct tt_local_entry *tt_local_entry,
433                                  uint16_t flags, const char *message)
434 {
435         tt_local_event(bat_priv, tt_local_entry->common.addr,
436                        tt_local_entry->common.flags | flags);
437
438         /* The local client has to be marked as "pending to be removed" but has
439          * to be kept in the table in order to send it in a full table
440          * response issued before the next ttvn increment (consistency check)
441          */
442         tt_local_entry->common.flags |= TT_CLIENT_PENDING;
443
444         bat_dbg(DBG_TT, bat_priv,
445                 "Local tt entry (%pM) pending to be removed: %s\n",
446                 tt_local_entry->common.addr, message);
447 }
448
449 void batadv_tt_local_remove(struct bat_priv *bat_priv, const uint8_t *addr,
450                             const char *message, bool roaming)
451 {
452         struct tt_local_entry *tt_local_entry = NULL;
453
454         tt_local_entry = tt_local_hash_find(bat_priv, addr);
455         if (!tt_local_entry)
456                 goto out;
457
458         tt_local_set_pending(bat_priv, tt_local_entry, TT_CLIENT_DEL |
459                              (roaming ? TT_CLIENT_ROAM : NO_FLAGS), message);
460 out:
461         if (tt_local_entry)
462                 tt_local_entry_free_ref(tt_local_entry);
463 }
464
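/* mark timed out local clients as pending removal, skipping entries flagged
 * TT_CLIENT_NOPURGE or already pending
 */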
465 static void tt_local_purge(struct bat_priv *bat_priv)
466 {
467         struct hashtable_t *hash = bat_priv->tt_local_hash;
468         struct tt_local_entry *tt_local_entry;
469         struct tt_common_entry *tt_common_entry;
470         struct hlist_node *node, *node_tmp;
471         struct hlist_head *head;
472         spinlock_t *list_lock; /* protects write access to the hash lists */
473         uint32_t i;
474
475         for (i = 0; i < hash->size; i++) {
476                 head = &hash->table[i];
477                 list_lock = &hash->list_locks[i];
478
479                 spin_lock_bh(list_lock);
480                 hlist_for_each_entry_safe(tt_common_entry, node, node_tmp,
481                                           head, hash_entry) {
482                         tt_local_entry = container_of(tt_common_entry,
483                                                       struct tt_local_entry,
484                                                       common);
485                         if (tt_local_entry->common.flags & TT_CLIENT_NOPURGE)
486                                 continue;
487
488                         /* entry already marked for deletion */
489                         if (tt_local_entry->common.flags & TT_CLIENT_PENDING)
490                                 continue;
491
492                         if (!has_timed_out(tt_local_entry->last_seen,
493                                            TT_LOCAL_TIMEOUT))
494                                 continue;
495
496                         tt_local_set_pending(bat_priv, tt_local_entry,
497                                              TT_CLIENT_DEL, "timed out");
498                 }
499                 spin_unlock_bh(list_lock);
500         }
501
502 }
503
504 static void tt_local_table_free(struct bat_priv *bat_priv)
505 {
506         struct hashtable_t *hash;
507         spinlock_t *list_lock; /* protects write access to the hash lists */
508         struct tt_common_entry *tt_common_entry;
509         struct tt_local_entry *tt_local_entry;
510         struct hlist_node *node, *node_tmp;
511         struct hlist_head *head;
512         uint32_t i;
513
514         if (!bat_priv->tt_local_hash)
515                 return;
516
517         hash = bat_priv->tt_local_hash;
518
519         for (i = 0; i < hash->size; i++) {
520                 head = &hash->table[i];
521                 list_lock = &hash->list_locks[i];
522
523                 spin_lock_bh(list_lock);
524                 hlist_for_each_entry_safe(tt_common_entry, node, node_tmp,
525                                           head, hash_entry) {
526                         hlist_del_rcu(node);
527                         tt_local_entry = container_of(tt_common_entry,
528                                                       struct tt_local_entry,
529                                                       common);
530                         tt_local_entry_free_ref(tt_local_entry);
531                 }
532                 spin_unlock_bh(list_lock);
533         }
534
535         batadv_hash_destroy(hash);
536
537         bat_priv->tt_local_hash = NULL;
538 }
539
540 static int tt_global_init(struct bat_priv *bat_priv)
541 {
542         if (bat_priv->tt_global_hash)
543                 return 0;
544
545         bat_priv->tt_global_hash = batadv_hash_new(1024);
546
547         if (!bat_priv->tt_global_hash)
548                 return -ENOMEM;
549
550         return 0;
551 }
552
553 static void tt_changes_list_free(struct bat_priv *bat_priv)
554 {
555         struct tt_change_node *entry, *safe;
556
557         spin_lock_bh(&bat_priv->tt_changes_list_lock);
558
559         list_for_each_entry_safe(entry, safe, &bat_priv->tt_changes_list,
560                                  list) {
561                 list_del(&entry->list);
562                 kfree(entry);
563         }
564
565         atomic_set(&bat_priv->tt_local_changes, 0);
566         spin_unlock_bh(&bat_priv->tt_changes_list_lock);
567 }
568
569 /* find out if an orig_node is already in the list of a tt_global_entry.
570  * returns true if found, false otherwise
571  */
572 static bool tt_global_entry_has_orig(const struct tt_global_entry *entry,
573                                      const struct orig_node *orig_node)
574 {
575         struct tt_orig_list_entry *tmp_orig_entry;
576         const struct hlist_head *head;
577         struct hlist_node *node;
578         bool found = false;
579
580         rcu_read_lock();
581         head = &entry->orig_list;
582         hlist_for_each_entry_rcu(tmp_orig_entry, node, head, list) {
583                 if (tmp_orig_entry->orig_node == orig_node) {
584                         found = true;
585                         break;
586                 }
587         }
588         rcu_read_unlock();
589         return found;
590 }
591
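/* add orig_node to the list of originators announcing this global entry;
 * a reference to the orig_node is taken and its tt_size is increased
 */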
592 static void tt_global_add_orig_entry(struct tt_global_entry *tt_global_entry,
593                                      struct orig_node *orig_node,
594                                      int ttvn)
595 {
596         struct tt_orig_list_entry *orig_entry;
597
598         orig_entry = kzalloc(sizeof(*orig_entry), GFP_ATOMIC);
599         if (!orig_entry)
600                 return;
601
602         INIT_HLIST_NODE(&orig_entry->list);
603         atomic_inc(&orig_node->refcount);
604         atomic_inc(&orig_node->tt_size);
605         orig_entry->orig_node = orig_node;
606         orig_entry->ttvn = ttvn;
607
608         spin_lock_bh(&tt_global_entry->list_lock);
609         hlist_add_head_rcu(&orig_entry->list,
610                            &tt_global_entry->orig_list);
611         spin_unlock_bh(&tt_global_entry->list_lock);
612 }
613
614 /* caller must hold orig_node refcount */
615 int batadv_tt_global_add(struct bat_priv *bat_priv, struct orig_node *orig_node,
616                          const unsigned char *tt_addr, uint8_t ttvn,
617                          bool roaming, bool wifi)
618 {
619         struct tt_global_entry *tt_global_entry = NULL;
620         int ret = 0;
621         int hash_added;
622         struct tt_common_entry *common;
623
624         tt_global_entry = tt_global_hash_find(bat_priv, tt_addr);
625
626         if (!tt_global_entry) {
627                 tt_global_entry = kzalloc(sizeof(*tt_global_entry),
628                                           GFP_ATOMIC);
629                 if (!tt_global_entry)
630                         goto out;
631
632                 common = &tt_global_entry->common;
633                 memcpy(common->addr, tt_addr, ETH_ALEN);
634
635                 common->flags = NO_FLAGS;
636                 tt_global_entry->roam_at = 0;
637                 atomic_set(&common->refcount, 2);
638
639                 INIT_HLIST_HEAD(&tt_global_entry->orig_list);
640                 spin_lock_init(&tt_global_entry->list_lock);
641
642                 hash_added = batadv_hash_add(bat_priv->tt_global_hash,
643                                              compare_tt, batadv_choose_orig,
644                                              common, &common->hash_entry);
645
646                 if (unlikely(hash_added != 0)) {
647                         /* remove the reference for the hash */
648                         tt_global_entry_free_ref(tt_global_entry);
649                         goto out_remove;
650                 }
651
652                 tt_global_add_orig_entry(tt_global_entry, orig_node, ttvn);
653         } else {
654                 /* there is already a global entry, use this one. */
655
656                 /* If the TT_CLIENT_ROAM flag is set, there is only one
657                  * originator left in the list and we previously received a
658                  * delete + roaming change for this originator.
659                  *
660                  * We should first delete the old originator before adding the
661                  * new one.
662                  */
663                 if (tt_global_entry->common.flags & TT_CLIENT_ROAM) {
664                         tt_global_del_orig_list(tt_global_entry);
665                         tt_global_entry->common.flags &= ~TT_CLIENT_ROAM;
666                         tt_global_entry->roam_at = 0;
667                 }
668
669                 if (!tt_global_entry_has_orig(tt_global_entry, orig_node))
670                         tt_global_add_orig_entry(tt_global_entry, orig_node,
671                                                  ttvn);
672         }
673
674         if (wifi)
675                 tt_global_entry->common.flags |= TT_CLIENT_WIFI;
676
677         bat_dbg(DBG_TT, bat_priv,
678                 "Creating new global tt entry: %pM (via %pM)\n",
679                 tt_global_entry->common.addr, orig_node->orig);
680
681 out_remove:
682         /* remove address from local hash if present */
683         batadv_tt_local_remove(bat_priv, tt_global_entry->common.addr,
684                                "global tt received", roaming);
685         ret = 1;
686 out:
687         if (tt_global_entry)
688                 tt_global_entry_free_ref(tt_global_entry);
689         return ret;
690 }
691
692 /* print all orig nodes that announce the address for this global entry.
693  * it is assumed that the caller holds rcu_read_lock();
694  */
695 static void tt_global_print_entry(struct tt_global_entry *tt_global_entry,
696                                   struct seq_file *seq)
697 {
698         struct hlist_head *head;
699         struct hlist_node *node;
700         struct tt_orig_list_entry *orig_entry;
701         struct tt_common_entry *tt_common_entry;
702         uint16_t flags;
703         uint8_t last_ttvn;
704
705         tt_common_entry = &tt_global_entry->common;
706
707         head = &tt_global_entry->orig_list;
708
709         hlist_for_each_entry_rcu(orig_entry, node, head, list) {
710                 flags = tt_common_entry->flags;
711                 last_ttvn = atomic_read(&orig_entry->orig_node->last_ttvn);
712                 seq_printf(seq, " * %pM  (%3u) via %pM     (%3u)   [%c%c]\n",
713                            tt_global_entry->common.addr, orig_entry->ttvn,
714                            orig_entry->orig_node->orig, last_ttvn,
715                            (flags & TT_CLIENT_ROAM ? 'R' : '.'),
716                            (flags & TT_CLIENT_WIFI ? 'W' : '.'));
717         }
718 }
719
720 int batadv_tt_global_seq_print_text(struct seq_file *seq, void *offset)
721 {
722         struct net_device *net_dev = (struct net_device *)seq->private;
723         struct bat_priv *bat_priv = netdev_priv(net_dev);
724         struct hashtable_t *hash = bat_priv->tt_global_hash;
725         struct tt_common_entry *tt_common_entry;
726         struct tt_global_entry *tt_global_entry;
727         struct hard_iface *primary_if;
728         struct hlist_node *node;
729         struct hlist_head *head;
730         uint32_t i;
731         int ret = 0;
732
733         primary_if = batadv_primary_if_get_selected(bat_priv);
734         if (!primary_if) {
735                 ret = seq_printf(seq,
736                                  "BATMAN mesh %s disabled - please specify interfaces to enable it\n",
737                                  net_dev->name);
738                 goto out;
739         }
740
741         if (primary_if->if_status != IF_ACTIVE) {
742                 ret = seq_printf(seq,
743                                  "BATMAN mesh %s disabled - primary interface not active\n",
744                                  net_dev->name);
745                 goto out;
746         }
747
748         seq_printf(seq,
749                    "Globally announced TT entries received via the mesh %s\n",
750                    net_dev->name);
751         seq_printf(seq, "       %-13s %s       %-15s %s %s\n",
752                    "Client", "(TTVN)", "Originator", "(Curr TTVN)", "Flags");
753
754         for (i = 0; i < hash->size; i++) {
755                 head = &hash->table[i];
756
757                 rcu_read_lock();
758                 hlist_for_each_entry_rcu(tt_common_entry, node,
759                                          head, hash_entry) {
760                         tt_global_entry = container_of(tt_common_entry,
761                                                        struct tt_global_entry,
762                                                        common);
763                         tt_global_print_entry(tt_global_entry, seq);
764                 }
765                 rcu_read_unlock();
766         }
767 out:
768         if (primary_if)
769                 batadv_hardif_free_ref(primary_if);
770         return ret;
771 }
772
773 /* deletes the orig list of a tt_global_entry */
774 static void tt_global_del_orig_list(struct tt_global_entry *tt_global_entry)
775 {
776         struct hlist_head *head;
777         struct hlist_node *node, *safe;
778         struct tt_orig_list_entry *orig_entry;
779
780         spin_lock_bh(&tt_global_entry->list_lock);
781         head = &tt_global_entry->orig_list;
782         hlist_for_each_entry_safe(orig_entry, node, safe, head, list) {
783                 hlist_del_rcu(node);
784                 tt_orig_list_entry_free_ref(orig_entry);
785         }
786         spin_unlock_bh(&tt_global_entry->list_lock);
787
788 }
789
790 static void tt_global_del_orig_entry(struct bat_priv *bat_priv,
791                                      struct tt_global_entry *tt_global_entry,
792                                      struct orig_node *orig_node,
793                                      const char *message)
794 {
795         struct hlist_head *head;
796         struct hlist_node *node, *safe;
797         struct tt_orig_list_entry *orig_entry;
798
799         spin_lock_bh(&tt_global_entry->list_lock);
800         head = &tt_global_entry->orig_list;
801         hlist_for_each_entry_safe(orig_entry, node, safe, head, list) {
802                 if (orig_entry->orig_node == orig_node) {
803                         bat_dbg(DBG_TT, bat_priv,
804                                 "Deleting %pM from global tt entry %pM: %s\n",
805                                 orig_node->orig, tt_global_entry->common.addr,
806                                 message);
807                         hlist_del_rcu(node);
808                         tt_orig_list_entry_free_ref(orig_entry);
809                 }
810         }
811         spin_unlock_bh(&tt_global_entry->list_lock);
812 }
813
814 static void tt_global_del_struct(struct bat_priv *bat_priv,
815                                  struct tt_global_entry *tt_global_entry,
816                                  const char *message)
817 {
818         bat_dbg(DBG_TT, bat_priv,
819                 "Deleting global tt entry %pM: %s\n",
820                 tt_global_entry->common.addr, message);
821
822         batadv_hash_remove(bat_priv->tt_global_hash, compare_tt,
823                            batadv_choose_orig, tt_global_entry->common.addr);
824         tt_global_entry_free_ref(tt_global_entry);
825
826 }
827
828 /* If the client is to be deleted, check whether it is the last originator entry
829  * within the tt_global_entry. If so, set the TT_CLIENT_ROAM flag and start the
830  * roaming timer; otherwise simply remove the originator scheduled for deletion.
831  */
832 static void tt_global_del_roaming(struct bat_priv *bat_priv,
833                                   struct tt_global_entry *tt_global_entry,
834                                   struct orig_node *orig_node,
835                                   const char *message)
836 {
837         bool last_entry = true;
838         struct hlist_head *head;
839         struct hlist_node *node;
840         struct tt_orig_list_entry *orig_entry;
841
842         /* no local entry exists, case 1:
843          * Check if this is the last one or if other entries exist.
844          */
845
846         rcu_read_lock();
847         head = &tt_global_entry->orig_list;
848         hlist_for_each_entry_rcu(orig_entry, node, head, list) {
849                 if (orig_entry->orig_node != orig_node) {
850                         last_entry = false;
851                         break;
852                 }
853         }
854         rcu_read_unlock();
855
856         if (last_entry) {
857                 /* it's the last one, mark it for roaming. */
858                 tt_global_entry->common.flags |= TT_CLIENT_ROAM;
859                 tt_global_entry->roam_at = jiffies;
860         } else
861                 /* there is another entry, we can simply delete this
862                  * one and can still use the other one.
863                  */
864                 tt_global_del_orig_entry(bat_priv, tt_global_entry,
865                                          orig_node, message);
866 }
867
868
869
870 static void tt_global_del(struct bat_priv *bat_priv,
871                           struct orig_node *orig_node,
872                           const unsigned char *addr,
873                           const char *message, bool roaming)
874 {
875         struct tt_global_entry *tt_global_entry = NULL;
876         struct tt_local_entry *tt_local_entry = NULL;
877
878         tt_global_entry = tt_global_hash_find(bat_priv, addr);
879         if (!tt_global_entry)
880                 goto out;
881
882         if (!roaming) {
883                 tt_global_del_orig_entry(bat_priv, tt_global_entry, orig_node,
884                                          message);
885
886                 if (hlist_empty(&tt_global_entry->orig_list))
887                         tt_global_del_struct(bat_priv, tt_global_entry,
888                                              message);
889
890                 goto out;
891         }
892
893         /* if we are deleting a global entry due to a roam
894          * event, there are two possibilities:
895          * 1) the client roamed from node A to node B => if there
896          *    is only one originator left for this client, we mark
897          *    it with TT_CLIENT_ROAM, we start a timer and we
898          *    wait for node B to claim it. In case of timeout
899          *    the entry is purged.
900          *
901          *    If there are other originators left, we directly delete
902          *    the originator.
903          * 2) the client roamed to us => we can directly delete
904          *    the global entry, since it is useless now.
905          */
906         tt_local_entry = tt_local_hash_find(bat_priv,
907                                             tt_global_entry->common.addr);
908         if (tt_local_entry) {
909                 /* local entry exists, case 2: client roamed to us. */
910                 tt_global_del_orig_list(tt_global_entry);
911                 tt_global_del_struct(bat_priv, tt_global_entry, message);
912         } else
913                 /* no local entry exists, case 1: check for roaming */
914                 tt_global_del_roaming(bat_priv, tt_global_entry, orig_node,
915                                       message);
916
917
918 out:
919         if (tt_global_entry)
920                 tt_global_entry_free_ref(tt_global_entry);
921         if (tt_local_entry)
922                 tt_local_entry_free_ref(tt_local_entry);
923 }
924
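/* remove orig_node from every global entry it announces and delete the
 * entries that are left without any announcing originator
 */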
925 void batadv_tt_global_del_orig(struct bat_priv *bat_priv,
926                                struct orig_node *orig_node, const char *message)
927 {
928         struct tt_global_entry *tt_global_entry;
929         struct tt_common_entry *tt_common_entry;
930         uint32_t i;
931         struct hashtable_t *hash = bat_priv->tt_global_hash;
932         struct hlist_node *node, *safe;
933         struct hlist_head *head;
934         spinlock_t *list_lock; /* protects write access to the hash lists */
935
936         if (!hash)
937                 return;
938
939         for (i = 0; i < hash->size; i++) {
940                 head = &hash->table[i];
941                 list_lock = &hash->list_locks[i];
942
943                 spin_lock_bh(list_lock);
944                 hlist_for_each_entry_safe(tt_common_entry, node, safe,
945                                           head, hash_entry) {
946                         tt_global_entry = container_of(tt_common_entry,
947                                                        struct tt_global_entry,
948                                                        common);
949
950                         tt_global_del_orig_entry(bat_priv, tt_global_entry,
951                                                  orig_node, message);
952
953                         if (hlist_empty(&tt_global_entry->orig_list)) {
954                                 bat_dbg(DBG_TT, bat_priv,
955                                         "Deleting global tt entry %pM: %s\n",
956                                         tt_global_entry->common.addr,
957                                         message);
958                                 hlist_del_rcu(node);
959                                 tt_global_entry_free_ref(tt_global_entry);
960                         }
961                 }
962                 spin_unlock_bh(list_lock);
963         }
964         atomic_set(&orig_node->tt_size, 0);
965         orig_node->tt_initialised = false;
966 }
967
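/* delete global entries whose roaming timer (TT_CLIENT_ROAM) has expired */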
968 static void tt_global_roam_purge(struct bat_priv *bat_priv)
969 {
970         struct hashtable_t *hash = bat_priv->tt_global_hash;
971         struct tt_common_entry *tt_common_entry;
972         struct tt_global_entry *tt_global_entry;
973         struct hlist_node *node, *node_tmp;
974         struct hlist_head *head;
975         spinlock_t *list_lock; /* protects write access to the hash lists */
976         uint32_t i;
977
978         for (i = 0; i < hash->size; i++) {
979                 head = &hash->table[i];
980                 list_lock = &hash->list_locks[i];
981
982                 spin_lock_bh(list_lock);
983                 hlist_for_each_entry_safe(tt_common_entry, node, node_tmp,
984                                           head, hash_entry) {
985                         tt_global_entry = container_of(tt_common_entry,
986                                                        struct tt_global_entry,
987                                                        common);
988                         if (!(tt_global_entry->common.flags & TT_CLIENT_ROAM))
989                                 continue;
990                         if (!has_timed_out(tt_global_entry->roam_at,
991                                            TT_CLIENT_ROAM_TIMEOUT))
992                                 continue;
993
994                         bat_dbg(DBG_TT, bat_priv,
995                                 "Deleting global tt entry (%pM): Roaming timeout\n",
996                                 tt_global_entry->common.addr);
997
998                         hlist_del_rcu(node);
999                         tt_global_entry_free_ref(tt_global_entry);
1000                 }
1001                 spin_unlock_bh(list_lock);
1002         }
1003
1004 }
1005
1006 static void tt_global_table_free(struct bat_priv *bat_priv)
1007 {
1008         struct hashtable_t *hash;
1009         spinlock_t *list_lock; /* protects write access to the hash lists */
1010         struct tt_common_entry *tt_common_entry;
1011         struct tt_global_entry *tt_global_entry;
1012         struct hlist_node *node, *node_tmp;
1013         struct hlist_head *head;
1014         uint32_t i;
1015
1016         if (!bat_priv->tt_global_hash)
1017                 return;
1018
1019         hash = bat_priv->tt_global_hash;
1020
1021         for (i = 0; i < hash->size; i++) {
1022                 head = &hash->table[i];
1023                 list_lock = &hash->list_locks[i];
1024
1025                 spin_lock_bh(list_lock);
1026                 hlist_for_each_entry_safe(tt_common_entry, node, node_tmp,
1027                                           head, hash_entry) {
1028                         hlist_del_rcu(node);
1029                         tt_global_entry = container_of(tt_common_entry,
1030                                                        struct tt_global_entry,
1031                                                        common);
1032                         tt_global_entry_free_ref(tt_global_entry);
1033                 }
1034                 spin_unlock_bh(list_lock);
1035         }
1036
1037         batadv_hash_destroy(hash);
1038
1039         bat_priv->tt_global_hash = NULL;
1040 }
1041
1042 static bool _is_ap_isolated(struct tt_local_entry *tt_local_entry,
1043                             struct tt_global_entry *tt_global_entry)
1044 {
1045         bool ret = false;
1046
1047         if (tt_local_entry->common.flags & TT_CLIENT_WIFI &&
1048             tt_global_entry->common.flags & TT_CLIENT_WIFI)
1049                 ret = true;
1050
1051         return ret;
1052 }
1053
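/* look up the originator announcing the client 'addr'; if several
 * originators announce it the one reached with the best TQ is returned
 * (with an increased refcount). NULL is returned if the client is unknown
 * or if AP isolation forbids 'src' and 'addr' to communicate
 */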
1054 struct orig_node *batadv_transtable_search(struct bat_priv *bat_priv,
1055                                            const uint8_t *src,
1056                                            const uint8_t *addr)
1057 {
1058         struct tt_local_entry *tt_local_entry = NULL;
1059         struct tt_global_entry *tt_global_entry = NULL;
1060         struct orig_node *orig_node = NULL;
1061         struct neigh_node *router = NULL;
1062         struct hlist_head *head;
1063         struct hlist_node *node;
1064         struct tt_orig_list_entry *orig_entry;
1065         int best_tq;
1066
1067         if (src && atomic_read(&bat_priv->ap_isolation)) {
1068                 tt_local_entry = tt_local_hash_find(bat_priv, src);
1069                 if (!tt_local_entry)
1070                         goto out;
1071         }
1072
1073         tt_global_entry = tt_global_hash_find(bat_priv, addr);
1074         if (!tt_global_entry)
1075                 goto out;
1076
1077         /* check whether the clients should not communicate due to AP
1078          * isolation
1079          */
1080         if (tt_local_entry && _is_ap_isolated(tt_local_entry, tt_global_entry))
1081                 goto out;
1082
1083         best_tq = 0;
1084
1085         rcu_read_lock();
1086         head = &tt_global_entry->orig_list;
1087         hlist_for_each_entry_rcu(orig_entry, node, head, list) {
1088                 router = batadv_orig_node_get_router(orig_entry->orig_node);
1089                 if (!router)
1090                         continue;
1091
1092                 if (router->tq_avg > best_tq) {
1093                         orig_node = orig_entry->orig_node;
1094                         best_tq = router->tq_avg;
1095                 }
1096                 batadv_neigh_node_free_ref(router);
1097         }
1098         /* found anything? */
1099         if (orig_node && !atomic_inc_not_zero(&orig_node->refcount))
1100                 orig_node = NULL;
1101         rcu_read_unlock();
1102 out:
1103         if (tt_global_entry)
1104                 tt_global_entry_free_ref(tt_global_entry);
1105         if (tt_local_entry)
1106                 tt_local_entry_free_ref(tt_local_entry);
1107
1108         return orig_node;
1109 }
1110
1111 /* Calculates the checksum of the global entries announced by a given orig_node */
1112 static uint16_t tt_global_crc(struct bat_priv *bat_priv,
1113                               struct orig_node *orig_node)
1114 {
1115         uint16_t total = 0, total_one;
1116         struct hashtable_t *hash = bat_priv->tt_global_hash;
1117         struct tt_common_entry *tt_common_entry;
1118         struct tt_global_entry *tt_global_entry;
1119         struct hlist_node *node;
1120         struct hlist_head *head;
1121         uint32_t i;
1122         int j;
1123
1124         for (i = 0; i < hash->size; i++) {
1125                 head = &hash->table[i];
1126
1127                 rcu_read_lock();
1128                 hlist_for_each_entry_rcu(tt_common_entry, node,
1129                                          head, hash_entry) {
1130                         tt_global_entry = container_of(tt_common_entry,
1131                                                        struct tt_global_entry,
1132                                                        common);
1133                         /* Roaming clients are in the global table for
1134                          * consistency only. They don't have to be
1135                          * taken into account while computing the
1136                          * global crc
1137                          */
1138                         if (tt_global_entry->common.flags & TT_CLIENT_ROAM)
1139                                 continue;
1140
1141                         /* find out if this global entry is announced by this
1142                          * originator
1143                          */
1144                         if (!tt_global_entry_has_orig(tt_global_entry,
1145                                                       orig_node))
1146                                 continue;
1147
1148                         total_one = 0;
1149                         for (j = 0; j < ETH_ALEN; j++)
1150                                 total_one = crc16_byte(total_one,
1151                                         tt_global_entry->common.addr[j]);
1152                         total ^= total_one;
1153                 }
1154                 rcu_read_unlock();
1155         }
1156
1157         return total;
1158 }
1159
1160 /* Calculates the checksum of the local table */
1161 static uint16_t batadv_tt_local_crc(struct bat_priv *bat_priv)
1162 {
1163         uint16_t total = 0, total_one;
1164         struct hashtable_t *hash = bat_priv->tt_local_hash;
1165         struct tt_common_entry *tt_common_entry;
1166         struct hlist_node *node;
1167         struct hlist_head *head;
1168         uint32_t i;
1169         int j;
1170
1171         for (i = 0; i < hash->size; i++) {
1172                 head = &hash->table[i];
1173
1174                 rcu_read_lock();
1175                 hlist_for_each_entry_rcu(tt_common_entry, node,
1176                                          head, hash_entry) {
1177                         /* not yet committed clients must not be taken into
1178                          * account while computing the CRC
1179                          */
1180                         if (tt_common_entry->flags & TT_CLIENT_NEW)
1181                                 continue;
1182                         total_one = 0;
1183                         for (j = 0; j < ETH_ALEN; j++)
1184                                 total_one = crc16_byte(total_one,
1185                                                    tt_common_entry->addr[j]);
1186                         total ^= total_one;
1187                 }
1188                 rcu_read_unlock();
1189         }
1190
1191         return total;
1192 }
1193
1194 static void tt_req_list_free(struct bat_priv *bat_priv)
1195 {
1196         struct tt_req_node *node, *safe;
1197
1198         spin_lock_bh(&bat_priv->tt_req_list_lock);
1199
1200         list_for_each_entry_safe(node, safe, &bat_priv->tt_req_list, list) {
1201                 list_del(&node->list);
1202                 kfree(node);
1203         }
1204
1205         spin_unlock_bh(&bat_priv->tt_req_list_lock);
1206 }
1207
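/* keep a copy of the tt changes received with the last OGM of this
 * originator (the buffer is replaced only if the OGM carried any change)
 */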
1208 static void tt_save_orig_buffer(struct bat_priv *bat_priv,
1209                                 struct orig_node *orig_node,
1210                                 const unsigned char *tt_buff,
1211                                 uint8_t tt_num_changes)
1212 {
1213         uint16_t tt_buff_len = batadv_tt_len(tt_num_changes);
1214
1215         /* Replace the old buffer only if I received something in the
1216          * last OGM (the OGM could carry no changes)
1217          */
1218         spin_lock_bh(&orig_node->tt_buff_lock);
1219         if (tt_buff_len > 0) {
1220                 kfree(orig_node->tt_buff);
1221                 orig_node->tt_buff_len = 0;
1222                 orig_node->tt_buff = kmalloc(tt_buff_len, GFP_ATOMIC);
1223                 if (orig_node->tt_buff) {
1224                         memcpy(orig_node->tt_buff, tt_buff, tt_buff_len);
1225                         orig_node->tt_buff_len = tt_buff_len;
1226                 }
1227         }
1228         spin_unlock_bh(&orig_node->tt_buff_lock);
1229 }
1230
1231 static void tt_req_purge(struct bat_priv *bat_priv)
1232 {
1233         struct tt_req_node *node, *safe;
1234
1235         spin_lock_bh(&bat_priv->tt_req_list_lock);
1236         list_for_each_entry_safe(node, safe, &bat_priv->tt_req_list, list) {
1237                 if (has_timed_out(node->issued_at, TT_REQUEST_TIMEOUT)) {
1238                         list_del(&node->list);
1239                         kfree(node);
1240                 }
1241         }
1242         spin_unlock_bh(&bat_priv->tt_req_list_lock);
1243 }
1244
1245 /* returns the pointer to the new tt_req_node struct if no request
1246  * has already been issued for this orig_node, NULL otherwise
1247  */
1248 static struct tt_req_node *new_tt_req_node(struct bat_priv *bat_priv,
1249                                           struct orig_node *orig_node)
1250 {
1251         struct tt_req_node *tt_req_node_tmp, *tt_req_node = NULL;
1252
1253         spin_lock_bh(&bat_priv->tt_req_list_lock);
1254         list_for_each_entry(tt_req_node_tmp, &bat_priv->tt_req_list, list) {
1255                 if (compare_eth(tt_req_node_tmp, orig_node) &&
1256                     !has_timed_out(tt_req_node_tmp->issued_at,
1257                                    TT_REQUEST_TIMEOUT))
1258                         goto unlock;
1259         }
1260
1261         tt_req_node = kmalloc(sizeof(*tt_req_node), GFP_ATOMIC);
1262         if (!tt_req_node)
1263                 goto unlock;
1264
1265         memcpy(tt_req_node->addr, orig_node->orig, ETH_ALEN);
1266         tt_req_node->issued_at = jiffies;
1267
1268         list_add(&tt_req_node->list, &bat_priv->tt_req_list);
1269 unlock:
1270         spin_unlock_bh(&bat_priv->tt_req_list_lock);
1271         return tt_req_node;
1272 }
1273
1274 /* data_ptr is unused here, but has to be kept to match the callback prototype */
1275 static int tt_local_valid_entry(const void *entry_ptr, const void *data_ptr)
1276 {
1277         const struct tt_common_entry *tt_common_entry = entry_ptr;
1278
1279         if (tt_common_entry->flags & TT_CLIENT_NEW)
1280                 return 0;
1281         return 1;
1282 }
1283
1284 static int tt_global_valid_entry(const void *entry_ptr, const void *data_ptr)
1285 {
1286         const struct tt_common_entry *tt_common_entry = entry_ptr;
1287         const struct tt_global_entry *tt_global_entry;
1288         const struct orig_node *orig_node = data_ptr;
1289
1290         if (tt_common_entry->flags & TT_CLIENT_ROAM)
1291                 return 0;
1292
1293         tt_global_entry = container_of(tt_common_entry, struct tt_global_entry,
1294                                        common);
1295
1296         return tt_global_entry_has_orig(tt_global_entry, orig_node);
1297 }
1298
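/* allocate a TT_RESPONSE skb and fill it with up to tt_len bytes of entries
 * taken from the given hash; entries are filtered through valid_cb and the
 * number of copied entries is stored in the packet (tt_data)
 */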
1299 static struct sk_buff *tt_response_fill_table(uint16_t tt_len, uint8_t ttvn,
1300                                               struct hashtable_t *hash,
1301                                               struct hard_iface *primary_if,
1302                                               int (*valid_cb)(const void *,
1303                                                               const void *),
1304                                               void *cb_data)
1305 {
1306         struct tt_common_entry *tt_common_entry;
1307         struct tt_query_packet *tt_response;
1308         struct tt_change *tt_change;
1309         struct hlist_node *node;
1310         struct hlist_head *head;
1311         struct sk_buff *skb = NULL;
1312         uint16_t tt_tot, tt_count;
1313         ssize_t tt_query_size = sizeof(struct tt_query_packet);
1314         uint32_t i;
1315
1316         if (tt_query_size + tt_len > primary_if->soft_iface->mtu) {
1317                 tt_len = primary_if->soft_iface->mtu - tt_query_size;
1318                 tt_len -= tt_len % sizeof(struct tt_change);
1319         }
1320         tt_tot = tt_len / sizeof(struct tt_change);
1321
1322         skb = dev_alloc_skb(tt_query_size + tt_len + ETH_HLEN);
1323         if (!skb)
1324                 goto out;
1325
1326         skb_reserve(skb, ETH_HLEN);
1327         tt_response = (struct tt_query_packet *)skb_put(skb,
1328                                                      tt_query_size + tt_len);
1329         tt_response->ttvn = ttvn;
1330
1331         tt_change = (struct tt_change *)(skb->data + tt_query_size);
1332         tt_count = 0;
1333
1334         rcu_read_lock();
1335         for (i = 0; i < hash->size; i++) {
1336                 head = &hash->table[i];
1337
1338                 hlist_for_each_entry_rcu(tt_common_entry, node,
1339                                          head, hash_entry) {
1340                         if (tt_count == tt_tot)
1341                                 break;
1342
1343                         if ((valid_cb) && (!valid_cb(tt_common_entry, cb_data)))
1344                                 continue;
1345
1346                         memcpy(tt_change->addr, tt_common_entry->addr,
1347                                ETH_ALEN);
1348                         tt_change->flags = NO_FLAGS;
1349
1350                         tt_count++;
1351                         tt_change++;
1352                 }
1353         }
1354         rcu_read_unlock();
1355
1356         /* store in the message the number of entries we have successfully
1357          * copied
1358          */
1359         tt_response->tt_data = htons(tt_count);
1360
1361 out:
1362         return skb;
1363 }
1364
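/* send a TT_REQUEST (full table or diff) to dst_orig_node via its router;
 * returns 0 on success, 1 otherwise
 */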
1365 static int send_tt_request(struct bat_priv *bat_priv,
1366                            struct orig_node *dst_orig_node,
1367                            uint8_t ttvn, uint16_t tt_crc, bool full_table)
1368 {
1369         struct sk_buff *skb = NULL;
1370         struct tt_query_packet *tt_request;
1371         struct neigh_node *neigh_node = NULL;
1372         struct hard_iface *primary_if;
1373         struct tt_req_node *tt_req_node = NULL;
1374         int ret = 1;
1375
1376         primary_if = batadv_primary_if_get_selected(bat_priv);
1377         if (!primary_if)
1378                 goto out;
1379
1380         /* The new tt_req will be issued only if I'm not waiting for a
1381          * reply from the same orig_node yet
1382          */
1383         tt_req_node = new_tt_req_node(bat_priv, dst_orig_node);
1384         if (!tt_req_node)
1385                 goto out;
1386
1387         skb = dev_alloc_skb(sizeof(struct tt_query_packet) + ETH_HLEN);
1388         if (!skb)
1389                 goto out;
1390
1391         skb_reserve(skb, ETH_HLEN);
1392
1393         tt_request = (struct tt_query_packet *)skb_put(skb,
1394                                 sizeof(struct tt_query_packet));
1395
1396         tt_request->header.packet_type = BAT_TT_QUERY;
1397         tt_request->header.version = COMPAT_VERSION;
1398         memcpy(tt_request->src, primary_if->net_dev->dev_addr, ETH_ALEN);
1399         memcpy(tt_request->dst, dst_orig_node->orig, ETH_ALEN);
1400         tt_request->header.ttl = TTL;
1401         tt_request->ttvn = ttvn;
1402         tt_request->tt_data = htons(tt_crc);
1403         tt_request->flags = TT_REQUEST;
1404
1405         if (full_table)
1406                 tt_request->flags |= TT_FULL_TABLE;
1407
1408         neigh_node = batadv_orig_node_get_router(dst_orig_node);
1409         if (!neigh_node)
1410                 goto out;
1411
1412         bat_dbg(DBG_TT, bat_priv,
1413                 "Sending TT_REQUEST to %pM via %pM [%c]\n",
1414                 dst_orig_node->orig, neigh_node->addr,
1415                 (full_table ? 'F' : '.'));
1416
1417         batadv_inc_counter(bat_priv, BAT_CNT_TT_REQUEST_TX);
1418
1419         batadv_send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
1420         ret = 0;
1421
1422 out:
1423         if (neigh_node)
1424                 batadv_neigh_node_free_ref(neigh_node);
1425         if (primary_if)
1426                 batadv_hardif_free_ref(primary_if);
1427         if (ret)
1428                 kfree_skb(skb);
1429         if (ret && tt_req_node) {
1430                 spin_lock_bh(&bat_priv->tt_req_list_lock);
1431                 list_del(&tt_req_node->list);
1432                 spin_unlock_bh(&bat_priv->tt_req_list_lock);
1433                 kfree(tt_req_node);
1434         }
1435         return ret;
1436 }
1437
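/* Answer a TT_REQUEST on behalf of another originator: the response is built
 * either from the cached OGM diff buffer or from the global table (full
 * table) and sent back towards the requester.
 */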
1438 static bool send_other_tt_response(struct bat_priv *bat_priv,
1439                                    struct tt_query_packet *tt_request)
1440 {
1441         struct orig_node *req_dst_orig_node = NULL, *res_dst_orig_node = NULL;
1442         struct neigh_node *neigh_node = NULL;
1443         struct hard_iface *primary_if = NULL;
1444         uint8_t orig_ttvn, req_ttvn, ttvn;
1445         bool ret = false;
1446         unsigned char *tt_buff;
1447         bool full_table;
1448         uint16_t tt_len, tt_tot;
1449         struct sk_buff *skb = NULL;
1450         struct tt_query_packet *tt_response;
1451
1452         bat_dbg(DBG_TT, bat_priv,
1453                 "Received TT_REQUEST from %pM for ttvn: %u (%pM) [%c]\n",
1454                 tt_request->src, tt_request->ttvn, tt_request->dst,
1455                 (tt_request->flags & TT_FULL_TABLE ? 'F' : '.'));
1456
1457         /* Let's get the orig node of the REAL destination */
1458         req_dst_orig_node = batadv_orig_hash_find(bat_priv, tt_request->dst);
1459         if (!req_dst_orig_node)
1460                 goto out;
1461
1462         res_dst_orig_node = batadv_orig_hash_find(bat_priv, tt_request->src);
1463         if (!res_dst_orig_node)
1464                 goto out;
1465
1466         neigh_node = batadv_orig_node_get_router(res_dst_orig_node);
1467         if (!neigh_node)
1468                 goto out;
1469
1470         primary_if = batadv_primary_if_get_selected(bat_priv);
1471         if (!primary_if)
1472                 goto out;
1473
1474         orig_ttvn = (uint8_t)atomic_read(&req_dst_orig_node->last_ttvn);
1475         req_ttvn = tt_request->ttvn;
1476
1477         /* I don't have the requested data */
1478         if (orig_ttvn != req_ttvn ||
1479             tt_request->tt_data != htons(req_dst_orig_node->tt_crc))
1480                 goto out;
1481
1482         /* If the full table has been explicitly requested */
1483         if (tt_request->flags & TT_FULL_TABLE ||
1484             !req_dst_orig_node->tt_buff)
1485                 full_table = true;
1486         else
1487                 full_table = false;
1488
1489         /* In this version, fragmentation is not implemented, so
1490          * I'll send only one packet with as many TT entries as I can
1491          */
1492         if (!full_table) {
1493                 spin_lock_bh(&req_dst_orig_node->tt_buff_lock);
1494                 tt_len = req_dst_orig_node->tt_buff_len;
1495                 tt_tot = tt_len / sizeof(struct tt_change);
1496
1497                 skb = dev_alloc_skb(sizeof(struct tt_query_packet) +
1498                                     tt_len + ETH_HLEN);
1499                 if (!skb)
1500                         goto unlock;
1501
1502                 skb_reserve(skb, ETH_HLEN);
1503                 tt_response = (struct tt_query_packet *)skb_put(skb,
1504                                 sizeof(struct tt_query_packet) + tt_len);
1505                 tt_response->ttvn = req_ttvn;
1506                 tt_response->tt_data = htons(tt_tot);
1507
1508                 tt_buff = skb->data + sizeof(struct tt_query_packet);
1509                 /* Copy the last orig_node's OGM buffer */
1510                 memcpy(tt_buff, req_dst_orig_node->tt_buff,
1511                        req_dst_orig_node->tt_buff_len);
1512
1513                 spin_unlock_bh(&req_dst_orig_node->tt_buff_lock);
1514         } else {
1515                 tt_len = (uint16_t)atomic_read(&req_dst_orig_node->tt_size) *
1516                                                 sizeof(struct tt_change);
1517                 ttvn = (uint8_t)atomic_read(&req_dst_orig_node->last_ttvn);
1518
1519                 skb = tt_response_fill_table(tt_len, ttvn,
1520                                              bat_priv->tt_global_hash,
1521                                              primary_if, tt_global_valid_entry,
1522                                              req_dst_orig_node);
1523                 if (!skb)
1524                         goto out;
1525
1526                 tt_response = (struct tt_query_packet *)skb->data;
1527         }
1528
1529         tt_response->header.packet_type = BAT_TT_QUERY;
1530         tt_response->header.version = COMPAT_VERSION;
1531         tt_response->header.ttl = TTL;
1532         memcpy(tt_response->src, req_dst_orig_node->orig, ETH_ALEN);
1533         memcpy(tt_response->dst, tt_request->src, ETH_ALEN);
1534         tt_response->flags = TT_RESPONSE;
1535
1536         if (full_table)
1537                 tt_response->flags |= TT_FULL_TABLE;
1538
1539         bat_dbg(DBG_TT, bat_priv,
1540                 "Sending TT_RESPONSE %pM via %pM for %pM (ttvn: %u)\n",
1541                 res_dst_orig_node->orig, neigh_node->addr,
1542                 req_dst_orig_node->orig, req_ttvn);
1543
1544         batadv_inc_counter(bat_priv, BAT_CNT_TT_RESPONSE_TX);
1545
1546         batadv_send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
1547         ret = true;
1548         goto out;
1549
1550 unlock:
1551         spin_unlock_bh(&req_dst_orig_node->tt_buff_lock);
1552
1553 out:
1554         if (res_dst_orig_node)
1555                 batadv_orig_node_free_ref(res_dst_orig_node);
1556         if (req_dst_orig_node)
1557                 batadv_orig_node_free_ref(req_dst_orig_node);
1558         if (neigh_node)
1559                 batadv_neigh_node_free_ref(neigh_node);
1560         if (primary_if)
1561                 batadv_hardif_free_ref(primary_if);
1562         if (!ret)
1563                 kfree_skb(skb);
1564         return ret;
1565 }
1566
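/* Answer a TT_REQUEST addressed to this node, either with the latest local
 * diff buffer or with the full local translation table.
 */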
1567 static bool send_my_tt_response(struct bat_priv *bat_priv,
1568                                 struct tt_query_packet *tt_request)
1569 {
1570         struct orig_node *orig_node = NULL;
1571         struct neigh_node *neigh_node = NULL;
1572         struct hard_iface *primary_if = NULL;
1573         uint8_t my_ttvn, req_ttvn, ttvn;
1574         bool ret = false;
1575         unsigned char *tt_buff;
1576         bool full_table;
1577         uint16_t tt_len, tt_tot;
1578         struct sk_buff *skb = NULL;
1579         struct tt_query_packet *tt_response;
1580
1581         bat_dbg(DBG_TT, bat_priv,
1582                 "Received TT_REQUEST from %pM for ttvn: %u (me) [%c]\n",
1583                 tt_request->src, tt_request->ttvn,
1584                 (tt_request->flags & TT_FULL_TABLE ? 'F' : '.'));
1585
1587         my_ttvn = (uint8_t)atomic_read(&bat_priv->ttvn);
1588         req_ttvn = tt_request->ttvn;
1589
1590         orig_node = batadv_orig_hash_find(bat_priv, tt_request->src);
1591         if (!orig_node)
1592                 goto out;
1593
1594         neigh_node = batadv_orig_node_get_router(orig_node);
1595         if (!neigh_node)
1596                 goto out;
1597
1598         primary_if = batadv_primary_if_get_selected(bat_priv);
1599         if (!primary_if)
1600                 goto out;
1601
1602         /* If the full table has been explicitly requested or the gap
1603          * is too big send the whole local translation table
1604          */
1605         if (tt_request->flags & TT_FULL_TABLE || my_ttvn != req_ttvn ||
1606             !bat_priv->tt_buff)
1607                 full_table = true;
1608         else
1609                 full_table = false;
1610
1611         /* In this version, fragmentation is not implemented, so
1612          * I'll send only one packet with as many TT entries as I can
1613          */
1614         if (!full_table) {
1615                 spin_lock_bh(&bat_priv->tt_buff_lock);
1616                 tt_len = bat_priv->tt_buff_len;
1617                 tt_tot = tt_len / sizeof(struct tt_change);
1618
1619                 skb = dev_alloc_skb(sizeof(struct tt_query_packet) +
1620                                     tt_len + ETH_HLEN);
1621                 if (!skb)
1622                         goto unlock;
1623
1624                 skb_reserve(skb, ETH_HLEN);
1625                 tt_response = (struct tt_query_packet *)skb_put(skb,
1626                                 sizeof(struct tt_query_packet) + tt_len);
1627                 tt_response->ttvn = req_ttvn;
1628                 tt_response->tt_data = htons(tt_tot);
1629
1630                 tt_buff = skb->data + sizeof(struct tt_query_packet);
1631                 memcpy(tt_buff, bat_priv->tt_buff,
1632                        bat_priv->tt_buff_len);
1633                 spin_unlock_bh(&bat_priv->tt_buff_lock);
1634         } else {
1635                 tt_len = (uint16_t)atomic_read(&bat_priv->num_local_tt) *
1636                                                 sizeof(struct tt_change);
1637                 ttvn = (uint8_t)atomic_read(&bat_priv->ttvn);
1638
1639                 skb = tt_response_fill_table(tt_len, ttvn,
1640                                              bat_priv->tt_local_hash,
1641                                              primary_if, tt_local_valid_entry,
1642                                              NULL);
1643                 if (!skb)
1644                         goto out;
1645
1646                 tt_response = (struct tt_query_packet *)skb->data;
1647         }
1648
1649         tt_response->header.packet_type = BAT_TT_QUERY;
1650         tt_response->header.version = COMPAT_VERSION;
1651         tt_response->header.ttl = TTL;
1652         memcpy(tt_response->src, primary_if->net_dev->dev_addr, ETH_ALEN);
1653         memcpy(tt_response->dst, tt_request->src, ETH_ALEN);
1654         tt_response->flags = TT_RESPONSE;
1655
1656         if (full_table)
1657                 tt_response->flags |= TT_FULL_TABLE;
1658
1659         bat_dbg(DBG_TT, bat_priv,
1660                 "Sending TT_RESPONSE to %pM via %pM [%c]\n",
1661                 orig_node->orig, neigh_node->addr,
1662                 (tt_response->flags & TT_FULL_TABLE ? 'F' : '.'));
1663
1664         batadv_inc_counter(bat_priv, BAT_CNT_TT_RESPONSE_TX);
1665
1666         batadv_send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
1667         ret = true;
1668         goto out;
1669
1670 unlock:
1671         spin_unlock_bh(&bat_priv->tt_buff_lock);
1672 out:
1673         if (orig_node)
1674                 batadv_orig_node_free_ref(orig_node);
1675         if (neigh_node)
1676                 batadv_neigh_node_free_ref(neigh_node);
1677         if (primary_if)
1678                 batadv_hardif_free_ref(primary_if);
1679         if (!ret)
1680                 kfree_skb(skb);
1681         /* This packet was for me, so it doesn't need to be re-routed */
1682         return true;
1683 }
1684
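/* Dispatch an incoming TT_REQUEST: answer it locally when it is addressed to
 * this node (unless it was sent by a backbone gateway), otherwise answer on
 * behalf of the requested originator.
 */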
1685 bool batadv_send_tt_response(struct bat_priv *bat_priv,
1686                              struct tt_query_packet *tt_request)
1687 {
1688         if (batadv_is_my_mac(tt_request->dst)) {
1689                 /* don't answer backbone gws! */
1690                 if (batadv_bla_is_backbone_gw_orig(bat_priv, tt_request->src))
1691                         return true;
1692
1693                 return send_my_tt_response(bat_priv, tt_request);
1694         } else {
1695                 return send_other_tt_response(bat_priv, tt_request);
1696         }
1697 }
1698
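/* Apply tt_num_changes tt_change entries to the global table of orig_node:
 * entries flagged with TT_CLIENT_DEL are removed, all others are added. The
 * update is aborted if storing a global entry fails.
 */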
1699 static void _tt_update_changes(struct bat_priv *bat_priv,
1700                                struct orig_node *orig_node,
1701                                struct tt_change *tt_change,
1702                                uint16_t tt_num_changes, uint8_t ttvn)
1703 {
1704         int i;
1705         int is_wifi;
1706
1707         for (i = 0; i < tt_num_changes; i++) {
1708                 if ((tt_change + i)->flags & TT_CLIENT_DEL) {
1709                         tt_global_del(bat_priv, orig_node,
1710                                       (tt_change + i)->addr,
1711                                       "tt removed by changes",
1712                                       (tt_change + i)->flags & TT_CLIENT_ROAM);
1713                 } else {
1714                         is_wifi = (tt_change + i)->flags & TT_CLIENT_WIFI;
1715                         if (!batadv_tt_global_add(bat_priv, orig_node,
1716                                                   (tt_change + i)->addr, ttvn,
1717                                                   false, is_wifi))
1718                                 /* In case of a problem while storing a
1719                                  * global_entry, we stop the updating
1720                                  * procedure without committing the
1721                                  * ttvn change. This avoids sending
1722                                  * corrupted data on a tt_request
1723                                  */
1724                                 return;
1725                 }
1726         }
1727         orig_node->tt_initialised = true;
1728 }
1729
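/* Replace the global entries announced by the originator of a full
 * TT_RESPONSE with the table carried in the response itself.
 */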
1730 static void tt_fill_gtable(struct bat_priv *bat_priv,
1731                            struct tt_query_packet *tt_response)
1732 {
1733         struct orig_node *orig_node = NULL;
1734
1735         orig_node = batadv_orig_hash_find(bat_priv, tt_response->src);
1736         if (!orig_node)
1737                 goto out;
1738
1739         /* Purge the old table first */
1740         batadv_tt_global_del_orig(bat_priv, orig_node, "Received full table");
1741
1742         _tt_update_changes(bat_priv, orig_node,
1743                            (struct tt_change *)(tt_response + 1),
1744                            ntohs(tt_response->tt_data), tt_response->ttvn);
1745
1746         spin_lock_bh(&orig_node->tt_buff_lock);
1747         kfree(orig_node->tt_buff);
1748         orig_node->tt_buff_len = 0;
1749         orig_node->tt_buff = NULL;
1750         spin_unlock_bh(&orig_node->tt_buff_lock);
1751
1752         atomic_set(&orig_node->last_ttvn, tt_response->ttvn);
1753
1754 out:
1755         if (orig_node)
1756                 batadv_orig_node_free_ref(orig_node);
1757 }
1758
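/* Apply a TT diff to the global entries of orig_node, cache the raw change
 * buffer for later TT_RESPONSEs and update the last known ttvn.
 */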
1759 static void tt_update_changes(struct bat_priv *bat_priv,
1760                               struct orig_node *orig_node,
1761                               uint16_t tt_num_changes, uint8_t ttvn,
1762                               struct tt_change *tt_change)
1763 {
1764         _tt_update_changes(bat_priv, orig_node, tt_change, tt_num_changes,
1765                            ttvn);
1766
1767         tt_save_orig_buffer(bat_priv, orig_node, (unsigned char *)tt_change,
1768                             tt_num_changes);
1769         atomic_set(&orig_node->last_ttvn, ttvn);
1770 }
1771
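/* Return true if addr is a client currently served by this node; entries
 * marked as pending deletion are not considered.
 */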
1772 bool batadv_is_my_client(struct bat_priv *bat_priv, const uint8_t *addr)
1773 {
1774         struct tt_local_entry *tt_local_entry = NULL;
1775         bool ret = false;
1776
1777         tt_local_entry = tt_local_hash_find(bat_priv, addr);
1778         if (!tt_local_entry)
1779                 goto out;
1780         /* Check if the client has been logically deleted (but is kept for
1781          * consistency purposes)
1782          */
1783         if (tt_local_entry->common.flags & TT_CLIENT_PENDING)
1784                 goto out;
1785         ret = true;
1786 out:
1787         if (tt_local_entry)
1788                 tt_local_entry_free_ref(tt_local_entry);
1789         return ret;
1790 }
1791
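/* Process an incoming TT_RESPONSE: apply the full table or the diff it
 * carries, remove the matching pending request and recompute the CRC of the
 * originator's global table.
 */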
1792 void batadv_handle_tt_response(struct bat_priv *bat_priv,
1793                                struct tt_query_packet *tt_response)
1794 {
1795         struct tt_req_node *node, *safe;
1796         struct orig_node *orig_node = NULL;
1797
1798         bat_dbg(DBG_TT, bat_priv,
1799                 "Received TT_RESPONSE from %pM for ttvn %d t_size: %d [%c]\n",
1800                 tt_response->src, tt_response->ttvn,
1801                 ntohs(tt_response->tt_data),
1802                 (tt_response->flags & TT_FULL_TABLE ? 'F' : '.'));
1803
1804         /* we should never have asked a backbone gw */
1805         if (batadv_bla_is_backbone_gw_orig(bat_priv, tt_response->src))
1806                 goto out;
1807
1808         orig_node = batadv_orig_hash_find(bat_priv, tt_response->src);
1809         if (!orig_node)
1810                 goto out;
1811
1812         if (tt_response->flags & TT_FULL_TABLE)
1813                 tt_fill_gtable(bat_priv, tt_response);
1814         else
1815                 tt_update_changes(bat_priv, orig_node,
1816                                   ntohs(tt_response->tt_data),
1817                                   tt_response->ttvn,
1818                                   (struct tt_change *)(tt_response + 1));
1819
1820         /* Delete the tt_req_node from the pending tt_requests list */
1821         spin_lock_bh(&bat_priv->tt_req_list_lock);
1822         list_for_each_entry_safe(node, safe, &bat_priv->tt_req_list, list) {
1823                 if (!compare_eth(node->addr, tt_response->src))
1824                         continue;
1825                 list_del(&node->list);
1826                 kfree(node);
1827         }
1828         spin_unlock_bh(&bat_priv->tt_req_list_lock);
1829
1830         /* Recalculate the CRC for this orig_node and store it */
1831         orig_node->tt_crc = tt_global_crc(bat_priv, orig_node);
1832         /* Roaming phase is over: tables are in sync again. I can
1833          * unset the flag
1834          */
1835         orig_node->tt_poss_change = false;
1836 out:
1837         if (orig_node)
1838                 batadv_orig_node_free_ref(orig_node);
1839 }
1840
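/* Initialise the local and global translation tables and start the periodic
 * purge timer. Returns 1 on success, a negative value otherwise.
 */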
1841 int batadv_tt_init(struct bat_priv *bat_priv)
1842 {
1843         int ret;
1844
1845         ret = tt_local_init(bat_priv);
1846         if (ret < 0)
1847                 return ret;
1848
1849         ret = tt_global_init(bat_priv);
1850         if (ret < 0)
1851                 return ret;
1852
1853         tt_start_timer(bat_priv);
1854
1855         return 1;
1856 }
1857
1858 static void tt_roam_list_free(struct bat_priv *bat_priv)
1859 {
1860         struct tt_roam_node *node, *safe;
1861
1862         spin_lock_bh(&bat_priv->tt_roam_list_lock);
1863
1864         list_for_each_entry_safe(node, safe, &bat_priv->tt_roam_list, list) {
1865                 list_del(&node->list);
1866                 kfree(node);
1867         }
1868
1869         spin_unlock_bh(&bat_priv->tt_roam_list_lock);
1870 }
1871
1872 static void tt_roam_purge(struct bat_priv *bat_priv)
1873 {
1874         struct tt_roam_node *node, *safe;
1875
1876         spin_lock_bh(&bat_priv->tt_roam_list_lock);
1877         list_for_each_entry_safe(node, safe, &bat_priv->tt_roam_list, list) {
1878                 if (!has_timed_out(node->first_time, ROAMING_MAX_TIME))
1879                         continue;
1880
1881                 list_del(&node->list);
1882                 kfree(node);
1883         }
1884         spin_unlock_bh(&bat_priv->tt_roam_list_lock);
1885 }
1886
1887 /* This function checks whether the client has already reached the
1888  * maximum number of possible roaming phases. In that case the ROAMING_ADV
1889  * will not be sent.
1890  *
1891  * returns true if the ROAMING_ADV can be sent, false otherwise
1892  */
1893 static bool tt_check_roam_count(struct bat_priv *bat_priv,
1894                                 uint8_t *client)
1895 {
1896         struct tt_roam_node *tt_roam_node;
1897         bool ret = false;
1898
1899         spin_lock_bh(&bat_priv->tt_roam_list_lock);
1900         /* If the client has already roamed recently, consume one of its
1901          * remaining allowed roaming events instead of adding a new entry
1902          */
1903         list_for_each_entry(tt_roam_node, &bat_priv->tt_roam_list, list) {
1904                 if (!compare_eth(tt_roam_node->addr, client))
1905                         continue;
1906
1907                 if (has_timed_out(tt_roam_node->first_time, ROAMING_MAX_TIME))
1908                         continue;
1909
1910                 if (!atomic_dec_not_zero(&tt_roam_node->counter))
1911                         /* Sorry, you roamed too many times! */
1912                         goto unlock;
1913                 ret = true;
1914                 break;
1915         }
1916
1917         if (!ret) {
1918                 tt_roam_node = kmalloc(sizeof(*tt_roam_node), GFP_ATOMIC);
1919                 if (!tt_roam_node)
1920                         goto unlock;
1921
1922                 tt_roam_node->first_time = jiffies;
1923                 atomic_set(&tt_roam_node->counter, ROAMING_MAX_COUNT - 1);
1924                 memcpy(tt_roam_node->addr, client, ETH_ALEN);
1925
1926                 list_add(&tt_roam_node->list, &bat_priv->tt_roam_list);
1927                 ret = true;
1928         }
1929
1930 unlock:
1931         spin_unlock_bh(&bat_priv->tt_roam_list_lock);
1932         return ret;
1933 }
1934
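/* Send a ROAMING_ADV to orig_node to announce that the given client has
 * roamed to this node, unless the client already exceeded its allowed number
 * of roaming events.
 */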
1935 static void send_roam_adv(struct bat_priv *bat_priv, uint8_t *client,
1936                           struct orig_node *orig_node)
1937 {
1938         struct neigh_node *neigh_node = NULL;
1939         struct sk_buff *skb = NULL;
1940         struct roam_adv_packet *roam_adv_packet;
1941         int ret = 1;
1942         struct hard_iface *primary_if;
1943
1944         /* before going on we have to check whether the client has
1945          * already roamed to us too many times
1946          */
1947         if (!tt_check_roam_count(bat_priv, client))
1948                 goto out;
1949
1950         skb = dev_alloc_skb(sizeof(struct roam_adv_packet) + ETH_HLEN);
1951         if (!skb)
1952                 goto out;
1953
1954         skb_reserve(skb, ETH_HLEN);
1955
1956         roam_adv_packet = (struct roam_adv_packet *)skb_put(skb,
1957                                         sizeof(struct roam_adv_packet));
1958
1959         roam_adv_packet->header.packet_type = BAT_ROAM_ADV;
1960         roam_adv_packet->header.version = COMPAT_VERSION;
1961         roam_adv_packet->header.ttl = TTL;
1962         primary_if = batadv_primary_if_get_selected(bat_priv);
1963         if (!primary_if)
1964                 goto out;
1965         memcpy(roam_adv_packet->src, primary_if->net_dev->dev_addr, ETH_ALEN);
1966         batadv_hardif_free_ref(primary_if);
1967         memcpy(roam_adv_packet->dst, orig_node->orig, ETH_ALEN);
1968         memcpy(roam_adv_packet->client, client, ETH_ALEN);
1969
1970         neigh_node = batadv_orig_node_get_router(orig_node);
1971         if (!neigh_node)
1972                 goto out;
1973
1974         bat_dbg(DBG_TT, bat_priv,
1975                 "Sending ROAMING_ADV to %pM (client %pM) via %pM\n",
1976                 orig_node->orig, client, neigh_node->addr);
1977
1978         batadv_inc_counter(bat_priv, BAT_CNT_TT_ROAM_ADV_TX);
1979
1980         batadv_send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
1981         ret = 0;
1982
1983 out:
1984         if (neigh_node)
1985                 batadv_neigh_node_free_ref(neigh_node);
1986         if (ret)
1987                 kfree_skb(skb);
1988         return;
1989 }
1990
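/* Periodic worker: purge stale local entries, roamed-out global entries and
 * expired request/roaming list nodes, then re-arm the TT timer.
 */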
1991 static void tt_purge(struct work_struct *work)
1992 {
1993         struct delayed_work *delayed_work =
1994                 container_of(work, struct delayed_work, work);
1995         struct bat_priv *bat_priv =
1996                 container_of(delayed_work, struct bat_priv, tt_work);
1997
1998         tt_local_purge(bat_priv);
1999         tt_global_roam_purge(bat_priv);
2000         tt_req_purge(bat_priv);
2001         tt_roam_purge(bat_priv);
2002
2003         tt_start_timer(bat_priv);
2004 }
2005
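/* Stop the TT purge worker and free the local and global tables as well as
 * the pending request, change and roaming lists.
 */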
2006 void batadv_tt_free(struct bat_priv *bat_priv)
2007 {
2008         cancel_delayed_work_sync(&bat_priv->tt_work);
2009
2010         tt_local_table_free(bat_priv);
2011         tt_global_table_free(bat_priv);
2012         tt_req_list_free(bat_priv);
2013         tt_changes_list_free(bat_priv);
2014         tt_roam_list_free(bat_priv);
2015
2016         kfree(bat_priv->tt_buff);
2017 }
2018
2019 /* This function enables or disables the specified flags for all the entries
2020  * in the given hash table and returns the number of modified entries
2021  */
2022 static uint16_t tt_set_flags(struct hashtable_t *hash, uint16_t flags,
2023                              bool enable)
2024 {
2025         uint32_t i;
2026         uint16_t changed_num = 0;
2027         struct hlist_head *head;
2028         struct hlist_node *node;
2029         struct tt_common_entry *tt_common_entry;
2030
2031         if (!hash)
2032                 goto out;
2033
2034         for (i = 0; i < hash->size; i++) {
2035                 head = &hash->table[i];
2036
2037                 rcu_read_lock();
2038                 hlist_for_each_entry_rcu(tt_common_entry, node,
2039                                          head, hash_entry) {
2040                         if (enable) {
2041                                 if ((tt_common_entry->flags & flags) == flags)
2042                                         continue;
2043                                 tt_common_entry->flags |= flags;
2044                         } else {
2045                                 if (!(tt_common_entry->flags & flags))
2046                                         continue;
2047                                 tt_common_entry->flags &= ~flags;
2048                         }
2049                         changed_num++;
2050                 }
2051                 rcu_read_unlock();
2052         }
2053 out:
2054         return changed_num;
2055 }
2056
2057 /* Purge out all the tt local entries marked with TT_CLIENT_PENDING */
2058 static void tt_local_purge_pending_clients(struct bat_priv *bat_priv)
2059 {
2060         struct hashtable_t *hash = bat_priv->tt_local_hash;
2061         struct tt_common_entry *tt_common_entry;
2062         struct tt_local_entry *tt_local_entry;
2063         struct hlist_node *node, *node_tmp;
2064         struct hlist_head *head;
2065         spinlock_t *list_lock; /* protects write access to the hash lists */
2066         uint32_t i;
2067
2068         if (!hash)
2069                 return;
2070
2071         for (i = 0; i < hash->size; i++) {
2072                 head = &hash->table[i];
2073                 list_lock = &hash->list_locks[i];
2074
2075                 spin_lock_bh(list_lock);
2076                 hlist_for_each_entry_safe(tt_common_entry, node, node_tmp,
2077                                           head, hash_entry) {
2078                         if (!(tt_common_entry->flags & TT_CLIENT_PENDING))
2079                                 continue;
2080
2081                         bat_dbg(DBG_TT, bat_priv,
2082                                 "Deleting local tt entry (%pM): pending\n",
2083                                 tt_common_entry->addr);
2084
2085                         atomic_dec(&bat_priv->num_local_tt);
2086                         hlist_del_rcu(node);
2087                         tt_local_entry = container_of(tt_common_entry,
2088                                                       struct tt_local_entry,
2089                                                       common);
2090                         tt_local_entry_free_ref(tt_local_entry);
2091                 }
2092                 spin_unlock_bh(list_lock);
2093         }
2094 }
2096
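/* Commit all pending local TT changes: clear the TT_CLIENT_NEW flag, purge
 * clients pending deletion, recompute the local CRC and increment the ttvn.
 * Returns -ENOENT if there is nothing to commit, otherwise the result of
 * tt_changes_fill_buff().
 */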
2097 static int tt_commit_changes(struct bat_priv *bat_priv,
2098                              unsigned char **packet_buff, int *packet_buff_len,
2099                              int packet_min_len)
2100 {
2101         uint16_t changed_num = 0;
2102
2103         if (atomic_read(&bat_priv->tt_local_changes) < 1)
2104                 return -ENOENT;
2105
2106         changed_num = tt_set_flags(bat_priv->tt_local_hash,
2107                                    TT_CLIENT_NEW, false);
2108
2109         /* all reset entries have to be counted as local entries */
2110         atomic_add(changed_num, &bat_priv->num_local_tt);
2111         tt_local_purge_pending_clients(bat_priv);
2112         bat_priv->tt_crc = batadv_tt_local_crc(bat_priv);
2113
2114         /* Increment the TTVN only once per OGM interval */
2115         atomic_inc(&bat_priv->ttvn);
2116         bat_dbg(DBG_TT, bat_priv, "Local changes committed, updating to ttvn %u\n",
2117                 (uint8_t)atomic_read(&bat_priv->ttvn));
2118         bat_priv->tt_poss_change = false;
2119
2120         /* reset the sending counter */
2121         atomic_set(&bat_priv->tt_ogm_append_cnt, TT_OGM_APPEND_MAX);
2122
2123         return tt_changes_fill_buff(bat_priv, packet_buff,
2124                                     packet_buff_len, packet_min_len);
2125 }
2126
2127 /* when calling this function, (hard_iface == primary_if) has to be true */
2128 int batadv_tt_append_diff(struct bat_priv *bat_priv,
2129                           unsigned char **packet_buff, int *packet_buff_len,
2130                           int packet_min_len)
2131 {
2132         int tt_num_changes;
2133
2134         /* if at least one change happened */
2135         tt_num_changes = tt_commit_changes(bat_priv, packet_buff,
2136                                            packet_buff_len, packet_min_len);
2137
2138         /* if the changes have been sent often enough */
2139         if ((tt_num_changes < 0) &&
2140             (!atomic_dec_not_zero(&bat_priv->tt_ogm_append_cnt))) {
2141                 tt_realloc_packet_buff(packet_buff, packet_buff_len,
2142                                        packet_min_len, packet_min_len);
2143                 tt_num_changes = 0;
2144         }
2145
2146         return tt_num_changes;
2147 }
2148
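/* Return true if traffic from src to dst has to be dropped because of the AP
 * isolation feature; always returns false when AP isolation is disabled.
 */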
2149 bool batadv_is_ap_isolated(struct bat_priv *bat_priv, uint8_t *src,
2150                            uint8_t *dst)
2151 {
2152         struct tt_local_entry *tt_local_entry = NULL;
2153         struct tt_global_entry *tt_global_entry = NULL;
2154         bool ret = true;
2155
2156         if (!atomic_read(&bat_priv->ap_isolation))
2157                 return false;
2158
2159         tt_local_entry = tt_local_hash_find(bat_priv, dst);
2160         if (!tt_local_entry)
2161                 goto out;
2162
2163         tt_global_entry = tt_global_hash_find(bat_priv, src);
2164         if (!tt_global_entry)
2165                 goto out;
2166
2167         if (_is_ap_isolated(tt_local_entry, tt_global_entry))
2168                 goto out;
2169
2170         ret = false;
2171
2172 out:
2173         if (tt_global_entry)
2174                 tt_global_entry_free_ref(tt_global_entry);
2175         if (tt_local_entry)
2176                 tt_local_entry_free_ref(tt_local_entry);
2177         return ret;
2178 }
2179
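/* Keep the global table for orig_node in sync with the TT information carried
 * by its OGM: apply the attached diff when the ttvn advanced by exactly one
 * (or on the very first update), otherwise issue a TT_REQUEST whenever the
 * ttvn or the CRC do not match.
 */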
2180 void batadv_tt_update_orig(struct bat_priv *bat_priv,
2181                            struct orig_node *orig_node,
2182                            const unsigned char *tt_buff, uint8_t tt_num_changes,
2183                            uint8_t ttvn, uint16_t tt_crc)
2184 {
2185         uint8_t orig_ttvn = (uint8_t)atomic_read(&orig_node->last_ttvn);
2186         bool full_table = true;
2187
2188         /* don't care about updates from a backbone gateway. */
2189         if (batadv_bla_is_backbone_gw_orig(bat_priv, orig_node->orig))
2190                 return;
2191
2192         /* orig table not initialised AND first diff is in the OGM OR the ttvn
2193          * increased by one -> we can apply the attached changes
2194          */
2195         if ((!orig_node->tt_initialised && ttvn == 1) ||
2196             ttvn - orig_ttvn == 1) {
2197                 /* the OGM could not contain the changes due to their size or
2198                  * because they have already been sent TT_OGM_APPEND_MAX times.
2199                  * In this case send a tt request
2200                  */
2201                 if (!tt_num_changes) {
2202                         full_table = false;
2203                         goto request_table;
2204                 }
2205
2206                 tt_update_changes(bat_priv, orig_node, tt_num_changes, ttvn,
2207                                   (struct tt_change *)tt_buff);
2208
2209                 /* Even if we received the precomputed crc with the OGM, we
2210                  * prefer to recompute it to spot any possible inconsistency
2211                  * in the global table
2212                  */
2213                 orig_node->tt_crc = tt_global_crc(bat_priv, orig_node);
2214
2215                 /* The ttvn alone is not enough to guarantee consistency
2216                  * because a single value could represent different states
2217                  * (due to the wrap around). Thus a node has to check whether
2218                  * the resulting table (after applying the changes) is still
2219                  * consistent or not. E.g. a node could disconnect while its
2220                  * ttvn is X and reconnect on ttvn = X + TTVN_MAX: in this case
2221                  * checking the CRC value is mandatory to detect the
2222                  * inconsistency
2223                  */
2224                 if (orig_node->tt_crc != tt_crc)
2225                         goto request_table;
2226
2227                 /* Roaming phase is over: tables are in sync again. I can
2228                  * unset the flag
2229                  */
2230                 orig_node->tt_poss_change = false;
2231         } else {
2232                 /* if we missed more than one change or our tables are not
2233                  * in sync anymore -> request fresh tt data
2234                  */
2235                 if (!orig_node->tt_initialised || ttvn != orig_ttvn ||
2236                     orig_node->tt_crc != tt_crc) {
2237 request_table:
2238                         bat_dbg(DBG_TT, bat_priv,
2239                                 "TT inconsistency for %pM. Need to retrieve the correct information (ttvn: %u last_ttvn: %u crc: %u last_crc: %u num_changes: %u)\n",
2240                                 orig_node->orig, ttvn, orig_ttvn, tt_crc,
2241                                 orig_node->tt_crc, tt_num_changes);
2242                         send_tt_request(bat_priv, orig_node, ttvn, tt_crc,
2243                                         full_table);
2244                         return;
2245                 }
2246         }
2247 }
2248
2249 /* returns true if we know that the client has moved from its old
2250  * originator to another one. The entry is still kept for consistency
2251  * purposes
2252  */
2253 bool batadv_tt_global_client_is_roaming(struct bat_priv *bat_priv,
2254                                         uint8_t *addr)
2255 {
2256         struct tt_global_entry *tt_global_entry;
2257         bool ret = false;
2258
2259         tt_global_entry = tt_global_hash_find(bat_priv, addr);
2260         if (!tt_global_entry)
2261                 goto out;
2262
2263         ret = tt_global_entry->common.flags & TT_CLIENT_ROAM;
2264         tt_global_entry_free_ref(tt_global_entry);
2265 out:
2266         return ret;
2267 }