/* Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
 *
 * Marek Lindner, Simon Wunderlich, Antonio Quartulli
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */

#include "main.h"
#include "translation-table.h"
#include "soft-interface.h"
#include "hard-interface.h"
#include "send.h"
#include "hash.h"
#include "originator.h"
#include "routing.h"
#include "bridge_loop_avoidance.h"

#include <linux/crc16.h>

static void send_roam_adv(struct bat_priv *bat_priv, uint8_t *client,
                          struct orig_node *orig_node);
static void tt_purge(struct work_struct *work);
static void tt_global_del_orig_list(struct tt_global_entry *tt_global_entry);

/* returns 1 if they are the same mac addr */
static int compare_tt(const struct hlist_node *node, const void *data2)
{
        const void *data1 = container_of(node, struct tt_common_entry,
                                         hash_entry);

        return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0);
}

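/* (re)queue the delayed work that purges the translation table (tt_purge),
 * scheduled to run 5 seconds from now
 */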
static void tt_start_timer(struct bat_priv *bat_priv)
{
        INIT_DELAYED_WORK(&bat_priv->tt_work, tt_purge);
        queue_delayed_work(batadv_event_workqueue, &bat_priv->tt_work,
                           msecs_to_jiffies(5000));
}

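/* look up the entry with the given mac address in the given hash table and
 * return it with its refcount increased, or NULL if not found
 */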
static struct tt_common_entry *tt_hash_find(struct hashtable_t *hash,
                                            const void *data)
{
        struct hlist_head *head;
        struct hlist_node *node;
        struct tt_common_entry *tt_common_entry, *tt_common_entry_tmp = NULL;
        uint32_t index;

        if (!hash)
                return NULL;

        index = choose_orig(data, hash->size);
        head = &hash->table[index];

        rcu_read_lock();
        hlist_for_each_entry_rcu(tt_common_entry, node, head, hash_entry) {
                if (!compare_eth(tt_common_entry, data))
                        continue;

                if (!atomic_inc_not_zero(&tt_common_entry->refcount))
                        continue;

                tt_common_entry_tmp = tt_common_entry;
                break;
        }
        rcu_read_unlock();

        return tt_common_entry_tmp;
}

static struct tt_local_entry *tt_local_hash_find(struct bat_priv *bat_priv,
                                                 const void *data)
{
        struct tt_common_entry *tt_common_entry;
        struct tt_local_entry *tt_local_entry = NULL;

        tt_common_entry = tt_hash_find(bat_priv->tt_local_hash, data);
        if (tt_common_entry)
                tt_local_entry = container_of(tt_common_entry,
                                              struct tt_local_entry, common);
        return tt_local_entry;
}

static struct tt_global_entry *tt_global_hash_find(struct bat_priv *bat_priv,
                                                   const void *data)
{
        struct tt_common_entry *tt_common_entry;
        struct tt_global_entry *tt_global_entry = NULL;

        tt_common_entry = tt_hash_find(bat_priv->tt_global_hash, data);
        if (tt_common_entry)
                tt_global_entry = container_of(tt_common_entry,
                                               struct tt_global_entry, common);
        return tt_global_entry;
}

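/* drop a reference to a local tt entry and free it via RCU once the
 * refcount reaches zero
 */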
static void tt_local_entry_free_ref(struct tt_local_entry *tt_local_entry)
{
        if (atomic_dec_and_test(&tt_local_entry->common.refcount))
                kfree_rcu(tt_local_entry, common.rcu);
}

static void tt_global_entry_free_rcu(struct rcu_head *rcu)
{
        struct tt_common_entry *tt_common_entry;
        struct tt_global_entry *tt_global_entry;

        tt_common_entry = container_of(rcu, struct tt_common_entry, rcu);
        tt_global_entry = container_of(tt_common_entry, struct tt_global_entry,
                                       common);

        kfree(tt_global_entry);
}

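/* drop a reference to a global tt entry; on the last reference release the
 * attached originator list and free the entry via RCU
 */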
static void tt_global_entry_free_ref(struct tt_global_entry *tt_global_entry)
{
        if (atomic_dec_and_test(&tt_global_entry->common.refcount)) {
                tt_global_del_orig_list(tt_global_entry);
                call_rcu(&tt_global_entry->common.rcu,
                         tt_global_entry_free_rcu);
        }
}

static void tt_orig_list_entry_free_rcu(struct rcu_head *rcu)
{
        struct tt_orig_list_entry *orig_entry;

        orig_entry = container_of(rcu, struct tt_orig_list_entry, rcu);
        batadv_orig_node_free_ref(orig_entry->orig_node);
        kfree(orig_entry);
}

static void tt_orig_list_entry_free_ref(struct tt_orig_list_entry *orig_entry)
{
        /* to avoid race conditions, immediately decrease the tt counter */
        atomic_dec(&orig_entry->orig_node->tt_size);
        call_rcu(&orig_entry->rcu, tt_orig_list_entry_free_rcu);
}

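/* queue a tt change event (addition/deletion of a local client) so that it
 * can be appended to the next OGM
 */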
static void tt_local_event(struct bat_priv *bat_priv, const uint8_t *addr,
                           uint8_t flags)
{
        struct tt_change_node *tt_change_node;

        tt_change_node = kmalloc(sizeof(*tt_change_node), GFP_ATOMIC);

        if (!tt_change_node)
                return;

        tt_change_node->change.flags = flags;
        memcpy(tt_change_node->change.addr, addr, ETH_ALEN);

        spin_lock_bh(&bat_priv->tt_changes_list_lock);
        /* track the change in the OGM interval list */
        list_add_tail(&tt_change_node->list, &bat_priv->tt_changes_list);
        atomic_inc(&bat_priv->tt_local_changes);
        spin_unlock_bh(&bat_priv->tt_changes_list_lock);

        atomic_set(&bat_priv->tt_ogm_append_cnt, 0);
}

int batadv_tt_len(int changes_num)
{
        return changes_num * sizeof(struct tt_change);
}

static int tt_local_init(struct bat_priv *bat_priv)
{
        if (bat_priv->tt_local_hash)
                return 0;

        bat_priv->tt_local_hash = batadv_hash_new(1024);

        if (!bat_priv->tt_local_hash)
                return -ENOMEM;

        return 0;
}

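/* add a new local tt entry for the given client address (or refresh an
 * existing one) and announce the change; if the client was previously known
 * as a global entry, treat it as a roaming event
 */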
void batadv_tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
                         int ifindex)
{
        struct bat_priv *bat_priv = netdev_priv(soft_iface);
        struct tt_local_entry *tt_local_entry = NULL;
        struct tt_global_entry *tt_global_entry = NULL;
        struct hlist_head *head;
        struct hlist_node *node;
        struct tt_orig_list_entry *orig_entry;
        int hash_added;

        tt_local_entry = tt_local_hash_find(bat_priv, addr);

        if (tt_local_entry) {
                tt_local_entry->last_seen = jiffies;
                /* possibly unset the TT_CLIENT_PENDING flag */
                tt_local_entry->common.flags &= ~TT_CLIENT_PENDING;
                goto out;
        }

        tt_local_entry = kmalloc(sizeof(*tt_local_entry), GFP_ATOMIC);
        if (!tt_local_entry)
                goto out;

        bat_dbg(DBG_TT, bat_priv,
                "Creating new local tt entry: %pM (ttvn: %d)\n", addr,
                (uint8_t)atomic_read(&bat_priv->ttvn));

        memcpy(tt_local_entry->common.addr, addr, ETH_ALEN);
        tt_local_entry->common.flags = NO_FLAGS;
        if (batadv_is_wifi_iface(ifindex))
                tt_local_entry->common.flags |= TT_CLIENT_WIFI;
        atomic_set(&tt_local_entry->common.refcount, 2);
        tt_local_entry->last_seen = jiffies;

        /* the batman interface mac address should never be purged */
        if (compare_eth(addr, soft_iface->dev_addr))
                tt_local_entry->common.flags |= TT_CLIENT_NOPURGE;

        /* The local entry has to be marked as NEW to avoid sending it in
         * a full table response going out before the next ttvn increment
         * (consistency check)
         */
        tt_local_entry->common.flags |= TT_CLIENT_NEW;

        hash_added = hash_add(bat_priv->tt_local_hash, compare_tt, choose_orig,
                              &tt_local_entry->common,
                              &tt_local_entry->common.hash_entry);

        if (unlikely(hash_added != 0)) {
                /* remove the reference for the hash */
                tt_local_entry_free_ref(tt_local_entry);
                goto out;
        }

        tt_local_event(bat_priv, addr, tt_local_entry->common.flags);

        /* remove address from global hash if present */
        tt_global_entry = tt_global_hash_find(bat_priv, addr);

        /* Check whether this is a roaming event */
        if (tt_global_entry) {
                /* These nodes are probably going to update their tt tables */
                head = &tt_global_entry->orig_list;
                rcu_read_lock();
                hlist_for_each_entry_rcu(orig_entry, node, head, list) {
                        orig_entry->orig_node->tt_poss_change = true;

                        send_roam_adv(bat_priv, tt_global_entry->common.addr,
                                      orig_entry->orig_node);
                }
                rcu_read_unlock();
                /* The global entry has to be marked as ROAMING and
                 * has to be kept for consistency purposes
                 */
                tt_global_entry->common.flags |= TT_CLIENT_ROAM;
                tt_global_entry->roam_at = jiffies;
        }
out:
        if (tt_local_entry)
                tt_local_entry_free_ref(tt_local_entry);
        if (tt_global_entry)
                tt_global_entry_free_ref(tt_global_entry);
}

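/* resize the packet buffer to new_packet_len bytes, preserving the first
 * min_packet_len bytes; the old buffer is kept if the allocation fails
 */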
static void tt_realloc_packet_buff(unsigned char **packet_buff,
                                   int *packet_buff_len, int min_packet_len,
                                   int new_packet_len)
{
        unsigned char *new_buff;

        new_buff = kmalloc(new_packet_len, GFP_ATOMIC);

        /* keep the old buffer if kmalloc fails */
        if (new_buff) {
                memcpy(new_buff, *packet_buff, min_packet_len);
                kfree(*packet_buff);
                *packet_buff = new_buff;
                *packet_buff_len = new_packet_len;
        }
}

static void tt_prepare_packet_buff(struct bat_priv *bat_priv,
                                   unsigned char **packet_buff,
                                   int *packet_buff_len, int min_packet_len)
{
        struct hard_iface *primary_if;
        int req_len;

        primary_if = primary_if_get_selected(bat_priv);

        req_len = min_packet_len;
        req_len += batadv_tt_len(atomic_read(&bat_priv->tt_local_changes));

        /* if we have too many changes for one packet don't send any
         * and wait for the tt table request which will be fragmented
         */
        if ((!primary_if) || (req_len > primary_if->soft_iface->mtu))
                req_len = min_packet_len;

        tt_realloc_packet_buff(packet_buff, packet_buff_len,
                               min_packet_len, req_len);

        if (primary_if)
                hardif_free_ref(primary_if);
}

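/* copy the pending local tt changes into the packet buffer (as many as fit)
 * and keep a copy for later tt_request replies; returns the number of
 * changes written
 */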
static int tt_changes_fill_buff(struct bat_priv *bat_priv,
                                unsigned char **packet_buff,
                                int *packet_buff_len, int min_packet_len)
{
        struct tt_change_node *entry, *safe;
        int count = 0, tot_changes = 0, new_len;
        unsigned char *tt_buff;

        tt_prepare_packet_buff(bat_priv, packet_buff,
                               packet_buff_len, min_packet_len);

        new_len = *packet_buff_len - min_packet_len;
        tt_buff = *packet_buff + min_packet_len;

        if (new_len > 0)
                tot_changes = new_len / batadv_tt_len(1);

        spin_lock_bh(&bat_priv->tt_changes_list_lock);
        atomic_set(&bat_priv->tt_local_changes, 0);

        list_for_each_entry_safe(entry, safe, &bat_priv->tt_changes_list,
                                 list) {
                if (count < tot_changes) {
                        memcpy(tt_buff + batadv_tt_len(count),
                               &entry->change, sizeof(struct tt_change));
                        count++;
                }
                list_del(&entry->list);
                kfree(entry);
        }
        spin_unlock_bh(&bat_priv->tt_changes_list_lock);

        /* Keep the buffer for possible tt_request */
        spin_lock_bh(&bat_priv->tt_buff_lock);
        kfree(bat_priv->tt_buff);
        bat_priv->tt_buff_len = 0;
        bat_priv->tt_buff = NULL;
        /* check whether this new OGM has no changes due to size problems */
        if (new_len > 0) {
                /* if kmalloc() fails we will reply with the full table
                 * instead of providing the diff
                 */
                bat_priv->tt_buff = kmalloc(new_len, GFP_ATOMIC);
                if (bat_priv->tt_buff) {
                        memcpy(bat_priv->tt_buff, tt_buff, new_len);
                        bat_priv->tt_buff_len = new_len;
                }
        }
        spin_unlock_bh(&bat_priv->tt_buff_lock);

        return count;
}

int batadv_tt_local_seq_print_text(struct seq_file *seq, void *offset)
{
        struct net_device *net_dev = (struct net_device *)seq->private;
        struct bat_priv *bat_priv = netdev_priv(net_dev);
        struct hashtable_t *hash = bat_priv->tt_local_hash;
        struct tt_common_entry *tt_common_entry;
        struct hard_iface *primary_if;
        struct hlist_node *node;
        struct hlist_head *head;
        uint32_t i;
        int ret = 0;

        primary_if = primary_if_get_selected(bat_priv);
        if (!primary_if) {
                ret = seq_printf(seq,
                                 "BATMAN mesh %s disabled - please specify interfaces to enable it\n",
                                 net_dev->name);
                goto out;
        }

        if (primary_if->if_status != IF_ACTIVE) {
                ret = seq_printf(seq,
                                 "BATMAN mesh %s disabled - primary interface not active\n",
                                 net_dev->name);
                goto out;
        }

        seq_printf(seq,
                   "Locally retrieved addresses (from %s) announced via TT (TTVN: %u):\n",
                   net_dev->name, (uint8_t)atomic_read(&bat_priv->ttvn));

        for (i = 0; i < hash->size; i++) {
                head = &hash->table[i];

                rcu_read_lock();
                hlist_for_each_entry_rcu(tt_common_entry, node,
                                         head, hash_entry) {
                        seq_printf(seq, " * %pM [%c%c%c%c%c]\n",
                                   tt_common_entry->addr,
                                   (tt_common_entry->flags &
                                    TT_CLIENT_ROAM ? 'R' : '.'),
                                   (tt_common_entry->flags &
                                    TT_CLIENT_NOPURGE ? 'P' : '.'),
                                   (tt_common_entry->flags &
                                    TT_CLIENT_NEW ? 'N' : '.'),
                                   (tt_common_entry->flags &
                                    TT_CLIENT_PENDING ? 'X' : '.'),
                                   (tt_common_entry->flags &
                                    TT_CLIENT_WIFI ? 'W' : '.'));
                }
                rcu_read_unlock();
        }
out:
        if (primary_if)
                hardif_free_ref(primary_if);
        return ret;
}

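/* mark a local tt entry as pending removal and announce the change */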
static void tt_local_set_pending(struct bat_priv *bat_priv,
                                 struct tt_local_entry *tt_local_entry,
                                 uint16_t flags, const char *message)
{
        tt_local_event(bat_priv, tt_local_entry->common.addr,
                       tt_local_entry->common.flags | flags);

        /* The local client has to be marked as "pending to be removed" but has
         * to be kept in the table in order to send it in a full table
         * response issued before the next ttvn increment (consistency check)
         */
        tt_local_entry->common.flags |= TT_CLIENT_PENDING;

        bat_dbg(DBG_TT, bat_priv,
                "Local tt entry (%pM) pending to be removed: %s\n",
                tt_local_entry->common.addr, message);
}

void batadv_tt_local_remove(struct bat_priv *bat_priv, const uint8_t *addr,
                            const char *message, bool roaming)
{
        struct tt_local_entry *tt_local_entry = NULL;

        tt_local_entry = tt_local_hash_find(bat_priv, addr);
        if (!tt_local_entry)
                goto out;

        tt_local_set_pending(bat_priv, tt_local_entry, TT_CLIENT_DEL |
                             (roaming ? TT_CLIENT_ROAM : NO_FLAGS), message);
out:
        if (tt_local_entry)
                tt_local_entry_free_ref(tt_local_entry);
}

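/* mark timed out local tt entries as pending removal */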
static void tt_local_purge(struct bat_priv *bat_priv)
{
        struct hashtable_t *hash = bat_priv->tt_local_hash;
        struct tt_local_entry *tt_local_entry;
        struct tt_common_entry *tt_common_entry;
        struct hlist_node *node, *node_tmp;
        struct hlist_head *head;
        spinlock_t *list_lock; /* protects write access to the hash lists */
        uint32_t i;

        for (i = 0; i < hash->size; i++) {
                head = &hash->table[i];
                list_lock = &hash->list_locks[i];

                spin_lock_bh(list_lock);
                hlist_for_each_entry_safe(tt_common_entry, node, node_tmp,
                                          head, hash_entry) {
                        tt_local_entry = container_of(tt_common_entry,
                                                      struct tt_local_entry,
                                                      common);
                        if (tt_local_entry->common.flags & TT_CLIENT_NOPURGE)
                                continue;

                        /* entry already marked for deletion */
                        if (tt_local_entry->common.flags & TT_CLIENT_PENDING)
                                continue;

                        if (!has_timed_out(tt_local_entry->last_seen,
                                           TT_LOCAL_TIMEOUT))
                                continue;

                        tt_local_set_pending(bat_priv, tt_local_entry,
                                             TT_CLIENT_DEL, "timed out");
                }
                spin_unlock_bh(list_lock);
        }
}

static void tt_local_table_free(struct bat_priv *bat_priv)
{
        struct hashtable_t *hash;
        spinlock_t *list_lock; /* protects write access to the hash lists */
        struct tt_common_entry *tt_common_entry;
        struct tt_local_entry *tt_local_entry;
        struct hlist_node *node, *node_tmp;
        struct hlist_head *head;
        uint32_t i;

        if (!bat_priv->tt_local_hash)
                return;

        hash = bat_priv->tt_local_hash;

        for (i = 0; i < hash->size; i++) {
                head = &hash->table[i];
                list_lock = &hash->list_locks[i];

                spin_lock_bh(list_lock);
                hlist_for_each_entry_safe(tt_common_entry, node, node_tmp,
                                          head, hash_entry) {
                        hlist_del_rcu(node);
                        tt_local_entry = container_of(tt_common_entry,
                                                      struct tt_local_entry,
                                                      common);
                        tt_local_entry_free_ref(tt_local_entry);
                }
                spin_unlock_bh(list_lock);
        }

        batadv_hash_destroy(hash);

        bat_priv->tt_local_hash = NULL;
}

static int tt_global_init(struct bat_priv *bat_priv)
{
        if (bat_priv->tt_global_hash)
                return 0;

        bat_priv->tt_global_hash = batadv_hash_new(1024);

        if (!bat_priv->tt_global_hash)
                return -ENOMEM;

        return 0;
}

static void tt_changes_list_free(struct bat_priv *bat_priv)
{
        struct tt_change_node *entry, *safe;

        spin_lock_bh(&bat_priv->tt_changes_list_lock);

        list_for_each_entry_safe(entry, safe, &bat_priv->tt_changes_list,
                                 list) {
                list_del(&entry->list);
                kfree(entry);
        }

        atomic_set(&bat_priv->tt_local_changes, 0);
        spin_unlock_bh(&bat_priv->tt_changes_list_lock);
}

/* find out if an orig_node is already in the list of a tt_global_entry.
 * returns true if found, false otherwise
 */
static bool tt_global_entry_has_orig(const struct tt_global_entry *entry,
                                     const struct orig_node *orig_node)
{
        struct tt_orig_list_entry *tmp_orig_entry;
        const struct hlist_head *head;
        struct hlist_node *node;
        bool found = false;

        rcu_read_lock();
        head = &entry->orig_list;
        hlist_for_each_entry_rcu(tmp_orig_entry, node, head, list) {
                if (tmp_orig_entry->orig_node == orig_node) {
                        found = true;
                        break;
                }
        }
        rcu_read_unlock();
        return found;
}

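/* attach an originator to a global tt entry: the orig_node refcount and its
 * tt_size counter are increased and the new list entry is added under the
 * entry's list lock
 */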
static void tt_global_add_orig_entry(struct tt_global_entry *tt_global_entry,
                                     struct orig_node *orig_node,
                                     int ttvn)
{
        struct tt_orig_list_entry *orig_entry;

        orig_entry = kzalloc(sizeof(*orig_entry), GFP_ATOMIC);
        if (!orig_entry)
                return;

        INIT_HLIST_NODE(&orig_entry->list);
        atomic_inc(&orig_node->refcount);
        atomic_inc(&orig_node->tt_size);
        orig_entry->orig_node = orig_node;
        orig_entry->ttvn = ttvn;

        spin_lock_bh(&tt_global_entry->list_lock);
        hlist_add_head_rcu(&orig_entry->list,
                           &tt_global_entry->orig_list);
        spin_unlock_bh(&tt_global_entry->list_lock);
}

/* caller must hold orig_node refcount */
int batadv_tt_global_add(struct bat_priv *bat_priv, struct orig_node *orig_node,
                         const unsigned char *tt_addr, uint8_t ttvn,
                         bool roaming, bool wifi)
{
        struct tt_global_entry *tt_global_entry = NULL;
        int ret = 0;
        int hash_added;

        tt_global_entry = tt_global_hash_find(bat_priv, tt_addr);

        if (!tt_global_entry) {
                tt_global_entry = kzalloc(sizeof(*tt_global_entry),
                                          GFP_ATOMIC);
                if (!tt_global_entry)
                        goto out;

                memcpy(tt_global_entry->common.addr, tt_addr, ETH_ALEN);

                tt_global_entry->common.flags = NO_FLAGS;
                tt_global_entry->roam_at = 0;
                atomic_set(&tt_global_entry->common.refcount, 2);

                INIT_HLIST_HEAD(&tt_global_entry->orig_list);
                spin_lock_init(&tt_global_entry->list_lock);

                hash_added = hash_add(bat_priv->tt_global_hash, compare_tt,
                                      choose_orig, &tt_global_entry->common,
                                      &tt_global_entry->common.hash_entry);

                if (unlikely(hash_added != 0)) {
                        /* remove the reference for the hash */
                        tt_global_entry_free_ref(tt_global_entry);
                        goto out_remove;
                }

                tt_global_add_orig_entry(tt_global_entry, orig_node, ttvn);
        } else {
                /* there is already a global entry, use this one. */

                /* If the TT_CLIENT_ROAM flag is set, there is only one
                 * originator left in the list and we previously received a
                 * delete + roaming change for this originator.
                 *
                 * We should first delete the old originator before adding the
                 * new one.
                 */
                if (tt_global_entry->common.flags & TT_CLIENT_ROAM) {
                        tt_global_del_orig_list(tt_global_entry);
                        tt_global_entry->common.flags &= ~TT_CLIENT_ROAM;
                        tt_global_entry->roam_at = 0;
                }

                if (!tt_global_entry_has_orig(tt_global_entry, orig_node))
                        tt_global_add_orig_entry(tt_global_entry, orig_node,
                                                 ttvn);
        }

        if (wifi)
                tt_global_entry->common.flags |= TT_CLIENT_WIFI;

        bat_dbg(DBG_TT, bat_priv,
                "Creating new global tt entry: %pM (via %pM)\n",
                tt_global_entry->common.addr, orig_node->orig);

out_remove:
        /* remove address from local hash if present */
        batadv_tt_local_remove(bat_priv, tt_global_entry->common.addr,
                               "global tt received", roaming);
        ret = 1;
out:
        if (tt_global_entry)
                tt_global_entry_free_ref(tt_global_entry);
        return ret;
}

/* print all originators that announce the address for this global entry.
 * it is assumed that the caller holds rcu_read_lock();
 */
static void tt_global_print_entry(struct tt_global_entry *tt_global_entry,
                                  struct seq_file *seq)
{
        struct hlist_head *head;
        struct hlist_node *node;
        struct tt_orig_list_entry *orig_entry;
        struct tt_common_entry *tt_common_entry;
        uint16_t flags;
        uint8_t last_ttvn;

        tt_common_entry = &tt_global_entry->common;

        head = &tt_global_entry->orig_list;

        hlist_for_each_entry_rcu(orig_entry, node, head, list) {
                flags = tt_common_entry->flags;
                last_ttvn = atomic_read(&orig_entry->orig_node->last_ttvn);
                seq_printf(seq, " * %pM  (%3u) via %pM     (%3u)   [%c%c]\n",
                           tt_global_entry->common.addr, orig_entry->ttvn,
                           orig_entry->orig_node->orig, last_ttvn,
                           (flags & TT_CLIENT_ROAM ? 'R' : '.'),
                           (flags & TT_CLIENT_WIFI ? 'W' : '.'));
        }
}

int batadv_tt_global_seq_print_text(struct seq_file *seq, void *offset)
{
        struct net_device *net_dev = (struct net_device *)seq->private;
        struct bat_priv *bat_priv = netdev_priv(net_dev);
        struct hashtable_t *hash = bat_priv->tt_global_hash;
        struct tt_common_entry *tt_common_entry;
        struct tt_global_entry *tt_global_entry;
        struct hard_iface *primary_if;
        struct hlist_node *node;
        struct hlist_head *head;
        uint32_t i;
        int ret = 0;

        primary_if = primary_if_get_selected(bat_priv);
        if (!primary_if) {
                ret = seq_printf(seq,
                                 "BATMAN mesh %s disabled - please specify interfaces to enable it\n",
                                 net_dev->name);
                goto out;
        }

        if (primary_if->if_status != IF_ACTIVE) {
                ret = seq_printf(seq,
                                 "BATMAN mesh %s disabled - primary interface not active\n",
                                 net_dev->name);
                goto out;
        }

        seq_printf(seq,
                   "Globally announced TT entries received via the mesh %s\n",
                   net_dev->name);
        seq_printf(seq, "       %-13s %s       %-15s %s %s\n",
                   "Client", "(TTVN)", "Originator", "(Curr TTVN)", "Flags");

        for (i = 0; i < hash->size; i++) {
                head = &hash->table[i];

                rcu_read_lock();
                hlist_for_each_entry_rcu(tt_common_entry, node,
                                         head, hash_entry) {
                        tt_global_entry = container_of(tt_common_entry,
                                                       struct tt_global_entry,
                                                       common);
                        tt_global_print_entry(tt_global_entry, seq);
                }
                rcu_read_unlock();
        }
out:
        if (primary_if)
                hardif_free_ref(primary_if);
        return ret;
}

/* deletes the orig list of a tt_global_entry */
static void tt_global_del_orig_list(struct tt_global_entry *tt_global_entry)
{
        struct hlist_head *head;
        struct hlist_node *node, *safe;
        struct tt_orig_list_entry *orig_entry;

        spin_lock_bh(&tt_global_entry->list_lock);
        head = &tt_global_entry->orig_list;
        hlist_for_each_entry_safe(orig_entry, node, safe, head, list) {
                hlist_del_rcu(node);
                tt_orig_list_entry_free_ref(orig_entry);
        }
        spin_unlock_bh(&tt_global_entry->list_lock);
}

static void tt_global_del_orig_entry(struct bat_priv *bat_priv,
                                     struct tt_global_entry *tt_global_entry,
                                     struct orig_node *orig_node,
                                     const char *message)
{
        struct hlist_head *head;
        struct hlist_node *node, *safe;
        struct tt_orig_list_entry *orig_entry;

        spin_lock_bh(&tt_global_entry->list_lock);
        head = &tt_global_entry->orig_list;
        hlist_for_each_entry_safe(orig_entry, node, safe, head, list) {
                if (orig_entry->orig_node == orig_node) {
                        bat_dbg(DBG_TT, bat_priv,
                                "Deleting %pM from global tt entry %pM: %s\n",
                                orig_node->orig, tt_global_entry->common.addr,
                                message);
                        hlist_del_rcu(node);
                        tt_orig_list_entry_free_ref(orig_entry);
                }
        }
        spin_unlock_bh(&tt_global_entry->list_lock);
}

static void tt_global_del_struct(struct bat_priv *bat_priv,
                                 struct tt_global_entry *tt_global_entry,
                                 const char *message)
{
        bat_dbg(DBG_TT, bat_priv,
                "Deleting global tt entry %pM: %s\n",
                tt_global_entry->common.addr, message);

        hash_remove(bat_priv->tt_global_hash, compare_tt, choose_orig,
                    tt_global_entry->common.addr);
        tt_global_entry_free_ref(tt_global_entry);
}

/* If the client is to be deleted, we check if it is the last originator entry
 * within the tt_global entry. If yes, we set the TT_CLIENT_ROAM flag and the
 * timer, otherwise we simply remove the originator scheduled for deletion.
 */
static void tt_global_del_roaming(struct bat_priv *bat_priv,
                                  struct tt_global_entry *tt_global_entry,
                                  struct orig_node *orig_node,
                                  const char *message)
{
        bool last_entry = true;
        struct hlist_head *head;
        struct hlist_node *node;
        struct tt_orig_list_entry *orig_entry;

        /* no local entry exists, case 1:
         * Check if this is the last one or if other entries exist.
         */

        rcu_read_lock();
        head = &tt_global_entry->orig_list;
        hlist_for_each_entry_rcu(orig_entry, node, head, list) {
                if (orig_entry->orig_node != orig_node) {
                        last_entry = false;
                        break;
                }
        }
        rcu_read_unlock();

        if (last_entry) {
                /* it's the last one, mark it for roaming. */
                tt_global_entry->common.flags |= TT_CLIENT_ROAM;
                tt_global_entry->roam_at = jiffies;
        } else
                /* there is another entry, we can simply delete this
                 * one and can still use the other one.
                 */
                tt_global_del_orig_entry(bat_priv, tt_global_entry,
                                         orig_node, message);
}

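/* delete the originator's announcement of the given client address; roaming
 * events are handled separately (see the in-code comment below)
 */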
static void tt_global_del(struct bat_priv *bat_priv,
                          struct orig_node *orig_node,
                          const unsigned char *addr,
                          const char *message, bool roaming)
{
        struct tt_global_entry *tt_global_entry = NULL;
        struct tt_local_entry *tt_local_entry = NULL;

        tt_global_entry = tt_global_hash_find(bat_priv, addr);
        if (!tt_global_entry)
                goto out;

        if (!roaming) {
                tt_global_del_orig_entry(bat_priv, tt_global_entry, orig_node,
                                         message);

                if (hlist_empty(&tt_global_entry->orig_list))
                        tt_global_del_struct(bat_priv, tt_global_entry,
                                             message);

                goto out;
        }

        /* if we are deleting a global entry due to a roam
         * event, there are two possibilities:
         * 1) the client roamed from node A to node B => if there
         *    is only one originator left for this client, we mark
         *    it with TT_CLIENT_ROAM, we start a timer and we
         *    wait for node B to claim it. In case of timeout
         *    the entry is purged.
         *
         *    If there are other originators left, we directly delete
         *    the originator.
         * 2) the client roamed to us => we can directly delete
         *    the global entry, since it is useless now.
         */
        tt_local_entry = tt_local_hash_find(bat_priv,
                                            tt_global_entry->common.addr);
        if (tt_local_entry) {
                /* local entry exists, case 2: client roamed to us. */
                tt_global_del_orig_list(tt_global_entry);
                tt_global_del_struct(bat_priv, tt_global_entry, message);
        } else
                /* no local entry exists, case 1: check for roaming */
                tt_global_del_roaming(bat_priv, tt_global_entry, orig_node,
                                      message);

out:
        if (tt_global_entry)
                tt_global_entry_free_ref(tt_global_entry);
        if (tt_local_entry)
                tt_local_entry_free_ref(tt_local_entry);
}

void batadv_tt_global_del_orig(struct bat_priv *bat_priv,
                               struct orig_node *orig_node, const char *message)
{
        struct tt_global_entry *tt_global_entry;
        struct tt_common_entry *tt_common_entry;
        uint32_t i;
        struct hashtable_t *hash = bat_priv->tt_global_hash;
        struct hlist_node *node, *safe;
        struct hlist_head *head;
        spinlock_t *list_lock; /* protects write access to the hash lists */

        if (!hash)
                return;

        for (i = 0; i < hash->size; i++) {
                head = &hash->table[i];
                list_lock = &hash->list_locks[i];

                spin_lock_bh(list_lock);
                hlist_for_each_entry_safe(tt_common_entry, node, safe,
                                          head, hash_entry) {
                        tt_global_entry = container_of(tt_common_entry,
                                                       struct tt_global_entry,
                                                       common);

                        tt_global_del_orig_entry(bat_priv, tt_global_entry,
                                                 orig_node, message);

                        if (hlist_empty(&tt_global_entry->orig_list)) {
                                bat_dbg(DBG_TT, bat_priv,
                                        "Deleting global tt entry %pM: %s\n",
                                        tt_global_entry->common.addr,
                                        message);
                                hlist_del_rcu(node);
                                tt_global_entry_free_ref(tt_global_entry);
                        }
                }
                spin_unlock_bh(list_lock);
        }
        orig_node->tt_initialised = false;
}

static void tt_global_roam_purge(struct bat_priv *bat_priv)
{
        struct hashtable_t *hash = bat_priv->tt_global_hash;
        struct tt_common_entry *tt_common_entry;
        struct tt_global_entry *tt_global_entry;
        struct hlist_node *node, *node_tmp;
        struct hlist_head *head;
        spinlock_t *list_lock; /* protects write access to the hash lists */
        uint32_t i;

        for (i = 0; i < hash->size; i++) {
                head = &hash->table[i];
                list_lock = &hash->list_locks[i];

                spin_lock_bh(list_lock);
                hlist_for_each_entry_safe(tt_common_entry, node, node_tmp,
                                          head, hash_entry) {
                        tt_global_entry = container_of(tt_common_entry,
                                                       struct tt_global_entry,
                                                       common);
                        if (!(tt_global_entry->common.flags & TT_CLIENT_ROAM))
                                continue;
                        if (!has_timed_out(tt_global_entry->roam_at,
                                           TT_CLIENT_ROAM_TIMEOUT))
                                continue;

                        bat_dbg(DBG_TT, bat_priv,
                                "Deleting global tt entry (%pM): Roaming timeout\n",
                                tt_global_entry->common.addr);

                        hlist_del_rcu(node);
                        tt_global_entry_free_ref(tt_global_entry);
                }
                spin_unlock_bh(list_lock);
        }
}

static void tt_global_table_free(struct bat_priv *bat_priv)
{
        struct hashtable_t *hash;
        spinlock_t *list_lock; /* protects write access to the hash lists */
        struct tt_common_entry *tt_common_entry;
        struct tt_global_entry *tt_global_entry;
        struct hlist_node *node, *node_tmp;
        struct hlist_head *head;
        uint32_t i;

        if (!bat_priv->tt_global_hash)
                return;

        hash = bat_priv->tt_global_hash;

        for (i = 0; i < hash->size; i++) {
                head = &hash->table[i];
                list_lock = &hash->list_locks[i];

                spin_lock_bh(list_lock);
                hlist_for_each_entry_safe(tt_common_entry, node, node_tmp,
                                          head, hash_entry) {
                        hlist_del_rcu(node);
                        tt_global_entry = container_of(tt_common_entry,
                                                       struct tt_global_entry,
                                                       common);
                        tt_global_entry_free_ref(tt_global_entry);
                }
                spin_unlock_bh(list_lock);
        }

        batadv_hash_destroy(hash);

        bat_priv->tt_global_hash = NULL;
}

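/* returns true if both the local and the global client are marked as wifi
 * clients (used by the AP isolation check)
 */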
static bool _is_ap_isolated(struct tt_local_entry *tt_local_entry,
                            struct tt_global_entry *tt_global_entry)
{
        bool ret = false;

        if (tt_local_entry->common.flags & TT_CLIENT_WIFI &&
            tt_global_entry->common.flags & TT_CLIENT_WIFI)
                ret = true;

        return ret;
}

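/* look up the originator announcing 'addr' in the global table and return
 * the one reachable with the best tq value; returns NULL if the entry is
 * unknown or if AP isolation forbids communication between src and addr
 */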
struct orig_node *batadv_transtable_search(struct bat_priv *bat_priv,
                                           const uint8_t *src,
                                           const uint8_t *addr)
{
        struct tt_local_entry *tt_local_entry = NULL;
        struct tt_global_entry *tt_global_entry = NULL;
        struct orig_node *orig_node = NULL;
        struct neigh_node *router = NULL;
        struct hlist_head *head;
        struct hlist_node *node;
        struct tt_orig_list_entry *orig_entry;
        int best_tq;

        if (src && atomic_read(&bat_priv->ap_isolation)) {
                tt_local_entry = tt_local_hash_find(bat_priv, src);
                if (!tt_local_entry)
                        goto out;
        }

        tt_global_entry = tt_global_hash_find(bat_priv, addr);
        if (!tt_global_entry)
                goto out;

        /* check whether the clients should not communicate due to AP
         * isolation
         */
        if (tt_local_entry && _is_ap_isolated(tt_local_entry, tt_global_entry))
                goto out;

        best_tq = 0;

        rcu_read_lock();
        head = &tt_global_entry->orig_list;
        hlist_for_each_entry_rcu(orig_entry, node, head, list) {
                router = batadv_orig_node_get_router(orig_entry->orig_node);
                if (!router)
                        continue;

                if (router->tq_avg > best_tq) {
                        orig_node = orig_entry->orig_node;
                        best_tq = router->tq_avg;
                }
                batadv_neigh_node_free_ref(router);
        }
        /* found anything? */
        if (orig_node && !atomic_inc_not_zero(&orig_node->refcount))
                orig_node = NULL;
        rcu_read_unlock();
out:
        if (tt_global_entry)
                tt_global_entry_free_ref(tt_global_entry);
        if (tt_local_entry)
                tt_local_entry_free_ref(tt_local_entry);

        return orig_node;
}

/* Calculates the checksum of the global table entries announced by a given
 * orig_node
 */
static uint16_t tt_global_crc(struct bat_priv *bat_priv,
                              struct orig_node *orig_node)
{
        uint16_t total = 0, total_one;
        struct hashtable_t *hash = bat_priv->tt_global_hash;
        struct tt_common_entry *tt_common_entry;
        struct tt_global_entry *tt_global_entry;
        struct hlist_node *node;
        struct hlist_head *head;
        uint32_t i;
        int j;

        for (i = 0; i < hash->size; i++) {
                head = &hash->table[i];

                rcu_read_lock();
                hlist_for_each_entry_rcu(tt_common_entry, node,
                                         head, hash_entry) {
                        tt_global_entry = container_of(tt_common_entry,
                                                       struct tt_global_entry,
                                                       common);
                        /* Roaming clients are in the global table for
                         * consistency only. They don't have to be
                         * taken into account while computing the
                         * global crc
                         */
                        if (tt_global_entry->common.flags & TT_CLIENT_ROAM)
                                continue;

                        /* find out if this global entry is announced by this
                         * originator
                         */
                        if (!tt_global_entry_has_orig(tt_global_entry,
                                                      orig_node))
                                continue;

                        total_one = 0;
                        for (j = 0; j < ETH_ALEN; j++)
                                total_one = crc16_byte(total_one,
                                        tt_global_entry->common.addr[j]);
                        total ^= total_one;
                }
                rcu_read_unlock();
        }

        return total;
}

/* Calculates the checksum of the local table */
static uint16_t batadv_tt_local_crc(struct bat_priv *bat_priv)
{
        uint16_t total = 0, total_one;
        struct hashtable_t *hash = bat_priv->tt_local_hash;
        struct tt_common_entry *tt_common_entry;
        struct hlist_node *node;
        struct hlist_head *head;
        uint32_t i;
        int j;

        for (i = 0; i < hash->size; i++) {
                head = &hash->table[i];

                rcu_read_lock();
                hlist_for_each_entry_rcu(tt_common_entry, node,
                                         head, hash_entry) {
                        /* not yet committed clients do not have to be taken
                         * into account while computing the CRC
                         */
                        if (tt_common_entry->flags & TT_CLIENT_NEW)
                                continue;
                        total_one = 0;
                        for (j = 0; j < ETH_ALEN; j++)
                                total_one = crc16_byte(total_one,
                                                   tt_common_entry->addr[j]);
                        total ^= total_one;
                }
                rcu_read_unlock();
        }

        return total;
}

static void tt_req_list_free(struct bat_priv *bat_priv)
{
        struct tt_req_node *node, *safe;

        spin_lock_bh(&bat_priv->tt_req_list_lock);

        list_for_each_entry_safe(node, safe, &bat_priv->tt_req_list, list) {
                list_del(&node->list);
                kfree(node);
        }

        spin_unlock_bh(&bat_priv->tt_req_list_lock);
}

static void tt_save_orig_buffer(struct bat_priv *bat_priv,
                                struct orig_node *orig_node,
                                const unsigned char *tt_buff,
                                uint8_t tt_num_changes)
{
        uint16_t tt_buff_len = batadv_tt_len(tt_num_changes);

        /* Replace the old buffer only if I received something in the
         * last OGM (the OGM could carry no changes)
         */
        spin_lock_bh(&orig_node->tt_buff_lock);
        if (tt_buff_len > 0) {
                kfree(orig_node->tt_buff);
                orig_node->tt_buff_len = 0;
                orig_node->tt_buff = kmalloc(tt_buff_len, GFP_ATOMIC);
                if (orig_node->tt_buff) {
                        memcpy(orig_node->tt_buff, tt_buff, tt_buff_len);
                        orig_node->tt_buff_len = tt_buff_len;
                }
        }
        spin_unlock_bh(&orig_node->tt_buff_lock);
}

static void tt_req_purge(struct bat_priv *bat_priv)
{
        struct tt_req_node *node, *safe;

        spin_lock_bh(&bat_priv->tt_req_list_lock);
        list_for_each_entry_safe(node, safe, &bat_priv->tt_req_list, list) {
                if (has_timed_out(node->issued_at, TT_REQUEST_TIMEOUT)) {
                        list_del(&node->list);
                        kfree(node);
                }
        }
        spin_unlock_bh(&bat_priv->tt_req_list_lock);
}

/* returns the pointer to the new tt_req_node struct if no request
 * has already been issued for this orig_node, NULL otherwise
 */
static struct tt_req_node *new_tt_req_node(struct bat_priv *bat_priv,
                                           struct orig_node *orig_node)
{
        struct tt_req_node *tt_req_node_tmp, *tt_req_node = NULL;

        spin_lock_bh(&bat_priv->tt_req_list_lock);
        list_for_each_entry(tt_req_node_tmp, &bat_priv->tt_req_list, list) {
                if (compare_eth(tt_req_node_tmp, orig_node) &&
                    !has_timed_out(tt_req_node_tmp->issued_at,
                                   TT_REQUEST_TIMEOUT))
                        goto unlock;
        }

        tt_req_node = kmalloc(sizeof(*tt_req_node), GFP_ATOMIC);
        if (!tt_req_node)
                goto unlock;

        memcpy(tt_req_node->addr, orig_node->orig, ETH_ALEN);
        tt_req_node->issued_at = jiffies;

        list_add(&tt_req_node->list, &bat_priv->tt_req_list);
unlock:
        spin_unlock_bh(&bat_priv->tt_req_list_lock);
        return tt_req_node;
}

/* data_ptr is useless here, but has to be kept to respect the prototype */
static int tt_local_valid_entry(const void *entry_ptr, const void *data_ptr)
{
        const struct tt_common_entry *tt_common_entry = entry_ptr;

        if (tt_common_entry->flags & TT_CLIENT_NEW)
                return 0;
        return 1;
}

static int tt_global_valid_entry(const void *entry_ptr, const void *data_ptr)
{
        const struct tt_common_entry *tt_common_entry = entry_ptr;
        const struct tt_global_entry *tt_global_entry;
        const struct orig_node *orig_node = data_ptr;

        if (tt_common_entry->flags & TT_CLIENT_ROAM)
                return 0;

        tt_global_entry = container_of(tt_common_entry, struct tt_global_entry,
                                       common);

        return tt_global_entry_has_orig(tt_global_entry, orig_node);
}

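/* allocate a tt query response skb and fill it with the entries of the given
 * hash table for which valid_cb returns true (truncated to fit the mtu)
 */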
static struct sk_buff *tt_response_fill_table(uint16_t tt_len, uint8_t ttvn,
                                              struct hashtable_t *hash,
                                              struct hard_iface *primary_if,
                                              int (*valid_cb)(const void *,
                                                              const void *),
                                              void *cb_data)
{
        struct tt_common_entry *tt_common_entry;
        struct tt_query_packet *tt_response;
        struct tt_change *tt_change;
        struct hlist_node *node;
        struct hlist_head *head;
        struct sk_buff *skb = NULL;
        uint16_t tt_tot, tt_count;
        ssize_t tt_query_size = sizeof(struct tt_query_packet);
        uint32_t i;

        if (tt_query_size + tt_len > primary_if->soft_iface->mtu) {
                tt_len = primary_if->soft_iface->mtu - tt_query_size;
                tt_len -= tt_len % sizeof(struct tt_change);
        }
        tt_tot = tt_len / sizeof(struct tt_change);

        skb = dev_alloc_skb(tt_query_size + tt_len + ETH_HLEN);
        if (!skb)
                goto out;

        skb_reserve(skb, ETH_HLEN);
        tt_response = (struct tt_query_packet *)skb_put(skb,
                                                        tt_query_size + tt_len);
        tt_response->ttvn = ttvn;

        tt_change = (struct tt_change *)(skb->data + tt_query_size);
        tt_count = 0;

        rcu_read_lock();
        for (i = 0; i < hash->size; i++) {
                head = &hash->table[i];

                hlist_for_each_entry_rcu(tt_common_entry, node,
                                         head, hash_entry) {
                        if (tt_count == tt_tot)
                                break;

                        if ((valid_cb) && (!valid_cb(tt_common_entry, cb_data)))
                                continue;

                        memcpy(tt_change->addr, tt_common_entry->addr,
                               ETH_ALEN);
                        tt_change->flags = NO_FLAGS;

                        tt_count++;
                        tt_change++;
                }
        }
        rcu_read_unlock();

        /* store in the message the number of entries we have successfully
         * copied
         */
        tt_response->tt_data = htons(tt_count);

out:
        return skb;
}

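/* send a TT_REQUEST for the given ttvn/crc to dst_orig_node via its router;
 * returns 0 on success
 */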
1362 static int send_tt_request(struct bat_priv *bat_priv,
1363                            struct orig_node *dst_orig_node,
1364                            uint8_t ttvn, uint16_t tt_crc, bool full_table)
1365 {
1366         struct sk_buff *skb = NULL;
1367         struct tt_query_packet *tt_request;
1368         struct neigh_node *neigh_node = NULL;
1369         struct hard_iface *primary_if;
1370         struct tt_req_node *tt_req_node = NULL;
1371         int ret = 1;
1372
1373         primary_if = primary_if_get_selected(bat_priv);
1374         if (!primary_if)
1375                 goto out;
1376
1377         /* The new tt_req will be issued only if I'm not waiting for a
1378          * reply from the same orig_node yet
1379          */
1380         tt_req_node = new_tt_req_node(bat_priv, dst_orig_node);
1381         if (!tt_req_node)
1382                 goto out;
1383
1384         skb = dev_alloc_skb(sizeof(struct tt_query_packet) + ETH_HLEN);
1385         if (!skb)
1386                 goto out;
1387
1388         skb_reserve(skb, ETH_HLEN);
1389
1390         tt_request = (struct tt_query_packet *)skb_put(skb,
1391                                 sizeof(struct tt_query_packet));
1392
1393         tt_request->header.packet_type = BAT_TT_QUERY;
1394         tt_request->header.version = COMPAT_VERSION;
1395         memcpy(tt_request->src, primary_if->net_dev->dev_addr, ETH_ALEN);
1396         memcpy(tt_request->dst, dst_orig_node->orig, ETH_ALEN);
1397         tt_request->header.ttl = TTL;
1398         tt_request->ttvn = ttvn;
1399         tt_request->tt_data = htons(tt_crc);
1400         tt_request->flags = TT_REQUEST;
1401
1402         if (full_table)
1403                 tt_request->flags |= TT_FULL_TABLE;
1404
1405         neigh_node = batadv_orig_node_get_router(dst_orig_node);
1406         if (!neigh_node)
1407                 goto out;
1408
1409         bat_dbg(DBG_TT, bat_priv,
1410                 "Sending TT_REQUEST to %pM via %pM [%c]\n",
1411                 dst_orig_node->orig, neigh_node->addr,
1412                 (full_table ? 'F' : '.'));
1413
1414         batadv_inc_counter(bat_priv, BAT_CNT_TT_REQUEST_TX);
1415
1416         batadv_send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
1417         ret = 0;
1418
1419 out:
1420         if (neigh_node)
1421                 batadv_neigh_node_free_ref(neigh_node);
1422         if (primary_if)
1423                 hardif_free_ref(primary_if);
1424         if (ret)
1425                 kfree_skb(skb);
1426         if (ret && tt_req_node) {
1427                 spin_lock_bh(&bat_priv->tt_req_list_lock);
1428                 list_del(&tt_req_node->list);
1429                 spin_unlock_bh(&bat_priv->tt_req_list_lock);
1430                 kfree(tt_req_node);
1431         }
1432         return ret;
1433 }
1434
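/* answer a TT_REQUEST on behalf of another originator by using the global
 * entries we store for it; returns true if a response has been sent
 */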
1435 static bool send_other_tt_response(struct bat_priv *bat_priv,
1436                                    struct tt_query_packet *tt_request)
1437 {
1438         struct orig_node *req_dst_orig_node = NULL, *res_dst_orig_node = NULL;
1439         struct neigh_node *neigh_node = NULL;
1440         struct hard_iface *primary_if = NULL;
1441         uint8_t orig_ttvn, req_ttvn, ttvn;
1442         bool ret = false;
1443         unsigned char *tt_buff;
1444         bool full_table;
1445         uint16_t tt_len, tt_tot;
1446         struct sk_buff *skb = NULL;
1447         struct tt_query_packet *tt_response;
1448
1449         bat_dbg(DBG_TT, bat_priv,
1450                 "Received TT_REQUEST from %pM for ttvn: %u (%pM) [%c]\n",
1451                 tt_request->src, tt_request->ttvn, tt_request->dst,
1452                 (tt_request->flags & TT_FULL_TABLE ? 'F' : '.'));
1453
1454         /* Let's get the orig node of the REAL destination */
1455         req_dst_orig_node = orig_hash_find(bat_priv, tt_request->dst);
1456         if (!req_dst_orig_node)
1457                 goto out;
1458
1459         res_dst_orig_node = orig_hash_find(bat_priv, tt_request->src);
1460         if (!res_dst_orig_node)
1461                 goto out;
1462
1463         neigh_node = batadv_orig_node_get_router(res_dst_orig_node);
1464         if (!neigh_node)
1465                 goto out;
1466
1467         primary_if = primary_if_get_selected(bat_priv);
1468         if (!primary_if)
1469                 goto out;
1470
1471         orig_ttvn = (uint8_t)atomic_read(&req_dst_orig_node->last_ttvn);
1472         req_ttvn = tt_request->ttvn;
1473
1474         /* I don't have the requested data */
1475         if (orig_ttvn != req_ttvn ||
1476             tt_request->tt_data != htons(req_dst_orig_node->tt_crc))
1477                 goto out;
1478
1479         /* If the full table has been explicitly requested */
1480         if (tt_request->flags & TT_FULL_TABLE ||
1481             !req_dst_orig_node->tt_buff)
1482                 full_table = true;
1483         else
1484                 full_table = false;
1485
1486         /* In this version, fragmentation is not implemented, so
1487          * I'll send only one packet with as many TT entries as I can
1488          */
1489         if (!full_table) {
1490                 spin_lock_bh(&req_dst_orig_node->tt_buff_lock);
1491                 tt_len = req_dst_orig_node->tt_buff_len;
1492                 tt_tot = tt_len / sizeof(struct tt_change);
1493
1494                 skb = dev_alloc_skb(sizeof(struct tt_query_packet) +
1495                                     tt_len + ETH_HLEN);
1496                 if (!skb)
1497                         goto unlock;
1498
1499                 skb_reserve(skb, ETH_HLEN);
1500                 tt_response = (struct tt_query_packet *)skb_put(skb,
1501                                 sizeof(struct tt_query_packet) + tt_len);
1502                 tt_response->ttvn = req_ttvn;
1503                 tt_response->tt_data = htons(tt_tot);
1504
1505                 tt_buff = skb->data + sizeof(struct tt_query_packet);
1506                 /* Copy the last orig_node's OGM buffer */
1507                 memcpy(tt_buff, req_dst_orig_node->tt_buff,
1508                        req_dst_orig_node->tt_buff_len);
1509
1510                 spin_unlock_bh(&req_dst_orig_node->tt_buff_lock);
1511         } else {
1512                 tt_len = (uint16_t)atomic_read(&req_dst_orig_node->tt_size) *
1513                                                 sizeof(struct tt_change);
1514                 ttvn = (uint8_t)atomic_read(&req_dst_orig_node->last_ttvn);
1515
1516                 skb = tt_response_fill_table(tt_len, ttvn,
1517                                              bat_priv->tt_global_hash,
1518                                              primary_if, tt_global_valid_entry,
1519                                              req_dst_orig_node);
1520                 if (!skb)
1521                         goto out;
1522
1523                 tt_response = (struct tt_query_packet *)skb->data;
1524         }
1525
1526         tt_response->header.packet_type = BAT_TT_QUERY;
1527         tt_response->header.version = COMPAT_VERSION;
1528         tt_response->header.ttl = TTL;
1529         memcpy(tt_response->src, req_dst_orig_node->orig, ETH_ALEN);
1530         memcpy(tt_response->dst, tt_request->src, ETH_ALEN);
1531         tt_response->flags = TT_RESPONSE;
1532
1533         if (full_table)
1534                 tt_response->flags |= TT_FULL_TABLE;
1535
1536         bat_dbg(DBG_TT, bat_priv,
1537                 "Sending TT_RESPONSE %pM via %pM for %pM (ttvn: %u)\n",
1538                 res_dst_orig_node->orig, neigh_node->addr,
1539                 req_dst_orig_node->orig, req_ttvn);
1540
1541         batadv_inc_counter(bat_priv, BAT_CNT_TT_RESPONSE_TX);
1542
1543         batadv_send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
1544         ret = true;
1545         goto out;
1546
1547 unlock:
1548         spin_unlock_bh(&req_dst_orig_node->tt_buff_lock);
1549
1550 out:
1551         if (res_dst_orig_node)
1552                 batadv_orig_node_free_ref(res_dst_orig_node);
1553         if (req_dst_orig_node)
1554                 batadv_orig_node_free_ref(req_dst_orig_node);
1555         if (neigh_node)
1556                 batadv_neigh_node_free_ref(neigh_node);
1557         if (primary_if)
1558                 hardif_free_ref(primary_if);
1559         if (!ret)
1560                 kfree_skb(skb);
1561         return ret;
1562 }
1563
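/* answer a TT_REQUEST addressed to this node with the local translation
 * table, either as a buffered diff or as a full table
 */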
1564 static bool send_my_tt_response(struct bat_priv *bat_priv,
1565                                 struct tt_query_packet *tt_request)
1566 {
1567         struct orig_node *orig_node = NULL;
1568         struct neigh_node *neigh_node = NULL;
1569         struct hard_iface *primary_if = NULL;
1570         uint8_t my_ttvn, req_ttvn, ttvn;
1571         bool ret = false;
1572         unsigned char *tt_buff;
1573         bool full_table;
1574         uint16_t tt_len, tt_tot;
1575         struct sk_buff *skb = NULL;
1576         struct tt_query_packet *tt_response;
1577
1578         bat_dbg(DBG_TT, bat_priv,
1579                 "Received TT_REQUEST from %pM for ttvn: %u (me) [%c]\n",
1580                 tt_request->src, tt_request->ttvn,
1581                 (tt_request->flags & TT_FULL_TABLE ? 'F' : '.'));
1582
1583
1584         my_ttvn = (uint8_t)atomic_read(&bat_priv->ttvn);
1585         req_ttvn = tt_request->ttvn;
1586
1587         orig_node = orig_hash_find(bat_priv, tt_request->src);
1588         if (!orig_node)
1589                 goto out;
1590
1591         neigh_node = batadv_orig_node_get_router(orig_node);
1592         if (!neigh_node)
1593                 goto out;
1594
1595         primary_if = primary_if_get_selected(bat_priv);
1596         if (!primary_if)
1597                 goto out;
1598
1599         /* If the full table has been explicitly requested or the gap
1600          * is too big, send the whole local translation table
1601          */
1602         if (tt_request->flags & TT_FULL_TABLE || my_ttvn != req_ttvn ||
1603             !bat_priv->tt_buff)
1604                 full_table = true;
1605         else
1606                 full_table = false;
1607
1608         /* In this version, fragmentation is not implemented, so
1609          * I'll send only one packet with as many TT entries as I can
1610          */
1611         if (!full_table) {
1612                 spin_lock_bh(&bat_priv->tt_buff_lock);
1613                 tt_len = bat_priv->tt_buff_len;
1614                 tt_tot = tt_len / sizeof(struct tt_change);
1615
1616                 skb = dev_alloc_skb(sizeof(struct tt_query_packet) +
1617                                     tt_len + ETH_HLEN);
1618                 if (!skb)
1619                         goto unlock;
1620
1621                 skb_reserve(skb, ETH_HLEN);
1622                 tt_response = (struct tt_query_packet *)skb_put(skb,
1623                                 sizeof(struct tt_query_packet) + tt_len);
1624                 tt_response->ttvn = req_ttvn;
1625                 tt_response->tt_data = htons(tt_tot);
1626
1627                 tt_buff = skb->data + sizeof(struct tt_query_packet);
1628                 memcpy(tt_buff, bat_priv->tt_buff,
1629                        bat_priv->tt_buff_len);
1630                 spin_unlock_bh(&bat_priv->tt_buff_lock);
1631         } else {
1632                 tt_len = (uint16_t)atomic_read(&bat_priv->num_local_tt) *
1633                                                 sizeof(struct tt_change);
1634                 ttvn = (uint8_t)atomic_read(&bat_priv->ttvn);
1635
1636                 skb = tt_response_fill_table(tt_len, ttvn,
1637                                              bat_priv->tt_local_hash,
1638                                              primary_if, tt_local_valid_entry,
1639                                              NULL);
1640                 if (!skb)
1641                         goto out;
1642
1643                 tt_response = (struct tt_query_packet *)skb->data;
1644         }
1645
1646         tt_response->header.packet_type = BAT_TT_QUERY;
1647         tt_response->header.version = COMPAT_VERSION;
1648         tt_response->header.ttl = TTL;
1649         memcpy(tt_response->src, primary_if->net_dev->dev_addr, ETH_ALEN);
1650         memcpy(tt_response->dst, tt_request->src, ETH_ALEN);
1651         tt_response->flags = TT_RESPONSE;
1652
1653         if (full_table)
1654                 tt_response->flags |= TT_FULL_TABLE;
1655
1656         bat_dbg(DBG_TT, bat_priv,
1657                 "Sending TT_RESPONSE to %pM via %pM [%c]\n",
1658                 orig_node->orig, neigh_node->addr,
1659                 (tt_response->flags & TT_FULL_TABLE ? 'F' : '.'));
1660
1661         batadv_inc_counter(bat_priv, BAT_CNT_TT_RESPONSE_TX);
1662
1663         batadv_send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
1664         ret = true;
1665         goto out;
1666
1667 unlock:
1668         spin_unlock_bh(&bat_priv->tt_buff_lock);
1669 out:
1670         if (orig_node)
1671                 batadv_orig_node_free_ref(orig_node);
1672         if (neigh_node)
1673                 batadv_neigh_node_free_ref(neigh_node);
1674         if (primary_if)
1675                 hardif_free_ref(primary_if);
1676         if (!ret)
1677                 kfree_skb(skb);
1678         /* This packet was for me, so it doesn't need to be re-routed */
1679         return true;
1680 }
1681
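/* dispatch an incoming TT_REQUEST to send_my_tt_response() or
 * send_other_tt_response() depending on the requested destination
 */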
1682 bool batadv_send_tt_response(struct bat_priv *bat_priv,
1683                              struct tt_query_packet *tt_request)
1684 {
1685         if (batadv_is_my_mac(tt_request->dst)) {
1686                 /* don't answer backbone gws! */
1687                 if (batadv_bla_is_backbone_gw_orig(bat_priv, tt_request->src))
1688                         return true;
1689
1690                 return send_my_tt_response(bat_priv, tt_request);
1691         } else {
1692                 return send_other_tt_response(bat_priv, tt_request);
1693         }
1694 }
1695
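/* apply a list of tt_change entries (additions and deletions) announced by
 * orig_node to the global translation table
 */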
1696 static void _tt_update_changes(struct bat_priv *bat_priv,
1697                                struct orig_node *orig_node,
1698                                struct tt_change *tt_change,
1699                                uint16_t tt_num_changes, uint8_t ttvn)
1700 {
1701         int i;
1702         int is_wifi;
1703
1704         for (i = 0; i < tt_num_changes; i++) {
1705                 if ((tt_change + i)->flags & TT_CLIENT_DEL) {
1706                         tt_global_del(bat_priv, orig_node,
1707                                       (tt_change + i)->addr,
1708                                       "tt removed by changes",
1709                                       (tt_change + i)->flags & TT_CLIENT_ROAM);
1710                 } else {
1711                         is_wifi = (tt_change + i)->flags & TT_CLIENT_WIFI;
1712                         if (!batadv_tt_global_add(bat_priv, orig_node,
1713                                                   (tt_change + i)->addr, ttvn,
1714                                                   false, is_wifi))
1715                                 /* If a problem occurs while storing a
1716                                  * global_entry, we stop the update
1717                                  * procedure without committing the ttvn
1718                                  * change. This avoids sending corrupted
1719                                  * data when answering a later tt_request
1720                                  */
1721                                 return;
1722                 }
1723         }
1724         orig_node->tt_initialised = true;
1725 }
1726
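/* replace all global entries of the sender of a full TT_RESPONSE with the
 * table carried in the response
 */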
1727 static void tt_fill_gtable(struct bat_priv *bat_priv,
1728                            struct tt_query_packet *tt_response)
1729 {
1730         struct orig_node *orig_node = NULL;
1731
1732         orig_node = orig_hash_find(bat_priv, tt_response->src);
1733         if (!orig_node)
1734                 goto out;
1735
1736         /* Purge the old table first. */
1737         batadv_tt_global_del_orig(bat_priv, orig_node, "Received full table");
1738
1739         _tt_update_changes(bat_priv, orig_node,
1740                            (struct tt_change *)(tt_response + 1),
1741                            ntohs(tt_response->tt_data), tt_response->ttvn);
1742
1743         spin_lock_bh(&orig_node->tt_buff_lock);
1744         kfree(orig_node->tt_buff);
1745         orig_node->tt_buff_len = 0;
1746         orig_node->tt_buff = NULL;
1747         spin_unlock_bh(&orig_node->tt_buff_lock);
1748
1749         atomic_set(&orig_node->last_ttvn, tt_response->ttvn);
1750
1751 out:
1752         if (orig_node)
1753                 batadv_orig_node_free_ref(orig_node);
1754 }
1755
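/* apply the received changes, buffer them to answer future TT_REQUESTs on
 * behalf of orig_node and update its last known ttvn
 */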
1756 static void tt_update_changes(struct bat_priv *bat_priv,
1757                               struct orig_node *orig_node,
1758                               uint16_t tt_num_changes, uint8_t ttvn,
1759                               struct tt_change *tt_change)
1760 {
1761         _tt_update_changes(bat_priv, orig_node, tt_change, tt_num_changes,
1762                            ttvn);
1763
1764         tt_save_orig_buffer(bat_priv, orig_node, (unsigned char *)tt_change,
1765                             tt_num_changes);
1766         atomic_set(&orig_node->last_ttvn, ttvn);
1767 }
1768
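/* returns true if addr is announced as a local client and is not pending
 * deletion
 */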
1769 bool batadv_is_my_client(struct bat_priv *bat_priv, const uint8_t *addr)
1770 {
1771         struct tt_local_entry *tt_local_entry = NULL;
1772         bool ret = false;
1773
1774         tt_local_entry = tt_local_hash_find(bat_priv, addr);
1775         if (!tt_local_entry)
1776                 goto out;
1777         /* Check if the client has been logically deleted (but is kept for
1778          * consistency purposes)
1779          */
1780         if (tt_local_entry->common.flags & TT_CLIENT_PENDING)
1781                 goto out;
1782         ret = true;
1783 out:
1784         if (tt_local_entry)
1785                 tt_local_entry_free_ref(tt_local_entry);
1786         return ret;
1787 }
1788
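/* process an incoming TT_RESPONSE: update the global table for the sender,
 * remove the matching pending request and recompute the CRC
 */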
1789 void batadv_handle_tt_response(struct bat_priv *bat_priv,
1790                                struct tt_query_packet *tt_response)
1791 {
1792         struct tt_req_node *node, *safe;
1793         struct orig_node *orig_node = NULL;
1794
1795         bat_dbg(DBG_TT, bat_priv,
1796                 "Received TT_RESPONSE from %pM for ttvn %d t_size: %d [%c]\n",
1797                 tt_response->src, tt_response->ttvn,
1798                 ntohs(tt_response->tt_data),
1799                 (tt_response->flags & TT_FULL_TABLE ? 'F' : '.'));
1800
1801         /* we should never have asked a backbone gw */
1802         if (batadv_bla_is_backbone_gw_orig(bat_priv, tt_response->src))
1803                 goto out;
1804
1805         orig_node = orig_hash_find(bat_priv, tt_response->src);
1806         if (!orig_node)
1807                 goto out;
1808
1809         if (tt_response->flags & TT_FULL_TABLE)
1810                 tt_fill_gtable(bat_priv, tt_response);
1811         else
1812                 tt_update_changes(bat_priv, orig_node,
1813                                   ntohs(tt_response->tt_data),
1814                                   tt_response->ttvn,
1815                                   (struct tt_change *)(tt_response + 1));
1816
1817         /* Delete the tt_req_node from the pending tt_requests list */
1818         spin_lock_bh(&bat_priv->tt_req_list_lock);
1819         list_for_each_entry_safe(node, safe, &bat_priv->tt_req_list, list) {
1820                 if (!compare_eth(node->addr, tt_response->src))
1821                         continue;
1822                 list_del(&node->list);
1823                 kfree(node);
1824         }
1825         spin_unlock_bh(&bat_priv->tt_req_list_lock);
1826
1827         /* Recalculate the CRC for this orig_node and store it */
1828         orig_node->tt_crc = tt_global_crc(bat_priv, orig_node);
1829         /* Roaming phase is over: tables are in sync again. I can
1830          * unset the flag
1831          */
1832         orig_node->tt_poss_change = false;
1833 out:
1834         if (orig_node)
1835                 batadv_orig_node_free_ref(orig_node);
1836 }
1837
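/* initialise the local and global translation tables and start the periodic
 * purge timer
 */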
1838 int batadv_tt_init(struct bat_priv *bat_priv)
1839 {
1840         int ret;
1841
1842         ret = tt_local_init(bat_priv);
1843         if (ret < 0)
1844                 return ret;
1845
1846         ret = tt_global_init(bat_priv);
1847         if (ret < 0)
1848                 return ret;
1849
1850         tt_start_timer(bat_priv);
1851
1852         return 1;
1853 }
1854
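/* free all entries in the roaming client list */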
1855 static void tt_roam_list_free(struct bat_priv *bat_priv)
1856 {
1857         struct tt_roam_node *node, *safe;
1858
1859         spin_lock_bh(&bat_priv->tt_roam_list_lock);
1860
1861         list_for_each_entry_safe(node, safe, &bat_priv->tt_roam_list, list) {
1862                 list_del(&node->list);
1863                 kfree(node);
1864         }
1865
1866         spin_unlock_bh(&bat_priv->tt_roam_list_lock);
1867 }
1868
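/* remove expired entries from the roaming client list */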
1869 static void tt_roam_purge(struct bat_priv *bat_priv)
1870 {
1871         struct tt_roam_node *node, *safe;
1872
1873         spin_lock_bh(&bat_priv->tt_roam_list_lock);
1874         list_for_each_entry_safe(node, safe, &bat_priv->tt_roam_list, list) {
1875                 if (!has_timed_out(node->first_time, ROAMING_MAX_TIME))
1876                         continue;
1877
1878                 list_del(&node->list);
1879                 kfree(node);
1880         }
1881         spin_unlock_bh(&bat_priv->tt_roam_list_lock);
1882 }
1883
1884 /* This function checks whether the client has already reached the
1885  * maximum number of possible roaming phases. If so, the ROAMING_ADV
1886  * will not be sent.
1887  *
1888  * returns true if the ROAMING_ADV can be sent, false otherwise
1889  */
1890 static bool tt_check_roam_count(struct bat_priv *bat_priv,
1891                                 uint8_t *client)
1892 {
1893         struct tt_roam_node *tt_roam_node;
1894         bool ret = false;
1895
1896         spin_lock_bh(&bat_priv->tt_roam_list_lock);
1897         /* The first roaming phase adds the client to the list; afterwards
1898          * its counter limits how many more ROAMING_ADVs may be sent
1899          */
1900         list_for_each_entry(tt_roam_node, &bat_priv->tt_roam_list, list) {
1901                 if (!compare_eth(tt_roam_node->addr, client))
1902                         continue;
1903
1904                 if (has_timed_out(tt_roam_node->first_time, ROAMING_MAX_TIME))
1905                         continue;
1906
1907                 if (!atomic_dec_not_zero(&tt_roam_node->counter))
1908                         /* Sorry, you roamed too many times! */
1909                         goto unlock;
1910                 ret = true;
1911                 break;
1912         }
1913
1914         if (!ret) {
1915                 tt_roam_node = kmalloc(sizeof(*tt_roam_node), GFP_ATOMIC);
1916                 if (!tt_roam_node)
1917                         goto unlock;
1918
1919                 tt_roam_node->first_time = jiffies;
1920                 atomic_set(&tt_roam_node->counter, ROAMING_MAX_COUNT - 1);
1921                 memcpy(tt_roam_node->addr, client, ETH_ALEN);
1922
1923                 list_add(&tt_roam_node->list, &bat_priv->tt_roam_list);
1924                 ret = true;
1925         }
1926
1927 unlock:
1928         spin_unlock_bh(&bat_priv->tt_roam_list_lock);
1929         return ret;
1930 }
1931
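/* send a ROAMING_ADV to orig_node to announce that client has roamed to us,
 * unless the client already exceeded its roaming counter
 */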
1932 static void send_roam_adv(struct bat_priv *bat_priv, uint8_t *client,
1933                           struct orig_node *orig_node)
1934 {
1935         struct neigh_node *neigh_node = NULL;
1936         struct sk_buff *skb = NULL;
1937         struct roam_adv_packet *roam_adv_packet;
1938         int ret = 1;
1939         struct hard_iface *primary_if;
1940
1941         /* before going on we have to check whether the client has
1942          * already roamed to us too many times
1943          */
1944         if (!tt_check_roam_count(bat_priv, client))
1945                 goto out;
1946
1947         skb = dev_alloc_skb(sizeof(struct roam_adv_packet) + ETH_HLEN);
1948         if (!skb)
1949                 goto out;
1950
1951         skb_reserve(skb, ETH_HLEN);
1952
1953         roam_adv_packet = (struct roam_adv_packet *)skb_put(skb,
1954                                         sizeof(struct roam_adv_packet));
1955
1956         roam_adv_packet->header.packet_type = BAT_ROAM_ADV;
1957         roam_adv_packet->header.version = COMPAT_VERSION;
1958         roam_adv_packet->header.ttl = TTL;
1959         primary_if = primary_if_get_selected(bat_priv);
1960         if (!primary_if)
1961                 goto out;
1962         memcpy(roam_adv_packet->src, primary_if->net_dev->dev_addr, ETH_ALEN);
1963         hardif_free_ref(primary_if);
1964         memcpy(roam_adv_packet->dst, orig_node->orig, ETH_ALEN);
1965         memcpy(roam_adv_packet->client, client, ETH_ALEN);
1966
1967         neigh_node = batadv_orig_node_get_router(orig_node);
1968         if (!neigh_node)
1969                 goto out;
1970
1971         bat_dbg(DBG_TT, bat_priv,
1972                 "Sending ROAMING_ADV to %pM (client %pM) via %pM\n",
1973                 orig_node->orig, client, neigh_node->addr);
1974
1975         batadv_inc_counter(bat_priv, BAT_CNT_TT_ROAM_ADV_TX);
1976
1977         batadv_send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
1978         ret = 0;
1979
1980 out:
1981         if (neigh_node)
1982                 batadv_neigh_node_free_ref(neigh_node);
1983         if (ret)
1984                 kfree_skb(skb);
1985         return;
1986 }
1987
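/* periodic worker: purge stale local clients, roamed global clients, pending
 * requests and roaming entries, then re-arm the timer
 */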
1988 static void tt_purge(struct work_struct *work)
1989 {
1990         struct delayed_work *delayed_work =
1991                 container_of(work, struct delayed_work, work);
1992         struct bat_priv *bat_priv =
1993                 container_of(delayed_work, struct bat_priv, tt_work);
1994
1995         tt_local_purge(bat_priv);
1996         tt_global_roam_purge(bat_priv);
1997         tt_req_purge(bat_priv);
1998         tt_roam_purge(bat_priv);
1999
2000         tt_start_timer(bat_priv);
2001 }
2002
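/* stop the purge worker and free all translation table data */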
2003 void batadv_tt_free(struct bat_priv *bat_priv)
2004 {
2005         cancel_delayed_work_sync(&bat_priv->tt_work);
2006
2007         tt_local_table_free(bat_priv);
2008         tt_global_table_free(bat_priv);
2009         tt_req_list_free(bat_priv);
2010         tt_changes_list_free(bat_priv);
2011         tt_roam_list_free(bat_priv);
2012
2013         kfree(bat_priv->tt_buff);
2014 }
2015
2016 /* This function enables or disables the specified flags for all the entries
2017  * in the given hash table and returns the number of modified entries
2018  */
2019 static uint16_t tt_set_flags(struct hashtable_t *hash, uint16_t flags,
2020                              bool enable)
2021 {
2022         uint32_t i;
2023         uint16_t changed_num = 0;
2024         struct hlist_head *head;
2025         struct hlist_node *node;
2026         struct tt_common_entry *tt_common_entry;
2027
2028         if (!hash)
2029                 goto out;
2030
2031         for (i = 0; i < hash->size; i++) {
2032                 head = &hash->table[i];
2033
2034                 rcu_read_lock();
2035                 hlist_for_each_entry_rcu(tt_common_entry, node,
2036                                          head, hash_entry) {
2037                         if (enable) {
2038                                 if ((tt_common_entry->flags & flags) == flags)
2039                                         continue;
2040                                 tt_common_entry->flags |= flags;
2041                         } else {
2042                                 if (!(tt_common_entry->flags & flags))
2043                                         continue;
2044                                 tt_common_entry->flags &= ~flags;
2045                         }
2046                         changed_num++;
2047                 }
2048                 rcu_read_unlock();
2049         }
2050 out:
2051         return changed_num;
2052 }
2053
2054 /* Purge out all the tt local entries marked with TT_CLIENT_PENDING */
2055 static void tt_local_purge_pending_clients(struct bat_priv *bat_priv)
2056 {
2057         struct hashtable_t *hash = bat_priv->tt_local_hash;
2058         struct tt_common_entry *tt_common_entry;
2059         struct tt_local_entry *tt_local_entry;
2060         struct hlist_node *node, *node_tmp;
2061         struct hlist_head *head;
2062         spinlock_t *list_lock; /* protects write access to the hash lists */
2063         uint32_t i;
2064
2065         if (!hash)
2066                 return;
2067
2068         for (i = 0; i < hash->size; i++) {
2069                 head = &hash->table[i];
2070                 list_lock = &hash->list_locks[i];
2071
2072                 spin_lock_bh(list_lock);
2073                 hlist_for_each_entry_safe(tt_common_entry, node, node_tmp,
2074                                           head, hash_entry) {
2075                         if (!(tt_common_entry->flags & TT_CLIENT_PENDING))
2076                                 continue;
2077
2078                         bat_dbg(DBG_TT, bat_priv,
2079                                 "Deleting local tt entry (%pM): pending\n",
2080                                 tt_common_entry->addr);
2081
2082                         atomic_dec(&bat_priv->num_local_tt);
2083                         hlist_del_rcu(node);
2084                         tt_local_entry = container_of(tt_common_entry,
2085                                                       struct tt_local_entry,
2086                                                       common);
2087                         tt_local_entry_free_ref(tt_local_entry);
2088                 }
2089                 spin_unlock_bh(list_lock);
2090         }
2091
2092 }
2093
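/* commit the pending local changes: clear the TT_CLIENT_NEW flag, purge
 * clients marked as pending, recompute the local CRC and increment the ttvn;
 * returns -ENOENT if there was nothing to commit
 */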
2094 static int tt_commit_changes(struct bat_priv *bat_priv,
2095                              unsigned char **packet_buff, int *packet_buff_len,
2096                              int packet_min_len)
2097 {
2098         uint16_t changed_num = 0;
2099
2100         if (atomic_read(&bat_priv->tt_local_changes) < 1)
2101                 return -ENOENT;
2102
2103         changed_num = tt_set_flags(bat_priv->tt_local_hash,
2104                                    TT_CLIENT_NEW, false);
2105
2106         /* all reset entries have to be counted as local entries */
2107         atomic_add(changed_num, &bat_priv->num_local_tt);
2108         tt_local_purge_pending_clients(bat_priv);
2109         bat_priv->tt_crc = batadv_tt_local_crc(bat_priv);
2110
2111         /* Increment the TTVN only once per OGM interval */
2112         atomic_inc(&bat_priv->ttvn);
2113         bat_dbg(DBG_TT, bat_priv, "Local changes committed, updating to ttvn %u\n",
2114                 (uint8_t)atomic_read(&bat_priv->ttvn));
2115         bat_priv->tt_poss_change = false;
2116
2117         /* reset the sending counter */
2118         atomic_set(&bat_priv->tt_ogm_append_cnt, TT_OGM_APPEND_MAX);
2119
2120         return tt_changes_fill_buff(bat_priv, packet_buff,
2121                                     packet_buff_len, packet_min_len);
2122 }
2123
2124 /* when calling this function (hard_iface == primary_if) has to be true */
2125 int batadv_tt_append_diff(struct bat_priv *bat_priv,
2126                           unsigned char **packet_buff, int *packet_buff_len,
2127                           int packet_min_len)
2128 {
2129         int tt_num_changes;
2130
2131         /* if at least one change happened */
2132         tt_num_changes = tt_commit_changes(bat_priv, packet_buff,
2133                                            packet_buff_len, packet_min_len);
2134
2135         /* if the changes have been sent often enough */
2136         if ((tt_num_changes < 0) &&
2137             (!atomic_dec_not_zero(&bat_priv->tt_ogm_append_cnt))) {
2138                 tt_realloc_packet_buff(packet_buff, packet_buff_len,
2139                                        packet_min_len, packet_min_len);
2140                 tt_num_changes = 0;
2141         }
2142
2143         return tt_num_changes;
2144 }
2145
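/* returns true if AP isolation is enabled and the client pair src/dst has to
 * be kept isolated from each other
 */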
2146 bool batadv_is_ap_isolated(struct bat_priv *bat_priv, uint8_t *src,
2147                            uint8_t *dst)
2148 {
2149         struct tt_local_entry *tt_local_entry = NULL;
2150         struct tt_global_entry *tt_global_entry = NULL;
2151         bool ret = false;
2152
2153         if (!atomic_read(&bat_priv->ap_isolation))
2154                 goto out;
2155
2156         tt_local_entry = tt_local_hash_find(bat_priv, dst);
2157         if (!tt_local_entry)
2158                 goto out;
2159
2160         tt_global_entry = tt_global_hash_find(bat_priv, src);
2161         if (!tt_global_entry)
2162                 goto out;
2163
2164         if (!_is_ap_isolated(tt_local_entry, tt_global_entry))
2165                 goto out;
2166
2167         ret = true;
2168
2169 out:
2170         if (tt_global_entry)
2171                 tt_global_entry_free_ref(tt_global_entry);
2172         if (tt_local_entry)
2173                 tt_local_entry_free_ref(tt_local_entry);
2174         return ret;
2175 }
2176
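/* update the global table for orig_node with the TT information carried in
 * an OGM; a TT_REQUEST is sent instead if the attached changes are missing
 * or the resulting table turns out to be inconsistent
 */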
2177 void batadv_tt_update_orig(struct bat_priv *bat_priv,
2178                            struct orig_node *orig_node,
2179                            const unsigned char *tt_buff, uint8_t tt_num_changes,
2180                            uint8_t ttvn, uint16_t tt_crc)
2181 {
2182         uint8_t orig_ttvn = (uint8_t)atomic_read(&orig_node->last_ttvn);
2183         bool full_table = true;
2184
2185         /* don't care about a backbone gateway's updates. */
2186         if (batadv_bla_is_backbone_gw_orig(bat_priv, orig_node->orig))
2187                 return;
2188
2189         /* orig table not initialised AND first diff is in the OGM OR the ttvn
2190          * increased by one -> we can apply the attached changes
2191          */
2192         if ((!orig_node->tt_initialised && ttvn == 1) ||
2193             ttvn - orig_ttvn == 1) {
2194                 /* the OGM could not contain the changes due to their size or
2195                  * because they have already been sent TT_OGM_APPEND_MAX times.
2196                  * In this case send a tt request
2197                  */
2198                 if (!tt_num_changes) {
2199                         full_table = false;
2200                         goto request_table;
2201                 }
2202
2203                 tt_update_changes(bat_priv, orig_node, tt_num_changes, ttvn,
2204                                   (struct tt_change *)tt_buff);
2205
2206                 /* Even if we received the precomputed crc with the OGM, we
2207                  * prefer to recompute it to spot any possible inconsistency
2208                  * in the global table
2209                  */
2210                 orig_node->tt_crc = tt_global_crc(bat_priv, orig_node);
2211
2212                 /* The ttvn alone is not enough to guarantee consistency
2213                  * because a single value could represent different states
2214                  * (due to the wrap around). Thus a node has to check whether
2215                  * the resulting table (after applying the changes) is still
2216                  * consistent or not. E.g. a node could disconnect while its
2217                  * ttvn is X and reconnect on ttvn = X + TTVN_MAX: in this case
2218                  * checking the CRC value is mandatory to detect the
2219                  * inconsistency
2220                  */
2221                 if (orig_node->tt_crc != tt_crc)
2222                         goto request_table;
2223
2224                 /* Roaming phase is over: tables are in sync again. I can
2225                  * unset the flag
2226                  */
2227                 orig_node->tt_poss_change = false;
2228         } else {
2229                 /* if we missed more than one change or our tables are not
2230                  * in sync anymore -> request fresh tt data
2231                  */
2232                 if (!orig_node->tt_initialised || ttvn != orig_ttvn ||
2233                     orig_node->tt_crc != tt_crc) {
2234 request_table:
2235                         bat_dbg(DBG_TT, bat_priv,
2236                                 "TT inconsistency for %pM. Need to retrieve the correct information (ttvn: %u last_ttvn: %u crc: %u last_crc: %u num_changes: %u)\n",
2237                                 orig_node->orig, ttvn, orig_ttvn, tt_crc,
2238                                 orig_node->tt_crc, tt_num_changes);
2239                         send_tt_request(bat_priv, orig_node, ttvn, tt_crc,
2240                                         full_table);
2241                         return;
2242                 }
2243         }
2244 }
2245
2246 /* returns true if we know that the client has moved from its old
2247  * originator to another one. The entry is still kept for consistency
2248  * purposes
2249  */
2250 bool batadv_tt_global_client_is_roaming(struct bat_priv *bat_priv,
2251                                         uint8_t *addr)
2252 {
2253         struct tt_global_entry *tt_global_entry;
2254         bool ret = false;
2255
2256         tt_global_entry = tt_global_hash_find(bat_priv, addr);
2257         if (!tt_global_entry)
2258                 goto out;
2259
2260         ret = tt_global_entry->common.flags & TT_CLIENT_ROAM;
2261         tt_global_entry_free_ref(tt_global_entry);
2262 out:
2263         return ret;
2264 }