2 * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
4 * Marek Lindner, Simon Wunderlich
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of version 2 of the GNU General Public
8 * License as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License for more details.
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
26 #include "soft-interface.h"
27 #include "hard-interface.h"
28 #include "icmp_socket.h"
29 #include "translation-table.h"
30 #include "originator.h"
31 #include "ring_buffer.h"
33 #include "aggregation.h"
34 #include "gateway_common.h"
35 #include "gateway_client.h"
/* Slide the own-broadcast sequence-number window one step for every
 * originator known via @batman_if and refresh the cached rebroadcast
 * count (bcast_own_sum). Walks the whole originator hash under
 * orig_hash_lock.
 * NOTE(review): several lines are elided in this excerpt (opening brace,
 * declarations of i/word_index/word, loop closers) — excerpt is incomplete. */
38 void slide_own_bcast_window(struct batman_if *batman_if)
40 struct bat_priv *bat_priv = netdev_priv(batman_if->soft_iface);
41 struct hashtable_t *hash = bat_priv->orig_hash;
42 struct hlist_node *walk;
43 struct hlist_head *head;
44 struct element_t *bucket;
45 struct orig_node *orig_node;
50 spin_lock_bh(&bat_priv->orig_hash_lock);
52 for (i = 0; i < hash->size; i++) {
53 head = &hash->table[i];
56 hlist_for_each_entry_rcu(bucket, walk, head, hlist) {
57 orig_node = bucket->data;
/* per-interface slice of the bcast_own bitfield */
58 word_index = batman_if->if_num * NUM_WORDS;
59 word = &(orig_node->bcast_own[word_index]);
/* shift window by one seqno; then recount set bits */
61 bit_get_packet(bat_priv, word, 1, 0);
62 orig_node->bcast_own_sum[batman_if->if_num] =
63 bit_packet_count(word);
68 spin_unlock_bh(&bat_priv->orig_hash_lock);
/* Refresh the global HNA (host/network announcement) entries learned from
 * @orig_node: if the announced buffer changed (different length, or same
 * length but different content), drop the old entries and add the new ones.
 * NOTE(review): braces/return elided from this excerpt. */
71 static void update_HNA(struct bat_priv *bat_priv, struct orig_node *orig_node,
72 unsigned char *hna_buff, int hna_buff_len)
/* change detected when lengths differ, or both non-empty and bytes differ */
74 if ((hna_buff_len != orig_node->hna_buff_len) ||
75 ((hna_buff_len > 0) &&
76 (orig_node->hna_buff_len > 0) &&
77 (memcmp(orig_node->hna_buff, hna_buff, hna_buff_len) != 0))) {
79 if (orig_node->hna_buff_len > 0)
80 hna_global_del_orig(bat_priv, orig_node,
81 "originator changed hna");
83 if ((hna_buff_len > 0) && (hna_buff))
84 hna_global_add_orig(bat_priv, orig_node,
85 hna_buff, hna_buff_len);
/* Switch the next hop (router) of @orig_node to @neigh_node.
 * Three cases: route deleted (had a router, new one is NULL), route added
 * (no router yet), route changed (both set). Takes a kref on the new
 * router and drops the kref on the previous one.
 * NOTE(review): excerpt is incomplete (some arguments/braces elided). */
89 static void update_route(struct bat_priv *bat_priv,
90 struct orig_node *orig_node,
91 struct neigh_node *neigh_node,
92 unsigned char *hna_buff, int hna_buff_len)
94 struct neigh_node *neigh_node_tmp;
/* route deleted: originator no longer reachable */
97 if ((orig_node->router) && (!neigh_node)) {
99 bat_dbg(DBG_ROUTES, bat_priv, "Deleting route towards: %pM\n",
101 hna_global_del_orig(bat_priv, orig_node,
102 "originator timed out");
/* route added: first next hop for this originator */
105 } else if ((!orig_node->router) && (neigh_node)) {
107 bat_dbg(DBG_ROUTES, bat_priv,
108 "Adding route towards: %pM (via %pM)\n",
109 orig_node->orig, neigh_node->addr);
110 hna_global_add_orig(bat_priv, orig_node,
111 hna_buff, hna_buff_len);
/* route changed */
115 bat_dbg(DBG_ROUTES, bat_priv,
116 "Changing route towards: %pM "
117 "(now via %pM - was via %pM)\n",
118 orig_node->orig, neigh_node->addr,
119 orig_node->router->addr);
/* hold the new router before publishing it, then release the old one */
123 kref_get(&neigh_node->refcount);
124 neigh_node_tmp = orig_node->router;
125 orig_node->router = neigh_node;
127 kref_put(&neigh_node_tmp->refcount, neigh_node_free_ref);
/* Update routing table entry and HNA announcements for @orig_node:
 * change the router only when it actually differs, but always refresh
 * the HNA table (the HNA buffer may change without a route change). */
131 void update_routes(struct bat_priv *bat_priv, struct orig_node *orig_node,
132 struct neigh_node *neigh_node, unsigned char *hna_buff,
139 if (orig_node->router != neigh_node)
140 update_route(bat_priv, orig_node, neigh_node,
141 hna_buff, hna_buff_len);
142 /* may be just HNA changed */
144 update_HNA(bat_priv, orig_node, hna_buff, hna_buff_len);
/* Decide whether the link to the neighbor that sent this OGM is usable in
 * both directions. Computes tq_own (echo ratio of our own broadcasts) and
 * tq_asym_penalty, then scales batman_packet->tq by both. Returns nonzero
 * when the resulting tq reaches TQ_TOTAL_BIDRECT_LIMIT (see the final
 * comparison); the neigh_node kref taken here is dropped before returning.
 * NOTE(review): excerpt is incomplete — some branches/returns elided. */
147 static int is_bidirectional_neigh(struct orig_node *orig_node,
148 struct orig_node *orig_neigh_node,
149 struct batman_packet *batman_packet,
150 struct batman_if *if_incoming)
152 struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
153 struct neigh_node *neigh_node = NULL, *tmp_neigh_node = NULL;
154 struct hlist_node *node;
155 unsigned char total_count;
/* single-hop case: neighbor entry lives on orig_node itself */
158 if (orig_node == orig_neigh_node) {
160 hlist_for_each_entry_rcu(tmp_neigh_node, node,
161 &orig_node->neigh_list, list) {
163 if (compare_orig(tmp_neigh_node->addr,
164 orig_neigh_node->orig) &&
165 (tmp_neigh_node->if_incoming == if_incoming))
166 neigh_node = tmp_neigh_node;
/* not found: create the neighbor entry on the fly */
170 neigh_node = create_neighbor(orig_node,
172 orig_neigh_node->orig,
174 /* create_neighbor failed, return 0 */
178 kref_get(&neigh_node->refcount);
181 neigh_node->last_valid = jiffies;
183 /* find packet count of corresponding one hop neighbor */
185 hlist_for_each_entry_rcu(tmp_neigh_node, node,
186 &orig_neigh_node->neigh_list, list) {
188 if (compare_orig(tmp_neigh_node->addr,
189 orig_neigh_node->orig) &&
190 (tmp_neigh_node->if_incoming == if_incoming))
191 neigh_node = tmp_neigh_node;
195 neigh_node = create_neighbor(orig_neigh_node,
197 orig_neigh_node->orig,
199 /* create_neighbor failed, return 0 */
203 kref_get(&neigh_node->refcount);
207 orig_node->last_valid = jiffies;
209 /* pay attention to not get a value bigger than 100 % */
210 total_count = (orig_neigh_node->bcast_own_sum[if_incoming->if_num] >
211 neigh_node->real_packet_count ?
212 neigh_node->real_packet_count :
213 orig_neigh_node->bcast_own_sum[if_incoming->if_num]);
215 /* if we have too few packets (too less data) we set tq_own to zero */
216 /* if we receive too few packets it is not considered bidirectional */
217 if ((total_count < TQ_LOCAL_BIDRECT_SEND_MINIMUM) ||
218 (neigh_node->real_packet_count < TQ_LOCAL_BIDRECT_RECV_MINIMUM))
219 orig_neigh_node->tq_own = 0;
221 /* neigh_node->real_packet_count is never zero as we
222 * only purge old information when getting new
224 orig_neigh_node->tq_own = (TQ_MAX_VALUE * total_count) /
225 neigh_node->real_packet_count;
/* asymmetry penalty:
228 * 1 - ((1-x) ** 3), normalized to TQ_MAX_VALUE this does
229 * affect the nearly-symmetric links only a little, but
230 * punishes asymmetric links more. This will give a value
231 * between 0 and TQ_MAX_VALUE
 */
233 orig_neigh_node->tq_asym_penalty =
236 (TQ_LOCAL_WINDOW_SIZE - neigh_node->real_packet_count) *
237 (TQ_LOCAL_WINDOW_SIZE - neigh_node->real_packet_count) *
238 (TQ_LOCAL_WINDOW_SIZE - neigh_node->real_packet_count)) /
239 (TQ_LOCAL_WINDOW_SIZE *
240 TQ_LOCAL_WINDOW_SIZE *
241 TQ_LOCAL_WINDOW_SIZE);
/* fold local link quality into the tq we will forward */
243 batman_packet->tq = ((batman_packet->tq *
244 orig_neigh_node->tq_own *
245 orig_neigh_node->tq_asym_penalty) /
246 (TQ_MAX_VALUE * TQ_MAX_VALUE));
248 bat_dbg(DBG_BATMAN, bat_priv,
250 "orig = %-15pM neigh = %-15pM => own_bcast = %2i, "
251 "real recv = %2i, local tq: %3i, asym_penalty: %3i, "
253 orig_node->orig, orig_neigh_node->orig, total_count,
254 neigh_node->real_packet_count, orig_neigh_node->tq_own,
255 orig_neigh_node->tq_asym_penalty, batman_packet->tq);
257 /* if link has the minimum required transmission quality
258 * consider it bidirectional */
259 if (batman_packet->tq >= TQ_TOTAL_BIDRECT_LIMIT)
/* release the reference taken above */
268 kref_put(&neigh_node->refcount, neigh_node_free_ref);
/* Update the originator entry for a received OGM: find or create the
 * last-hop neighbor, refresh its tq ring buffer / timestamps, and decide
 * whether the sending neighbor should become our router towards
 * @orig_node (only if it offers a better — or equally good but more
 * symmetric — tq than the current router). Finally updates HNA and
 * gateway state.
 * NOTE(review): excerpt is incomplete — gotos/braces/labels elided. */
272 static void update_orig(struct bat_priv *bat_priv,
273 struct orig_node *orig_node,
274 struct ethhdr *ethhdr,
275 struct batman_packet *batman_packet,
276 struct batman_if *if_incoming,
277 unsigned char *hna_buff, int hna_buff_len,
280 struct neigh_node *neigh_node = NULL, *tmp_neigh_node = NULL;
281 struct hlist_node *node;
282 int tmp_hna_buff_len;
284 bat_dbg(DBG_BATMAN, bat_priv, "update_originator(): "
285 "Searching and updating originator entry of received packet\n");
/* look for the matching last-hop neighbor; all other neighbors of this
 * originator get a 0 pushed into their tq ring buffer */
288 hlist_for_each_entry_rcu(tmp_neigh_node, node,
289 &orig_node->neigh_list, list) {
290 if (compare_orig(tmp_neigh_node->addr, ethhdr->h_source) &&
291 (tmp_neigh_node->if_incoming == if_incoming)) {
292 neigh_node = tmp_neigh_node;
299 ring_buffer_set(tmp_neigh_node->tq_recv,
300 &tmp_neigh_node->tq_index, 0);
301 tmp_neigh_node->tq_avg =
302 ring_buffer_avg(tmp_neigh_node->tq_recv);
/* neighbor unknown yet: create it */
306 struct orig_node *orig_tmp;
308 orig_tmp = get_orig_node(bat_priv, ethhdr->h_source);
312 neigh_node = create_neighbor(orig_node, orig_tmp,
313 ethhdr->h_source, if_incoming);
315 kref_put(&orig_tmp->refcount, orig_node_free_ref);
319 bat_dbg(DBG_BATMAN, bat_priv,
320 "Updating existing last-hop neighbor of originator\n");
322 kref_get(&neigh_node->refcount);
325 orig_node->flags = batman_packet->flags;
326 neigh_node->last_valid = jiffies;
328 ring_buffer_set(neigh_node->tq_recv,
329 &neigh_node->tq_index,
331 neigh_node->tq_avg = ring_buffer_avg(neigh_node->tq_recv);
334 orig_node->last_ttl = batman_packet->ttl;
335 neigh_node->last_ttl = batman_packet->ttl;
/* clamp the HNA buffer length to what the packet actually announces */
338 tmp_hna_buff_len = (hna_buff_len > batman_packet->num_hna * ETH_ALEN ?
339 batman_packet->num_hna * ETH_ALEN : hna_buff_len);
341 /* if this neighbor already is our next hop there is nothing
343 if (orig_node->router == neigh_node)
346 /* if this neighbor does not offer a better TQ we won't consider it */
347 if ((orig_node->router) &&
348 (orig_node->router->tq_avg > neigh_node->tq_avg))
351 /* if the TQ is the same and the link not more symetric we
352 * won't consider it either */
353 if ((orig_node->router) &&
354 ((neigh_node->tq_avg == orig_node->router->tq_avg) &&
355 (orig_node->router->orig_node->bcast_own_sum[if_incoming->if_num]
356 >= neigh_node->orig_node->bcast_own_sum[if_incoming->if_num])))
/* neighbor wins: install it as router */
359 update_routes(bat_priv, orig_node, neigh_node,
360 hna_buff, tmp_hna_buff_len);
/* keep the current router, only refresh route/HNA state */
364 update_routes(bat_priv, orig_node, orig_node->router,
365 hna_buff, tmp_hna_buff_len);
368 if (orig_node->gw_flags != batman_packet->gw_flags)
369 gw_node_update(bat_priv, orig_node, batman_packet->gw_flags);
371 orig_node->gw_flags = batman_packet->gw_flags;
373 /* restart gateway selection if fast or late switching was enabled */
374 if ((orig_node->gw_flags) &&
375 (atomic_read(&bat_priv->gw_mode) == GW_MODE_CLIENT) &&
376 (atomic_read(&bat_priv->gw_sel_class) > 2))
377 gw_check_election(bat_priv, orig_node);
385 kref_put(&neigh_node->refcount, neigh_node_free_ref);
388 /* checks whether the host restarted and is in the protection time.
 * Returns:
390 * 0 if the packet is to be accepted
391 * 1 if the packet is to be ignored.
 */
393 static int window_protected(struct bat_priv *bat_priv,
394 int32_t seq_num_diff,
395 unsigned long *last_reset)
/* a seqno far outside the expected window looks like a restart */
397 if ((seq_num_diff <= -TQ_LOCAL_WINDOW_SIZE)
398 || (seq_num_diff >= EXPECTED_SEQNO_RANGE)) {
/* only (re)arm protection once per RESET_PROTECTION_MS */
399 if (time_after(jiffies, *last_reset +
400 msecs_to_jiffies(RESET_PROTECTION_MS))) {
402 *last_reset = jiffies;
403 bat_dbg(DBG_BATMAN, bat_priv,
404 "old packet received, start protection\n");
413 /* processes a batman packet for all interfaces, adjusts the sequence number and
414 * finds out whether it is a duplicate.
 * Returns:
416 * 1 the packet is a duplicate
417 * 0 the packet has not yet been received
418 * -1 the packet is old and has been received while the seqno window
419 * was protected. Caller should drop it.
 *
 * NOTE(review): excerpt is incomplete — some locals (seq_diff,
 * need_update) and returns are elided. */
421 static char count_real_packets(struct ethhdr *ethhdr,
422 struct batman_packet *batman_packet,
423 struct batman_if *if_incoming)
425 struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
426 struct orig_node *orig_node;
427 struct neigh_node *tmp_neigh_node;
428 struct hlist_node *node;
429 char is_duplicate = 0;
434 orig_node = get_orig_node(bat_priv, batman_packet->orig);
438 seq_diff = batman_packet->seqno - orig_node->last_real_seqno;
440 /* signalize caller that the packet is to be dropped. */
441 if (window_protected(bat_priv, seq_diff,
442 &orig_node->batman_seqno_reset))
/* update the receive bitfield of every neighbor of this originator */
446 hlist_for_each_entry_rcu(tmp_neigh_node, node,
447 &orig_node->neigh_list, list) {
449 is_duplicate |= get_bit_status(tmp_neigh_node->real_bits,
450 orig_node->last_real_seqno,
451 batman_packet->seqno);
453 if (compare_orig(tmp_neigh_node->addr, ethhdr->h_source) &&
454 (tmp_neigh_node->if_incoming == if_incoming))
459 /* if the window moved, set the update flag. */
460 need_update |= bit_get_packet(bat_priv,
461 tmp_neigh_node->real_bits,
464 tmp_neigh_node->real_packet_count =
465 bit_packet_count(tmp_neigh_node->real_bits);
470 bat_dbg(DBG_BATMAN, bat_priv,
471 "updating last_seqno: old %d, new %d\n",
472 orig_node->last_real_seqno, batman_packet->seqno);
473 orig_node->last_real_seqno = batman_packet->seqno;
476 kref_put(&orig_node->refcount, orig_node_free_ref);
/* error/early-exit path: drop the orig_node reference as well */
480 kref_put(&orig_node->refcount, orig_node_free_ref);
484 /* copy primary address for bonding */
/* If the OGM was marked by the first hop as coming from a primary
 * interface, remember the originator's address as the neighbor's
 * primary address (used by bonding candidate selection). */
485 static void mark_bonding_address(struct orig_node *orig_node,
486 struct orig_node *orig_neigh_node,
487 struct batman_packet *batman_packet)
490 if (batman_packet->flags & PRIMARIES_FIRST_HOP)
491 memcpy(orig_neigh_node->primary_addr,
492 orig_node->orig, ETH_ALEN);
497 /* mark possible bond.candidates in the neighbor list */
/* Rebuild the circular list of bonding candidates for @orig_node:
 * neighbors sharing the router's primary address, within
 * BONDING_TQ_THRESHOLD of the best tq, and not interfering (no duplicate
 * incoming interface or MAC) are linked via next_bond_candidate.
 * NOTE(review): excerpt is incomplete — the candidates counter
 * declaration/increment and some braces are elided. */
498 void update_bonding_candidates(struct orig_node *orig_node)
501 int interference_candidate;
503 struct hlist_node *node, *node2;
504 struct neigh_node *tmp_neigh_node, *tmp_neigh_node2;
505 struct neigh_node *first_candidate, *last_candidate;
507 /* update the candidates for this originator */
508 if (!orig_node->router) {
509 orig_node->bond.candidates = 0;
513 best_tq = orig_node->router->tq_avg;
515 /* update bond.candidates */
519 /* mark other nodes which also received "PRIMARIES FIRST HOP" packets
520 * as "bonding partner" */
522 /* first, zero the list */
524 hlist_for_each_entry_rcu(tmp_neigh_node, node,
525 &orig_node->neigh_list, list) {
526 tmp_neigh_node->next_bond_candidate = NULL;
530 first_candidate = NULL;
531 last_candidate = NULL;
534 hlist_for_each_entry_rcu(tmp_neigh_node, node,
535 &orig_node->neigh_list, list) {
537 /* only consider if it has the same primary address ... */
538 if (memcmp(orig_node->orig,
539 tmp_neigh_node->orig_node->primary_addr,
543 /* ... and is good enough to be considered */
544 if (tmp_neigh_node->tq_avg < best_tq - BONDING_TQ_THRESHOLD)
547 /* check if we have another candidate with the same
548 * mac address or interface. If we do, we won't
549 * select this candidate because of possible interference. */
551 interference_candidate = 0;
552 hlist_for_each_entry_rcu(tmp_neigh_node2, node2,
553 &orig_node->neigh_list, list) {
555 if (tmp_neigh_node2 == tmp_neigh_node)
558 /* we only care if the other candidate is even
559 * considered as candidate. */
560 if (!tmp_neigh_node2->next_bond_candidate)
564 if ((tmp_neigh_node->if_incoming ==
565 tmp_neigh_node2->if_incoming)
566 || (memcmp(tmp_neigh_node->addr,
567 tmp_neigh_node2->addr, ETH_ALEN) == 0)) {
569 interference_candidate = 1;
573 /* don't care further if it is an interference candidate */
574 if (interference_candidate)
/* append to the candidate chain */
577 if (!first_candidate) {
578 first_candidate = tmp_neigh_node;
579 tmp_neigh_node->next_bond_candidate = first_candidate;
581 tmp_neigh_node->next_bond_candidate = last_candidate;
583 last_candidate = tmp_neigh_node;
/* close the ring and publish the selection */
589 if (candidates > 0) {
590 first_candidate->next_bond_candidate = last_candidate;
591 orig_node->bond.selected = first_candidate;
594 orig_node->bond.candidates = candidates;
/* Main OGM (originator message) processing: validate the packet, classify
 * its sender (self / broadcast / direct neighbor), run the bidirectional
 * link check, update ranking, mark bonding candidates and rebroadcast
 * where appropriate.
 * NOTE(review): excerpt is incomplete — many locals (word, offset,
 * is_duplicate, ...), gotos and braces are elided. */
597 void receive_bat_packet(struct ethhdr *ethhdr,
598 struct batman_packet *batman_packet,
599 unsigned char *hna_buff, int hna_buff_len,
600 struct batman_if *if_incoming)
602 struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
603 struct batman_if *batman_if;
604 struct orig_node *orig_neigh_node, *orig_node;
605 char has_directlink_flag;
606 char is_my_addr = 0, is_my_orig = 0, is_my_oldorig = 0;
607 char is_broadcast = 0, is_bidirectional, is_single_hop_neigh;
609 uint32_t if_incoming_seqno;
611 /* Silently drop when the batman packet is actually not a
614 * This might happen if a packet is padded (e.g. Ethernet has a
615 * minimum frame length of 64 byte) and the aggregation interprets
616 * it as an additional length.
618 * TODO: A more sane solution would be to have a bit in the
619 * batman_packet to detect whether the packet is the last
620 * packet in an aggregation. Here we expect that the padding
621 * is always zero (or not 0x01)
 */
623 if (batman_packet->packet_type != BAT_PACKET)
626 /* could be changed by schedule_own_packet() */
627 if_incoming_seqno = atomic_read(&if_incoming->seqno);
629 has_directlink_flag = (batman_packet->flags & DIRECTLINK ? 1 : 0);
/* direct neighbor iff source MAC equals originator address */
631 is_single_hop_neigh = (compare_orig(ethhdr->h_source,
632 batman_packet->orig) ? 1 : 0);
634 bat_dbg(DBG_BATMAN, bat_priv,
635 "Received BATMAN packet via NB: %pM, IF: %s [%pM] "
636 "(from OG: %pM, via prev OG: %pM, seqno %d, tq %d, "
637 "TTL %d, V %d, IDF %d)\n",
638 ethhdr->h_source, if_incoming->net_dev->name,
639 if_incoming->net_dev->dev_addr, batman_packet->orig,
640 batman_packet->prev_sender, batman_packet->seqno,
641 batman_packet->tq, batman_packet->ttl, batman_packet->version,
642 has_directlink_flag);
/* classify sender against all of our active interfaces */
645 list_for_each_entry_rcu(batman_if, &if_list, list) {
646 if (batman_if->if_status != IF_ACTIVE)
649 if (batman_if->soft_iface != if_incoming->soft_iface)
652 if (compare_orig(ethhdr->h_source,
653 batman_if->net_dev->dev_addr))
656 if (compare_orig(batman_packet->orig,
657 batman_if->net_dev->dev_addr))
660 if (compare_orig(batman_packet->prev_sender,
661 batman_if->net_dev->dev_addr))
664 if (compare_orig(ethhdr->h_source, broadcast_addr))
669 if (batman_packet->version != COMPAT_VERSION) {
670 bat_dbg(DBG_BATMAN, bat_priv,
671 "Drop packet: incompatible batman version (%i)\n",
672 batman_packet->version);
/* our own broadcast echoed back to us */
677 bat_dbg(DBG_BATMAN, bat_priv,
678 "Drop packet: received my own broadcast (sender: %pM"
685 bat_dbg(DBG_BATMAN, bat_priv, "Drop packet: "
686 "ignoring all packets with broadcast source addr (sender: %pM"
687 ")\n", ethhdr->h_source);
/* packet rebroadcast by a neighbor carries our own OGM: use it to
 * update the echo count for the bidirectional check */
695 orig_neigh_node = get_orig_node(bat_priv, ethhdr->h_source);
696 if (!orig_neigh_node)
699 /* neighbor has to indicate direct link and it has to
700 * come via the corresponding interface */
701 /* if received seqno equals last send seqno save new
702 * seqno for bidirectional check */
703 if (has_directlink_flag &&
704 compare_orig(if_incoming->net_dev->dev_addr,
705 batman_packet->orig) &&
706 (batman_packet->seqno - if_incoming_seqno + 2 == 0)) {
707 offset = if_incoming->if_num * NUM_WORDS;
708 word = &(orig_neigh_node->bcast_own[offset]);
710 orig_neigh_node->bcast_own_sum[if_incoming->if_num] =
711 bit_packet_count(word);
714 bat_dbg(DBG_BATMAN, bat_priv, "Drop packet: "
715 "originator packet from myself (via neighbor)\n");
716 kref_put(&orig_neigh_node->refcount, orig_node_free_ref);
721 bat_dbg(DBG_BATMAN, bat_priv,
722 "Drop packet: ignoring all rebroadcast echos (sender: "
723 "%pM)\n", ethhdr->h_source);
727 orig_node = get_orig_node(bat_priv, batman_packet->orig);
731 is_duplicate = count_real_packets(ethhdr, batman_packet, if_incoming);
733 if (is_duplicate == -1) {
734 bat_dbg(DBG_BATMAN, bat_priv,
735 "Drop packet: packet within seqno protection time "
736 "(sender: %pM)\n", ethhdr->h_source);
740 if (batman_packet->tq == 0) {
741 bat_dbg(DBG_BATMAN, bat_priv,
742 "Drop packet: originator packet with tq equal 0\n");
746 /* avoid temporary routing loops */
747 if ((orig_node->router) &&
748 (orig_node->router->orig_node->router) &&
749 (compare_orig(orig_node->router->addr,
750 batman_packet->prev_sender)) &&
751 !(compare_orig(batman_packet->orig, batman_packet->prev_sender)) &&
752 (compare_orig(orig_node->router->addr,
753 orig_node->router->orig_node->router->addr))) {
754 bat_dbg(DBG_BATMAN, bat_priv,
755 "Drop packet: ignoring all rebroadcast packets that "
756 "may make me loop (sender: %pM)\n", ethhdr->h_source);
760 /* if sender is a direct neighbor the sender mac equals
762 orig_neigh_node = (is_single_hop_neigh ?
764 get_orig_node(bat_priv, ethhdr->h_source));
765 if (!orig_neigh_node)
768 /* drop packet if sender is not a direct neighbor and if we
769 * don't route towards it */
770 if (!is_single_hop_neigh && (!orig_neigh_node->router)) {
771 bat_dbg(DBG_BATMAN, bat_priv,
772 "Drop packet: OGM via unknown neighbor!\n");
776 is_bidirectional = is_bidirectional_neigh(orig_node, orig_neigh_node,
777 batman_packet, if_incoming);
779 /* update ranking if it is not a duplicate or has the same
780 * seqno and similar ttl as the non-duplicate */
781 if (is_bidirectional &&
783 ((orig_node->last_real_seqno == batman_packet->seqno) &&
784 (orig_node->last_ttl - 3 <= batman_packet->ttl))))
785 update_orig(bat_priv, orig_node, ethhdr, batman_packet,
786 if_incoming, hna_buff, hna_buff_len, is_duplicate);
788 mark_bonding_address(orig_node, orig_neigh_node, batman_packet);
789 update_bonding_candidates(orig_node);
791 /* is single hop (direct) neighbor */
792 if (is_single_hop_neigh) {
794 /* mark direct link on incoming interface */
795 schedule_forward_packet(orig_node, ethhdr, batman_packet,
796 1, hna_buff_len, if_incoming);
798 bat_dbg(DBG_BATMAN, bat_priv, "Forwarding packet: "
799 "rebroadcast neighbor packet with direct link flag\n");
803 /* multihop originator */
804 if (!is_bidirectional) {
805 bat_dbg(DBG_BATMAN, bat_priv,
806 "Drop packet: not received via bidirectional link\n");
811 bat_dbg(DBG_BATMAN, bat_priv,
812 "Drop packet: duplicate packet received\n");
816 bat_dbg(DBG_BATMAN, bat_priv,
817 "Forwarding packet: rebroadcast originator packet\n");
818 schedule_forward_packet(orig_node, ethhdr, batman_packet,
819 0, hna_buff_len, if_incoming);
/* drop references taken above */
822 if (!is_single_hop_neigh)
823 kref_put(&orig_neigh_node->refcount, orig_node_free_ref);
825 kref_put(&orig_node->refcount, orig_node_free_ref);
/* Entry point for received BAT (OGM) frames: sanity-check the skb,
 * linearize it for in-place modification, then hand it to the
 * aggregation layer under orig_hash_lock. Returns NET_RX_SUCCESS
 * (drop paths are elided in this excerpt). */
828 int recv_bat_packet(struct sk_buff *skb, struct batman_if *batman_if)
830 struct bat_priv *bat_priv = netdev_priv(batman_if->soft_iface);
831 struct ethhdr *ethhdr;
833 /* drop packet if it has not necessary minimum size */
834 if (unlikely(!pskb_may_pull(skb, sizeof(struct batman_packet))))
837 ethhdr = (struct ethhdr *)skb_mac_header(skb);
839 /* packet with broadcast indication but unicast recipient */
840 if (!is_broadcast_ether_addr(ethhdr->h_dest))
843 /* packet with broadcast sender address */
844 if (is_broadcast_ether_addr(ethhdr->h_source))
847 /* create a copy of the skb, if needed, to modify it. */
848 if (skb_cow(skb, 0) < 0)
851 /* keep skb linear */
852 if (skb_linearize(skb) < 0)
/* re-read the mac header: skb_cow/linearize may have moved the data */
855 ethhdr = (struct ethhdr *)skb_mac_header(skb);
857 spin_lock_bh(&bat_priv->orig_hash_lock);
858 receive_aggr_bat_packet(ethhdr,
862 spin_unlock_bh(&bat_priv->orig_hash_lock);
865 return NET_RX_SUCCESS;
/* Handle an ICMP packet addressed to this node: deliver replies to the
 * local batman socket, or answer an ECHO_REQUEST by swapping dst/orig
 * and sending an ECHO_REPLY back towards the sender's router.
 * NOTE(review): excerpt is incomplete — ret declaration, some braces and
 * returns are elided. */
868 static int recv_my_icmp_packet(struct bat_priv *bat_priv,
869 struct sk_buff *skb, size_t icmp_len)
871 struct orig_node *orig_node;
872 struct icmp_packet_rr *icmp_packet;
873 struct batman_if *batman_if;
875 uint8_t dstaddr[ETH_ALEN];
877 icmp_packet = (struct icmp_packet_rr *)skb->data;
879 /* add data to device queue */
880 if (icmp_packet->msg_type != ECHO_REQUEST) {
881 bat_socket_receive_packet(icmp_packet, icmp_len);
885 if (!bat_priv->primary_if)
888 /* answer echo request (ping) */
889 /* get routing information */
890 spin_lock_bh(&bat_priv->orig_hash_lock);
892 orig_node = ((struct orig_node *)hash_find(bat_priv->orig_hash,
893 compare_orig, choose_orig,
898 if ((orig_node) && (orig_node->router)) {
900 /* don't lock while sending the packets ... we therefore
901 * copy the required data before sending */
902 batman_if = orig_node->router->if_incoming;
903 memcpy(dstaddr, orig_node->router->addr, ETH_ALEN);
904 spin_unlock_bh(&bat_priv->orig_hash_lock);
906 /* create a copy of the skb, if needed, to modify it. */
907 if (skb_cow(skb, sizeof(struct ethhdr)) < 0)
/* re-read: skb_cow may have reallocated the data */
910 icmp_packet = (struct icmp_packet_rr *)skb->data;
/* turn the request into a reply: swap addresses, reset TTL */
912 memcpy(icmp_packet->dst, icmp_packet->orig, ETH_ALEN);
913 memcpy(icmp_packet->orig,
914 bat_priv->primary_if->net_dev->dev_addr, ETH_ALEN);
915 icmp_packet->msg_type = ECHO_REPLY;
916 icmp_packet->ttl = TTL;
918 send_skb_packet(skb, batman_if, dstaddr);
919 ret = NET_RX_SUCCESS;
/* no route found: just release the lock */
922 spin_unlock_bh(&bat_priv->orig_hash_lock);
/* An ICMP packet ran out of TTL while being forwarded: if it was an
 * ECHO_REQUEST, bounce a TTL_EXCEEDED message back to its originator
 * (used by traceroute); otherwise just log and drop.
 * NOTE(review): excerpt is incomplete — skb parameter line, ret
 * declaration and returns are elided. */
927 static int recv_icmp_ttl_exceeded(struct bat_priv *bat_priv,
930 struct orig_node *orig_node;
931 struct icmp_packet *icmp_packet;
932 struct batman_if *batman_if;
934 uint8_t dstaddr[ETH_ALEN];
936 icmp_packet = (struct icmp_packet *)skb->data;
938 /* send TTL exceeded if packet is an echo request (traceroute) */
939 if (icmp_packet->msg_type != ECHO_REQUEST) {
940 pr_debug("Warning - can't forward icmp packet from %pM to "
941 "%pM: ttl exceeded\n", icmp_packet->orig,
946 if (!bat_priv->primary_if)
949 /* get routing information */
950 spin_lock_bh(&bat_priv->orig_hash_lock);
952 orig_node = ((struct orig_node *)
953 hash_find(bat_priv->orig_hash, compare_orig, choose_orig,
958 if ((orig_node) && (orig_node->router)) {
960 /* don't lock while sending the packets ... we therefore
961 * copy the required data before sending */
962 batman_if = orig_node->router->if_incoming;
963 memcpy(dstaddr, orig_node->router->addr, ETH_ALEN);
964 spin_unlock_bh(&bat_priv->orig_hash_lock);
966 /* create a copy of the skb, if needed, to modify it. */
967 if (skb_cow(skb, sizeof(struct ethhdr)) < 0)
/* re-read: skb_cow may have reallocated the data */
970 icmp_packet = (struct icmp_packet *) skb->data;
/* send the error back to the sender, restarting with a fresh TTL */
972 memcpy(icmp_packet->dst, icmp_packet->orig, ETH_ALEN);
973 memcpy(icmp_packet->orig,
974 bat_priv->primary_if->net_dev->dev_addr, ETH_ALEN);
975 icmp_packet->msg_type = TTL_EXCEEDED;
976 icmp_packet->ttl = TTL;
978 send_skb_packet(skb, batman_if, dstaddr);
979 ret = NET_RX_SUCCESS;
982 spin_unlock_bh(&bat_priv->orig_hash_lock);
/* Entry point for received batman ICMP packets: validate the frame,
 * append to the record-route list when present, then either deliver
 * locally (dst is ours), bounce TTL_EXCEEDED, or forward towards the
 * destination's router.
 * NOTE(review): excerpt is incomplete — ret declaration, ttl decrement
 * and some drop paths are elided. */
988 int recv_icmp_packet(struct sk_buff *skb, struct batman_if *recv_if)
990 struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
991 struct icmp_packet_rr *icmp_packet;
992 struct ethhdr *ethhdr;
993 struct orig_node *orig_node;
994 struct batman_if *batman_if;
995 int hdr_size = sizeof(struct icmp_packet);
997 uint8_t dstaddr[ETH_ALEN];
/* record-route variant is larger; detect which one we got
1000 * we truncate all incoming icmp packets if they don't match our size
 */
1002 if (skb->len >= sizeof(struct icmp_packet_rr))
1003 hdr_size = sizeof(struct icmp_packet_rr);
1005 /* drop packet if it has not necessary minimum size */
1006 if (unlikely(!pskb_may_pull(skb, hdr_size)))
1009 ethhdr = (struct ethhdr *)skb_mac_header(skb);
1011 /* packet with unicast indication but broadcast recipient */
1012 if (is_broadcast_ether_addr(ethhdr->h_dest))
1015 /* packet with broadcast sender address */
1016 if (is_broadcast_ether_addr(ethhdr->h_source))
1020 if (!is_my_mac(ethhdr->h_dest))
1023 icmp_packet = (struct icmp_packet_rr *)skb->data;
1025 /* add record route information if not full */
1026 if ((hdr_size == sizeof(struct icmp_packet_rr)) &&
1027 (icmp_packet->rr_cur < BAT_RR_LEN)) {
1028 memcpy(&(icmp_packet->rr[icmp_packet->rr_cur]),
1029 ethhdr->h_dest, ETH_ALEN);
1030 icmp_packet->rr_cur++;
1034 if (is_my_mac(icmp_packet->dst))
1035 return recv_my_icmp_packet(bat_priv, skb, hdr_size);
1038 if (icmp_packet->ttl < 2)
1039 return recv_icmp_ttl_exceeded(bat_priv, skb);
1043 /* get routing information */
1044 spin_lock_bh(&bat_priv->orig_hash_lock);
1046 orig_node = ((struct orig_node *)
1047 hash_find(bat_priv->orig_hash, compare_orig, choose_orig,
1051 if ((orig_node) && (orig_node->router)) {
1053 /* don't lock while sending the packets ... we therefore
1054 * copy the required data before sending */
1055 batman_if = orig_node->router->if_incoming;
1056 memcpy(dstaddr, orig_node->router->addr, ETH_ALEN);
1057 spin_unlock_bh(&bat_priv->orig_hash_lock);
1059 /* create a copy of the skb, if needed, to modify it. */
1060 if (skb_cow(skb, sizeof(struct ethhdr)) < 0)
/* re-read: skb_cow may have reallocated the data */
1063 icmp_packet = (struct icmp_packet_rr *)skb->data;
1069 send_skb_packet(skb, batman_if, dstaddr);
1070 ret = NET_RX_SUCCESS;
1073 spin_unlock_bh(&bat_priv->orig_hash_lock);
1078 /* find a suitable router for this originator, and use
1079 * bonding if possible. */
/* Selection: default router unless bonding applies; with bonding
 * enabled, round-robin over the candidate ring, avoiding the interface
 * the packet arrived on (@recv_if, NULL on the first node); with
 * bonding disabled, pick the best-tq candidate not on @recv_if.
 * NOTE(review): excerpt is incomplete — final return and some braces
 * are elided. */
1080 struct neigh_node *find_router(struct bat_priv *bat_priv,
1081 struct orig_node *orig_node,
1082 struct batman_if *recv_if)
1084 struct orig_node *primary_orig_node;
1085 struct orig_node *router_orig;
1086 struct neigh_node *router, *first_candidate, *best_router;
1087 static uint8_t zero_mac[ETH_ALEN] = {0, 0, 0, 0, 0, 0};
1088 int bonding_enabled;
1093 if (!orig_node->router)
1096 /* without bonding, the first node should
1097 * always choose the default router. */
1099 bonding_enabled = atomic_read(&bat_priv->bonding);
1101 if ((!recv_if) && (!bonding_enabled))
1102 return orig_node->router;
1104 router_orig = orig_node->router->orig_node;
1106 /* if we have something in the primary_addr, we can search
1107 * for a potential bonding candidate. */
1108 if (memcmp(router_orig->primary_addr, zero_mac, ETH_ALEN) == 0)
1109 return orig_node->router;
1111 /* find the orig_node which has the primary interface. might
1112 * even be the same as our router_orig in many cases */
1114 if (memcmp(router_orig->primary_addr,
1115 router_orig->orig, ETH_ALEN) == 0) {
1116 primary_orig_node = router_orig;
1119 primary_orig_node = hash_find(bat_priv->orig_hash, compare_orig,
1121 router_orig->primary_addr);
1124 if (!primary_orig_node)
1125 return orig_node->router;
1128 /* with less than 2 candidates, we can't do any
1129 * bonding and prefer the original router. */
1131 if (primary_orig_node->bond.candidates < 2)
1132 return orig_node->router;
1135 /* all nodes between should choose a candidate which
1136 * is is not on the interface where the packet came
1138 first_candidate = primary_orig_node->bond.selected;
1139 router = first_candidate;
1141 if (bonding_enabled) {
1142 /* in the bonding case, send the packets in a round
1143 * robin fashion over the remaining interfaces. */
1145 /* recv_if == NULL on the first node. */
1146 if (router->if_incoming != recv_if)
1149 router = router->next_bond_candidate;
1150 } while (router != first_candidate);
/* advance round-robin pointer for the next packet */
1152 primary_orig_node->bond.selected = router->next_bond_candidate;
1155 /* if bonding is disabled, use the best of the
1156 * remaining candidates which are not using
1157 * this interface. */
1158 best_router = first_candidate;
1161 /* recv_if == NULL on the first node. */
1162 if ((router->if_incoming != recv_if) &&
1163 (router->tq_avg > best_router->tq_avg))
1164 best_router = router;
1166 router = router->next_bond_candidate;
1167 } while (router != first_candidate);
1169 router = best_router;
/* Common validity checks for received unicast frames: minimum length,
 * sane ethernet addressing, and that the frame is actually for us.
 * NOTE(review): return statements are elided in this excerpt; drop paths
 * presumably return a negative value. */
1175 static int check_unicast_packet(struct sk_buff *skb, int hdr_size)
1177 struct ethhdr *ethhdr;
1179 /* drop packet if it has not necessary minimum size */
1180 if (unlikely(!pskb_may_pull(skb, hdr_size)))
1183 ethhdr = (struct ethhdr *)skb_mac_header(skb);
1185 /* packet with unicast indication but broadcast recipient */
1186 if (is_broadcast_ether_addr(ethhdr->h_dest))
1189 /* packet with broadcast sender address */
1190 if (is_broadcast_ether_addr(ethhdr->h_source))
1194 if (!is_my_mac(ethhdr->h_dest))
/* Forward a unicast packet towards its destination: look up the
 * destination's router (bonding-aware via find_router()), then fragment
 * oversized packets or reassemble incoming fragments before sending.
 * NOTE(review): excerpt is incomplete — ret declaration, some drop
 * returns and braces are elided. */
1200 int route_unicast_packet(struct sk_buff *skb, struct batman_if *recv_if,
1203 struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
1204 struct orig_node *orig_node;
1205 struct neigh_node *router;
1206 struct batman_if *batman_if;
1207 uint8_t dstaddr[ETH_ALEN];
1208 struct unicast_packet *unicast_packet;
1209 struct ethhdr *ethhdr = (struct ethhdr *)skb_mac_header(skb);
1211 struct sk_buff *new_skb;
1213 unicast_packet = (struct unicast_packet *)skb->data;
1216 if (unicast_packet->ttl < 2) {
1217 pr_debug("Warning - can't forward unicast packet from %pM to "
1218 "%pM: ttl exceeded\n", ethhdr->h_source,
1219 unicast_packet->dest);
1223 /* get routing information */
1224 spin_lock_bh(&bat_priv->orig_hash_lock);
1226 orig_node = ((struct orig_node *)
1227 hash_find(bat_priv->orig_hash, compare_orig, choose_orig,
1228 unicast_packet->dest));
1231 router = find_router(bat_priv, orig_node, recv_if);
1234 spin_unlock_bh(&bat_priv->orig_hash_lock);
1238 /* don't lock while sending the packets ... we therefore
1239 * copy the required data before sending */
1241 batman_if = router->if_incoming;
1242 memcpy(dstaddr, router->addr, ETH_ALEN);
1244 spin_unlock_bh(&bat_priv->orig_hash_lock);
1246 /* create a copy of the skb, if needed, to modify it. */
1247 if (skb_cow(skb, sizeof(struct ethhdr)) < 0)
/* re-read: skb_cow may have reallocated the data */
1250 unicast_packet = (struct unicast_packet *)skb->data;
/* packet too large for next hop's MTU: fragment it */
1252 if (unicast_packet->packet_type == BAT_UNICAST &&
1253 atomic_read(&bat_priv->fragmentation) &&
1254 skb->len > batman_if->net_dev->mtu)
1255 return frag_send_skb(skb, bat_priv, batman_if,
/* fragment that can be merged before the final hop */
1258 if (unicast_packet->packet_type == BAT_UNICAST_FRAG &&
1259 frag_can_reassemble(skb, batman_if->net_dev->mtu)) {
1261 ret = frag_reassemble_skb(skb, bat_priv, &new_skb);
1263 if (ret == NET_RX_DROP)
1266 /* packet was buffered for late merge */
1268 return NET_RX_SUCCESS;
1271 unicast_packet = (struct unicast_packet *)skb->data;
1275 unicast_packet->ttl--;
1278 send_skb_packet(skb, batman_if, dstaddr);
1280 return NET_RX_SUCCESS;
/* recv_unicast_packet() - receive handler for plain BAT_UNICAST packets.
 *
 * Sanity-checks the packet via check_unicast_packet(); if the destination
 * is one of our own MAC addresses the payload is delivered up the stack
 * with interface_rx(), otherwise the packet is forwarded with
 * route_unicast_packet().
 *
 * @skb:     received packet
 * @recv_if: hard interface it arrived on
 * Returns NET_RX_SUCCESS on local delivery, otherwise the result of
 * route_unicast_packet() (the drop-return for a failed sanity check is
 * not visible in this extract).
 */
1283 int recv_unicast_packet(struct sk_buff *skb, struct batman_if *recv_if)
1285 struct unicast_packet *unicast_packet;
1286 int hdr_size = sizeof(struct unicast_packet);
/* malformed / broadcast-addressed / not-for-us frames are rejected here */
1288 if (check_unicast_packet(skb, hdr_size) < 0)
1291 unicast_packet = (struct unicast_packet *)skb->data;
/* packet addressed to this node: strip the batman header and deliver */
1294 if (is_my_mac(unicast_packet->dest)) {
1295 interface_rx(recv_if->soft_iface, skb, recv_if, hdr_size);
1296 return NET_RX_SUCCESS;
/* not for us: forward one hop towards the destination */
1299 return route_unicast_packet(skb, recv_if, hdr_size);
/* recv_ucast_frag_packet() - receive handler for BAT_UNICAST_FRAG packets
 * (fragments of a unicast payload that exceeded the link MTU).
 *
 * If the fragment is addressed to this node it is handed to
 * frag_reassemble_skb(): the first half is buffered until its counterpart
 * arrives, and a completed merge is delivered up the stack via
 * interface_rx() using the *plain* unicast header size, since the merged
 * skb carries a struct unicast_packet header.  Fragments for other nodes
 * are forwarded with route_unicast_packet().
 *
 * @skb:     received fragment
 * @recv_if: hard interface it arrived on
 * Returns NET_RX_SUCCESS on delivery/buffering, otherwise the result of
 * route_unicast_packet() (drop-returns are not visible in this extract).
 */
1302 int recv_ucast_frag_packet(struct sk_buff *skb, struct batman_if *recv_if)
1304 struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
1305 struct unicast_frag_packet *unicast_packet;
1306 int hdr_size = sizeof(struct unicast_frag_packet);
/* filled in by frag_reassemble_skb() when both halves are present */
1307 struct sk_buff *new_skb = NULL;
1310 if (check_unicast_packet(skb, hdr_size) < 0)
1312 unicast_packet = (struct unicast_frag_packet *)skb->data;
/* fragment for this node: try to merge it with its buffered counterpart */
1316 if (is_my_mac(unicast_packet->dest)) {
1318 ret = frag_reassemble_skb(skb, bat_priv, &new_skb);
1320 if (ret == NET_RX_DROP)
1323 /* packet was buffered for late merge */
1325 return NET_RX_SUCCESS;
/* both halves present: deliver the merged packet, which now carries a
 * plain unicast header (hence sizeof(struct unicast_packet) here) */
1327 interface_rx(recv_if->soft_iface, new_skb, recv_if,
1328 sizeof(struct unicast_packet));
1329 return NET_RX_SUCCESS;
/* fragment for another node: forward it unchanged */
1332 return route_unicast_packet(skb, recv_if, hdr_size);
/* recv_bcast_packet() - receive handler for BAT_BCAST (flooded broadcast)
 * packets.
 *
 * Validates the frame, discards our own broadcasts and TTL-exhausted
 * packets, then performs duplicate suppression against the originator's
 * sliding sequence-number window (bcast_bits / last_bcast_seqno) under
 * orig_hash_lock.  New broadcasts are re-flooded via
 * add_bcast_packet_to_list() and also delivered locally through
 * interface_rx().
 *
 * @skb:     received broadcast packet
 * @recv_if: hard interface it arrived on
 * Returns NET_RX_SUCCESS; the NET_RX_DROP returns on the various error
 * paths are not visible in this extract.
 *
 * NOTE(review): closing braces and drop-returns are missing from this
 * extract; comments hedge where behavior depends on them.
 */
1336 int recv_bcast_packet(struct sk_buff *skb, struct batman_if *recv_if)
1338 struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
1339 struct orig_node *orig_node;
1340 struct bcast_packet *bcast_packet;
1341 struct ethhdr *ethhdr;
1342 int hdr_size = sizeof(struct bcast_packet);
1345 /* drop packet if it has not necessary minimum size */
1346 if (unlikely(!pskb_may_pull(skb, hdr_size)))
1349 ethhdr = (struct ethhdr *)skb_mac_header(skb);
1351 /* packet with broadcast indication but unicast recipient */
1352 if (!is_broadcast_ether_addr(ethhdr->h_dest))
1355 /* packet with broadcast sender address */
1356 if (is_broadcast_ether_addr(ethhdr->h_source))
1359 /* ignore broadcasts sent by myself */
1360 if (is_my_mac(ethhdr->h_source))
1363 bcast_packet = (struct bcast_packet *)skb->data;
1365 /* ignore broadcasts originated by myself */
1366 if (is_my_mac(bcast_packet->orig))
/* TTL below 2 cannot survive another rebroadcast */
1369 if (bcast_packet->ttl < 2)
/* the duplicate-window state lives in the originator entry: look it up
 * and update it under the hash lock */
1372 spin_lock_bh(&bat_priv->orig_hash_lock);
1374 orig_node = ((struct orig_node *)
1375 hash_find(bat_priv->orig_hash, compare_orig, choose_orig,
1376 bcast_packet->orig));
/* NOTE(review): presumably the "originator unknown" error path (its
 * conditional is missing from this extract) */
1380 spin_unlock_bh(&bat_priv->orig_hash_lock);
1384 /* check whether the packet is a duplicate */
1385 if (get_bit_status(orig_node->bcast_bits,
1386 orig_node->last_bcast_seqno,
1387 ntohl(bcast_packet->seqno))) {
1388 spin_unlock_bh(&bat_priv->orig_hash_lock);
/* seqno arrives in network byte order; diff may be negative for
 * out-of-order packets */
1392 seq_diff = ntohl(bcast_packet->seqno) - orig_node->last_bcast_seqno;
1394 /* check whether the packet is old and the host just restarted. */
1395 if (window_protected(bat_priv, seq_diff,
1396 &orig_node->bcast_seqno_reset)) {
1397 spin_unlock_bh(&bat_priv->orig_hash_lock);
1401 /* mark broadcast in flood history, update window position
/* bit_get_packet() returns whether the window moved; if so, record the
 * new newest sequence number */
1403 if (bit_get_packet(bat_priv, orig_node->bcast_bits, seq_diff, 1))
1404 orig_node->last_bcast_seqno = ntohl(bcast_packet->seqno);
1406 spin_unlock_bh(&bat_priv->orig_hash_lock);
1407 /* rebroadcast packet */
1408 add_bcast_packet_to_list(bat_priv, skb);
1410 /* broadcast for me */
1411 interface_rx(recv_if->soft_iface, skb, recv_if, hdr_size);
1413 return NET_RX_SUCCESS;
1416 int recv_vis_packet(struct sk_buff *skb, struct batman_if *recv_if)
1418 struct vis_packet *vis_packet;
1419 struct ethhdr *ethhdr;
1420 struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
1421 int hdr_size = sizeof(struct vis_packet);
1423 /* keep skb linear */
1424 if (skb_linearize(skb) < 0)
1427 if (unlikely(!pskb_may_pull(skb, hdr_size)))
1430 vis_packet = (struct vis_packet *)skb->data;
1431 ethhdr = (struct ethhdr *)skb_mac_header(skb);
1434 if (!is_my_mac(ethhdr->h_dest))
1437 /* ignore own packets */
1438 if (is_my_mac(vis_packet->vis_orig))
1441 if (is_my_mac(vis_packet->sender_orig))
1444 switch (vis_packet->vis_type) {
1445 case VIS_TYPE_SERVER_SYNC:
1446 receive_server_sync_packet(bat_priv, vis_packet,
1450 case VIS_TYPE_CLIENT_UPDATE:
1451 receive_client_update_packet(bat_priv, vis_packet,
1455 default: /* ignore unknown packet */
1459 /* We take a copy of the data in the packet, so we should
1460 always free the skbuf. */