2 * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
4 * Marek Lindner, Simon Wunderlich
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of version 2 of the GNU General Public
8 * License as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License for more details.
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
26 #include "soft-interface.h"
27 #include "hard-interface.h"
28 #include "icmp_socket.h"
29 #include "translation-table.h"
30 #include "originator.h"
31 #include "ring_buffer.h"
33 #include "aggregation.h"
34 #include "gateway_common.h"
35 #include "gateway_client.h"
/* Shift the per-interface "own broadcasts echoed back" sliding window of
 * every originator by one slot and refresh the cached packet count used
 * later for the TQ calculation. Walks the entire originator hash while
 * holding orig_hash_lock.
 * NOTE(review): declarations of i/word_index/word and several braces are
 * not visible in this view -- confirm against the complete file. */
38 void slide_own_bcast_window(struct batman_if *batman_if)
40 struct bat_priv *bat_priv = netdev_priv(batman_if->soft_iface);
41 struct hashtable_t *hash = bat_priv->orig_hash;
42 struct hlist_node *walk;
43 struct hlist_head *head;
44 struct element_t *bucket;
45 struct orig_node *orig_node;
50 spin_lock_bh(&bat_priv->orig_hash_lock);
/* visit every bucket of the originator hash table */
52 for (i = 0; i < hash->size; i++) {
53 head = &hash->table[i];
55 hlist_for_each_entry(bucket, walk, head, hlist) {
56 orig_node = bucket->data;
/* select this interface's slice of the bcast_own bitfield */
57 word_index = batman_if->if_num * NUM_WORDS;
58 word = &(orig_node->bcast_own[word_index]);
/* slide the window by one seqno; last arg 0 presumably means
 * "do not mark a received packet" -- confirm in bit_get_packet() */
60 bit_get_packet(bat_priv, word, 1, 0);
/* cache the new count so TQ code does not re-scan the bitfield */
61 orig_node->bcast_own_sum[batman_if->if_num] =
62 bit_packet_count(word);
66 spin_unlock_bh(&bat_priv->orig_hash_lock);
/* Keep the global HNA (host network announcement) table in sync with the
 * HNA buffer most recently received from this originator: when length or
 * contents changed, purge the stale entries and install the new ones. */
69 static void update_HNA(struct bat_priv *bat_priv, struct orig_node *orig_node,
70 unsigned char *hna_buff, int hna_buff_len)
/* act only when the length differs, or both buffers are non-empty
 * and their contents differ */
72 if ((hna_buff_len != orig_node->hna_buff_len) ||
73 ((hna_buff_len > 0) &&
74 (orig_node->hna_buff_len > 0) &&
75 (memcmp(orig_node->hna_buff, hna_buff, hna_buff_len) != 0))) {
/* drop the previously announced entries first */
77 if (orig_node->hna_buff_len > 0)
78 hna_global_del_orig(bat_priv, orig_node,
79 "originator changed hna");
/* then add the new announcement, if any was supplied */
81 if ((hna_buff_len > 0) && (hna_buff))
82 hna_global_add_orig(bat_priv, orig_node,
83 hna_buff, hna_buff_len);
/* Install, replace or remove the next hop (router) of an originator and
 * keep the HNA table plus neigh_node reference counts consistent.
 * neigh_node == NULL means "no route anymore". */
87 static void update_route(struct bat_priv *bat_priv,
88 struct orig_node *orig_node,
89 struct neigh_node *neigh_node,
90 unsigned char *hna_buff, int hna_buff_len)
92 struct neigh_node *neigh_node_tmp;
/* case 1: route deleted -- originator had a router, no new next hop */
95 if ((orig_node->router) && (!neigh_node)) {
97 bat_dbg(DBG_ROUTES, bat_priv, "Deleting route towards: %pM\n",
99 hna_global_del_orig(bat_priv, orig_node,
100 "originator timed out");
/* case 2: route added -- originator previously had no router */
103 } else if ((!orig_node->router) && (neigh_node)) {
105 bat_dbg(DBG_ROUTES, bat_priv,
106 "Adding route towards: %pM (via %pM)\n",
107 orig_node->orig, neigh_node->addr);
108 hna_global_add_orig(bat_priv, orig_node,
109 hna_buff, hna_buff_len);
/* case 3 (branch header not visible here): route changed */
113 bat_dbg(DBG_ROUTES, bat_priv,
114 "Changing route towards: %pM "
115 "(now via %pM - was via %pM)\n",
116 orig_node->orig, neigh_node->addr,
117 orig_node->router->addr);
/* take a reference for orig_node->router before publishing it,
 * then release the reference held on the replaced router */
121 kref_get(&neigh_node->refcount);
122 neigh_node_tmp = orig_node->router;
123 orig_node->router = neigh_node;
125 kref_put(&neigh_node_tmp->refcount, neigh_node_free_ref);
/* Public entry point for route maintenance: change the next hop only when
 * it actually differs from the current one, then always re-check the HNA
 * buffer, since the announcement may have changed even if the route
 * did not. */
129 void update_routes(struct bat_priv *bat_priv, struct orig_node *orig_node,
130 struct neigh_node *neigh_node, unsigned char *hna_buff,
137 if (orig_node->router != neigh_node)
138 update_route(bat_priv, orig_node, neigh_node,
139 hna_buff, hna_buff_len);
140 /* may be just HNA changed */
142 update_HNA(bat_priv, orig_node, hna_buff, hna_buff_len);
/* Decide whether the link to a (potential) neighbor is bidirectional by
 * comparing how many of our own OGMs were echoed back with how many of
 * the neighbor's OGMs we received. Also rescales batman_packet->tq with
 * the local link quality and an asymmetry penalty. Appears to return
 * non-zero when tq reaches TQ_TOTAL_BIDRECT_LIMIT -- the actual return
 * statements are not visible in this view. */
145 static int is_bidirectional_neigh(struct orig_node *orig_node,
146 struct orig_node *orig_neigh_node,
147 struct batman_packet *batman_packet,
148 struct batman_if *if_incoming)
150 struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
151 struct neigh_node *neigh_node = NULL, *tmp_neigh_node = NULL;
152 struct hlist_node *node;
153 unsigned char total_count;
/* the neighbor is the originator itself (single hop case):
 * look up / create the corresponding neigh_node entry */
156 if (orig_node == orig_neigh_node) {
158 hlist_for_each_entry_rcu(tmp_neigh_node, node,
159 &orig_node->neigh_list, list) {
161 if (compare_orig(tmp_neigh_node->addr,
162 orig_neigh_node->orig) &&
163 (tmp_neigh_node->if_incoming == if_incoming))
164 neigh_node = tmp_neigh_node;
168 neigh_node = create_neighbor(orig_node,
170 orig_neigh_node->orig,
172 /* create_neighbor failed, return 0 */
176 kref_get(&neigh_node->refcount);
179 neigh_node->last_valid = jiffies;
181 /* find packet count of corresponding one hop neighbor */
183 hlist_for_each_entry_rcu(tmp_neigh_node, node,
184 &orig_neigh_node->neigh_list, list) {
186 if (compare_orig(tmp_neigh_node->addr,
187 orig_neigh_node->orig) &&
188 (tmp_neigh_node->if_incoming == if_incoming))
189 neigh_node = tmp_neigh_node;
193 neigh_node = create_neighbor(orig_neigh_node,
195 orig_neigh_node->orig,
197 /* create_neighbor failed, return 0 */
201 kref_get(&neigh_node->refcount);
205 orig_node->last_valid = jiffies;
207 /* pay attention to not get a value bigger than 100 % */
/* total_count = min(echoed own broadcasts, received packets) */
208 total_count = (orig_neigh_node->bcast_own_sum[if_incoming->if_num] >
209 neigh_node->real_packet_count ?
210 neigh_node->real_packet_count :
211 orig_neigh_node->bcast_own_sum[if_incoming->if_num]);
213 /* if we have too few packets (too less data) we set tq_own to zero */
214 /* if we receive too few packets it is not considered bidirectional */
215 if ((total_count < TQ_LOCAL_BIDRECT_SEND_MINIMUM) ||
216 (neigh_node->real_packet_count < TQ_LOCAL_BIDRECT_RECV_MINIMUM))
217 orig_neigh_node->tq_own = 0;
219 /* neigh_node->real_packet_count is never zero as we
220 * only purge old information when getting new
/* else-branch (header not visible): local link quality as the ratio of
 * echoed-back packets to received packets, scaled to TQ_MAX_VALUE */
222 orig_neigh_node->tq_own = (TQ_MAX_VALUE * total_count) /
223 neigh_node->real_packet_count;
226 * 1 - ((1-x) ** 3), normalized to TQ_MAX_VALUE this does
227 * affect the nearly-symmetric links only a little, but
228 * punishes asymmetric links more. This will give a value
229 * between 0 and TQ_MAX_VALUE
231 orig_neigh_node->tq_asym_penalty =
234 (TQ_LOCAL_WINDOW_SIZE - neigh_node->real_packet_count) *
235 (TQ_LOCAL_WINDOW_SIZE - neigh_node->real_packet_count) *
236 (TQ_LOCAL_WINDOW_SIZE - neigh_node->real_packet_count)) /
237 (TQ_LOCAL_WINDOW_SIZE *
238 TQ_LOCAL_WINDOW_SIZE *
239 TQ_LOCAL_WINDOW_SIZE);
/* fold local quality and asymmetry penalty into the advertised tq */
241 batman_packet->tq = ((batman_packet->tq *
242 orig_neigh_node->tq_own *
243 orig_neigh_node->tq_asym_penalty) /
244 (TQ_MAX_VALUE * TQ_MAX_VALUE));
246 bat_dbg(DBG_BATMAN, bat_priv,
248 "orig = %-15pM neigh = %-15pM => own_bcast = %2i, "
249 "real recv = %2i, local tq: %3i, asym_penalty: %3i, "
251 orig_node->orig, orig_neigh_node->orig, total_count,
252 neigh_node->real_packet_count, orig_neigh_node->tq_own,
253 orig_neigh_node->tq_asym_penalty, batman_packet->tq);
255 /* if link has the minimum required transmission quality
256 * consider it bidirectional */
257 if (batman_packet->tq >= TQ_TOTAL_BIDRECT_LIMIT)
/* common exit: release the reference taken above */
266 kref_put(&neigh_node->refcount, neigh_node_free_ref);
/* Update the originator entry for a received OGM: refresh the matching
 * neigh_node's TQ ring buffer / timestamps, possibly switch the route to
 * this neighbor, sync the HNA buffer, and re-run gateway handling when
 * the advertised gw_flags changed. */
270 static void update_orig(struct bat_priv *bat_priv,
271 struct orig_node *orig_node,
272 struct ethhdr *ethhdr,
273 struct batman_packet *batman_packet,
274 struct batman_if *if_incoming,
275 unsigned char *hna_buff, int hna_buff_len,
278 struct neigh_node *neigh_node = NULL, *tmp_neigh_node = NULL;
279 struct hlist_node *node;
280 int tmp_hna_buff_len;
282 bat_dbg(DBG_BATMAN, bat_priv, "update_originator(): "
283 "Searching and updating originator entry of received packet\n");
/* find the neigh_node matching sender MAC + incoming interface;
 * all other neighbors get a 0 pushed into their TQ ring buffer */
286 hlist_for_each_entry_rcu(tmp_neigh_node, node,
287 &orig_node->neigh_list, list) {
288 if (compare_orig(tmp_neigh_node->addr, ethhdr->h_source) &&
289 (tmp_neigh_node->if_incoming == if_incoming)) {
290 neigh_node = tmp_neigh_node;
297 ring_buffer_set(tmp_neigh_node->tq_recv,
298 &tmp_neigh_node->tq_index, 0);
299 tmp_neigh_node->tq_avg =
300 ring_buffer_avg(tmp_neigh_node->tq_recv);
/* no matching neighbor yet: create one for this sender */
304 struct orig_node *orig_tmp;
306 orig_tmp = get_orig_node(bat_priv, ethhdr->h_source);
310 neigh_node = create_neighbor(orig_node, orig_tmp,
311 ethhdr->h_source, if_incoming);
315 bat_dbg(DBG_BATMAN, bat_priv,
316 "Updating existing last-hop neighbor of originator\n");
318 kref_get(&neigh_node->refcount);
321 orig_node->flags = batman_packet->flags;
322 neigh_node->last_valid = jiffies;
/* record the packet's tq in the ring buffer and refresh the average */
324 ring_buffer_set(neigh_node->tq_recv,
325 &neigh_node->tq_index,
327 neigh_node->tq_avg = ring_buffer_avg(neigh_node->tq_recv);
330 orig_node->last_ttl = batman_packet->ttl;
331 neigh_node->last_ttl = batman_packet->ttl;
/* clamp the HNA buffer length to what the packet actually announces */
334 tmp_hna_buff_len = (hna_buff_len > batman_packet->num_hna * ETH_ALEN ?
335 batman_packet->num_hna * ETH_ALEN : hna_buff_len);
337 /* if this neighbor already is our next hop there is nothing
339 if (orig_node->router == neigh_node)
342 /* if this neighbor does not offer a better TQ we won't consider it */
343 if ((orig_node->router) &&
344 (orig_node->router->tq_avg > neigh_node->tq_avg))
347 /* if the TQ is the same and the link not more symetric we
348 * won't consider it either */
349 if ((orig_node->router) &&
350 ((neigh_node->tq_avg == orig_node->router->tq_avg) &&
351 (orig_node->router->orig_node->bcast_own_sum[if_incoming->if_num]
352 >= neigh_node->orig_node->bcast_own_sum[if_incoming->if_num])))
/* new neighbor wins: switch the route to it */
355 update_routes(bat_priv, orig_node, neigh_node,
356 hna_buff, tmp_hna_buff_len);
/* fall-through path (label not visible): keep current router but
 * still propagate a possibly changed HNA buffer */
360 update_routes(bat_priv, orig_node, orig_node->router,
361 hna_buff, tmp_hna_buff_len);
/* gateway flags changed: update the gateway list entry */
364 if (orig_node->gw_flags != batman_packet->gw_flags)
365 gw_node_update(bat_priv, orig_node, batman_packet->gw_flags);
367 orig_node->gw_flags = batman_packet->gw_flags;
369 /* restart gateway selection if fast or late switching was enabled */
370 if ((orig_node->gw_flags) &&
371 (atomic_read(&bat_priv->gw_mode) == GW_MODE_CLIENT) &&
372 (atomic_read(&bat_priv->gw_sel_class) > 2))
373 gw_check_election(bat_priv, orig_node);
/* common exit: drop the reference taken on neigh_node above */
381 kref_put(&neigh_node->refcount, neigh_node_free_ref);
384 /* checks whether the host restarted and is in the protection time.
 * returns:
386 * 0 if the packet is to be accepted
387 * 1 if the packet is to be ignored.
389 static int window_protected(struct bat_priv *bat_priv,
390 int32_t seq_num_diff,
391 unsigned long *last_reset)
/* seqno far outside the expected window: either an old packet or the
 * sender rebooted and restarted its sequence numbers */
393 if ((seq_num_diff <= -TQ_LOCAL_WINDOW_SIZE)
394 || (seq_num_diff >= EXPECTED_SEQNO_RANGE)) {
/* protection time expired: accept the packet and restart the timer */
395 if (time_after(jiffies, *last_reset +
396 msecs_to_jiffies(RESET_PROTECTION_MS))) {
398 *last_reset = jiffies;
399 bat_dbg(DBG_BATMAN, bat_priv,
400 "old packet received, start protection\n");
409 /* processes a batman packet for all interfaces, adjusts the sequence number and
410 * finds out whether it is a duplicate.
 * returns:
412 * 1 the packet is a duplicate
413 * 0 the packet has not yet been received
414 * -1 the packet is old and has been received while the seqno window
415 * was protected. Caller should drop it.
417 static char count_real_packets(struct ethhdr *ethhdr,
418 struct batman_packet *batman_packet,
419 struct batman_if *if_incoming)
421 struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
422 struct orig_node *orig_node;
423 struct neigh_node *tmp_neigh_node;
424 struct hlist_node *node;
425 char is_duplicate = 0;
430 orig_node = get_orig_node(bat_priv, batman_packet->orig);
/* distance of the new seqno from the last accepted one */
434 seq_diff = batman_packet->seqno - orig_node->last_real_seqno;
436 /* signalize caller that the packet is to be dropped. */
437 if (window_protected(bat_priv, seq_diff,
438 &orig_node->batman_seqno_reset))
/* update the real-packet window of every neighbor; only the neighbor
 * matching sender MAC + interface gets the new seqno marked */
442 hlist_for_each_entry_rcu(tmp_neigh_node, node,
443 &orig_node->neigh_list, list) {
445 is_duplicate |= get_bit_status(tmp_neigh_node->real_bits,
446 orig_node->last_real_seqno,
447 batman_packet->seqno);
449 if (compare_orig(tmp_neigh_node->addr, ethhdr->h_source) &&
450 (tmp_neigh_node->if_incoming == if_incoming))
455 /* if the window moved, set the update flag. */
456 need_update |= bit_get_packet(bat_priv,
457 tmp_neigh_node->real_bits,
460 tmp_neigh_node->real_packet_count =
461 bit_packet_count(tmp_neigh_node->real_bits);
/* window moved: remember the new reference seqno */
466 bat_dbg(DBG_BATMAN, bat_priv,
467 "updating last_seqno: old %d, new %d\n",
468 orig_node->last_real_seqno, batman_packet->seqno);
469 orig_node->last_real_seqno = batman_packet->seqno;
475 /* copy primary address for bonding */
476 static void mark_bonding_address(struct orig_node *orig_node,
477 struct orig_node *orig_neigh_node,
478 struct batman_packet *batman_packet)
/* the PRIMARIES_FIRST_HOP flag marks an OGM sent by the originator's
 * primary interface: remember that primary MAC on the direct neighbor
 * so bonding candidates can later be grouped by primary address */
481 if (batman_packet->flags & PRIMARIES_FIRST_HOP)
482 memcpy(orig_neigh_node->primary_addr,
483 orig_node->orig, ETH_ALEN);
488 /* mark possible bond.candidates in the neighbor list */
489 void update_bonding_candidates(struct orig_node *orig_node)
492 int interference_candidate;
494 struct hlist_node *node, *node2;
495 struct neigh_node *tmp_neigh_node, *tmp_neigh_node2;
496 struct neigh_node *first_candidate, *last_candidate;
498 /* update the candidates for this originator */
499 if (!orig_node->router) {
/* no route at all: there can be no bonding candidates */
500 orig_node->bond.candidates = 0;
/* reference TQ all candidates are measured against */
504 best_tq = orig_node->router->tq_avg;
506 /* update bond.candidates */
510 /* mark other nodes which also received "PRIMARIES FIRST HOP" packets
511 * as "bonding partner" */
513 /* first, zero the list */
515 hlist_for_each_entry_rcu(tmp_neigh_node, node,
516 &orig_node->neigh_list, list) {
517 tmp_neigh_node->next_bond_candidate = NULL;
521 first_candidate = NULL;
522 last_candidate = NULL;
/* second pass: link all suitable neighbors into a candidate ring */
525 hlist_for_each_entry_rcu(tmp_neigh_node, node,
526 &orig_node->neigh_list, list) {
528 /* only consider if it has the same primary address ... */
529 if (memcmp(orig_node->orig,
530 tmp_neigh_node->orig_node->primary_addr,
534 /* ... and is good enough to be considered */
535 if (tmp_neigh_node->tq_avg < best_tq - BONDING_TQ_THRESHOLD)
538 /* check if we have another candidate with the same
539 * mac address or interface. If we do, we won't
540 * select this candidate because of possible interference. */
542 interference_candidate = 0;
543 hlist_for_each_entry_rcu(tmp_neigh_node2, node2,
544 &orig_node->neigh_list, list) {
546 if (tmp_neigh_node2 == tmp_neigh_node)
549 /* we only care if the other candidate is even
550 * considered as candidate. */
551 if (!tmp_neigh_node2->next_bond_candidate)
555 if ((tmp_neigh_node->if_incoming ==
556 tmp_neigh_node2->if_incoming)
557 || (memcmp(tmp_neigh_node->addr,
558 tmp_neigh_node2->addr, ETH_ALEN) == 0)) {
560 interference_candidate = 1;
564 /* don't care further if it is an interference candidate */
565 if (interference_candidate)
/* append to the singly linked candidate list; first entry
 * temporarily points at itself until the ring is closed below */
568 if (!first_candidate) {
569 first_candidate = tmp_neigh_node;
570 tmp_neigh_node->next_bond_candidate = first_candidate;
572 tmp_neigh_node->next_bond_candidate = last_candidate;
574 last_candidate = tmp_neigh_node;
/* close the ring and publish the selection state */
580 if (candidates > 0) {
581 first_candidate->next_bond_candidate = last_candidate;
582 orig_node->bond.selected = first_candidate;
585 orig_node->bond.candidates = candidates;
/* Main OGM (originator message) processing: classify the packet relative
 * to our own interfaces, perform sanity / duplicate / loop checks, run the
 * bidirectional link check, update the originator ranking and bonding
 * candidates, and finally schedule a rebroadcast where appropriate.
 * NOTE(review): several declarations (word/offset/is_duplicate), labels
 * and closing braces are missing from this view. */
588 void receive_bat_packet(struct ethhdr *ethhdr,
589 struct batman_packet *batman_packet,
590 unsigned char *hna_buff, int hna_buff_len,
591 struct batman_if *if_incoming)
593 struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
594 struct batman_if *batman_if;
595 struct orig_node *orig_neigh_node, *orig_node;
596 char has_directlink_flag;
597 char is_my_addr = 0, is_my_orig = 0, is_my_oldorig = 0;
598 char is_broadcast = 0, is_bidirectional, is_single_hop_neigh;
600 uint32_t if_incoming_seqno;
602 /* Silently drop when the batman packet is actually not a
605 * This might happen if a packet is padded (e.g. Ethernet has a
606 * minimum frame length of 64 byte) and the aggregation interprets
607 * it as an additional length.
609 * TODO: A more sane solution would be to have a bit in the
610 * batman_packet to detect whether the packet is the last
611 * packet in an aggregation. Here we expect that the padding
612 * is always zero (or not 0x01)
614 if (batman_packet->packet_type != BAT_PACKET)
617 /* could be changed by schedule_own_packet() */
618 if_incoming_seqno = atomic_read(&if_incoming->seqno);
620 has_directlink_flag = (batman_packet->flags & DIRECTLINK ? 1 : 0);
/* single hop: sender MAC equals the advertised originator MAC */
622 is_single_hop_neigh = (compare_orig(ethhdr->h_source,
623 batman_packet->orig) ? 1 : 0);
625 bat_dbg(DBG_BATMAN, bat_priv,
626 "Received BATMAN packet via NB: %pM, IF: %s [%pM] "
627 "(from OG: %pM, via prev OG: %pM, seqno %d, tq %d, "
628 "TTL %d, V %d, IDF %d)\n",
629 ethhdr->h_source, if_incoming->net_dev->name,
630 if_incoming->net_dev->dev_addr, batman_packet->orig,
631 batman_packet->prev_sender, batman_packet->seqno,
632 batman_packet->tq, batman_packet->ttl, batman_packet->version,
633 has_directlink_flag);
/* compare sender/orig/prev_sender against every active interface on
 * this mesh to set the is_my_* / is_broadcast flags checked below */
636 list_for_each_entry_rcu(batman_if, &if_list, list) {
637 if (batman_if->if_status != IF_ACTIVE)
640 if (batman_if->soft_iface != if_incoming->soft_iface)
643 if (compare_orig(ethhdr->h_source,
644 batman_if->net_dev->dev_addr))
647 if (compare_orig(batman_packet->orig,
648 batman_if->net_dev->dev_addr))
651 if (compare_orig(batman_packet->prev_sender,
652 batman_if->net_dev->dev_addr))
655 if (compare_orig(ethhdr->h_source, broadcast_addr))
660 if (batman_packet->version != COMPAT_VERSION) {
661 bat_dbg(DBG_BATMAN, bat_priv,
662 "Drop packet: incompatible batman version (%i)\n",
663 batman_packet->version);
/* packet we sent ourselves came back: ignore */
668 bat_dbg(DBG_BATMAN, bat_priv,
669 "Drop packet: received my own broadcast (sender: %pM"
676 bat_dbg(DBG_BATMAN, bat_priv, "Drop packet: "
677 "ignoring all packets with broadcast source addr (sender: %pM"
678 ")\n", ethhdr->h_source);
/* rebroadcast of our own OGM (is_my_orig path, per debug msg below):
 * use the echo to update the own-broadcast window for this neighbor */
686 orig_neigh_node = get_orig_node(bat_priv, ethhdr->h_source);
688 if (!orig_neigh_node)
691 /* neighbor has to indicate direct link and it has to
692 * come via the corresponding interface */
693 /* if received seqno equals last send seqno save new
694 * seqno for bidirectional check */
695 if (has_directlink_flag &&
696 compare_orig(if_incoming->net_dev->dev_addr,
697 batman_packet->orig) &&
698 (batman_packet->seqno - if_incoming_seqno + 2 == 0)) {
699 offset = if_incoming->if_num * NUM_WORDS;
700 word = &(orig_neigh_node->bcast_own[offset]);
702 orig_neigh_node->bcast_own_sum[if_incoming->if_num] =
703 bit_packet_count(word);
706 bat_dbg(DBG_BATMAN, bat_priv, "Drop packet: "
707 "originator packet from myself (via neighbor)\n");
712 bat_dbg(DBG_BATMAN, bat_priv,
713 "Drop packet: ignoring all rebroadcast echos (sender: "
714 "%pM)\n", ethhdr->h_source);
718 orig_node = get_orig_node(bat_priv, batman_packet->orig);
722 is_duplicate = count_real_packets(ethhdr, batman_packet, if_incoming);
724 if (is_duplicate == -1) {
725 bat_dbg(DBG_BATMAN, bat_priv,
726 "Drop packet: packet within seqno protection time "
727 "(sender: %pM)\n", ethhdr->h_source);
731 if (batman_packet->tq == 0) {
732 bat_dbg(DBG_BATMAN, bat_priv,
733 "Drop packet: originator packet with tq equal 0\n");
737 /* avoid temporary routing loops */
738 if ((orig_node->router) &&
739 (orig_node->router->orig_node->router) &&
740 (compare_orig(orig_node->router->addr,
741 batman_packet->prev_sender)) &&
742 !(compare_orig(batman_packet->orig, batman_packet->prev_sender)) &&
743 (compare_orig(orig_node->router->addr,
744 orig_node->router->orig_node->router->addr))) {
745 bat_dbg(DBG_BATMAN, bat_priv,
746 "Drop packet: ignoring all rebroadcast packets that "
747 "may make me loop (sender: %pM)\n", ethhdr->h_source);
751 /* if sender is a direct neighbor the sender mac equals
753 orig_neigh_node = (is_single_hop_neigh ?
755 get_orig_node(bat_priv, ethhdr->h_source));
756 if (!orig_neigh_node)
759 /* drop packet if sender is not a direct neighbor and if we
760 * don't route towards it */
761 if (!is_single_hop_neigh && (!orig_neigh_node->router)) {
762 bat_dbg(DBG_BATMAN, bat_priv,
763 "Drop packet: OGM via unknown neighbor!\n");
767 is_bidirectional = is_bidirectional_neigh(orig_node, orig_neigh_node,
768 batman_packet, if_incoming);
770 /* update ranking if it is not a duplicate or has the same
771 * seqno and similar ttl as the non-duplicate */
772 if (is_bidirectional &&
774 ((orig_node->last_real_seqno == batman_packet->seqno) &&
775 (orig_node->last_ttl - 3 <= batman_packet->ttl))))
776 update_orig(bat_priv, orig_node, ethhdr, batman_packet,
777 if_incoming, hna_buff, hna_buff_len, is_duplicate);
779 mark_bonding_address(orig_node, orig_neigh_node, batman_packet);
780 update_bonding_candidates(orig_node);
782 /* is single hop (direct) neighbor */
783 if (is_single_hop_neigh) {
785 /* mark direct link on incoming interface */
786 schedule_forward_packet(orig_node, ethhdr, batman_packet,
787 1, hna_buff_len, if_incoming);
789 bat_dbg(DBG_BATMAN, bat_priv, "Forwarding packet: "
790 "rebroadcast neighbor packet with direct link flag\n");
794 /* multihop originator */
795 if (!is_bidirectional) {
796 bat_dbg(DBG_BATMAN, bat_priv,
797 "Drop packet: not received via bidirectional link\n");
802 bat_dbg(DBG_BATMAN, bat_priv,
803 "Drop packet: duplicate packet received\n");
/* forward the OGM without the direct-link flag */
807 bat_dbg(DBG_BATMAN, bat_priv,
808 "Forwarding packet: rebroadcast originator packet\n");
809 schedule_forward_packet(orig_node, ethhdr, batman_packet,
810 0, hna_buff_len, if_incoming);
/* Entry point for received batman (OGM) frames: validate the frame,
 * make the skb writable and linear, then hand it to the aggregation
 * layer under orig_hash_lock. Returns NET_RX_SUCCESS on the visible
 * success path. */
813 int recv_bat_packet(struct sk_buff *skb, struct batman_if *batman_if)
815 struct bat_priv *bat_priv = netdev_priv(batman_if->soft_iface);
816 struct ethhdr *ethhdr;
818 /* drop packet if it has not necessary minimum size */
819 if (unlikely(!pskb_may_pull(skb, sizeof(struct batman_packet))))
822 ethhdr = (struct ethhdr *)skb_mac_header(skb);
824 /* packet with broadcast indication but unicast recipient */
825 if (!is_broadcast_ether_addr(ethhdr->h_dest))
828 /* packet with broadcast sender address */
829 if (is_broadcast_ether_addr(ethhdr->h_source))
832 /* create a copy of the skb, if needed, to modify it. */
833 if (skb_cow(skb, 0) < 0)
836 /* keep skb linear */
837 if (skb_linearize(skb) < 0)
/* re-read the header: skb_cow/skb_linearize may have moved the data */
840 ethhdr = (struct ethhdr *)skb_mac_header(skb);
842 spin_lock_bh(&bat_priv->orig_hash_lock);
843 receive_aggr_bat_packet(ethhdr,
847 spin_unlock_bh(&bat_priv->orig_hash_lock);
850 return NET_RX_SUCCESS;
/* Handle a batman ICMP packet addressed to this node: deliver replies to
 * the local icmp socket, or answer an ECHO_REQUEST by swapping dst/orig,
 * resetting the TTL and sending the skb back towards the originator. */
853 static int recv_my_icmp_packet(struct bat_priv *bat_priv,
854 struct sk_buff *skb, size_t icmp_len)
856 struct orig_node *orig_node;
857 struct icmp_packet_rr *icmp_packet;
858 struct batman_if *batman_if;
860 uint8_t dstaddr[ETH_ALEN];
862 icmp_packet = (struct icmp_packet_rr *)skb->data;
864 /* add data to device queue */
865 if (icmp_packet->msg_type != ECHO_REQUEST) {
866 bat_socket_receive_packet(icmp_packet, icmp_len);
/* cannot answer without a primary interface to use as source */
870 if (!bat_priv->primary_if)
873 /* answer echo request (ping) */
874 /* get routing information */
875 spin_lock_bh(&bat_priv->orig_hash_lock);
876 orig_node = ((struct orig_node *)hash_find(bat_priv->orig_hash,
877 compare_orig, choose_orig,
881 if ((orig_node) && (orig_node->router)) {
883 /* don't lock while sending the packets ... we therefore
884 * copy the required data before sending */
885 batman_if = orig_node->router->if_incoming;
886 memcpy(dstaddr, orig_node->router->addr, ETH_ALEN);
887 spin_unlock_bh(&bat_priv->orig_hash_lock);
889 /* create a copy of the skb, if needed, to modify it. */
890 if (skb_cow(skb, sizeof(struct ethhdr)) < 0)
/* skb_cow may have reallocated: re-read the packet pointer */
893 icmp_packet = (struct icmp_packet_rr *)skb->data;
/* turn the request into a reply: reply goes back to the sender,
 * with our primary MAC as the new originator */
895 memcpy(icmp_packet->dst, icmp_packet->orig, ETH_ALEN);
896 memcpy(icmp_packet->orig,
897 bat_priv->primary_if->net_dev->dev_addr, ETH_ALEN);
898 icmp_packet->msg_type = ECHO_REPLY;
899 icmp_packet->ttl = TTL;
901 send_skb_packet(skb, batman_if, dstaddr);
902 ret = NET_RX_SUCCESS;
/* no-route path (branch header not visible): just unlock */
905 spin_unlock_bh(&bat_priv->orig_hash_lock);
/* TTL of a forwarded batman ICMP packet ran out: if it was an echo
 * request (traceroute), send a TTL_EXCEEDED message back to the sender;
 * otherwise only log. Mirrors recv_my_icmp_packet()'s copy-then-send
 * locking pattern. */
910 static int recv_icmp_ttl_exceeded(struct bat_priv *bat_priv,
913 struct orig_node *orig_node;
914 struct icmp_packet *icmp_packet;
915 struct batman_if *batman_if;
917 uint8_t dstaddr[ETH_ALEN];
919 icmp_packet = (struct icmp_packet *)skb->data;
921 /* send TTL exceeded if packet is an echo request (traceroute) */
922 if (icmp_packet->msg_type != ECHO_REQUEST) {
923 pr_debug("Warning - can't forward icmp packet from %pM to "
924 "%pM: ttl exceeded\n", icmp_packet->orig,
/* cannot answer without a primary interface as source address */
929 if (!bat_priv->primary_if)
932 /* get routing information */
933 spin_lock_bh(&bat_priv->orig_hash_lock);
934 orig_node = ((struct orig_node *)
935 hash_find(bat_priv->orig_hash, compare_orig, choose_orig,
939 if ((orig_node) && (orig_node->router)) {
941 /* don't lock while sending the packets ... we therefore
942 * copy the required data before sending */
943 batman_if = orig_node->router->if_incoming;
944 memcpy(dstaddr, orig_node->router->addr, ETH_ALEN);
945 spin_unlock_bh(&bat_priv->orig_hash_lock);
947 /* create a copy of the skb, if needed, to modify it. */
948 if (skb_cow(skb, sizeof(struct ethhdr)) < 0)
/* re-read after skb_cow: the data may have been reallocated */
951 icmp_packet = (struct icmp_packet *) skb->data;
/* bounce the packet back to its originator as TTL_EXCEEDED */
953 memcpy(icmp_packet->dst, icmp_packet->orig, ETH_ALEN);
954 memcpy(icmp_packet->orig,
955 bat_priv->primary_if->net_dev->dev_addr, ETH_ALEN);
956 icmp_packet->msg_type = TTL_EXCEEDED;
957 icmp_packet->ttl = TTL;
959 send_skb_packet(skb, batman_if, dstaddr);
960 ret = NET_RX_SUCCESS;
963 spin_unlock_bh(&bat_priv->orig_hash_lock);
/* Entry point for received batman ICMP frames: validate the frame,
 * append record-route data, dispatch packets addressed to us or with an
 * exhausted TTL, and otherwise forward towards the destination via the
 * routing table. */
969 int recv_icmp_packet(struct sk_buff *skb, struct batman_if *recv_if)
971 struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
972 struct icmp_packet_rr *icmp_packet;
973 struct ethhdr *ethhdr;
974 struct orig_node *orig_node;
975 struct batman_if *batman_if;
976 int hdr_size = sizeof(struct icmp_packet);
978 uint8_t dstaddr[ETH_ALEN];
981 * we truncate all incoming icmp packets if they don't match our size
/* use the larger record-route header only when the frame carries it */
983 if (skb->len >= sizeof(struct icmp_packet_rr))
984 hdr_size = sizeof(struct icmp_packet_rr);
986 /* drop packet if it has not necessary minimum size */
987 if (unlikely(!pskb_may_pull(skb, hdr_size)))
990 ethhdr = (struct ethhdr *)skb_mac_header(skb);
992 /* packet with unicast indication but broadcast recipient */
993 if (is_broadcast_ether_addr(ethhdr->h_dest))
996 /* packet with broadcast sender address */
997 if (is_broadcast_ether_addr(ethhdr->h_source))
/* not for one of our MACs: drop (label not visible in this view) */
1001 if (!is_my_mac(ethhdr->h_dest))
1004 icmp_packet = (struct icmp_packet_rr *)skb->data;
1006 /* add record route information if not full */
1007 if ((hdr_size == sizeof(struct icmp_packet_rr)) &&
1008 (icmp_packet->rr_cur < BAT_RR_LEN)) {
1009 memcpy(&(icmp_packet->rr[icmp_packet->rr_cur]),
1010 ethhdr->h_dest, ETH_ALEN);
1011 icmp_packet->rr_cur++;
/* final destination is this node: handle locally */
1015 if (is_my_mac(icmp_packet->dst))
1016 return recv_my_icmp_packet(bat_priv, skb, hdr_size);
/* TTL would expire on the next hop: answer with TTL_EXCEEDED */
1019 if (icmp_packet->ttl < 2)
1020 return recv_icmp_ttl_exceeded(bat_priv, skb);
1024 /* get routing information */
1025 spin_lock_bh(&bat_priv->orig_hash_lock);
1026 orig_node = ((struct orig_node *)
1027 hash_find(bat_priv->orig_hash, compare_orig, choose_orig,
1030 if ((orig_node) && (orig_node->router)) {
1032 /* don't lock while sending the packets ... we therefore
1033 * copy the required data before sending */
1034 batman_if = orig_node->router->if_incoming;
1035 memcpy(dstaddr, orig_node->router->addr, ETH_ALEN);
1036 spin_unlock_bh(&bat_priv->orig_hash_lock);
1038 /* create a copy of the skb, if needed, to modify it. */
1039 if (skb_cow(skb, sizeof(struct ethhdr)) < 0)
1042 icmp_packet = (struct icmp_packet_rr *)skb->data;
/* forward towards the destination (TTL decrement not visible here) */
1048 send_skb_packet(skb, batman_if, dstaddr);
1049 ret = NET_RX_SUCCESS;
1052 spin_unlock_bh(&bat_priv->orig_hash_lock);
1057 /* find a suitable router for this originator, and use
1058 * bonding if possible. */
/* recv_if is NULL on the first node that injects the packet; callers
 * presumably hold orig_hash_lock while the returned pointer is in use --
 * confirm against callers (route_unicast_packet takes the lock). */
1059 struct neigh_node *find_router(struct bat_priv *bat_priv,
1060 struct orig_node *orig_node,
1061 struct batman_if *recv_if)
1063 struct orig_node *primary_orig_node;
1064 struct orig_node *router_orig;
1065 struct neigh_node *router, *first_candidate, *best_router;
1066 static uint8_t zero_mac[ETH_ALEN] = {0, 0, 0, 0, 0, 0};
1067 int bonding_enabled;
/* no known route: nothing to return */
1072 if (!orig_node->router)
1075 /* without bonding, the first node should
1076 * always choose the default router. */
1078 bonding_enabled = atomic_read(&bat_priv->bonding);
1080 if ((!recv_if) && (!bonding_enabled))
1081 return orig_node->router;
1083 router_orig = orig_node->router->orig_node;
1085 /* if we have something in the primary_addr, we can search
1086 * for a potential bonding candidate. */
1087 if (memcmp(router_orig->primary_addr, zero_mac, ETH_ALEN) == 0)
1088 return orig_node->router;
1090 /* find the orig_node which has the primary interface. might
1091 * even be the same as our router_orig in many cases */
1093 if (memcmp(router_orig->primary_addr,
1094 router_orig->orig, ETH_ALEN) == 0) {
1095 primary_orig_node = router_orig;
1097 primary_orig_node = hash_find(bat_priv->orig_hash, compare_orig,
1099 router_orig->primary_addr);
1101 if (!primary_orig_node)
1102 return orig_node->router;
1105 /* with less than 2 candidates, we can't do any
1106 * bonding and prefer the original router. */
1108 if (primary_orig_node->bond.candidates < 2)
1109 return orig_node->router;
1112 /* all nodes between should choose a candidate which
1113 * is is not on the interface where the packet came
1115 first_candidate = primary_orig_node->bond.selected;
1116 router = first_candidate;
1118 if (bonding_enabled) {
1119 /* in the bonding case, send the packets in a round
1120 * robin fashion over the remaining interfaces. */
1122 /* recv_if == NULL on the first node. */
1123 if (router->if_incoming != recv_if)
/* advance around the candidate ring until a usable one is found
 * or we are back at the start */
1126 router = router->next_bond_candidate;
1127 } while (router != first_candidate);
/* remember the next candidate for round robin */
1129 primary_orig_node->bond.selected = router->next_bond_candidate;
1132 /* if bonding is disabled, use the best of the
1133 * remaining candidates which are not using
1134 * this interface. */
1135 best_router = first_candidate;
1138 /* recv_if == NULL on the first node. */
1139 if ((router->if_incoming != recv_if) &&
1140 (router->tq_avg > best_router->tq_avg))
1141 best_router = router;
1143 router = router->next_bond_candidate;
1144 } while (router != first_candidate);
1146 router = best_router;
/* Common sanity checks for received unicast frames: minimum size,
 * sane ethernet addressing, and that the frame is addressed to one of
 * our MACs. Return values (not visible here) signal accept vs. drop
 * to the callers, which treat < 0 as drop. */
1152 static int check_unicast_packet(struct sk_buff *skb, int hdr_size)
1154 struct ethhdr *ethhdr;
1156 /* drop packet if it has not necessary minimum size */
1157 if (unlikely(!pskb_may_pull(skb, hdr_size)))
1160 ethhdr = (struct ethhdr *)skb_mac_header(skb);
1162 /* packet with unicast indication but broadcast recipient */
1163 if (is_broadcast_ether_addr(ethhdr->h_dest))
1166 /* packet with broadcast sender address */
1167 if (is_broadcast_ether_addr(ethhdr->h_source))
/* not destined to one of our interfaces */
1171 if (!is_my_mac(ethhdr->h_dest))
/* Forward a unicast packet one hop towards its destination: pick the
 * next hop via find_router() (bonding aware), fragment it if it exceeds
 * the outgoing MTU, reassemble incoming fragments when possible, then
 * decrement the TTL and transmit. */
1177 int route_unicast_packet(struct sk_buff *skb, struct batman_if *recv_if,
1180 struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
1181 struct orig_node *orig_node;
1182 struct neigh_node *router;
1183 struct batman_if *batman_if;
1184 uint8_t dstaddr[ETH_ALEN];
1185 struct unicast_packet *unicast_packet;
1186 struct ethhdr *ethhdr = (struct ethhdr *)skb_mac_header(skb);
1188 struct sk_buff *new_skb;
1190 unicast_packet = (struct unicast_packet *)skb->data;
/* TTL would reach zero on this hop: drop and log */
1193 if (unicast_packet->ttl < 2) {
1194 pr_debug("Warning - can't forward unicast packet from %pM to "
1195 "%pM: ttl exceeded\n", ethhdr->h_source,
1196 unicast_packet->dest);
1200 /* get routing information */
1201 spin_lock_bh(&bat_priv->orig_hash_lock);
1202 orig_node = ((struct orig_node *)
1203 hash_find(bat_priv->orig_hash, compare_orig, choose_orig,
1204 unicast_packet->dest));
1206 router = find_router(bat_priv, orig_node, recv_if);
/* no route to destination: unlock and bail out */
1209 spin_unlock_bh(&bat_priv->orig_hash_lock);
1213 /* don't lock while sending the packets ... we therefore
1214 * copy the required data before sending */
1216 batman_if = router->if_incoming;
1217 memcpy(dstaddr, router->addr, ETH_ALEN);
1219 spin_unlock_bh(&bat_priv->orig_hash_lock);
1221 /* create a copy of the skb, if needed, to modify it. */
1222 if (skb_cow(skb, sizeof(struct ethhdr)) < 0)
/* re-read after skb_cow: the data may have been reallocated */
1225 unicast_packet = (struct unicast_packet *)skb->data;
/* packet too big for the outgoing link: hand off to fragmentation */
1227 if (unicast_packet->packet_type == BAT_UNICAST &&
1228 atomic_read(&bat_priv->fragmentation) &&
1229 skb->len > batman_if->net_dev->mtu)
1230 return frag_send_skb(skb, bat_priv, batman_if,
/* fragment that fits the outgoing MTU once merged: reassemble here */
1233 if (unicast_packet->packet_type == BAT_UNICAST_FRAG &&
1234 frag_can_reassemble(skb, batman_if->net_dev->mtu)) {
1236 ret = frag_reassemble_skb(skb, bat_priv, &new_skb);
1238 if (ret == NET_RX_DROP)
1241 /* packet was buffered for late merge */
1243 return NET_RX_SUCCESS;
1246 unicast_packet = (struct unicast_packet *)skb->data;
1250 unicast_packet->ttl--;
/* transmit towards the next hop chosen above */
1253 send_skb_packet(skb, batman_if, dstaddr);
1255 return NET_RX_SUCCESS;
/**
 * recv_unicast_packet - receive handler for BAT_UNICAST packets.
 * @skb: the received packet
 * @recv_if: the interface the packet arrived on
 *
 * Sanity-checks the packet, delivers it to the local soft interface when
 * this node is the destination, otherwise forwards it via
 * route_unicast_packet().
 *
 * NOTE(review): the drop-return after check_unicast_packet() is elided by
 * extraction.
 */
1258 int recv_unicast_packet(struct sk_buff *skb, struct batman_if *recv_if)
1260 struct unicast_packet *unicast_packet;
1261 int hdr_size = sizeof(struct unicast_packet);
/* Reject malformed packets (too short / bad addresses). */
1263 if (check_unicast_packet(skb, hdr_size) < 0)
1266 unicast_packet = (struct unicast_packet *)skb->data;
/* packet for me: strip the batman header and hand up the stack */
1269 if (is_my_mac(unicast_packet->dest)) {
1270 interface_rx(recv_if->soft_iface, skb, recv_if, hdr_size);
1271 return NET_RX_SUCCESS;
/* not for me: forward towards the destination */
1274 return route_unicast_packet(skb, recv_if, hdr_size);
/**
 * recv_ucast_frag_packet - receive handler for BAT_UNICAST_FRAG packets.
 * @skb: the received fragment
 * @recv_if: the interface the fragment arrived on
 *
 * If this node is the destination, attempts to reassemble the fragment:
 * a NULL new_skb from frag_reassemble_skb() means the fragment was buffered
 * awaiting its counterpart; a non-NULL new_skb is the merged packet and is
 * delivered locally.  Fragments for other nodes are forwarded unchanged.
 *
 * NOTE(review): several drop-returns and an `int ret;` declaration are
 * elided from this view by extraction.
 */
1277 int recv_ucast_frag_packet(struct sk_buff *skb, struct batman_if *recv_if)
1279 struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
1280 struct unicast_frag_packet *unicast_packet;
1281 int hdr_size = sizeof(struct unicast_frag_packet);
1282 struct sk_buff *new_skb = NULL;
/* Reject malformed fragments before looking at the header. */
1285 if (check_unicast_packet(skb, hdr_size) < 0)
1288 unicast_packet = (struct unicast_frag_packet *)skb->data;
/* fragment addressed to this node: try to merge it */
1291 if (is_my_mac(unicast_packet->dest)) {
1293 ret = frag_reassemble_skb(skb, bat_priv, &new_skb);
1295 if (ret == NET_RX_DROP)
1298 /* packet was buffered for late merge */
1300 return NET_RX_SUCCESS;
/* merged packet carries a plain unicast header — deliver it with
 * the unicast (not frag) header size */
1302 interface_rx(recv_if->soft_iface, new_skb, recv_if,
1303 sizeof(struct unicast_packet));
1304 return NET_RX_SUCCESS;
/* not for me: forward the fragment as-is */
1307 return route_unicast_packet(skb, recv_if, hdr_size);
/**
 * recv_bcast_packet - receive handler for BAT_BCAST (flooded) packets.
 * @skb: the received broadcast packet
 * @recv_if: the interface the packet arrived on
 *
 * Validates the frame, drops echoes of our own broadcasts, then — under
 * orig_hash_lock — uses the per-originator sequence-number window to filter
 * duplicates and protect against host restarts.  Accepted packets are
 * queued for rebroadcast and also delivered to the local soft interface.
 *
 * NOTE(review): the drop-returns, an `int32_t seq_diff;` declaration and
 * the end of one comment are elided from this view by extraction.
 */
1311 int recv_bcast_packet(struct sk_buff *skb, struct batman_if *recv_if)
1313 struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
1314 struct orig_node *orig_node;
1315 struct bcast_packet *bcast_packet;
1316 struct ethhdr *ethhdr;
1317 int hdr_size = sizeof(struct bcast_packet);
1320 /* drop packet if it has not necessary minimum size */
1321 if (unlikely(!pskb_may_pull(skb, hdr_size)))
1324 ethhdr = (struct ethhdr *)skb_mac_header(skb);
1326 /* packet with broadcast indication but unicast recipient */
1327 if (!is_broadcast_ether_addr(ethhdr->h_dest))
1330 /* packet with broadcast sender address */
1331 if (is_broadcast_ether_addr(ethhdr->h_source))
1334 /* ignore broadcasts sent by myself */
1335 if (is_my_mac(ethhdr->h_source))
1338 bcast_packet = (struct bcast_packet *)skb->data;
1340 /* ignore broadcasts originated by myself */
1341 if (is_my_mac(bcast_packet->orig))
/* no hops left: do not rebroadcast */
1344 if (bcast_packet->ttl < 2)
/* Sequence-window state lives in the orig_node; everything below that
 * touches it runs under orig_hash_lock. */
1347 spin_lock_bh(&bat_priv->orig_hash_lock);
1348 orig_node = ((struct orig_node *)
1349 hash_find(bat_priv->orig_hash, compare_orig, choose_orig,
1350 bcast_packet->orig));
/* presumably the unknown-originator error path — the surrounding branch
 * is elided; verify against the full source */
1353 spin_unlock_bh(&bat_priv->orig_hash_lock);
1357 /* check whether the packet is a duplicate */
1358 if (get_bit_status(orig_node->bcast_bits,
1359 orig_node->last_bcast_seqno,
1360 ntohl(bcast_packet->seqno))) {
1361 spin_unlock_bh(&bat_priv->orig_hash_lock);
1365 seq_diff = ntohl(bcast_packet->seqno) - orig_node->last_bcast_seqno;
1367 /* check whether the packet is old and the host just restarted. */
1368 if (window_protected(bat_priv, seq_diff,
1369 &orig_node->bcast_seqno_reset)) {
1370 spin_unlock_bh(&bat_priv->orig_hash_lock);
1374 /* mark broadcast in flood history, update window position
/* bit_get_packet() returns non-zero when the window moved, in which case
 * the newest seen seqno is recorded */
1376 if (bit_get_packet(bat_priv, orig_node->bcast_bits, seq_diff, 1))
1377 orig_node->last_bcast_seqno = ntohl(bcast_packet->seqno);
1379 spin_unlock_bh(&bat_priv->orig_hash_lock);
1380 /* rebroadcast packet */
1381 add_bcast_packet_to_list(bat_priv, skb);
1383 /* broadcast for me */
1384 interface_rx(recv_if->soft_iface, skb, recv_if, hdr_size);
1386 return NET_RX_SUCCESS;
1389 int recv_vis_packet(struct sk_buff *skb, struct batman_if *recv_if)
1391 struct vis_packet *vis_packet;
1392 struct ethhdr *ethhdr;
1393 struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
1394 int hdr_size = sizeof(struct vis_packet);
1396 /* keep skb linear */
1397 if (skb_linearize(skb) < 0)
1400 if (unlikely(!pskb_may_pull(skb, hdr_size)))
1403 vis_packet = (struct vis_packet *)skb->data;
1404 ethhdr = (struct ethhdr *)skb_mac_header(skb);
1407 if (!is_my_mac(ethhdr->h_dest))
1410 /* ignore own packets */
1411 if (is_my_mac(vis_packet->vis_orig))
1414 if (is_my_mac(vis_packet->sender_orig))
1417 switch (vis_packet->vis_type) {
1418 case VIS_TYPE_SERVER_SYNC:
1419 receive_server_sync_packet(bat_priv, vis_packet,
1423 case VIS_TYPE_CLIENT_UPDATE:
1424 receive_client_update_packet(bat_priv, vis_packet,
1428 default: /* ignore unknown packet */
1432 /* We take a copy of the data in the packet, so we should
1433 always free the skbuf. */