/*
 * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
 *
 * Marek Lindner, Simon Wunderlich
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 *
 */

#include "main.h"
#include "send.h"
#include "routing.h"
#include "translation-table.h"
#include "soft-interface.h"
#include "hard-interface.h"
#include "vis.h"
#include "aggregation.h"
#include "gateway_common.h"
#include "originator.h"

static void send_outstanding_bcast_packet(struct work_struct *work);

/* apply hop penalty for a normal link */
static uint8_t hop_penalty(const uint8_t tq, struct bat_priv *bat_priv)
{
	int hop_penalty = atomic_read(&bat_priv->hop_penalty);

	return (tq * (TQ_MAX_VALUE - hop_penalty)) / (TQ_MAX_VALUE);
}
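
/*
 * Example (assuming TQ_MAX_VALUE = 255 and the default hop penalty of
 * 10): an incoming tq of 200 is scaled to (200 * 245) / 255 = 192, so
 * the advertised link quality degrades slightly with every hop.
 */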

/* when do we schedule our own packet to be sent */
static unsigned long own_send_time(struct bat_priv *bat_priv)
{
	return jiffies + msecs_to_jiffies(
		   atomic_read(&bat_priv->orig_interval) -
		   JITTER + (random32() % (2 * JITTER)));
}
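
/*
 * Randomising the send time over [orig_interval - JITTER,
 * orig_interval + JITTER) milliseconds keeps neighbouring nodes from
 * synchronising their OGM broadcasts. Assuming the defaults of
 * orig_interval = 1000 ms and JITTER = 20 ms, the next own OGM is
 * scheduled 980 to 1019 ms from now.
 */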

/* when do we schedule a forwarded packet to be sent */
static unsigned long forward_send_time(void)
{
	return jiffies + msecs_to_jiffies(random32() % (JITTER/2));
}
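
/*
 * Forwarded OGMs only get a small random delay of 0 to JITTER/2 - 1
 * milliseconds, so that rebroadcasts of the same OGM by different
 * nodes are less likely to collide on air.
 */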

/* send out an already prepared packet to the given address via the
 * specified batman interface */
int send_skb_packet(struct sk_buff *skb,
		    struct batman_if *batman_if,
		    uint8_t *dst_addr)
{
	struct ethhdr *ethhdr;

	if (batman_if->if_status != IF_ACTIVE)
		goto send_skb_err;

	if (unlikely(!batman_if->net_dev))
		goto send_skb_err;

	if (!(batman_if->net_dev->flags & IFF_UP)) {
		pr_warning("Interface %s is not up - can't send packet via "
			   "that interface!\n", batman_if->net_dev->name);
		goto send_skb_err;
	}

	/* push to the ethernet header */
	if (my_skb_head_push(skb, sizeof(struct ethhdr)) < 0)
		goto send_skb_err;

	skb_reset_mac_header(skb);

	ethhdr = (struct ethhdr *)skb_mac_header(skb);
	memcpy(ethhdr->h_source, batman_if->net_dev->dev_addr, ETH_ALEN);
	memcpy(ethhdr->h_dest, dst_addr, ETH_ALEN);
	ethhdr->h_proto = __constant_htons(ETH_P_BATMAN);

	skb_set_network_header(skb, ETH_HLEN);
	skb->priority = TC_PRIO_CONTROL;
	skb->protocol = __constant_htons(ETH_P_BATMAN);

	skb->dev = batman_if->net_dev;

	/* dev_queue_xmit() returns a negative result on error. However on
	 * congestion and traffic shaping, it drops and returns NET_XMIT_DROP
	 * (which is > 0). This will not be treated as an error. */
	return dev_queue_xmit(skb);

send_skb_err:
	kfree_skb(skb);
	return NET_XMIT_DROP;
}
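
/*
 * Note that send_skb_packet() consumes the skb on every path:
 * dev_queue_xmit() takes ownership of it, and the error path frees it
 * before returning NET_XMIT_DROP. Callers must not touch the skb again.
 */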

/* Send a packet to a given interface */
static void send_packet_to_if(struct forw_packet *forw_packet,
			      struct batman_if *batman_if)
{
	struct bat_priv *bat_priv = netdev_priv(batman_if->soft_iface);
	char *fwd_str;
	uint8_t packet_num = 0;
	int16_t buff_pos = 0;
	struct batman_packet *batman_packet;
	struct sk_buff *skb;

	if (batman_if->if_status != IF_ACTIVE)
		return;

	batman_packet = (struct batman_packet *)forw_packet->skb->data;
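
	/*
	 * An aggregated OGM buffer is a sequence of batman_packet
	 * headers, each immediately followed by the num_hna HNA
	 * addresses (num_hna * ETH_ALEN bytes) it announces; buff_pos
	 * and packet_num track the position while walking it.
	 */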
	/* adjust all flags and log packets */
	while (aggregated_packet(buff_pos,
				 forw_packet->packet_len,
				 batman_packet->num_hna)) {

		/* we might have aggregated direct link packets with an
		 * ordinary base packet */
		if ((forw_packet->direct_link_flags & (1 << packet_num)) &&
		    (forw_packet->if_incoming == batman_if))
			batman_packet->flags |= DIRECTLINK;
		else
			batman_packet->flags &= ~DIRECTLINK;

		fwd_str = (packet_num > 0 ? "Forwarding" : (forw_packet->own ?
							    "Sending own" :
							    "Forwarding"));
		bat_dbg(DBG_BATMAN, bat_priv,
			"%s %spacket (originator %pM, seqno %d, TQ %d, TTL %d,"
			" IDF %s) on interface %s [%pM]\n",
			fwd_str, (packet_num > 0 ? "aggregated " : ""),
			batman_packet->orig, ntohl(batman_packet->seqno),
			batman_packet->tq, batman_packet->ttl,
			(batman_packet->flags & DIRECTLINK ?
			 "on" : "off"),
			batman_if->net_dev->name, batman_if->net_dev->dev_addr);

		buff_pos += sizeof(struct batman_packet) +
			(batman_packet->num_hna * ETH_ALEN);
		packet_num++;
		batman_packet = (struct batman_packet *)
			(forw_packet->skb->data + buff_pos);
	}

	/* create clone because function is called more than once */
	skb = skb_clone(forw_packet->skb, GFP_ATOMIC);
	if (skb)
		send_skb_packet(skb, batman_if, broadcast_addr);
}

/* send a batman packet */
static void send_packet(struct forw_packet *forw_packet)
{
	struct batman_if *batman_if;
	struct net_device *soft_iface;
	struct bat_priv *bat_priv;
	struct batman_packet *batman_packet =
		(struct batman_packet *)(forw_packet->skb->data);
	unsigned char directlink = (batman_packet->flags & DIRECTLINK ? 1 : 0);

	if (!forw_packet->if_incoming) {
		pr_err("Error - can't forward packet: incoming iface not "
		       "specified\n");
		return;
	}

	soft_iface = forw_packet->if_incoming->soft_iface;
	bat_priv = netdev_priv(soft_iface);

	if (forw_packet->if_incoming->if_status != IF_ACTIVE)
		return;

	/* multihomed peer assumed */
	/* non-primary OGMs are only broadcasted on their interface */
	if ((directlink && (batman_packet->ttl == 1)) ||
	    (forw_packet->own && (forw_packet->if_incoming->if_num > 0))) {

		/* FIXME: what about aggregated packets ? */
		bat_dbg(DBG_BATMAN, bat_priv,
			"%s packet (originator %pM, seqno %d, TTL %d) "
			"on interface %s [%pM]\n",
			(forw_packet->own ? "Sending own" : "Forwarding"),
			batman_packet->orig, ntohl(batman_packet->seqno),
			batman_packet->ttl,
			forw_packet->if_incoming->net_dev->name,
			forw_packet->if_incoming->net_dev->dev_addr);

		/* skb is only used once and then forw_packet is freed */
		send_skb_packet(forw_packet->skb, forw_packet->if_incoming,
				broadcast_addr);
		forw_packet->skb = NULL;

		return;
	}

	/* broadcast on every interface */
	rcu_read_lock();
	list_for_each_entry_rcu(batman_if, &hardif_list, list) {
		if (batman_if->soft_iface != soft_iface)
			continue;

		send_packet_to_if(forw_packet, batman_if);
	}
	rcu_read_unlock();
}

static void rebuild_batman_packet(struct bat_priv *bat_priv,
				  struct batman_if *batman_if)
{
	int new_len;
	unsigned char *new_buff;
	struct batman_packet *batman_packet;

	new_len = sizeof(struct batman_packet) +
		  (bat_priv->num_local_hna * ETH_ALEN);
	new_buff = kmalloc(new_len, GFP_ATOMIC);

	/* keep old buffer if kmalloc should fail */
	if (new_buff) {
		memcpy(new_buff, batman_if->packet_buff,
		       sizeof(struct batman_packet));
		batman_packet = (struct batman_packet *)new_buff;

		batman_packet->num_hna = hna_local_fill_buffer(bat_priv,
				new_buff + sizeof(struct batman_packet),
				new_len - sizeof(struct batman_packet));

		kfree(batman_if->packet_buff);
		batman_if->packet_buff = new_buff;
		batman_if->packet_len = new_len;
	}
}

void schedule_own_packet(struct batman_if *batman_if)
{
	struct bat_priv *bat_priv = netdev_priv(batman_if->soft_iface);
	unsigned long send_time;
	struct batman_packet *batman_packet;
	int vis_server;

	if ((batman_if->if_status == IF_NOT_IN_USE) ||
	    (batman_if->if_status == IF_TO_BE_REMOVED))
		return;

	vis_server = atomic_read(&bat_priv->vis_mode);

	/**
	 * the interface gets activated here to avoid race conditions between
	 * the moment of activating the interface in
	 * hardif_activate_interface() where the originator mac is set and
	 * outdated packets (especially uninitialized mac addresses) in the
	 * packet queue
	 */
	if (batman_if->if_status == IF_TO_BE_ACTIVATED)
		batman_if->if_status = IF_ACTIVE;

	/* if local hna has changed and interface is a primary interface */
	if ((atomic_read(&bat_priv->hna_local_changed)) &&
	    (batman_if == bat_priv->primary_if))
		rebuild_batman_packet(bat_priv, batman_if);

	/**
	 * NOTE: packet_buff might just have been re-allocated in
	 * rebuild_batman_packet()
	 */
	batman_packet = (struct batman_packet *)batman_if->packet_buff;

	/* change sequence number to network order */
	batman_packet->seqno =
		htonl((uint32_t)atomic_read(&batman_if->seqno));

	if (vis_server == VIS_TYPE_SERVER_SYNC)
		batman_packet->flags |= VIS_SERVER;
	else
		batman_packet->flags &= ~VIS_SERVER;

	if ((batman_if == bat_priv->primary_if) &&
	    (atomic_read(&bat_priv->gw_mode) == GW_MODE_SERVER))
		batman_packet->gw_flags =
			(uint8_t)atomic_read(&bat_priv->gw_bandwidth);
	else
		batman_packet->gw_flags = 0;

	atomic_inc(&batman_if->seqno);

	slide_own_bcast_window(batman_if);
	send_time = own_send_time(bat_priv);
	add_bat_packet_to_list(bat_priv,
			       batman_if->packet_buff,
			       batman_if->packet_len,
			       batman_if, 1, send_time);
}
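
/*
 * The '1' passed to add_bat_packet_to_list() marks the queued OGM as
 * our own; once it is transmitted, send_outstanding_bat_packet() calls
 * schedule_own_packet() again, which is what keeps the periodic OGM
 * broadcast going.
 */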

void schedule_forward_packet(struct orig_node *orig_node,
			     struct ethhdr *ethhdr,
			     struct batman_packet *batman_packet,
			     uint8_t directlink, int hna_buff_len,
			     struct batman_if *if_incoming)
{
	struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
	unsigned char in_tq, in_ttl, tq_avg = 0;
	unsigned long send_time;

	if (batman_packet->ttl <= 1) {
		bat_dbg(DBG_BATMAN, bat_priv, "ttl exceeded\n");
		return;
	}

	in_tq = batman_packet->tq;
	in_ttl = batman_packet->ttl;

	batman_packet->ttl--;
	memcpy(batman_packet->prev_sender, ethhdr->h_source, ETH_ALEN);

	/* rebroadcast tq of our best ranking neighbor to ensure the
	 * rebroadcast of our best tq value */
	if ((orig_node->router) && (orig_node->router->tq_avg != 0)) {

		/* rebroadcast ogm of best ranking neighbor as is */
		if (!compare_eth(orig_node->router->addr, ethhdr->h_source)) {
			batman_packet->tq = orig_node->router->tq_avg;

			if (orig_node->router->last_ttl)
				batman_packet->ttl =
					orig_node->router->last_ttl - 1;
		}

		tq_avg = orig_node->router->tq_avg;
	}

	/* apply hop penalty */
	batman_packet->tq = hop_penalty(batman_packet->tq, bat_priv);

	bat_dbg(DBG_BATMAN, bat_priv,
		"Forwarding packet: tq_orig: %i, tq_avg: %i, "
		"tq_forw: %i, ttl_orig: %i, ttl_forw: %i\n",
		in_tq, tq_avg, batman_packet->tq, in_ttl - 1,
		batman_packet->ttl);

	batman_packet->seqno = htonl(batman_packet->seqno);

	/* switch off primaries first hop flag when forwarding */
	batman_packet->flags &= ~PRIMARIES_FIRST_HOP;
	if (directlink)
		batman_packet->flags |= DIRECTLINK;
	else
		batman_packet->flags &= ~DIRECTLINK;

	send_time = forward_send_time();
	add_bat_packet_to_list(bat_priv,
			       (unsigned char *)batman_packet,
			       sizeof(struct batman_packet) + hna_buff_len,
			       if_incoming, 0, send_time);
}
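
/*
 * Example of the TQ rewrite above: if our current best router towards
 * the originator advertises tq_avg = 230 and this OGM arrived from some
 * other neighbour, we forward 230 instead of the received tq and then
 * apply the hop penalty, so downstream nodes always hear our best path
 * quality rather than that of an arbitrary rebroadcast.
 */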

static void forw_packet_free(struct forw_packet *forw_packet)
{
	if (forw_packet->skb)
		kfree_skb(forw_packet->skb);
	kfree(forw_packet);
}

static void _add_bcast_packet_to_list(struct bat_priv *bat_priv,
				      struct forw_packet *forw_packet,
				      unsigned long send_time)
{
	INIT_HLIST_NODE(&forw_packet->list);

	/* add new packet to packet list */
	spin_lock_bh(&bat_priv->forw_bcast_list_lock);
	hlist_add_head(&forw_packet->list, &bat_priv->forw_bcast_list);
	spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

	/* start timer for this packet */
	INIT_DELAYED_WORK(&forw_packet->delayed_work,
			  send_outstanding_bcast_packet);
	queue_delayed_work(bat_event_workqueue, &forw_packet->delayed_work,
			   send_time);
}
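
/*
 * Note: send_time above is a delay in jiffies as expected by
 * queue_delayed_work(), not an absolute time. Callers below pass 1
 * jiffy for the first transmission and (5 * HZ) / 1000 (roughly 5 ms)
 * between retransmissions.
 */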

#define atomic_dec_not_zero(v)	atomic_add_unless((v), -1, 0)

/* add a broadcast packet to the queue and set up timers. broadcast packets
 * are sent multiple times to increase the probability of being received.
 *
 * This function returns NETDEV_TX_OK on success and NETDEV_TX_BUSY on
 * errors.
 *
 * The skb is not consumed, so the caller should make sure that the
 * skb is freed. */
int add_bcast_packet_to_list(struct bat_priv *bat_priv, struct sk_buff *skb)
{
	struct forw_packet *forw_packet;
	struct bcast_packet *bcast_packet;

	if (!atomic_dec_not_zero(&bat_priv->bcast_queue_left)) {
		bat_dbg(DBG_BATMAN, bat_priv, "bcast packet queue full\n");
		goto out;
	}

	if (!bat_priv->primary_if)
		goto out_and_inc;

	forw_packet = kmalloc(sizeof(struct forw_packet), GFP_ATOMIC);
	if (!forw_packet)
		goto out_and_inc;

	skb = skb_copy(skb, GFP_ATOMIC);
	if (!skb)
		goto packet_free;

	/* as we have a copy now, it is safe to decrease the TTL */
	bcast_packet = (struct bcast_packet *)skb->data;
	bcast_packet->ttl--;

	skb_reset_mac_header(skb);

	forw_packet->skb = skb;
	forw_packet->if_incoming = bat_priv->primary_if;

	/* how often did we send the bcast packet ? */
	forw_packet->num_packets = 0;

	_add_bcast_packet_to_list(bat_priv, forw_packet, 1);
	return NETDEV_TX_OK;

packet_free:
	kfree(forw_packet);
out_and_inc:
	atomic_inc(&bat_priv->bcast_queue_left);
out:
	return NETDEV_TX_BUSY;
}
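
/*
 * This is presumably invoked from the soft interface's transmit path
 * (interface_tx() in soft-interface.c) whenever a broadcast frame
 * enters the mesh; the copy taken above is what allows the caller to
 * keep ownership of the original skb.
 */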

static void send_outstanding_bcast_packet(struct work_struct *work)
{
	struct batman_if *batman_if;
	struct delayed_work *delayed_work =
		container_of(work, struct delayed_work, work);
	struct forw_packet *forw_packet =
		container_of(delayed_work, struct forw_packet, delayed_work);
	struct sk_buff *skb1;
	struct net_device *soft_iface = forw_packet->if_incoming->soft_iface;
	struct bat_priv *bat_priv = netdev_priv(soft_iface);

	spin_lock_bh(&bat_priv->forw_bcast_list_lock);
	hlist_del(&forw_packet->list);
	spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

	if (atomic_read(&bat_priv->mesh_state) == MESH_DEACTIVATING)
		goto out;

	/* rebroadcast packet */
	rcu_read_lock();
	list_for_each_entry_rcu(batman_if, &hardif_list, list) {
		if (batman_if->soft_iface != soft_iface)
			continue;

		/* send a copy of the saved skb */
		skb1 = skb_clone(forw_packet->skb, GFP_ATOMIC);
		if (skb1)
			send_skb_packet(skb1, batman_if, broadcast_addr);
	}
	rcu_read_unlock();

	forw_packet->num_packets++;

	/* if we still have some more bcasts to send */
	if (forw_packet->num_packets < 3) {
		_add_bcast_packet_to_list(bat_priv, forw_packet,
					  ((5 * HZ) / 1000));
		return;
	}

out:
	forw_packet_free(forw_packet);
	atomic_inc(&bat_priv->bcast_queue_left);
}
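
/*
 * Each broadcast thus goes out three times in total on every active
 * interface, roughly 5 ms apart, trading a little extra airtime for a
 * much better chance that lossy links deliver at least one copy.
 */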

void send_outstanding_bat_packet(struct work_struct *work)
{
	struct delayed_work *delayed_work =
		container_of(work, struct delayed_work, work);
	struct forw_packet *forw_packet =
		container_of(delayed_work, struct forw_packet, delayed_work);
	struct bat_priv *bat_priv;

	bat_priv = netdev_priv(forw_packet->if_incoming->soft_iface);
	spin_lock_bh(&bat_priv->forw_bat_list_lock);
	hlist_del(&forw_packet->list);
	spin_unlock_bh(&bat_priv->forw_bat_list_lock);

	if (atomic_read(&bat_priv->mesh_state) == MESH_DEACTIVATING)
		goto out;

	send_packet(forw_packet);

	/**
	 * we have to have at least one packet in the queue
	 * to determine the queue's wake-up time unless we are
	 * shutting down
	 */
	if (forw_packet->own)
		schedule_own_packet(forw_packet->if_incoming);

out:
	/* don't count own packet */
	if (!forw_packet->own)
		atomic_inc(&bat_priv->batman_queue_left);

	forw_packet_free(forw_packet);
}

void purge_outstanding_packets(struct bat_priv *bat_priv,
			       struct batman_if *batman_if)
{
	struct forw_packet *forw_packet;
	struct hlist_node *tmp_node, *safe_tmp_node;

	if (batman_if)
		bat_dbg(DBG_BATMAN, bat_priv,
			"purge_outstanding_packets(): %s\n",
			batman_if->net_dev->name);
	else
		bat_dbg(DBG_BATMAN, bat_priv,
			"purge_outstanding_packets()\n");

	/* free bcast list */
	spin_lock_bh(&bat_priv->forw_bcast_list_lock);
	hlist_for_each_entry_safe(forw_packet, tmp_node, safe_tmp_node,
				  &bat_priv->forw_bcast_list, list) {

		/**
		 * if purge_outstanding_packets() was called with an argument
		 * we delete only packets belonging to the given interface
		 */
		if ((batman_if) &&
		    (forw_packet->if_incoming != batman_if))
			continue;

		spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

		/**
		 * send_outstanding_bcast_packet() will lock the list to
		 * delete the item from the list
		 */
		cancel_delayed_work_sync(&forw_packet->delayed_work);
		spin_lock_bh(&bat_priv->forw_bcast_list_lock);
	}
	spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

	/* free batman packet list */
	spin_lock_bh(&bat_priv->forw_bat_list_lock);
	hlist_for_each_entry_safe(forw_packet, tmp_node, safe_tmp_node,
				  &bat_priv->forw_bat_list, list) {

		/**
		 * if purge_outstanding_packets() was called with an argument
		 * we delete only packets belonging to the given interface
		 */
		if ((batman_if) &&
		    (forw_packet->if_incoming != batman_if))
			continue;

		spin_unlock_bh(&bat_priv->forw_bat_list_lock);

		/**
		 * send_outstanding_bat_packet() will lock the list to
		 * delete the item from the list
		 */
		cancel_delayed_work_sync(&forw_packet->delayed_work);
		spin_lock_bh(&bat_priv->forw_bat_list_lock);
	}
	spin_unlock_bh(&bat_priv->forw_bat_list_lock);
}
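
/*
 * The unlock/cancel/relock pattern above is deliberate:
 * cancel_delayed_work_sync() may sleep while it waits for a running
 * work item, and that work item itself takes the list lock to remove
 * its entry, so holding the lock across the cancel could deadlock.
 * hlist_for_each_entry_safe() keeps the traversal valid while entries
 * disappear underneath it.
 */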