/*
 * Copyright (c) 2008, 2009 open80211s Ltd.
 * Author:     Luis Carlos Cobo <luisca@cozybit.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/etherdevice.h>
#include <linux/list.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <net/mac80211.h>
#include "ieee80211_i.h"
#include "mesh.h"

/* There will be initially 2^INIT_PATHS_SIZE_ORDER buckets */
#define INIT_PATHS_SIZE_ORDER	2

/* Keep the mean chain length below this constant */
#define MEAN_CHAIN_LEN		2
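
/* A path counts as expired only while it is active, past its expiry time
 * and not fixed to a static next hop; expiry is handled lazily, during
 * lookups.
 */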
#define MPATH_EXPIRED(mpath) ((mpath->flags & MESH_PATH_ACTIVE) && \
				time_after(jiffies, mpath->exp_time) && \
				!(mpath->flags & MESH_PATH_FIXED))

struct mpath_node {
	struct hlist_node list;
	struct rcu_head rcu;
	/* This indirection allows two different tables to point to the same
	 * mesh_path structure, useful when resizing
	 */
	struct mesh_path *mpath;
};

static struct mesh_table *mesh_paths;
static struct mesh_table *mpp_paths; /* Store paths for MPP&MAP */

int mesh_paths_generation;

/* This lock will have the grow table function as writer and add / delete nodes
 * as readers. When reading the table (i.e. doing lookups) we are well
 * protected by RCU.
 */
static DEFINE_RWLOCK(pathtbl_resize_lock);
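
/* Locking hierarchy, outermost first:
 * - lookups run under rcu_read_lock() alone;
 * - adding/deleting nodes takes pathtbl_resize_lock as reader plus the
 *   per-bucket hashwlock;
 * - growing a table takes pathtbl_resize_lock as the sole writer.
 */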
static struct mesh_table *mesh_table_alloc(int size_order)
{
	int i;
	struct mesh_table *newtbl;

	newtbl = kmalloc(sizeof(struct mesh_table), GFP_KERNEL);
	if (!newtbl)
		return NULL;

	newtbl->hash_buckets = kzalloc(sizeof(struct hlist_head) *
			(1 << size_order), GFP_KERNEL);
	if (!newtbl->hash_buckets) {
		kfree(newtbl);
		return NULL;
	}

	newtbl->hashwlock = kmalloc(sizeof(spinlock_t) *
			(1 << size_order), GFP_KERNEL);
	if (!newtbl->hashwlock) {
		kfree(newtbl->hash_buckets);
		kfree(newtbl);
		return NULL;
	}

	newtbl->size_order = size_order;
	newtbl->hash_mask = (1 << size_order) - 1;
	atomic_set(&newtbl->entries, 0);
	get_random_bytes(&newtbl->hash_rnd,
			sizeof(newtbl->hash_rnd));
	for (i = 0; i <= newtbl->hash_mask; i++)
		spin_lock_init(&newtbl->hashwlock[i]);

	return newtbl;
}

static void __mesh_table_free(struct mesh_table *tbl)
{
	kfree(tbl->hash_buckets);
	kfree(tbl->hashwlock);
	kfree(tbl);
}

static void mesh_table_free(struct mesh_table *tbl, bool free_leafs)
{
	struct hlist_head *mesh_hash;
	struct hlist_node *p, *q;
	int i;

	mesh_hash = tbl->hash_buckets;
	for (i = 0; i <= tbl->hash_mask; i++) {
		spin_lock_bh(&tbl->hashwlock[i]);
		hlist_for_each_safe(p, q, &mesh_hash[i]) {
			tbl->free_node(p, free_leafs);
			atomic_dec(&tbl->entries);
		}
		spin_unlock_bh(&tbl->hashwlock[i]);
	}
	__mesh_table_free(tbl);
}
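
/* Move all nodes of oldtbl into newtbl. Refuses with -EAGAIN while the
 * table still averages fewer than mean_chain_len entries per bucket; if
 * copying a node fails, newtbl is emptied again and oldtbl is left
 * untouched.
 */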
static int mesh_table_grow(struct mesh_table *oldtbl,
			   struct mesh_table *newtbl)
{
	struct hlist_head *oldhash;
	struct hlist_node *p, *q;
	int i;

	if (atomic_read(&oldtbl->entries)
			< oldtbl->mean_chain_len * (oldtbl->hash_mask + 1))
		return -EAGAIN;

	newtbl->free_node = oldtbl->free_node;
	newtbl->mean_chain_len = oldtbl->mean_chain_len;
	newtbl->copy_node = oldtbl->copy_node;
	atomic_set(&newtbl->entries, atomic_read(&oldtbl->entries));

	oldhash = oldtbl->hash_buckets;
	for (i = 0; i <= oldtbl->hash_mask; i++)
		hlist_for_each(p, &oldhash[i])
			if (oldtbl->copy_node(p, newtbl) < 0)
				goto errcopy;

	return 0;

errcopy:
	for (i = 0; i <= newtbl->hash_mask; i++) {
		hlist_for_each_safe(p, q, &newtbl->hash_buckets[i])
			oldtbl->free_node(p, 0);
	}
	return -ENOMEM;
}

static u32 mesh_table_hash(u8 *addr, struct ieee80211_sub_if_data *sdata,
			   struct mesh_table *tbl)
{
	/* Use last four bytes of hw addr and interface index as hash index */
	return jhash_2words(*(u32 *)(addr+2), sdata->dev->ifindex, tbl->hash_rnd)
		& tbl->hash_mask;
}

/**
 * mesh_path_assign_nexthop - update mesh path next hop
 *
 * @mpath: mesh path to update
 * @sta: next hop to assign
 *
 * Locking: mpath->state_lock must be held when calling this function
 */
void mesh_path_assign_nexthop(struct mesh_path *mpath, struct sta_info *sta)
{
	struct sk_buff *skb;
	struct ieee80211_hdr *hdr;
	struct sk_buff_head tmpq;
	unsigned long flags;

	rcu_assign_pointer(mpath->next_hop, sta);

	__skb_queue_head_init(&tmpq);
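
	/* Re-address every frame queued while the path was unresolved:
	 * addr1 (the RA) of each queued frame is rewritten to point at the
	 * new next hop.
	 */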
	spin_lock_irqsave(&mpath->frame_queue.lock, flags);

	while ((skb = __skb_dequeue(&mpath->frame_queue)) != NULL) {
		hdr = (struct ieee80211_hdr *) skb->data;
		memcpy(hdr->addr1, sta->sta.addr, ETH_ALEN);
		__skb_queue_tail(&tmpq, skb);
	}

	skb_queue_splice(&tmpq, &mpath->frame_queue);
	spin_unlock_irqrestore(&mpath->frame_queue.lock, flags);
}

/**
 * mesh_path_lookup - look up a path in the mesh path table
 * @dst: hardware address (ETH_ALEN length) of destination
 * @sdata: local subif
 *
 * Returns: pointer to the mesh path structure, or NULL if not found
 *
 * Locking: must be called within a read rcu section.
 */
struct mesh_path *mesh_path_lookup(u8 *dst, struct ieee80211_sub_if_data *sdata)
{
	struct mesh_path *mpath;
	struct hlist_node *n;
	struct hlist_head *bucket;
	struct mesh_table *tbl;
	struct mpath_node *node;

	tbl = rcu_dereference(mesh_paths);

	bucket = &tbl->hash_buckets[mesh_table_hash(dst, sdata, tbl)];
	hlist_for_each_entry_rcu(node, n, bucket, list) {
		mpath = node->mpath;
		if (mpath->sdata == sdata &&
				memcmp(dst, mpath->dst, ETH_ALEN) == 0) {
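			/* Unlocked expiry test first; re-check under the
			 * state lock before clearing MESH_PATH_ACTIVE, as
			 * the path may have been refreshed meanwhile.
			 */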
			if (MPATH_EXPIRED(mpath)) {
				spin_lock_bh(&mpath->state_lock);
				if (MPATH_EXPIRED(mpath))
					mpath->flags &= ~MESH_PATH_ACTIVE;
				spin_unlock_bh(&mpath->state_lock);
			}
			return mpath;
		}
	}
	return NULL;
}

struct mesh_path *mpp_path_lookup(u8 *dst, struct ieee80211_sub_if_data *sdata)
{
	struct mesh_path *mpath;
	struct hlist_node *n;
	struct hlist_head *bucket;
	struct mesh_table *tbl;
	struct mpath_node *node;

	tbl = rcu_dereference(mpp_paths);

	bucket = &tbl->hash_buckets[mesh_table_hash(dst, sdata, tbl)];
	hlist_for_each_entry_rcu(node, n, bucket, list) {
		mpath = node->mpath;
		if (mpath->sdata == sdata &&
				memcmp(dst, mpath->dst, ETH_ALEN) == 0) {
			if (MPATH_EXPIRED(mpath)) {
				spin_lock_bh(&mpath->state_lock);
				if (MPATH_EXPIRED(mpath))
					mpath->flags &= ~MESH_PATH_ACTIVE;
				spin_unlock_bh(&mpath->state_lock);
			}
			return mpath;
		}
	}
	return NULL;
}

/**
 * mesh_path_lookup_by_idx - look up a path in the mesh path table by its index
 * @idx: index
 * @sdata: local subif, or NULL for all entries
 *
 * Returns: pointer to the mesh path structure, or NULL if not found.
 *
 * Locking: must be called within a read rcu section.
 */
struct mesh_path *mesh_path_lookup_by_idx(int idx, struct ieee80211_sub_if_data *sdata)
{
	struct mpath_node *node;
	struct hlist_node *p;
	int i;
	int j = 0;

	for_each_mesh_entry(mesh_paths, p, node, i) {
		if (sdata && node->mpath->sdata != sdata)
			continue;
		if (j++ == idx) {
			if (MPATH_EXPIRED(node->mpath)) {
				spin_lock_bh(&node->mpath->state_lock);
				if (MPATH_EXPIRED(node->mpath))
					node->mpath->flags &= ~MESH_PATH_ACTIVE;
				spin_unlock_bh(&node->mpath->state_lock);
			}
			return node->mpath;
		}
	}
	return NULL;
}

/**
 * mesh_path_add - allocate and add a new path to the mesh path table
 * @dst: destination address of the path (ETH_ALEN length)
 * @sdata: local subif
 *
 * Returns: 0 on success
 *
 * State: the initial state of the new path is set to 0
 */
int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata)
{
	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
	struct ieee80211_local *local = sdata->local;
	struct mesh_path *mpath, *new_mpath;
	struct mpath_node *node, *new_node;
	struct hlist_head *bucket;
	struct hlist_node *n;
	int grow = 0;
	int err = 0;
	u32 hash_idx;

	if (memcmp(dst, sdata->vif.addr, ETH_ALEN) == 0)
		/* never add ourselves as neighbours */
		return -ENOTSUPP;

	if (is_multicast_ether_addr(dst))
		return -ENOTSUPP;
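
	/* Cap the number of paths per interface; the slot taken here is
	 * released again on the error paths below and when the path node is
	 * reclaimed after deletion.
	 */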
	if (atomic_add_unless(&sdata->u.mesh.mpaths, 1, MESH_MAX_MPATHS) == 0)
		return -ENOSPC;

	err = -ENOMEM;
	new_mpath = kzalloc(sizeof(struct mesh_path), GFP_ATOMIC);
	if (!new_mpath)
		goto err_path_alloc;

	new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC);
	if (!new_node)
		goto err_node_alloc;

	read_lock_bh(&pathtbl_resize_lock);
	memcpy(new_mpath->dst, dst, ETH_ALEN);
	new_mpath->sdata = sdata;
	new_mpath->flags = 0;
	skb_queue_head_init(&new_mpath->frame_queue);
	new_node->mpath = new_mpath;
	new_mpath->timer.data = (unsigned long) new_mpath;
	new_mpath->timer.function = mesh_path_timer;
	new_mpath->exp_time = jiffies;
	spin_lock_init(&new_mpath->state_lock);
	init_timer(&new_mpath->timer);

	hash_idx = mesh_table_hash(dst, sdata, mesh_paths);
	bucket = &mesh_paths->hash_buckets[hash_idx];

	spin_lock_bh(&mesh_paths->hashwlock[hash_idx]);

	err = -EEXIST;
	hlist_for_each_entry(node, n, bucket, list) {
		mpath = node->mpath;
		if (mpath->sdata == sdata && memcmp(dst, mpath->dst, ETH_ALEN) == 0)
			goto err_exists;
	}

	hlist_add_head_rcu(&new_node->list, bucket);
	if (atomic_inc_return(&mesh_paths->entries) >=
	    mesh_paths->mean_chain_len * (mesh_paths->hash_mask + 1))
		grow = 1;

	mesh_paths_generation++;

	spin_unlock_bh(&mesh_paths->hashwlock[hash_idx]);
	read_unlock_bh(&pathtbl_resize_lock);
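	/* Growing the table cannot be done here: we may be in atomic context
	 * and hold the resize lock only as a reader, so defer the grow to
	 * the mesh work queue.
	 */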
	if (grow) {
		set_bit(MESH_WORK_GROW_MPATH_TABLE, &ifmsh->wrkq_flags);
		ieee80211_queue_work(&local->hw, &sdata->work);
	}
	return 0;

err_exists:
	spin_unlock_bh(&mesh_paths->hashwlock[hash_idx]);
	read_unlock_bh(&pathtbl_resize_lock);
	kfree(new_node);
err_node_alloc:
	kfree(new_mpath);
err_path_alloc:
	atomic_dec(&sdata->u.mesh.mpaths);
	return err;
}
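
/* The old table is freed only after synchronize_rcu(), so lookups still
 * traversing it under rcu_read_lock() remain safe; free_leafs is false
 * because the mesh_path leaves now belong to the new table.
 */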
void mesh_mpath_table_grow(void)
{
	struct mesh_table *oldtbl, *newtbl;

	rcu_read_lock();
	newtbl = mesh_table_alloc(rcu_dereference(mesh_paths)->size_order + 1);
	if (!newtbl) {
		rcu_read_unlock();
		return;
	}
	write_lock_bh(&pathtbl_resize_lock);
	oldtbl = mesh_paths;
	if (mesh_table_grow(mesh_paths, newtbl) < 0) {
		rcu_read_unlock();
		__mesh_table_free(newtbl);
		write_unlock_bh(&pathtbl_resize_lock);
		return;
	}
	rcu_read_unlock();
	rcu_assign_pointer(mesh_paths, newtbl);
	write_unlock_bh(&pathtbl_resize_lock);

	synchronize_rcu();
	mesh_table_free(oldtbl, false);
}

void mesh_mpp_table_grow(void)
{
	struct mesh_table *oldtbl, *newtbl;

	rcu_read_lock();
	newtbl = mesh_table_alloc(rcu_dereference(mpp_paths)->size_order + 1);
	if (!newtbl) {
		rcu_read_unlock();
		return;
	}
	write_lock_bh(&pathtbl_resize_lock);
	oldtbl = mpp_paths;
	if (mesh_table_grow(mpp_paths, newtbl) < 0) {
		rcu_read_unlock();
		__mesh_table_free(newtbl);
		write_unlock_bh(&pathtbl_resize_lock);
		return;
	}
	rcu_read_unlock();
	rcu_assign_pointer(mpp_paths, newtbl);
	write_unlock_bh(&pathtbl_resize_lock);

	synchronize_rcu();
	mesh_table_free(oldtbl, false);
}
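
/* Like mesh_path_add(), but for the proxy path table: records that @dst,
 * a destination outside the mesh, is reachable through the mesh proxy
 * @mpp. Proxy paths carry no timer and do not count against
 * MESH_MAX_MPATHS.
 */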
int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata)
{
	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
	struct ieee80211_local *local = sdata->local;
	struct mesh_path *mpath, *new_mpath;
	struct mpath_node *node, *new_node;
	struct hlist_head *bucket;
	struct hlist_node *n;
	int grow = 0;
	int err = 0;
	u32 hash_idx;

	if (memcmp(dst, sdata->vif.addr, ETH_ALEN) == 0)
		/* never add ourselves as neighbours */
		return -ENOTSUPP;

	if (is_multicast_ether_addr(dst))
		return -ENOTSUPP;

	err = -ENOMEM;
	new_mpath = kzalloc(sizeof(struct mesh_path), GFP_ATOMIC);
	if (!new_mpath)
		goto err_path_alloc;

	new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC);
	if (!new_node)
		goto err_node_alloc;

	read_lock_bh(&pathtbl_resize_lock);
	memcpy(new_mpath->dst, dst, ETH_ALEN);
	memcpy(new_mpath->mpp, mpp, ETH_ALEN);
	new_mpath->sdata = sdata;
	new_mpath->flags = 0;
	skb_queue_head_init(&new_mpath->frame_queue);
	new_node->mpath = new_mpath;
	new_mpath->exp_time = jiffies;
	spin_lock_init(&new_mpath->state_lock);

	hash_idx = mesh_table_hash(dst, sdata, mpp_paths);
	bucket = &mpp_paths->hash_buckets[hash_idx];

	spin_lock_bh(&mpp_paths->hashwlock[hash_idx]);

	err = -EEXIST;
	hlist_for_each_entry(node, n, bucket, list) {
		mpath = node->mpath;
		if (mpath->sdata == sdata && memcmp(dst, mpath->dst, ETH_ALEN) == 0)
			goto err_exists;
	}

	hlist_add_head_rcu(&new_node->list, bucket);
	if (atomic_inc_return(&mpp_paths->entries) >=
	    mpp_paths->mean_chain_len * (mpp_paths->hash_mask + 1))
		grow = 1;

	spin_unlock_bh(&mpp_paths->hashwlock[hash_idx]);
	read_unlock_bh(&pathtbl_resize_lock);
	if (grow) {
		set_bit(MESH_WORK_GROW_MPP_TABLE, &ifmsh->wrkq_flags);
		ieee80211_queue_work(&local->hw, &sdata->work);
	}
	return 0;

err_exists:
	spin_unlock_bh(&mpp_paths->hashwlock[hash_idx]);
	read_unlock_bh(&pathtbl_resize_lock);
	kfree(new_node);
err_node_alloc:
	kfree(new_mpath);
err_path_alloc:
	return err;
}

/**
 * mesh_plink_broken - deactivates paths and sends perr when a link breaks
 *
 * @sta: broken peer link
 *
 * This function must be called from the rate control algorithm if enough
 * delivery errors suggest that a peer link is no longer usable.
 */
void mesh_plink_broken(struct sta_info *sta)
{
	static const u8 bcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct hlist_node *p;
	struct ieee80211_sub_if_data *sdata = sta->sdata;
	int i;

	rcu_read_lock();
	for_each_mesh_entry(mesh_paths, p, node, i) {
		mpath = node->mpath;
		spin_lock_bh(&mpath->state_lock);
		if (mpath->next_hop == sta &&
		    mpath->flags & MESH_PATH_ACTIVE &&
		    !(mpath->flags & MESH_PATH_FIXED)) {
			mpath->flags &= ~MESH_PATH_ACTIVE;
			++mpath->sn;
			spin_unlock_bh(&mpath->state_lock);
			mesh_path_error_tx(sdata->u.mesh.mshcfg.element_ttl,
					mpath->dst, cpu_to_le32(mpath->sn),
					cpu_to_le16(PERR_RCODE_DEST_UNREACH),
					bcast, sdata);
		} else
			spin_unlock_bh(&mpath->state_lock);
	}
	rcu_read_unlock();
}

/**
 * mesh_path_flush_by_nexthop - Deletes mesh paths if their next hop matches
 *
 * @sta: mesh peer to match
 *
 * RCU notes: this function is called when a mesh plink transitions from
 * PLINK_ESTAB to any other state, since PLINK_ESTAB state is the only one that
 * allows path creation. This will happen before the sta can be freed (because
 * sta_info_destroy() calls this) so any reader in a rcu read block will be
 * protected against the plink disappearing.
 */
void mesh_path_flush_by_nexthop(struct sta_info *sta)
{
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct hlist_node *p;
	int i;

	for_each_mesh_entry(mesh_paths, p, node, i) {
		mpath = node->mpath;
		if (mpath->next_hop == sta)
			mesh_path_del(mpath->dst, mpath->sdata);
	}
}

void mesh_path_flush(struct ieee80211_sub_if_data *sdata)
{
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct hlist_node *p;
	int i;

	for_each_mesh_entry(mesh_paths, p, node, i) {
		mpath = node->mpath;
		if (mpath->sdata == sdata)
			mesh_path_del(mpath->dst, mpath->sdata);
	}
}

static void mesh_path_node_reclaim(struct rcu_head *rp)
{
	struct mpath_node *node = container_of(rp, struct mpath_node, rcu);
	struct ieee80211_sub_if_data *sdata = node->mpath->sdata;

	del_timer_sync(&node->mpath->timer);
	atomic_dec(&sdata->u.mesh.mpaths);
	kfree(node->mpath);
	kfree(node);
}

/**
 * mesh_path_del - delete a mesh path from the table
 *
 * @addr: dst address (ETH_ALEN length)
 * @sdata: local subif
 *
 * Returns: 0 if successful
 */
int mesh_path_del(u8 *addr, struct ieee80211_sub_if_data *sdata)
{
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct hlist_head *bucket;
	struct hlist_node *n;
	int hash_idx;
	int err = 0;

	read_lock_bh(&pathtbl_resize_lock);
	hash_idx = mesh_table_hash(addr, sdata, mesh_paths);
	bucket = &mesh_paths->hash_buckets[hash_idx];

	spin_lock_bh(&mesh_paths->hashwlock[hash_idx]);
	hlist_for_each_entry(node, n, bucket, list) {
		mpath = node->mpath;
		if (mpath->sdata == sdata &&
				memcmp(addr, mpath->dst, ETH_ALEN) == 0) {
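			/* Unlink the node under the bucket lock, but defer
			 * freeing it to an RCU callback so that concurrent
			 * lookups still traversing the node can finish.
			 */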
			spin_lock_bh(&mpath->state_lock);
			mpath->flags |= MESH_PATH_RESOLVING;
			hlist_del_rcu(&node->list);
			call_rcu(&node->rcu, mesh_path_node_reclaim);
			atomic_dec(&mesh_paths->entries);
			spin_unlock_bh(&mpath->state_lock);
			goto enddel;
		}
	}

	err = -ENXIO;
enddel:
	mesh_paths_generation++;
	spin_unlock_bh(&mesh_paths->hashwlock[hash_idx]);
	read_unlock_bh(&pathtbl_resize_lock);
	return err;
}

/**
 * mesh_path_tx_pending - sends pending frames in a mesh path queue
 *
 * @mpath: mesh path to activate
 *
 * Locking: the state_lock of the mpath structure must NOT be held when calling
 * this function.
 */
636 if (mpath->flags & MESH_PATH_ACTIVE)
637 ieee80211_add_pending_skbs(mpath->sdata->local,
638 &mpath->frame_queue);

/**
 * mesh_path_discard_frame - discard a frame whose path could not be resolved
 *
 * @skb: frame to discard
 * @sdata: network subif the frame was to be sent through
 *
 * If the frame was being forwarded from another MP, a PERR frame will be sent
 * to the precursor. The precursor's address (i.e. the previous hop) was saved
 * in addr1 of the frame-to-be-forwarded, and would only be overwritten once
 * the destination is successfully resolved.
 *
 * Locking: the function must be called within a rcu_read_lock region.
 */
void mesh_path_discard_frame(struct sk_buff *skb,
			     struct ieee80211_sub_if_data *sdata)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
	struct mesh_path *mpath;
	u32 sn = 0;

	if (memcmp(hdr->addr4, sdata->vif.addr, ETH_ALEN) != 0) {
		u8 *ra, *da;

		da = hdr->addr3;
		ra = hdr->addr1;
		mpath = mesh_path_lookup(da, sdata);
		if (mpath)
			sn = ++mpath->sn;
		mesh_path_error_tx(sdata->u.mesh.mshcfg.element_ttl, skb->data,
				cpu_to_le32(sn),
				cpu_to_le16(PERR_RCODE_NO_ROUTE), ra, sdata);
	}

	kfree_skb(skb);
	sdata->u.mesh.mshstats.dropped_frames_no_route++;
}

/**
 * mesh_path_flush_pending - free the pending queue of a mesh path
 *
 * @mpath: mesh path whose queue has to be freed
 *
 * Locking: the function must be called within a rcu_read_lock region.
 */
void mesh_path_flush_pending(struct mesh_path *mpath)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(&mpath->frame_queue)) &&
			(mpath->flags & MESH_PATH_ACTIVE))
		mesh_path_discard_frame(skb, mpath->sdata);
}

/**
 * mesh_path_fix_nexthop - force a specific next hop for a mesh path
 *
 * @mpath: the mesh path to modify
 * @next_hop: the next hop to force
 *
 * Locking: mpath->state_lock is acquired by this function, so it must NOT
 * be held by the caller.
 */
void mesh_path_fix_nexthop(struct mesh_path *mpath, struct sta_info *next_hop)
{
	spin_lock_bh(&mpath->state_lock);
	mesh_path_assign_nexthop(mpath, next_hop);
	mpath->sn = 0xffff;
	mpath->metric = 0;
	mpath->hop_count = 0;
	mpath->exp_time = 0;
	mpath->flags |= MESH_PATH_FIXED;
	mesh_path_activate(mpath);
	spin_unlock_bh(&mpath->state_lock);
	mesh_path_tx_pending(mpath);
}

static void mesh_path_node_free(struct hlist_node *p, bool free_leafs)
{
	struct mesh_path *mpath;
	struct mpath_node *node = hlist_entry(p, struct mpath_node, list);

	mpath = node->mpath;
	hlist_del_rcu(p);
	if (free_leafs)
		kfree(mpath);
	kfree(node);
}

static int mesh_path_node_copy(struct hlist_node *p, struct mesh_table *newtbl)
{
	struct mesh_path *mpath;
	struct mpath_node *node, *new_node;
	u32 hash_idx;

	new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC);
	if (new_node == NULL)
		return -ENOMEM;

	node = hlist_entry(p, struct mpath_node, list);
	mpath = node->mpath;
	new_node->mpath = mpath;
	hash_idx = mesh_table_hash(mpath->dst, mpath->sdata, newtbl);
	hlist_add_head(&new_node->list,
			&newtbl->hash_buckets[hash_idx]);
	return 0;
}

int mesh_pathtbl_init(void)
{
	mesh_paths = mesh_table_alloc(INIT_PATHS_SIZE_ORDER);
	if (!mesh_paths)
		return -ENOMEM;
	mesh_paths->free_node = &mesh_path_node_free;
	mesh_paths->copy_node = &mesh_path_node_copy;
	mesh_paths->mean_chain_len = MEAN_CHAIN_LEN;

	mpp_paths = mesh_table_alloc(INIT_PATHS_SIZE_ORDER);
	if (!mpp_paths) {
		mesh_table_free(mesh_paths, true);
		return -ENOMEM;
	}
	mpp_paths->free_node = &mesh_path_node_free;
	mpp_paths->copy_node = &mesh_path_node_copy;
	mpp_paths->mean_chain_len = MEAN_CHAIN_LEN;

	return 0;
}
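
/* Called from the periodic mesh housekeeping: drop every path of @sdata
 * that is neither being resolved nor fixed and has not been refreshed for
 * MESH_PATH_EXPIRE jiffies.
 */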
void mesh_path_expire(struct ieee80211_sub_if_data *sdata)
{
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct hlist_node *p;
	int i;

	read_lock_bh(&pathtbl_resize_lock);
	for_each_mesh_entry(mesh_paths, p, node, i) {
		if (node->mpath->sdata != sdata)
			continue;
		mpath = node->mpath;
		spin_lock_bh(&mpath->state_lock);
		if ((!(mpath->flags & MESH_PATH_RESOLVING)) &&
		    (!(mpath->flags & MESH_PATH_FIXED)) &&
		    time_after(jiffies,
			       mpath->exp_time + MESH_PATH_EXPIRE)) {
			spin_unlock_bh(&mpath->state_lock);
			mesh_path_del(mpath->dst, mpath->sdata);
		} else
			spin_unlock_bh(&mpath->state_lock);
	}
	read_unlock_bh(&pathtbl_resize_lock);
}

void mesh_pathtbl_unregister(void)
{
	mesh_table_free(mesh_paths, true);
	mesh_table_free(mpp_paths, true);
}