/*
 * Copyright (c) 2008, 2009 open80211s Ltd.
 * Author:     Luis Carlos Cobo <luisca@cozybit.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/etherdevice.h>
#include <linux/list.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <net/mac80211.h>
#include "ieee80211_i.h"
#include "mesh.h"

/* There will be initially 2^INIT_PATHS_SIZE_ORDER buckets */
#define INIT_PATHS_SIZE_ORDER	2

/* Keep the mean chain length below this constant */
#define MEAN_CHAIN_LEN		2

#define MPATH_EXPIRED(mpath) ((mpath->flags & MESH_PATH_ACTIVE) && \
				time_after(jiffies, mpath->exp_time) && \
				!(mpath->flags & MESH_PATH_FIXED))

struct mpath_node {
	struct hlist_node list;
	struct rcu_head rcu;
	/* This indirection allows two different tables to point to the same
	 * mesh_path structure, useful when resizing
	 */
	struct mesh_path *mpath;
};

static struct mesh_table *mesh_paths;
static struct mesh_table *mpp_paths; /* Store paths for MPP&MAP */

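/* Bumped on every successful path add/delete so that code walking the
 * table can notice that the set of paths changed underneath it.
 */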
int mesh_paths_generation;
static void __mesh_table_free(struct mesh_table *tbl)
{
	kfree(tbl->hash_buckets);
	kfree(tbl->hashwlock);
	kfree(tbl);
}

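/* free_leafs selects between the two callers: after a resize the old
 * table is freed with free_leafs == false, since the new table still
 * references the same mesh_path leaves through the mpath_node
 * indirection; on final teardown the leaves are freed as well.
 */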
void mesh_table_free(struct mesh_table *tbl, bool free_leafs)
{
	struct hlist_head *mesh_hash;
	struct hlist_node *p, *q;
	int i;

	mesh_hash = tbl->hash_buckets;
	for (i = 0; i <= tbl->hash_mask; i++) {
		spin_lock(&tbl->hashwlock[i]);
		hlist_for_each_safe(p, q, &mesh_hash[i]) {
			tbl->free_node(p, free_leafs);
			atomic_dec(&tbl->entries);
		}
		spin_unlock(&tbl->hashwlock[i]);
	}
	__mesh_table_free(tbl);
}

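/* Double the table (size_order + 1) once the entry count reaches
 * mean_chain_len times the number of buckets, i.e. once the average
 * chain length hits the configured mean.  Only the mpath_node wrappers
 * are duplicated; the mesh_path leaves stay shared with the old table.
 */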
static struct mesh_table *mesh_table_grow(struct mesh_table *tbl)
{
	struct mesh_table *newtbl;
	struct hlist_head *oldhash;
	struct hlist_node *p, *q;
	int i;

	if (atomic_read(&tbl->entries)
			< tbl->mean_chain_len * (tbl->hash_mask + 1))
		goto endgrow;

	newtbl = mesh_table_alloc(tbl->size_order + 1);
	if (!newtbl)
		goto endgrow;

	newtbl->free_node = tbl->free_node;
	newtbl->mean_chain_len = tbl->mean_chain_len;
	newtbl->copy_node = tbl->copy_node;
	atomic_set(&newtbl->entries, atomic_read(&tbl->entries));

	oldhash = tbl->hash_buckets;
	for (i = 0; i <= tbl->hash_mask; i++)
		hlist_for_each(p, &oldhash[i])
			if (tbl->copy_node(p, newtbl) < 0)
				goto errcopy;

	return newtbl;

errcopy:
	for (i = 0; i <= newtbl->hash_mask; i++) {
		hlist_for_each_safe(p, q, &newtbl->hash_buckets[i])
			tbl->free_node(p, 0);
	}
	__mesh_table_free(newtbl);
endgrow:
	return NULL;
}


/* This lock will have the grow table function as writer and add / delete nodes
 * as readers. When reading the table (i.e. doing lookups) we are well protected
 * by RCU.
 */
static DEFINE_RWLOCK(pathtbl_resize_lock);

/**
 * mesh_path_assign_nexthop - update mesh path next hop
 *
 * @mpath: mesh path to update
 * @sta: next hop to assign
 *
 * Locking: mpath->state_lock must be held when calling this function
 */
void mesh_path_assign_nexthop(struct mesh_path *mpath, struct sta_info *sta)
{
	struct sk_buff *skb;
	struct ieee80211_hdr *hdr;
	struct sk_buff_head tmpq;
	unsigned long flags;

	rcu_assign_pointer(mpath->next_hop, sta);

	__skb_queue_head_init(&tmpq);

	spin_lock_irqsave(&mpath->frame_queue.lock, flags);

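	/* Rewrite the RA (addr1) of every frame queued while the path was
	 * unresolved so that it points at the newly assigned next hop, then
	 * splice the frames back in their original order.
	 */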
	while ((skb = __skb_dequeue(&mpath->frame_queue)) != NULL) {
		hdr = (struct ieee80211_hdr *) skb->data;
		memcpy(hdr->addr1, sta->sta.addr, ETH_ALEN);
		__skb_queue_tail(&tmpq, skb);
	}

	skb_queue_splice(&tmpq, &mpath->frame_queue);
	spin_unlock_irqrestore(&mpath->frame_queue.lock, flags);
}


/**
 * mesh_path_lookup - look up a path in the mesh path table
 * @dst: hardware address (ETH_ALEN length) of destination
 * @sdata: local subif
 *
 * Returns: pointer to the mesh path structure, or NULL if not found
 *
 * Locking: must be called within a read rcu section.
 */
struct mesh_path *mesh_path_lookup(u8 *dst, struct ieee80211_sub_if_data *sdata)
{
	struct mesh_path *mpath;
	struct hlist_node *n;
	struct hlist_head *bucket;
	struct mesh_table *tbl;
	struct mpath_node *node;

	tbl = rcu_dereference(mesh_paths);

	bucket = &tbl->hash_buckets[mesh_table_hash(dst, sdata, tbl)];
	hlist_for_each_entry_rcu(node, n, bucket, list) {
		mpath = node->mpath;
		if (mpath->sdata == sdata &&
				memcmp(dst, mpath->dst, ETH_ALEN) == 0) {
			if (MPATH_EXPIRED(mpath)) {
				spin_lock_bh(&mpath->state_lock);
				if (MPATH_EXPIRED(mpath))
					mpath->flags &= ~MESH_PATH_ACTIVE;
				spin_unlock_bh(&mpath->state_lock);
			}
			return mpath;
		}
	}
	return NULL;
}
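
/* Illustrative call pattern; the caller must hold the RCU read lock and
 * must not use the returned pointer outside of it:
 *
 *	rcu_read_lock();
 *	mpath = mesh_path_lookup(dst, sdata);
 *	if (mpath && (mpath->flags & MESH_PATH_ACTIVE))
 *		next_hop = rcu_dereference(mpath->next_hop);
 *	rcu_read_unlock();
 */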

struct mesh_path *mpp_path_lookup(u8 *dst, struct ieee80211_sub_if_data *sdata)
{
	struct mesh_path *mpath;
	struct hlist_node *n;
	struct hlist_head *bucket;
	struct mesh_table *tbl;
	struct mpath_node *node;

	tbl = rcu_dereference(mpp_paths);

	bucket = &tbl->hash_buckets[mesh_table_hash(dst, sdata, tbl)];
	hlist_for_each_entry_rcu(node, n, bucket, list) {
		mpath = node->mpath;
		if (mpath->sdata == sdata &&
		    memcmp(dst, mpath->dst, ETH_ALEN) == 0) {
			if (MPATH_EXPIRED(mpath)) {
				spin_lock_bh(&mpath->state_lock);
				if (MPATH_EXPIRED(mpath))
					mpath->flags &= ~MESH_PATH_ACTIVE;
				spin_unlock_bh(&mpath->state_lock);
			}
			return mpath;
		}
	}
	return NULL;
}


/**
 * mesh_path_lookup_by_idx - look up a path in the mesh path table by its index
 * @idx: index
 * @sdata: local subif, or NULL for all entries
 *
 * Returns: pointer to the mesh path structure, or NULL if not found.
 *
 * Locking: must be called within a read rcu section.
 */
struct mesh_path *mesh_path_lookup_by_idx(int idx,
					  struct ieee80211_sub_if_data *sdata)
{
	struct mpath_node *node;
	struct hlist_node *p;
	int i;
	int j = 0;

	for_each_mesh_entry(mesh_paths, p, node, i) {
		if (sdata && node->mpath->sdata != sdata)
			continue;
		if (j++ == idx) {
			if (MPATH_EXPIRED(node->mpath)) {
				spin_lock_bh(&node->mpath->state_lock);
				if (MPATH_EXPIRED(node->mpath))
					node->mpath->flags &= ~MESH_PATH_ACTIVE;
				spin_unlock_bh(&node->mpath->state_lock);
			}
			return node->mpath;
		}
	}

	return NULL;
}
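
/* The index is a position in iteration order, not a stable key: the walk
 * visits every bucket, so lookup cost is linear in the number of paths,
 * and an entry's index can change whenever the table does (see
 * mesh_paths_generation).
 */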

/**
 * mesh_path_add - allocate and add a new path to the mesh path table
 * @dst: destination address of the path (ETH_ALEN length)
 * @sdata: local subif
 *
 * Returns: 0 on success
 *
 * State: the initial state of the new path is set to 0
 */
int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata)
{
	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
	struct ieee80211_local *local = sdata->local;
	struct mesh_path *mpath, *new_mpath;
	struct mpath_node *node, *new_node;
	struct hlist_head *bucket;
	struct hlist_node *n;
	int grow = 0;
	int err = 0;
	u32 hash_idx;

	if (memcmp(dst, sdata->vif.addr, ETH_ALEN) == 0)
		/* never add ourselves as neighbours */
		return -ENOTSUPP;

	if (is_multicast_ether_addr(dst))
		return -ENOTSUPP;

	if (atomic_add_unless(&sdata->u.mesh.mpaths, 1, MESH_MAX_MPATHS) == 0)
		return -ENOSPC;

	err = -ENOMEM;
	new_mpath = kzalloc(sizeof(struct mesh_path), GFP_ATOMIC);
	if (!new_mpath)
		goto err_path_alloc;

	new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC);
	if (!new_node)
		goto err_node_alloc;

	read_lock(&pathtbl_resize_lock);
	memcpy(new_mpath->dst, dst, ETH_ALEN);
	new_mpath->sdata = sdata;
	new_mpath->flags = 0;
	skb_queue_head_init(&new_mpath->frame_queue);
	new_node->mpath = new_mpath;
	new_mpath->timer.data = (unsigned long) new_mpath;
	new_mpath->timer.function = mesh_path_timer;
	new_mpath->exp_time = jiffies;
	spin_lock_init(&new_mpath->state_lock);
	init_timer(&new_mpath->timer);

	hash_idx = mesh_table_hash(dst, sdata, mesh_paths);
	bucket = &mesh_paths->hash_buckets[hash_idx];

	spin_lock(&mesh_paths->hashwlock[hash_idx]);

	err = -EEXIST;
	hlist_for_each_entry(node, n, bucket, list) {
		mpath = node->mpath;
		if (mpath->sdata == sdata && memcmp(dst, mpath->dst, ETH_ALEN) == 0)
			goto err_exists;
	}

	hlist_add_head_rcu(&new_node->list, bucket);
	if (atomic_inc_return(&mesh_paths->entries) >=
		mesh_paths->mean_chain_len * (mesh_paths->hash_mask + 1))
		grow = 1;

	mesh_paths_generation++;

	spin_unlock(&mesh_paths->hashwlock[hash_idx]);
	read_unlock(&pathtbl_resize_lock);
	if (grow) {
		set_bit(MESH_WORK_GROW_MPATH_TABLE, &ifmsh->wrkq_flags);
		ieee80211_queue_work(&local->hw, &sdata->work);
	}
	return 0;

err_exists:
	spin_unlock(&mesh_paths->hashwlock[hash_idx]);
	read_unlock(&pathtbl_resize_lock);
	kfree(new_node);
err_node_alloc:
	kfree(new_mpath);
err_path_alloc:
	atomic_dec(&sdata->u.mesh.mpaths);
	return err;
}
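
/* Additions nest the per-bucket spinlock inside pathtbl_resize_lock taken
 * for reading.  The resize itself is punted to the mesh work: it takes the
 * resize lock as a writer and calls synchronize_rcu(), which may sleep and
 * so cannot run in this (possibly atomic) context.
 */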

void mesh_mpath_table_grow(void)
{
	struct mesh_table *oldtbl, *newtbl;

	write_lock(&pathtbl_resize_lock);
	oldtbl = mesh_paths;
	newtbl = mesh_table_grow(mesh_paths);
	if (!newtbl) {
		write_unlock(&pathtbl_resize_lock);
		return;
	}
	rcu_assign_pointer(mesh_paths, newtbl);
	write_unlock(&pathtbl_resize_lock);

	synchronize_rcu();
	mesh_table_free(oldtbl, false);
}

void mesh_mpp_table_grow(void)
{
	struct mesh_table *oldtbl, *newtbl;

	write_lock(&pathtbl_resize_lock);
	oldtbl = mpp_paths;
	newtbl = mesh_table_grow(mpp_paths);
	if (!newtbl) {
		write_unlock(&pathtbl_resize_lock);
		return;
	}
	rcu_assign_pointer(mpp_paths, newtbl);
	write_unlock(&pathtbl_resize_lock);

	synchronize_rcu();
	mesh_table_free(oldtbl, false);
}
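
/* Both grow paths follow the usual RCU replacement sequence: publish the
 * new table with rcu_assign_pointer(), wait out pre-existing readers with
 * synchronize_rcu(), and only then free the old table.  The shared
 * mesh_path leaves are left alone, hence free_leafs == false.
 */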

int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata)
{
	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
	struct ieee80211_local *local = sdata->local;
	struct mesh_path *mpath, *new_mpath;
	struct mpath_node *node, *new_node;
	struct hlist_head *bucket;
	struct hlist_node *n;
	int grow = 0;
	int err = 0;
	u32 hash_idx;

	if (memcmp(dst, sdata->vif.addr, ETH_ALEN) == 0)
		/* never add ourselves as neighbours */
		return -ENOTSUPP;

	if (is_multicast_ether_addr(dst))
		return -ENOTSUPP;

	err = -ENOMEM;
	new_mpath = kzalloc(sizeof(struct mesh_path), GFP_ATOMIC);
	if (!new_mpath)
		goto err_path_alloc;

	new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC);
	if (!new_node)
		goto err_node_alloc;

	read_lock(&pathtbl_resize_lock);
	memcpy(new_mpath->dst, dst, ETH_ALEN);
	memcpy(new_mpath->mpp, mpp, ETH_ALEN);
	new_mpath->sdata = sdata;
	new_mpath->flags = 0;
	skb_queue_head_init(&new_mpath->frame_queue);
	new_node->mpath = new_mpath;
	new_mpath->exp_time = jiffies;
	spin_lock_init(&new_mpath->state_lock);

	hash_idx = mesh_table_hash(dst, sdata, mpp_paths);
	bucket = &mpp_paths->hash_buckets[hash_idx];

	spin_lock(&mpp_paths->hashwlock[hash_idx]);

	err = -EEXIST;
	hlist_for_each_entry(node, n, bucket, list) {
		mpath = node->mpath;
		if (mpath->sdata == sdata && memcmp(dst, mpath->dst, ETH_ALEN) == 0)
			goto err_exists;
	}

	hlist_add_head_rcu(&new_node->list, bucket);
	if (atomic_inc_return(&mpp_paths->entries) >=
		mpp_paths->mean_chain_len * (mpp_paths->hash_mask + 1))
		grow = 1;

	spin_unlock(&mpp_paths->hashwlock[hash_idx]);
	read_unlock(&pathtbl_resize_lock);
	if (grow) {
		set_bit(MESH_WORK_GROW_MPP_TABLE, &ifmsh->wrkq_flags);
		ieee80211_queue_work(&local->hw, &sdata->work);
	}
	return 0;

err_exists:
	spin_unlock(&mpp_paths->hashwlock[hash_idx]);
	read_unlock(&pathtbl_resize_lock);
	kfree(new_node);
err_node_alloc:
	kfree(new_mpath);
err_path_alloc:
	return err;
}
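
/* An MPP entry maps an external (non-mesh) destination to the mesh node
 * that proxies it: dst is the outside address, mpp the proxy's mesh
 * address.  Unlike mesh paths, MPP entries carry no discovery timer and
 * do not count against MESH_MAX_MPATHS.
 */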


/**
 * mesh_plink_broken - deactivates paths and sends PERR when a link breaks
 *
 * @sta: broken peer link
 *
 * This function must be called from the rate control algorithm if enough
 * delivery errors suggest that a peer link is no longer usable.
 */
void mesh_plink_broken(struct sta_info *sta)
{
	static const u8 bcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct hlist_node *p;
	struct ieee80211_sub_if_data *sdata = sta->sdata;
	int i;

	rcu_read_lock();
	for_each_mesh_entry(mesh_paths, p, node, i) {
		mpath = node->mpath;
		spin_lock_bh(&mpath->state_lock);
		if (mpath->next_hop == sta &&
		    mpath->flags & MESH_PATH_ACTIVE &&
		    !(mpath->flags & MESH_PATH_FIXED)) {
			mpath->flags &= ~MESH_PATH_ACTIVE;
			++mpath->sn;
			spin_unlock_bh(&mpath->state_lock);
			mesh_path_error_tx(sdata->u.mesh.mshcfg.element_ttl,
					mpath->dst, cpu_to_le32(mpath->sn),
					cpu_to_le16(PERR_RCODE_DEST_UNREACH),
					bcast, sdata);
		} else
			spin_unlock_bh(&mpath->state_lock);
	}
	rcu_read_unlock();
}
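
/* The PERR is broadcast rather than unicast to each precursor, so every
 * neighbour that was routing through us towards the affected destination
 * learns of the breakage from a single frame.
 */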

/**
 * mesh_path_flush_by_nexthop - Deletes mesh paths if their next hop matches
 *
 * @sta: mesh peer to match
 *
 * RCU notes: this function is called when a mesh plink transitions from
 * PLINK_ESTAB to any other state, since PLINK_ESTAB state is the only one that
 * allows path creation. This will happen before the sta can be freed (because
 * sta_info_destroy() calls this) so any reader in a rcu read block will be
 * protected against the plink disappearing.
 */
void mesh_path_flush_by_nexthop(struct sta_info *sta)
{
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct hlist_node *p;
	int i;

	for_each_mesh_entry(mesh_paths, p, node, i) {
		mpath = node->mpath;
		if (mpath->next_hop == sta)
			mesh_path_del(mpath->dst, mpath->sdata);
	}
}

void mesh_path_flush(struct ieee80211_sub_if_data *sdata)
{
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct hlist_node *p;
	int i;

	for_each_mesh_entry(mesh_paths, p, node, i) {
		mpath = node->mpath;
		if (mpath->sdata == sdata)
			mesh_path_del(mpath->dst, mpath->sdata);
	}
}

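/* RCU callback: runs once a grace period has elapsed after the node was
 * unlinked with hlist_del_rcu() in mesh_path_del(), so no reader can still
 * be referencing the node or its mesh_path when they are freed here.
 */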
static void mesh_path_node_reclaim(struct rcu_head *rp)
{
	struct mpath_node *node = container_of(rp, struct mpath_node, rcu);
	struct ieee80211_sub_if_data *sdata = node->mpath->sdata;

	del_timer_sync(&node->mpath->timer);
	atomic_dec(&sdata->u.mesh.mpaths);
	kfree(node->mpath);
	kfree(node);
}

/**
 * mesh_path_del - delete a mesh path from the table
 *
 * @addr: dst address (ETH_ALEN length)
 * @sdata: local subif
 *
 * Returns: 0 if successful
 */
int mesh_path_del(u8 *addr, struct ieee80211_sub_if_data *sdata)
{
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct hlist_head *bucket;
	struct hlist_node *n;
	int hash_idx;
	int err = 0;

	read_lock(&pathtbl_resize_lock);
	hash_idx = mesh_table_hash(addr, sdata, mesh_paths);
	bucket = &mesh_paths->hash_buckets[hash_idx];

	spin_lock(&mesh_paths->hashwlock[hash_idx]);
	hlist_for_each_entry(node, n, bucket, list) {
		mpath = node->mpath;
		if (mpath->sdata == sdata &&
				memcmp(addr, mpath->dst, ETH_ALEN) == 0) {
			spin_lock_bh(&mpath->state_lock);
			mpath->flags |= MESH_PATH_RESOLVING;
			hlist_del_rcu(&node->list);
			call_rcu(&node->rcu, mesh_path_node_reclaim);
			atomic_dec(&mesh_paths->entries);
			spin_unlock_bh(&mpath->state_lock);
			goto enddel;
		}
	}

	err = -ENXIO;
enddel:
	mesh_paths_generation++;
	spin_unlock(&mesh_paths->hashwlock[hash_idx]);
	read_unlock(&pathtbl_resize_lock);
	return err;
}
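
/* The path is marked MESH_PATH_RESOLVING before it is unlinked, so that
 * concurrent readers still holding the entry queue frames on it rather
 * than starting discovery for a path that is about to be reclaimed.
 */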

/**
 * mesh_path_tx_pending - sends pending frames in a mesh path queue
 *
 * @mpath: mesh path to activate
 *
 * Locking: the state_lock of the mpath structure must NOT be held when calling
 * this function.
 */
void mesh_path_tx_pending(struct mesh_path *mpath)
{
	if (mpath->flags & MESH_PATH_ACTIVE)
		ieee80211_add_pending_skbs(mpath->sdata->local,
				&mpath->frame_queue);
}

/**
 * mesh_path_discard_frame - discard a frame whose path could not be resolved
 *
 * @skb: frame to discard
 * @sdata: network subif the frame was to be sent through
 *
 * If the frame was being forwarded from another MP, a PERR frame will be sent
 * to the precursor.  The precursor's address (i.e. the previous hop) was saved
 * in addr1 of the frame-to-be-forwarded, and would only be overwritten once
 * the destination is successfully resolved.
 *
 * Locking: the function must be called within an rcu_read_lock region
 */
void mesh_path_discard_frame(struct sk_buff *skb,
			     struct ieee80211_sub_if_data *sdata)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
	struct mesh_path *mpath;
	u32 sn = 0;

	if (memcmp(hdr->addr4, sdata->vif.addr, ETH_ALEN) != 0) {
		u8 *ra, *da;

		da = hdr->addr3;
		ra = hdr->addr1;
		mpath = mesh_path_lookup(da, sdata);
		if (mpath)
			sn = ++mpath->sn;
		mesh_path_error_tx(sdata->u.mesh.mshcfg.element_ttl, skb->data,
				   cpu_to_le32(sn),
				   cpu_to_le16(PERR_RCODE_NO_ROUTE), ra, sdata);
	}

	kfree_skb(skb);
	sdata->u.mesh.mshstats.dropped_frames_no_route++;
}

/**
 * mesh_path_flush_pending - free the pending queue of a mesh path
 *
 * @mpath: mesh path whose queue has to be freed
 *
 * Locking: the function must be called within an rcu_read_lock region
 */
void mesh_path_flush_pending(struct mesh_path *mpath)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(&mpath->frame_queue)) != NULL)
		mesh_path_discard_frame(skb, mpath->sdata);
}

/**
 * mesh_path_fix_nexthop - force a specific next hop for a mesh path
 *
 * @mpath: the mesh path to modify
 * @next_hop: the next hop to force
 *
 * Locking: this function takes mpath->state_lock itself; it must be called
 * without holding that lock.
 */
void mesh_path_fix_nexthop(struct mesh_path *mpath, struct sta_info *next_hop)
{
	spin_lock_bh(&mpath->state_lock);
	mesh_path_assign_nexthop(mpath, next_hop);
	mpath->sn = 0xffff;
	mpath->metric = 0;
	mpath->hop_count = 0;
	mpath->exp_time = 0;
	mpath->flags |= MESH_PATH_FIXED;
	mesh_path_activate(mpath);
	spin_unlock_bh(&mpath->state_lock);
	mesh_path_tx_pending(mpath);
}
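
/* Note the ordering above: the path is activated and state_lock dropped
 * before mesh_path_tx_pending() runs, matching that function's requirement
 * that state_lock not be held when it is called.
 */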

static void mesh_path_node_free(struct hlist_node *p, bool free_leafs)
{
	struct mesh_path *mpath;
	struct mpath_node *node = hlist_entry(p, struct mpath_node, list);

	mpath = node->mpath;
	hlist_del_rcu(p);
	if (free_leafs)
		kfree(mpath);
	kfree(node);
}

static int mesh_path_node_copy(struct hlist_node *p, struct mesh_table *newtbl)
{
	struct mesh_path *mpath;
	struct mpath_node *node, *new_node;
	u32 hash_idx;

	new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC);
	if (new_node == NULL)
		return -ENOMEM;

	node = hlist_entry(p, struct mpath_node, list);
	mpath = node->mpath;
	new_node->mpath = mpath;
	hash_idx = mesh_table_hash(mpath->dst, mpath->sdata, newtbl);
	hlist_add_head(&new_node->list,
			&newtbl->hash_buckets[hash_idx]);
	return 0;
}

int mesh_pathtbl_init(void)
{
	mesh_paths = mesh_table_alloc(INIT_PATHS_SIZE_ORDER);
	if (!mesh_paths)
		return -ENOMEM;
	mesh_paths->free_node = &mesh_path_node_free;
	mesh_paths->copy_node = &mesh_path_node_copy;
	mesh_paths->mean_chain_len = MEAN_CHAIN_LEN;

	mpp_paths = mesh_table_alloc(INIT_PATHS_SIZE_ORDER);
	if (!mpp_paths) {
		mesh_table_free(mesh_paths, true);
		return -ENOMEM;
	}
	mpp_paths->free_node = &mesh_path_node_free;
	mpp_paths->copy_node = &mesh_path_node_copy;
	mpp_paths->mean_chain_len = MEAN_CHAIN_LEN;

	return 0;
}
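
/* Both tables start out with 2^INIT_PATHS_SIZE_ORDER == 4 buckets and are
 * grown on demand from the mesh work, as triggered by mesh_path_add() and
 * mpp_path_add().
 */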

void mesh_path_expire(struct ieee80211_sub_if_data *sdata)
{
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct hlist_node *p;
	int i;

	read_lock(&pathtbl_resize_lock);
	for_each_mesh_entry(mesh_paths, p, node, i) {
		if (node->mpath->sdata != sdata)
			continue;
		mpath = node->mpath;
		spin_lock_bh(&mpath->state_lock);
		if (!(mpath->flags & MESH_PATH_RESOLVING) &&
		    !(mpath->flags & MESH_PATH_FIXED) &&
		    time_after(jiffies, mpath->exp_time + MESH_PATH_EXPIRE)) {
			spin_unlock_bh(&mpath->state_lock);
			mesh_path_del(mpath->dst, mpath->sdata);
		} else
			spin_unlock_bh(&mpath->state_lock);
	}
	read_unlock(&pathtbl_resize_lock);
}

void mesh_pathtbl_unregister(void)
{
	mesh_table_free(mesh_paths, true);
	mesh_table_free(mpp_paths, true);
}