/*
 * Copyright (C) 2009-2011 B.A.T.M.A.N. contributors:
 *
 * Marek Lindner, Simon Wunderlich
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 *
 */

#include "main.h"
#include "originator.h"
#include "hash.h"
#include "translation-table.h"
#include "routing.h"
#include "gateway_client.h"
#include "hard-interface.h"
#include "unicast.h"
#include "soft-interface.h"

static void purge_orig(struct work_struct *work);

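/* (re)arm the periodic originator purge: run purge_orig() again in 1 second */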
static void start_purge_timer(struct bat_priv *bat_priv)
{
	INIT_DELAYED_WORK(&bat_priv->orig_work, purge_orig);
	queue_delayed_work(bat_event_workqueue, &bat_priv->orig_work, 1 * HZ);
}

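/* allocate the originator hash table and start the purge timer;
 * returns 1 on success (or if the hash already exists), 0 on error */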
int originator_init(struct bat_priv *bat_priv)
{
	if (bat_priv->orig_hash)
		return 1;

	bat_priv->orig_hash = hash_new(1024);

	if (!bat_priv->orig_hash)
		goto err;

	start_purge_timer(bat_priv);
	return 1;

err:
	return 0;
}

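/* free a neigh_node once the RCU grace period has elapsed */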
static void neigh_node_free_rcu(struct rcu_head *rcu)
{
	struct neigh_node *neigh_node;

	neigh_node = container_of(rcu, struct neigh_node, rcu);
	kfree(neigh_node);
}

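/* drop a neigh_node reference; schedule the RCU-delayed free when it was
 * the last one */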
void neigh_node_free_ref(struct neigh_node *neigh_node)
{
	if (atomic_dec_and_test(&neigh_node->refcount))
		call_rcu(&neigh_node->rcu, neigh_node_free_rcu);
}

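/* allocate a new last-hop neighbor entry and add it to the originator's
 * neighbor list; the caller receives its own reference */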
struct neigh_node *create_neighbor(struct orig_node *orig_node,
				   struct orig_node *orig_neigh_node,
				   uint8_t *neigh,
				   struct hard_iface *if_incoming)
{
	struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
	struct neigh_node *neigh_node;

	bat_dbg(DBG_BATMAN, bat_priv,
		"Creating new last-hop neighbor of originator\n");

	neigh_node = kzalloc(sizeof(struct neigh_node), GFP_ATOMIC);
	if (!neigh_node)
		return NULL;

	INIT_HLIST_NODE(&neigh_node->list);
	INIT_LIST_HEAD(&neigh_node->bonding_list);

	memcpy(neigh_node->addr, neigh, ETH_ALEN);
	neigh_node->orig_node = orig_neigh_node;
	neigh_node->if_incoming = if_incoming;

	/* extra reference for return */
	atomic_set(&neigh_node->refcount, 2);

	spin_lock_bh(&orig_node->neigh_list_lock);
	hlist_add_head_rcu(&neigh_node->list, &orig_node->neigh_list);
	spin_unlock_bh(&orig_node->neigh_list_lock);
	return neigh_node;
}

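/* free an orig_node after the RCU grace period: release all bonding and
 * neighbor references, drop queued fragments and global HNA entries */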
static void orig_node_free_rcu(struct rcu_head *rcu)
{
	struct hlist_node *node, *node_tmp;
	struct neigh_node *neigh_node, *tmp_neigh_node;
	struct orig_node *orig_node;

	orig_node = container_of(rcu, struct orig_node, rcu);

	spin_lock_bh(&orig_node->neigh_list_lock);

	/* for all bonding members ... */
	list_for_each_entry_safe(neigh_node, tmp_neigh_node,
				 &orig_node->bond_list, bonding_list) {
		list_del_rcu(&neigh_node->bonding_list);
		neigh_node_free_ref(neigh_node);
	}

	/* for all neighbors towards this originator ... */
	hlist_for_each_entry_safe(neigh_node, node, node_tmp,
				  &orig_node->neigh_list, list) {
		hlist_del_rcu(&neigh_node->list);
		neigh_node_free_ref(neigh_node);
	}

	spin_unlock_bh(&orig_node->neigh_list_lock);

	frag_list_free(&orig_node->frag_list);
	hna_global_del_orig(orig_node->bat_priv, orig_node,
			    "originator timed out");

	kfree(orig_node->bcast_own);
	kfree(orig_node->bcast_own_sum);
	kfree(orig_node);
}

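/* drop an orig_node reference; schedule the RCU-delayed free when it was
 * the last one */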
void orig_node_free_ref(struct orig_node *orig_node)
{
	if (atomic_dec_and_test(&orig_node->refcount))
		call_rcu(&orig_node->rcu, orig_node_free_rcu);
}

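/* tear down the originator table: stop the purge worker and release the
 * reference of every orig_node before destroying the hash */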
void originator_free(struct bat_priv *bat_priv)
{
	struct hashtable_t *hash = bat_priv->orig_hash;
	struct hlist_node *node, *node_tmp;
	struct hlist_head *head;
	spinlock_t *list_lock; /* spinlock to protect write access */
	struct orig_node *orig_node;
	int i;

	if (!hash)
		return;

	cancel_delayed_work_sync(&bat_priv->orig_work);

	bat_priv->orig_hash = NULL;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];
		list_lock = &hash->list_locks[i];

		spin_lock_bh(list_lock);
		hlist_for_each_entry_safe(orig_node, node, node_tmp,
					  head, hash_entry) {

			hlist_del_rcu(node);
			orig_node_free_ref(orig_node);
		}
		spin_unlock_bh(list_lock);
	}

	hash_destroy(hash);
}

/* this function finds or creates an originator entry for the given
 * address if it does not exist */
struct orig_node *get_orig_node(struct bat_priv *bat_priv, uint8_t *addr)
{
	struct orig_node *orig_node;
	int size;
	int hash_added;

	orig_node = orig_hash_find(bat_priv, addr);
	if (orig_node)
		return orig_node;

	bat_dbg(DBG_BATMAN, bat_priv,
		"Creating new originator: %pM\n", addr);

	orig_node = kzalloc(sizeof(struct orig_node), GFP_ATOMIC);
	if (!orig_node)
		return NULL;

	INIT_HLIST_HEAD(&orig_node->neigh_list);
	INIT_LIST_HEAD(&orig_node->bond_list);
	spin_lock_init(&orig_node->ogm_cnt_lock);
	spin_lock_init(&orig_node->bcast_seqno_lock);
	spin_lock_init(&orig_node->neigh_list_lock);

	/* extra reference for return */
	atomic_set(&orig_node->refcount, 2);

	orig_node->bat_priv = bat_priv;
	memcpy(orig_node->orig, addr, ETH_ALEN);
	orig_node->router = NULL;
	orig_node->hna_buff = NULL;
	orig_node->bcast_seqno_reset = jiffies - 1
		- msecs_to_jiffies(RESET_PROTECTION_MS);
	orig_node->batman_seqno_reset = jiffies - 1
		- msecs_to_jiffies(RESET_PROTECTION_MS);

	atomic_set(&orig_node->bond_candidates, 0);

	size = bat_priv->num_ifaces * sizeof(unsigned long) * NUM_WORDS;

	orig_node->bcast_own = kzalloc(size, GFP_ATOMIC);
	if (!orig_node->bcast_own)
		goto free_orig_node;

	size = bat_priv->num_ifaces * sizeof(uint8_t);
	orig_node->bcast_own_sum = kzalloc(size, GFP_ATOMIC);

	INIT_LIST_HEAD(&orig_node->frag_list);
	orig_node->last_frag_packet = 0;

	if (!orig_node->bcast_own_sum)
		goto free_bcast_own;

	hash_added = hash_add(bat_priv->orig_hash, compare_orig,
			      choose_orig, orig_node, &orig_node->hash_entry);
	if (hash_added < 0)
		goto free_bcast_own_sum;

	return orig_node;
free_bcast_own_sum:
	kfree(orig_node->bcast_own_sum);
free_bcast_own:
	kfree(orig_node->bcast_own);
free_orig_node:
	kfree(orig_node);
	return NULL;
}

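/* walk the neighbor list of orig_node, remove neighbors that timed out or
 * whose incoming interface went away, and report the best remaining
 * neighbor (by tq_avg) via best_neigh_node; returns true if any neighbor
 * was purged */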
static bool purge_orig_neighbors(struct bat_priv *bat_priv,
				 struct orig_node *orig_node,
				 struct neigh_node **best_neigh_node)
{
	struct hlist_node *node, *node_tmp;
	struct neigh_node *neigh_node;
	bool neigh_purged = false;

	*best_neigh_node = NULL;

	spin_lock_bh(&orig_node->neigh_list_lock);

	/* for all neighbors towards this originator ... */
	hlist_for_each_entry_safe(neigh_node, node, node_tmp,
				  &orig_node->neigh_list, list) {

		if ((time_after(jiffies,
			neigh_node->last_valid + PURGE_TIMEOUT * HZ)) ||
		    (neigh_node->if_incoming->if_status == IF_INACTIVE) ||
		    (neigh_node->if_incoming->if_status == IF_NOT_IN_USE) ||
		    (neigh_node->if_incoming->if_status == IF_TO_BE_REMOVED)) {

			if ((neigh_node->if_incoming->if_status ==
							IF_INACTIVE) ||
			    (neigh_node->if_incoming->if_status ==
							IF_NOT_IN_USE) ||
			    (neigh_node->if_incoming->if_status ==
							IF_TO_BE_REMOVED))
				bat_dbg(DBG_BATMAN, bat_priv,
					"neighbor purge: originator %pM, "
					"neighbor: %pM, iface: %s\n",
					orig_node->orig, neigh_node->addr,
					neigh_node->if_incoming->net_dev->name);
			else
				bat_dbg(DBG_BATMAN, bat_priv,
					"neighbor timeout: originator %pM, "
					"neighbor: %pM, last_valid: %lu\n",
					orig_node->orig, neigh_node->addr,
					(neigh_node->last_valid / HZ));

			neigh_purged = true;

			hlist_del_rcu(&neigh_node->list);
			bonding_candidate_del(orig_node, neigh_node);
			neigh_node_free_ref(neigh_node);
		} else {
			if ((!*best_neigh_node) ||
			    (neigh_node->tq_avg > (*best_neigh_node)->tq_avg))
				*best_neigh_node = neigh_node;
		}
	}

	spin_unlock_bh(&orig_node->neigh_list_lock);
	return neigh_purged;
}

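/* check whether orig_node itself timed out (2 * PURGE_TIMEOUT); if not,
 * purge its stale neighbors and re-route through the best remaining one.
 * Returns true if the originator should be removed entirely. */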
static bool purge_orig_node(struct bat_priv *bat_priv,
			    struct orig_node *orig_node)
{
	struct neigh_node *best_neigh_node;

	if (time_after(jiffies,
		       orig_node->last_valid + 2 * PURGE_TIMEOUT * HZ)) {

		bat_dbg(DBG_BATMAN, bat_priv,
			"Originator timeout: originator %pM, last_valid %lu\n",
			orig_node->orig, (orig_node->last_valid / HZ));
		return true;
	} else {
		if (purge_orig_neighbors(bat_priv, orig_node,
					 &best_neigh_node)) {
			update_routes(bat_priv, orig_node,
				      best_neigh_node,
				      orig_node->hna_buff,
				      orig_node->hna_buff_len);
		}
	}

	return false;
}

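/* walk the whole originator hash: drop timed-out originators (and their
 * gateway entries), expire stale fragment lists, then purge gateway and
 * soft-interface neighbor state */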
static void _purge_orig(struct bat_priv *bat_priv)
{
	struct hashtable_t *hash = bat_priv->orig_hash;
	struct hlist_node *node, *node_tmp;
	struct hlist_head *head;
	spinlock_t *list_lock; /* spinlock to protect write access */
	struct orig_node *orig_node;
	int i;

	if (!hash)
		return;

	/* for all origins... */
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];
		list_lock = &hash->list_locks[i];

		spin_lock_bh(list_lock);
		hlist_for_each_entry_safe(orig_node, node, node_tmp,
					  head, hash_entry) {
			if (purge_orig_node(bat_priv, orig_node)) {
				if (orig_node->gw_flags)
					gw_node_delete(bat_priv, orig_node);
				hlist_del_rcu(node);
				orig_node_free_ref(orig_node);
				continue;
			}

			if (time_after(jiffies, orig_node->last_frag_packet +
						msecs_to_jiffies(FRAG_TIMEOUT)))
				frag_list_free(&orig_node->frag_list);
		}
		spin_unlock_bh(list_lock);
	}

	gw_node_purge(bat_priv);
	gw_election(bat_priv);

	softif_neigh_purge(bat_priv);
}

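/* delayed work callback: run the purge and re-arm the timer for the next
 * round */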
static void purge_orig(struct work_struct *work)
{
	struct delayed_work *delayed_work =
		container_of(work, struct delayed_work, work);
	struct bat_priv *bat_priv =
		container_of(delayed_work, struct bat_priv, orig_work);

	_purge_orig(bat_priv);
	start_purge_timer(bat_priv);
}

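/* trigger an immediate purge run without waiting for the timer */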
void purge_orig_ref(struct bat_priv *bat_priv)
{
	_purge_orig(bat_priv);
}

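/* seq_file output for the originator table: print every known originator
 * with its best next hop and the list of potential next hops */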
int orig_seq_print_text(struct seq_file *seq, void *offset)
{
	struct net_device *net_dev = (struct net_device *)seq->private;
	struct bat_priv *bat_priv = netdev_priv(net_dev);
	struct hashtable_t *hash = bat_priv->orig_hash;
	struct hlist_node *node, *node_tmp;
	struct hlist_head *head;
	struct orig_node *orig_node;
	struct neigh_node *neigh_node;
	int batman_count = 0;
	int last_seen_secs;
	int last_seen_msecs;
	int i;

	if ((!bat_priv->primary_if) ||
	    (bat_priv->primary_if->if_status != IF_ACTIVE)) {
		if (!bat_priv->primary_if)
			return seq_printf(seq, "BATMAN mesh %s disabled - "
					  "please specify interfaces to enable it\n",
					  net_dev->name);

		return seq_printf(seq, "BATMAN mesh %s "
				  "disabled - primary interface not active\n",
				  net_dev->name);
	}

	seq_printf(seq, "[B.A.T.M.A.N. adv %s%s, MainIF/MAC: %s/%pM (%s)]\n",
		   SOURCE_VERSION, REVISION_VERSION_STR,
		   bat_priv->primary_if->net_dev->name,
		   bat_priv->primary_if->net_dev->dev_addr, net_dev->name);
	seq_printf(seq, " %-15s %s (%s/%i) %17s [%10s]: %20s ...\n",
		   "Originator", "last-seen", "#", TQ_MAX_VALUE, "Nexthop",
		   "outgoingIF", "Potential nexthops");

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) {
			if (!orig_node->router)
				continue;

			if (orig_node->router->tq_avg == 0)
				continue;

			last_seen_secs = jiffies_to_msecs(jiffies -
						orig_node->last_valid) / 1000;
			last_seen_msecs = jiffies_to_msecs(jiffies -
						orig_node->last_valid) % 1000;

			neigh_node = orig_node->router;
			seq_printf(seq, "%pM %4i.%03is (%3i) %pM [%10s]:",
				   orig_node->orig, last_seen_secs,
				   last_seen_msecs, neigh_node->tq_avg,
				   neigh_node->addr,
				   neigh_node->if_incoming->net_dev->name);

			hlist_for_each_entry_rcu(neigh_node, node_tmp,
						 &orig_node->neigh_list, list) {
				seq_printf(seq, " %pM (%3i)", neigh_node->addr,
					   neigh_node->tq_avg);
			}

			seq_printf(seq, "\n");
			batman_count++;
		}
		rcu_read_unlock();
	}

	if (batman_count == 0)
		seq_printf(seq, "No batman nodes in range ...\n");

	return 0;
}

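/* grow the per-interface bcast_own / bcast_own_sum arrays of one orig_node
 * after a hard interface was added (max_if_num is the new interface count) */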
static int orig_node_add_if(struct orig_node *orig_node, int max_if_num)
{
	void *data_ptr;

	data_ptr = kmalloc(max_if_num * sizeof(unsigned long) * NUM_WORDS,
			   GFP_ATOMIC);
	if (!data_ptr) {
		pr_err("Can't resize orig: out of memory\n");
		return -1;
	}

	memcpy(data_ptr, orig_node->bcast_own,
	       (max_if_num - 1) * sizeof(unsigned long) * NUM_WORDS);
	kfree(orig_node->bcast_own);
	orig_node->bcast_own = data_ptr;

	data_ptr = kmalloc(max_if_num * sizeof(uint8_t), GFP_ATOMIC);
	if (!data_ptr) {
		pr_err("Can't resize orig: out of memory\n");
		return -1;
	}

	memcpy(data_ptr, orig_node->bcast_own_sum,
	       (max_if_num - 1) * sizeof(uint8_t));
	kfree(orig_node->bcast_own_sum);
	orig_node->bcast_own_sum = data_ptr;

	return 0;
}

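/* a hard interface was added: resize the per-interface counters of every
 * orig_node in the hash */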
int orig_hash_add_if(struct hard_iface *hard_iface, int max_if_num)
{
	struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
	struct hashtable_t *hash = bat_priv->orig_hash;
	struct hlist_node *node;
	struct hlist_head *head;
	struct orig_node *orig_node;
	int i, ret;

	/* resize all orig nodes because orig_node->bcast_own(_sum) depend on
	 * if_num */
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) {
			spin_lock_bh(&orig_node->ogm_cnt_lock);
			ret = orig_node_add_if(orig_node, max_if_num);
			spin_unlock_bh(&orig_node->ogm_cnt_lock);

			if (ret == -1)
				goto err;
		}
		rcu_read_unlock();
	}

	return 0;

err:
	rcu_read_unlock();
	return -ENOMEM;
}

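/* shrink the per-interface arrays of one orig_node after the interface with
 * index del_if_num was removed; max_if_num is the remaining interface count */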
static int orig_node_del_if(struct orig_node *orig_node,
			    int max_if_num, int del_if_num)
{
	void *data_ptr = NULL;
	int chunk_size;

	/* last interface was removed */
	if (max_if_num == 0)
		goto free_bcast_own;

	chunk_size = sizeof(unsigned long) * NUM_WORDS;
	data_ptr = kmalloc(max_if_num * chunk_size, GFP_ATOMIC);
	if (!data_ptr) {
		pr_err("Can't resize orig: out of memory\n");
		return -1;
	}

	/* copy first part */
	memcpy(data_ptr, orig_node->bcast_own, del_if_num * chunk_size);

	/* copy second part */
	memcpy(data_ptr + del_if_num * chunk_size,
	       orig_node->bcast_own + ((del_if_num + 1) * chunk_size),
	       (max_if_num - del_if_num) * chunk_size);

free_bcast_own:
	kfree(orig_node->bcast_own);
	orig_node->bcast_own = data_ptr;

	if (max_if_num == 0)
		goto free_own_sum;

	data_ptr = kmalloc(max_if_num * sizeof(uint8_t), GFP_ATOMIC);
	if (!data_ptr) {
		pr_err("Can't resize orig: out of memory\n");
		return -1;
	}

	memcpy(data_ptr, orig_node->bcast_own_sum,
	       del_if_num * sizeof(uint8_t));

	memcpy(data_ptr + del_if_num * sizeof(uint8_t),
	       orig_node->bcast_own_sum + ((del_if_num + 1) * sizeof(uint8_t)),
	       (max_if_num - del_if_num) * sizeof(uint8_t));

free_own_sum:
	kfree(orig_node->bcast_own_sum);
	orig_node->bcast_own_sum = data_ptr;

	return 0;
}

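/* a hard interface was removed: shrink the per-interface counters of every
 * orig_node and renumber the remaining interfaces */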
int orig_hash_del_if(struct hard_iface *hard_iface, int max_if_num)
{
	struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
	struct hashtable_t *hash = bat_priv->orig_hash;
	struct hlist_node *node;
	struct hlist_head *head;
	struct hard_iface *hard_iface_tmp;
	struct orig_node *orig_node;
	int i, ret;

	/* resize all orig nodes because orig_node->bcast_own(_sum) depend on
	 * if_num */
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) {
			spin_lock_bh(&orig_node->ogm_cnt_lock);
			ret = orig_node_del_if(orig_node, max_if_num,
					       hard_iface->if_num);
			spin_unlock_bh(&orig_node->ogm_cnt_lock);

			if (ret == -1)
				goto err;
		}
		rcu_read_unlock();
	}

	/* renumber remaining batman interfaces _inside_ of orig_hash_lock */
	rcu_read_lock();
	list_for_each_entry_rcu(hard_iface_tmp, &hardif_list, list) {
		if (hard_iface_tmp->if_status == IF_NOT_IN_USE)
			continue;

		if (hard_iface == hard_iface_tmp)
			continue;

		if (hard_iface->soft_iface != hard_iface_tmp->soft_iface)
			continue;

		if (hard_iface_tmp->if_num > hard_iface->if_num)
			hard_iface_tmp->if_num--;
	}
	rcu_read_unlock();

	hard_iface->if_num = -1;
	return 0;

err:
	rcu_read_unlock();
	return -ENOMEM;
}