1 /*
2  * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
3  *
4  * Marek Lindner, Simon Wunderlich
5  *
6  * This program is free software; you can redistribute it and/or
7  * modify it under the terms of version 2 of the GNU General Public
8  * License as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13  * General Public License for more details.
14  *
15  * You should have received a copy of the GNU General Public License
16  * along with this program; if not, write to the Free Software
17  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18  * 02110-1301, USA
19  *
20  */
21 
22 #include "main.h"
23 #include "translation-table.h"
24 #include "soft-interface.h"
25 #include "hash.h"
26 #include "originator.h"
27 
28 static void hna_local_purge(struct work_struct *work);
29 static void _hna_global_del_orig(struct bat_priv *bat_priv,
30 				 struct hna_global_entry *hna_global_entry,
31 				 char *message);
32 
33 /* returns 1 if they are the same mac addr */
compare_lhna(struct hlist_node * node,void * data2)34 static int compare_lhna(struct hlist_node *node, void *data2)
35 {
36 	void *data1 = container_of(node, struct hna_local_entry, hash_entry);
37 
38 	return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0);
39 }
40 
41 /* returns 1 if they are the same mac addr */
compare_ghna(struct hlist_node * node,void * data2)42 static int compare_ghna(struct hlist_node *node, void *data2)
43 {
44 	void *data1 = container_of(node, struct hna_global_entry, hash_entry);
45 
46 	return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0);
47 }
48 
/* (re)arm the periodic purge of stale local hna entries; hna_local_purge()
 * re-queues itself through this helper, giving a 10 second period */
static void hna_local_start_timer(struct bat_priv *bat_priv)
{
	INIT_DELAYED_WORK(&bat_priv->hna_work, hna_local_purge);
	queue_delayed_work(bat_event_workqueue, &bat_priv->hna_work, 10 * HZ);
}
54 
hna_local_hash_find(struct bat_priv * bat_priv,void * data)55 static struct hna_local_entry *hna_local_hash_find(struct bat_priv *bat_priv,
56 						   void *data)
57 {
58 	struct hashtable_t *hash = bat_priv->hna_local_hash;
59 	struct hlist_head *head;
60 	struct hlist_node *node;
61 	struct hna_local_entry *hna_local_entry, *hna_local_entry_tmp = NULL;
62 	int index;
63 
64 	if (!hash)
65 		return NULL;
66 
67 	index = choose_orig(data, hash->size);
68 	head = &hash->table[index];
69 
70 	rcu_read_lock();
71 	hlist_for_each_entry_rcu(hna_local_entry, node, head, hash_entry) {
72 		if (!compare_eth(hna_local_entry, data))
73 			continue;
74 
75 		hna_local_entry_tmp = hna_local_entry;
76 		break;
77 	}
78 	rcu_read_unlock();
79 
80 	return hna_local_entry_tmp;
81 }
82 
hna_global_hash_find(struct bat_priv * bat_priv,void * data)83 static struct hna_global_entry *hna_global_hash_find(struct bat_priv *bat_priv,
84 						     void *data)
85 {
86 	struct hashtable_t *hash = bat_priv->hna_global_hash;
87 	struct hlist_head *head;
88 	struct hlist_node *node;
89 	struct hna_global_entry *hna_global_entry;
90 	struct hna_global_entry *hna_global_entry_tmp = NULL;
91 	int index;
92 
93 	if (!hash)
94 		return NULL;
95 
96 	index = choose_orig(data, hash->size);
97 	head = &hash->table[index];
98 
99 	rcu_read_lock();
100 	hlist_for_each_entry_rcu(hna_global_entry, node, head, hash_entry) {
101 		if (!compare_eth(hna_global_entry, data))
102 			continue;
103 
104 		hna_global_entry_tmp = hna_global_entry;
105 		break;
106 	}
107 	rcu_read_unlock();
108 
109 	return hna_global_entry_tmp;
110 }
111 
hna_local_init(struct bat_priv * bat_priv)112 int hna_local_init(struct bat_priv *bat_priv)
113 {
114 	if (bat_priv->hna_local_hash)
115 		return 1;
116 
117 	bat_priv->hna_local_hash = hash_new(1024);
118 
119 	if (!bat_priv->hna_local_hash)
120 		return 0;
121 
122 	atomic_set(&bat_priv->hna_local_changed, 0);
123 	hna_local_start_timer(bat_priv);
124 
125 	return 1;
126 }
127 
/* announce 'addr' as a locally reachable host: refresh the timestamp if it
 * is already known, otherwise create a new local hna entry (subject to the
 * packet-size limit) and drop any matching global entry.
 *
 * Fix: the lhash lock used to be dropped between the lookup and the insert,
 * so two concurrent calls for the same address could both miss the lookup
 * and insert duplicate entries (and double-count num_local_hna). The entry
 * is now re-checked under the lock right before hash_add(). */
void hna_local_add(struct net_device *soft_iface, uint8_t *addr)
{
	struct bat_priv *bat_priv = netdev_priv(soft_iface);
	struct hna_local_entry *hna_local_entry;
	struct hna_global_entry *hna_global_entry;
	int required_bytes;

	spin_lock_bh(&bat_priv->hna_lhash_lock);
	hna_local_entry = hna_local_hash_find(bat_priv, addr);

	if (hna_local_entry) {
		/* already announced - just refresh the purge timestamp */
		hna_local_entry->last_seen = jiffies;
		spin_unlock_bh(&bat_priv->hna_lhash_lock);
		return;
	}
	spin_unlock_bh(&bat_priv->hna_lhash_lock);

	/* only announce as many hosts as possible in the batman-packet and
	   space in batman_packet->num_hna That also should give a limit to
	   MAC-flooding. */
	required_bytes = (bat_priv->num_local_hna + 1) * ETH_ALEN;
	required_bytes += BAT_PACKET_LEN;

	if ((required_bytes > ETH_DATA_LEN) ||
	    (atomic_read(&bat_priv->aggregated_ogms) &&
	     required_bytes > MAX_AGGREGATION_BYTES) ||
	    (bat_priv->num_local_hna + 1 > 255)) {
		bat_dbg(DBG_ROUTES, bat_priv,
			"Can't add new local hna entry (%pM): "
			"number of local hna entries exceeds packet size\n",
			addr);
		return;
	}

	bat_dbg(DBG_ROUTES, bat_priv,
		"Creating new local hna entry: %pM\n", addr);

	/* GFP_ATOMIC: may be called from softirq context */
	hna_local_entry = kmalloc(sizeof(struct hna_local_entry), GFP_ATOMIC);
	if (!hna_local_entry)
		return;

	memcpy(hna_local_entry->addr, addr, ETH_ALEN);
	hna_local_entry->last_seen = jiffies;

	/* the batman interface mac address should never be purged */
	if (compare_eth(addr, soft_iface->dev_addr))
		hna_local_entry->never_purge = 1;
	else
		hna_local_entry->never_purge = 0;

	spin_lock_bh(&bat_priv->hna_lhash_lock);

	/* re-check: the lock was dropped while allocating, another context
	 * may have added the same address in the meantime */
	if (hna_local_hash_find(bat_priv, addr)) {
		spin_unlock_bh(&bat_priv->hna_lhash_lock);
		kfree(hna_local_entry);
		return;
	}

	hash_add(bat_priv->hna_local_hash, compare_lhna, choose_orig,
		 hna_local_entry, &hna_local_entry->hash_entry);
	bat_priv->num_local_hna++;
	atomic_set(&bat_priv->hna_local_changed, 1);

	spin_unlock_bh(&bat_priv->hna_lhash_lock);

	/* remove address from global hash if present */
	spin_lock_bh(&bat_priv->hna_ghash_lock);

	hna_global_entry = hna_global_hash_find(bat_priv, addr);

	if (hna_global_entry)
		_hna_global_del_orig(bat_priv, hna_global_entry,
				     "local hna received");

	spin_unlock_bh(&bat_priv->hna_ghash_lock);
}
197 
hna_local_fill_buffer(struct bat_priv * bat_priv,unsigned char * buff,int buff_len)198 int hna_local_fill_buffer(struct bat_priv *bat_priv,
199 			  unsigned char *buff, int buff_len)
200 {
201 	struct hashtable_t *hash = bat_priv->hna_local_hash;
202 	struct hna_local_entry *hna_local_entry;
203 	struct hlist_node *node;
204 	struct hlist_head *head;
205 	int i, count = 0;
206 
207 	spin_lock_bh(&bat_priv->hna_lhash_lock);
208 
209 	for (i = 0; i < hash->size; i++) {
210 		head = &hash->table[i];
211 
212 		rcu_read_lock();
213 		hlist_for_each_entry_rcu(hna_local_entry, node,
214 					 head, hash_entry) {
215 			if (buff_len < (count + 1) * ETH_ALEN)
216 				break;
217 
218 			memcpy(buff + (count * ETH_ALEN), hna_local_entry->addr,
219 			       ETH_ALEN);
220 
221 			count++;
222 		}
223 		rcu_read_unlock();
224 	}
225 
226 	/* if we did not get all new local hnas see you next time  ;-) */
227 	if (count == bat_priv->num_local_hna)
228 		atomic_set(&bat_priv->hna_local_changed, 0);
229 
230 	spin_unlock_bh(&bat_priv->hna_lhash_lock);
231 	return count;
232 }
233 
/* debugfs/seq_file dump of all locally announced hna addresses.
 * Returns 0 on success, -ENOMEM if the intermediate buffer allocation fails */
int hna_local_seq_print_text(struct seq_file *seq, void *offset)
{
	struct net_device *net_dev = (struct net_device *)seq->private;
	struct bat_priv *bat_priv = netdev_priv(net_dev);
	struct hashtable_t *hash = bat_priv->hna_local_hash;
	struct hna_local_entry *hna_local_entry;
	struct hlist_node *node;
	struct hlist_head *head;
	size_t buf_size, pos;
	char *buff;
	int i;

	if (!bat_priv->primary_if) {
		/* NOTE(review): propagates seq_printf()'s return value
		 * directly - relies on the pre-4.3 int-returning seq_printf */
		return seq_printf(seq, "BATMAN mesh %s disabled - "
			       "please specify interfaces to enable it\n",
			       net_dev->name);
	}

	seq_printf(seq, "Locally retrieved addresses (from %s) "
		   "announced via HNA:\n",
		   net_dev->name);

	/* hold the hash lock across both passes so the size estimate below
	 * stays valid for the formatting pass */
	spin_lock_bh(&bat_priv->hna_lhash_lock);

	buf_size = 1;
	/* Estimate length for: " * xx:xx:xx:xx:xx:xx\n" */
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		__hlist_for_each_rcu(node, head)
			buf_size += 21;
		rcu_read_unlock();
	}

	/* GFP_ATOMIC because a spinlock (BH disabled) is held */
	buff = kmalloc(buf_size, GFP_ATOMIC);
	if (!buff) {
		spin_unlock_bh(&bat_priv->hna_lhash_lock);
		return -ENOMEM;
	}

	buff[0] = '\0';
	pos = 0;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(hna_local_entry, node,
					 head, hash_entry) {
			/* each entry formats to exactly 21 chars + NUL,
			 * matching the 21-byte estimate above */
			pos += snprintf(buff + pos, 22, " * %pM\n",
					hna_local_entry->addr);
		}
		rcu_read_unlock();
	}

	spin_unlock_bh(&bat_priv->hna_lhash_lock);

	seq_printf(seq, "%s", buff);
	kfree(buff);
	return 0;
}
296 
_hna_local_del(struct hlist_node * node,void * arg)297 static void _hna_local_del(struct hlist_node *node, void *arg)
298 {
299 	struct bat_priv *bat_priv = (struct bat_priv *)arg;
300 	void *data = container_of(node, struct hna_local_entry, hash_entry);
301 
302 	kfree(data);
303 	bat_priv->num_local_hna--;
304 	atomic_set(&bat_priv->hna_local_changed, 1);
305 }
306 
/* unlink a local hna entry from the hash and free it.
 * Caller must hold hna_lhash_lock; 'message' is logged as the reason */
static void hna_local_del(struct bat_priv *bat_priv,
			  struct hna_local_entry *hna_local_entry,
			  char *message)
{
	bat_dbg(DBG_ROUTES, bat_priv, "Deleting local hna entry (%pM): %s\n",
		hna_local_entry->addr, message);

	hash_remove(bat_priv->hna_local_hash, compare_lhna, choose_orig,
		    hna_local_entry->addr);
	/* free + bookkeeping happens in the shared helper */
	_hna_local_del(&hna_local_entry->hash_entry, bat_priv);
}
318 
/* remove the local hna entry for 'addr' if one exists; 'message' is logged
 * as the deletion reason. No-op for unknown addresses. */
void hna_local_remove(struct bat_priv *bat_priv,
		      uint8_t *addr, char *message)
{
	struct hna_local_entry *entry;

	spin_lock_bh(&bat_priv->hna_lhash_lock);

	entry = hna_local_hash_find(bat_priv, addr);
	if (entry)
		hna_local_del(bat_priv, entry, message);

	spin_unlock_bh(&bat_priv->hna_lhash_lock);
}
333 
/* periodic worker: drop local hna entries not seen for LOCAL_HNA_TIMEOUT
 * seconds, then re-arm itself via hna_local_start_timer() */
static void hna_local_purge(struct work_struct *work)
{
	struct delayed_work *delayed_work =
		container_of(work, struct delayed_work, work);
	struct bat_priv *bat_priv =
		container_of(delayed_work, struct bat_priv, hna_work);
	struct hashtable_t *hash = bat_priv->hna_local_hash;
	struct hna_local_entry *hna_local_entry;
	struct hlist_node *node, *node_tmp;
	struct hlist_head *head;
	unsigned long timeout;
	int i;

	spin_lock_bh(&bat_priv->hna_lhash_lock);

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		/* _safe variant: hna_local_del() unlinks the current node */
		hlist_for_each_entry_safe(hna_local_entry, node, node_tmp,
					  head, hash_entry) {
			/* e.g. the soft-interface's own mac address */
			if (hna_local_entry->never_purge)
				continue;

			timeout = hna_local_entry->last_seen;
			timeout += LOCAL_HNA_TIMEOUT * HZ;

			/* not yet expired (jiffies-wraparound safe) */
			if (time_before(jiffies, timeout))
				continue;

			hna_local_del(bat_priv, hna_local_entry,
				      "address timed out");
		}
	}

	spin_unlock_bh(&bat_priv->hna_lhash_lock);
	/* schedule the next purge round */
	hna_local_start_timer(bat_priv);
}
371 
/* tear down the local hna table: stop the purge worker, free every entry
 * and the hash itself. Safe to call when the table was never initialized */
void hna_local_free(struct bat_priv *bat_priv)
{
	if (!bat_priv->hna_local_hash)
		return;

	/* stop the purge worker before freeing what it iterates over */
	cancel_delayed_work_sync(&bat_priv->hna_work);
	hash_delete(bat_priv->hna_local_hash, _hna_local_del, bat_priv);
	bat_priv->hna_local_hash = NULL;
}
381 
hna_global_init(struct bat_priv * bat_priv)382 int hna_global_init(struct bat_priv *bat_priv)
383 {
384 	if (bat_priv->hna_global_hash)
385 		return 1;
386 
387 	bat_priv->hna_global_hash = hash_new(1024);
388 
389 	if (!bat_priv->hna_global_hash)
390 		return 0;
391 
392 	return 1;
393 }
394 
hna_global_add_orig(struct bat_priv * bat_priv,struct orig_node * orig_node,unsigned char * hna_buff,int hna_buff_len)395 void hna_global_add_orig(struct bat_priv *bat_priv,
396 			 struct orig_node *orig_node,
397 			 unsigned char *hna_buff, int hna_buff_len)
398 {
399 	struct hna_global_entry *hna_global_entry;
400 	struct hna_local_entry *hna_local_entry;
401 	int hna_buff_count = 0;
402 	unsigned char *hna_ptr;
403 
404 	while ((hna_buff_count + 1) * ETH_ALEN <= hna_buff_len) {
405 		spin_lock_bh(&bat_priv->hna_ghash_lock);
406 
407 		hna_ptr = hna_buff + (hna_buff_count * ETH_ALEN);
408 		hna_global_entry = hna_global_hash_find(bat_priv, hna_ptr);
409 
410 		if (!hna_global_entry) {
411 			spin_unlock_bh(&bat_priv->hna_ghash_lock);
412 
413 			hna_global_entry =
414 				kmalloc(sizeof(struct hna_global_entry),
415 					GFP_ATOMIC);
416 
417 			if (!hna_global_entry)
418 				break;
419 
420 			memcpy(hna_global_entry->addr, hna_ptr, ETH_ALEN);
421 
422 			bat_dbg(DBG_ROUTES, bat_priv,
423 				"Creating new global hna entry: "
424 				"%pM (via %pM)\n",
425 				hna_global_entry->addr, orig_node->orig);
426 
427 			spin_lock_bh(&bat_priv->hna_ghash_lock);
428 			hash_add(bat_priv->hna_global_hash, compare_ghna,
429 				 choose_orig, hna_global_entry,
430 				 &hna_global_entry->hash_entry);
431 
432 		}
433 
434 		hna_global_entry->orig_node = orig_node;
435 		spin_unlock_bh(&bat_priv->hna_ghash_lock);
436 
437 		/* remove address from local hash if present */
438 		spin_lock_bh(&bat_priv->hna_lhash_lock);
439 
440 		hna_ptr = hna_buff + (hna_buff_count * ETH_ALEN);
441 		hna_local_entry = hna_local_hash_find(bat_priv, hna_ptr);
442 
443 		if (hna_local_entry)
444 			hna_local_del(bat_priv, hna_local_entry,
445 				      "global hna received");
446 
447 		spin_unlock_bh(&bat_priv->hna_lhash_lock);
448 
449 		hna_buff_count++;
450 	}
451 
452 	/* initialize, and overwrite if malloc succeeds */
453 	orig_node->hna_buff = NULL;
454 	orig_node->hna_buff_len = 0;
455 
456 	if (hna_buff_len > 0) {
457 		orig_node->hna_buff = kmalloc(hna_buff_len, GFP_ATOMIC);
458 		if (orig_node->hna_buff) {
459 			memcpy(orig_node->hna_buff, hna_buff, hna_buff_len);
460 			orig_node->hna_buff_len = hna_buff_len;
461 		}
462 	}
463 }
464 
/* debugfs/seq_file dump of all global hna addresses and the originators
 * announcing them. Returns 0 on success, -ENOMEM on allocation failure */
int hna_global_seq_print_text(struct seq_file *seq, void *offset)
{
	struct net_device *net_dev = (struct net_device *)seq->private;
	struct bat_priv *bat_priv = netdev_priv(net_dev);
	struct hashtable_t *hash = bat_priv->hna_global_hash;
	struct hna_global_entry *hna_global_entry;
	struct hlist_node *node;
	struct hlist_head *head;
	size_t buf_size, pos;
	char *buff;
	int i;

	if (!bat_priv->primary_if) {
		/* NOTE(review): propagates seq_printf()'s return value
		 * directly - relies on the pre-4.3 int-returning seq_printf */
		return seq_printf(seq, "BATMAN mesh %s disabled - "
				  "please specify interfaces to enable it\n",
				  net_dev->name);
	}

	seq_printf(seq, "Globally announced HNAs received via the mesh %s\n",
		   net_dev->name);

	/* hold the hash lock across both passes so the size estimate below
	 * stays valid for the formatting pass */
	spin_lock_bh(&bat_priv->hna_ghash_lock);

	buf_size = 1;
	/* Estimate length for: " * xx:xx:xx:xx:xx:xx via xx:xx:xx:xx:xx:xx\n"*/
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		__hlist_for_each_rcu(node, head)
			buf_size += 43;
		rcu_read_unlock();
	}

	/* GFP_ATOMIC because a spinlock (BH disabled) is held */
	buff = kmalloc(buf_size, GFP_ATOMIC);
	if (!buff) {
		spin_unlock_bh(&bat_priv->hna_ghash_lock);
		return -ENOMEM;
	}
	buff[0] = '\0';
	pos = 0;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(hna_global_entry, node,
					 head, hash_entry) {
			/* each entry formats to exactly 43 chars + NUL,
			 * matching the 43-byte estimate above */
			pos += snprintf(buff + pos, 44,
					" * %pM via %pM\n",
					hna_global_entry->addr,
					hna_global_entry->orig_node->orig);
		}
		rcu_read_unlock();
	}

	spin_unlock_bh(&bat_priv->hna_ghash_lock);

	seq_printf(seq, "%s", buff);
	kfree(buff);
	return 0;
}
527 
/* unlink a global hna entry from the hash and free it.
 * Caller must hold hna_ghash_lock; 'message' is logged as the reason */
static void _hna_global_del_orig(struct bat_priv *bat_priv,
				 struct hna_global_entry *hna_global_entry,
				 char *message)
{
	bat_dbg(DBG_ROUTES, bat_priv,
		"Deleting global hna entry %pM (via %pM): %s\n",
		hna_global_entry->addr, hna_global_entry->orig_node->orig,
		message);

	hash_remove(bat_priv->hna_global_hash, compare_ghna, choose_orig,
		    hna_global_entry->addr);
	kfree(hna_global_entry);
}
541 
hna_global_del_orig(struct bat_priv * bat_priv,struct orig_node * orig_node,char * message)542 void hna_global_del_orig(struct bat_priv *bat_priv,
543 			 struct orig_node *orig_node, char *message)
544 {
545 	struct hna_global_entry *hna_global_entry;
546 	int hna_buff_count = 0;
547 	unsigned char *hna_ptr;
548 
549 	if (orig_node->hna_buff_len == 0)
550 		return;
551 
552 	spin_lock_bh(&bat_priv->hna_ghash_lock);
553 
554 	while ((hna_buff_count + 1) * ETH_ALEN <= orig_node->hna_buff_len) {
555 		hna_ptr = orig_node->hna_buff + (hna_buff_count * ETH_ALEN);
556 		hna_global_entry = hna_global_hash_find(bat_priv, hna_ptr);
557 
558 		if ((hna_global_entry) &&
559 		    (hna_global_entry->orig_node == orig_node))
560 			_hna_global_del_orig(bat_priv, hna_global_entry,
561 					     message);
562 
563 		hna_buff_count++;
564 	}
565 
566 	spin_unlock_bh(&bat_priv->hna_ghash_lock);
567 
568 	orig_node->hna_buff_len = 0;
569 	kfree(orig_node->hna_buff);
570 	orig_node->hna_buff = NULL;
571 }
572 
hna_global_del(struct hlist_node * node,void * arg)573 static void hna_global_del(struct hlist_node *node, void *arg)
574 {
575 	void *data = container_of(node, struct hna_global_entry, hash_entry);
576 
577 	kfree(data);
578 }
579 
/* tear down the global hna table: free every entry and the hash itself.
 * Safe to call when the table was never initialized */
void hna_global_free(struct bat_priv *bat_priv)
{
	if (!bat_priv->hna_global_hash)
		return;

	hash_delete(bat_priv->hna_global_hash, hna_global_del, NULL);
	bat_priv->hna_global_hash = NULL;
}
588 
/* resolve a non-mesh mac address to the originator announcing it.
 * Returns the orig_node with its refcount incremented (caller must release
 * it, presumably via orig_node_free_ref - confirm against callers), or NULL
 * if the address is unknown or the originator is already being freed */
struct orig_node *transtable_search(struct bat_priv *bat_priv, uint8_t *addr)
{
	struct hna_global_entry *hna_global_entry;
	struct orig_node *orig_node = NULL;

	spin_lock_bh(&bat_priv->hna_ghash_lock);
	hna_global_entry = hna_global_hash_find(bat_priv, addr);

	if (!hna_global_entry)
		goto out;

	/* refcount 0 means the orig_node is being destroyed - don't
	 * resurrect it */
	if (!atomic_inc_not_zero(&hna_global_entry->orig_node->refcount))
		goto out;

	orig_node = hna_global_entry->orig_node;

out:
	spin_unlock_bh(&bat_priv->hna_ghash_lock);
	return orig_node;
}
609