1 /*
2  *  ebtables
3  *
4  *  Author:
5  *  Bart De Schuymer		<bdschuym@pandora.be>
6  *
7  *  ebtables.c,v 2.0, July, 2002
8  *
9  *  This code is strongly inspired by the iptables code which is
10  *  Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
11  *
12  *  This program is free software; you can redistribute it and/or
13  *  modify it under the terms of the GNU General Public License
14  *  as published by the Free Software Foundation; either version
15  *  2 of the License, or (at your option) any later version.
16  */
17 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
18 #include <linux/kmod.h>
19 #include <linux/module.h>
20 #include <linux/vmalloc.h>
21 #include <linux/netfilter/x_tables.h>
22 #include <linux/netfilter_bridge/ebtables.h>
23 #include <linux/spinlock.h>
24 #include <linux/mutex.h>
25 #include <linux/slab.h>
26 #include <asm/uaccess.h>
27 #include <linux/smp.h>
28 #include <linux/cpumask.h>
29 #include <net/sock.h>
30 /* needed for logical [in,out]-dev filtering */
31 #include "../br_private.h"
32 
33 #define BUGPRINT(format, args...) printk("kernel msg: ebtables bug: please "\
34 					 "report to author: "format, ## args)
35 /* #define BUGPRINT(format, args...) */
36 
37 /*
38  * Each cpu has its own set of counters, so there is no need for write_lock in
39  * the softirq.
40  * For reading or updating the counters, the user context needs to
41  * get a write_lock.
42  */
43 
44 /* The size of each set of counters is altered to get cache alignment */
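/* COUNTER_BASE(c, n, cpu) returns the start of cpu's block of n counters
   inside the flat per-cpu counter array c */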
45 #define SMP_ALIGN(x) (((x) + SMP_CACHE_BYTES-1) & ~(SMP_CACHE_BYTES-1))
46 #define COUNTER_OFFSET(n) (SMP_ALIGN(n * sizeof(struct ebt_counter)))
47 #define COUNTER_BASE(c, n, cpu) ((struct ebt_counter *)(((char *)c) + \
48    COUNTER_OFFSET(n) * cpu))
49 
50 
51 
52 static DEFINE_MUTEX(ebt_mutex);
53 
54 #ifdef CONFIG_COMPAT
55 static void ebt_standard_compat_from_user(void *dst, const void *src)
56 {
57 	int v = *(compat_int_t *)src;
58 
59 	if (v >= 0)
60 		v += xt_compat_calc_jump(NFPROTO_BRIDGE, v);
61 	memcpy(dst, &v, sizeof(v));
62 }
63 
64 static int ebt_standard_compat_to_user(void __user *dst, const void *src)
65 {
66 	compat_int_t cv = *(int *)src;
67 
68 	if (cv >= 0)
69 		cv -= xt_compat_calc_jump(NFPROTO_BRIDGE, cv);
70 	return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
71 }
72 #endif
73 
74 
75 static struct xt_target ebt_standard_target = {
76 	.name       = "standard",
77 	.revision   = 0,
78 	.family     = NFPROTO_BRIDGE,
79 	.targetsize = sizeof(int),
80 #ifdef CONFIG_COMPAT
81 	.compatsize = sizeof(compat_int_t),
82 	.compat_from_user = ebt_standard_compat_from_user,
83 	.compat_to_user =  ebt_standard_compat_to_user,
84 #endif
85 };
86 
87 static inline int
88 ebt_do_watcher(const struct ebt_entry_watcher *w, struct sk_buff *skb,
89 	       struct xt_action_param *par)
90 {
91 	par->target   = w->u.watcher;
92 	par->targinfo = w->data;
93 	w->u.watcher->target(skb, par);
94 	/* watchers don't give a verdict */
95 	return 0;
96 }
97 
98 static inline int
99 ebt_do_match(struct ebt_entry_match *m, const struct sk_buff *skb,
100 	     struct xt_action_param *par)
101 {
102 	par->match     = m->u.match;
103 	par->matchinfo = m->data;
104 	return m->u.match->match(skb, par) ? EBT_MATCH : EBT_NOMATCH;
105 }
106 
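/* returns 0 when the device name matches the rule's interface entry:
   an empty entry matches any device and the byte value 1 acts as a
   trailing wildcard; returns non-zero on a mismatch */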
107 static inline int
108 ebt_dev_check(const char *entry, const struct net_device *device)
109 {
110 	int i = 0;
111 	const char *devname;
112 
113 	if (*entry == '\0')
114 		return 0;
115 	if (!device)
116 		return 1;
117 	devname = device->name;
118 	/* 1 is the wildcard token */
119 	while (entry[i] != '\0' && entry[i] != 1 && entry[i] == devname[i])
120 		i++;
121 	return (devname[i] != entry[i] && entry[i] != 1);
122 }
123 
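/* FWINV2 xors the result of a check with the corresponding EBT_I* flag in
   e->invflags, so a set inversion flag turns a match into a mismatch */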
124 #define FWINV2(bool,invflg) ((bool) ^ !!(e->invflags & invflg))
125 /* process standard matches */
126 static inline int
127 ebt_basic_match(const struct ebt_entry *e, const struct sk_buff *skb,
128                 const struct net_device *in, const struct net_device *out)
129 {
130 	const struct ethhdr *h = eth_hdr(skb);
131 	const struct net_bridge_port *p;
132 	__be16 ethproto;
133 	int verdict, i;
134 
135 	if (vlan_tx_tag_present(skb))
136 		ethproto = htons(ETH_P_8021Q);
137 	else
138 		ethproto = h->h_proto;
139 
140 	if (e->bitmask & EBT_802_3) {
141 		if (FWINV2(ntohs(ethproto) >= 1536, EBT_IPROTO))
142 			return 1;
143 	} else if (!(e->bitmask & EBT_NOPROTO) &&
144 	   FWINV2(e->ethproto != ethproto, EBT_IPROTO))
145 		return 1;
146 
147 	if (FWINV2(ebt_dev_check(e->in, in), EBT_IIN))
148 		return 1;
149 	if (FWINV2(ebt_dev_check(e->out, out), EBT_IOUT))
150 		return 1;
151 	/* rcu_read_lock()ed by nf_hook_slow */
152 	if (in && (p = br_port_get_rcu(in)) != NULL &&
153 	    FWINV2(ebt_dev_check(e->logical_in, p->br->dev), EBT_ILOGICALIN))
154 		return 1;
155 	if (out && (p = br_port_get_rcu(out)) != NULL &&
156 	    FWINV2(ebt_dev_check(e->logical_out, p->br->dev), EBT_ILOGICALOUT))
157 		return 1;
158 
159 	if (e->bitmask & EBT_SOURCEMAC) {
160 		verdict = 0;
161 		for (i = 0; i < 6; i++)
162 			verdict |= (h->h_source[i] ^ e->sourcemac[i]) &
163 			   e->sourcemsk[i];
164 		if (FWINV2(verdict != 0, EBT_ISOURCE))
165 			return 1;
166 	}
167 	if (e->bitmask & EBT_DESTMAC) {
168 		verdict = 0;
169 		for (i = 0; i < 6; i++)
170 			verdict |= (h->h_dest[i] ^ e->destmac[i]) &
171 			   e->destmsk[i];
172 		if (FWINV2(verdict != 0, EBT_IDEST))
173 			return 1;
174 	}
175 	return 0;
176 }
177 
178 static inline __pure
179 struct ebt_entry *ebt_next_entry(const struct ebt_entry *entry)
180 {
181 	return (void *)entry + entry->next_offset;
182 }
183 
184 /* Do some firewalling */
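/* cs[] is the per-cpu chain stack used for jumps into user defined chains:
   each slot remembers where to resume when the called chain returns, and
   sp is the current nesting depth */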
185 unsigned int ebt_do_table(unsigned int hook, struct sk_buff *skb,
186    const struct net_device *in, const struct net_device *out,
187    struct ebt_table *table)
188 {
189 	int i, nentries;
190 	struct ebt_entry *point;
191 	struct ebt_counter *counter_base, *cb_base;
192 	const struct ebt_entry_target *t;
193 	int verdict, sp = 0;
194 	struct ebt_chainstack *cs;
195 	struct ebt_entries *chaininfo;
196 	const char *base;
197 	const struct ebt_table_info *private;
198 	struct xt_action_param acpar;
199 
200 	acpar.family  = NFPROTO_BRIDGE;
201 	acpar.in      = in;
202 	acpar.out     = out;
203 	acpar.hotdrop = false;
204 	acpar.hooknum = hook;
205 
206 	read_lock_bh(&table->lock);
207 	private = table->private;
208 	cb_base = COUNTER_BASE(private->counters, private->nentries,
209 	   smp_processor_id());
210 	if (private->chainstack)
211 		cs = private->chainstack[smp_processor_id()];
212 	else
213 		cs = NULL;
214 	chaininfo = private->hook_entry[hook];
215 	nentries = private->hook_entry[hook]->nentries;
216 	point = (struct ebt_entry *)(private->hook_entry[hook]->data);
217 	counter_base = cb_base + private->hook_entry[hook]->counter_offset;
218 	/* base for chain jumps */
219 	base = private->entries;
220 	i = 0;
221 	while (i < nentries) {
222 		if (ebt_basic_match(point, skb, in, out))
223 			goto letscontinue;
224 
225 		if (EBT_MATCH_ITERATE(point, ebt_do_match, skb, &acpar) != 0)
226 			goto letscontinue;
227 		if (acpar.hotdrop) {
228 			read_unlock_bh(&table->lock);
229 			return NF_DROP;
230 		}
231 
232 		/* increase counter */
233 		(*(counter_base + i)).pcnt++;
234 		(*(counter_base + i)).bcnt += skb->len;
235 
236 		/* these should only watch: not modify, nor tell us
237 		   what to do with the packet */
238 		EBT_WATCHER_ITERATE(point, ebt_do_watcher, skb, &acpar);
239 
240 		t = (struct ebt_entry_target *)
241 		   (((char *)point) + point->target_offset);
242 		/* standard target */
243 		if (!t->u.target->target)
244 			verdict = ((struct ebt_standard_target *)t)->verdict;
245 		else {
246 			acpar.target   = t->u.target;
247 			acpar.targinfo = t->data;
248 			verdict = t->u.target->target(skb, &acpar);
249 		}
250 		if (verdict == EBT_ACCEPT) {
251 			read_unlock_bh(&table->lock);
252 			return NF_ACCEPT;
253 		}
254 		if (verdict == EBT_DROP) {
255 			read_unlock_bh(&table->lock);
256 			return NF_DROP;
257 		}
258 		if (verdict == EBT_RETURN) {
259 letsreturn:
260 #ifdef CONFIG_NETFILTER_DEBUG
261 			if (sp == 0) {
262 				BUGPRINT("RETURN on base chain");
263 				/* act like this is EBT_CONTINUE */
264 				goto letscontinue;
265 			}
266 #endif
267 			sp--;
268 			/* put all the local variables right */
269 			i = cs[sp].n;
270 			chaininfo = cs[sp].chaininfo;
271 			nentries = chaininfo->nentries;
272 			point = cs[sp].e;
273 			counter_base = cb_base +
274 			   chaininfo->counter_offset;
275 			continue;
276 		}
277 		if (verdict == EBT_CONTINUE)
278 			goto letscontinue;
279 #ifdef CONFIG_NETFILTER_DEBUG
280 		if (verdict < 0) {
281 			BUGPRINT("bogus standard verdict\n");
282 			read_unlock_bh(&table->lock);
283 			return NF_DROP;
284 		}
285 #endif
286 		/* jump to a udc */
287 		cs[sp].n = i + 1;
288 		cs[sp].chaininfo = chaininfo;
289 		cs[sp].e = ebt_next_entry(point);
290 		i = 0;
291 		chaininfo = (struct ebt_entries *) (base + verdict);
292 #ifdef CONFIG_NETFILTER_DEBUG
293 		if (chaininfo->distinguisher) {
294 			BUGPRINT("jump to non-chain\n");
295 			read_unlock_bh(&table->lock);
296 			return NF_DROP;
297 		}
298 #endif
299 		nentries = chaininfo->nentries;
300 		point = (struct ebt_entry *)chaininfo->data;
301 		counter_base = cb_base + chaininfo->counter_offset;
302 		sp++;
303 		continue;
304 letscontinue:
305 		point = ebt_next_entry(point);
306 		i++;
307 	}
308 
309 	/* I actually like this :) */
310 	if (chaininfo->policy == EBT_RETURN)
311 		goto letsreturn;
312 	if (chaininfo->policy == EBT_ACCEPT) {
313 		read_unlock_bh(&table->lock);
314 		return NF_ACCEPT;
315 	}
316 	read_unlock_bh(&table->lock);
317 	return NF_DROP;
318 }
319 
320 /* If it succeeds, returns element and locks mutex */
321 static inline void *
322 find_inlist_lock_noload(struct list_head *head, const char *name, int *error,
323    struct mutex *mutex)
324 {
325 	struct {
326 		struct list_head list;
327 		char name[EBT_FUNCTION_MAXNAMELEN];
328 	} *e;
329 
330 	*error = mutex_lock_interruptible(mutex);
331 	if (*error != 0)
332 		return NULL;
333 
334 	list_for_each_entry(e, head, list) {
335 		if (strcmp(e->name, name) == 0)
336 			return e;
337 	}
338 	*error = -ENOENT;
339 	mutex_unlock(mutex);
340 	return NULL;
341 }
342 
343 static void *
344 find_inlist_lock(struct list_head *head, const char *name, const char *prefix,
345    int *error, struct mutex *mutex)
346 {
347 	return try_then_request_module(
348 			find_inlist_lock_noload(head, name, error, mutex),
349 			"%s%s", prefix, name);
350 }
351 
352 static inline struct ebt_table *
353 find_table_lock(struct net *net, const char *name, int *error,
354 		struct mutex *mutex)
355 {
356 	return find_inlist_lock(&net->xt.tables[NFPROTO_BRIDGE], name,
357 				"ebtable_", error, mutex);
358 }
359 
360 static inline int
361 ebt_check_match(struct ebt_entry_match *m, struct xt_mtchk_param *par,
362 		unsigned int *cnt)
363 {
364 	const struct ebt_entry *e = par->entryinfo;
365 	struct xt_match *match;
366 	size_t left = ((char *)e + e->watchers_offset) - (char *)m;
367 	int ret;
368 
369 	if (left < sizeof(struct ebt_entry_match) ||
370 	    left - sizeof(struct ebt_entry_match) < m->match_size)
371 		return -EINVAL;
372 
373 	match = xt_request_find_match(NFPROTO_BRIDGE, m->u.name, 0);
374 	if (IS_ERR(match))
375 		return PTR_ERR(match);
376 	m->u.match = match;
377 
378 	par->match     = match;
379 	par->matchinfo = m->data;
380 	ret = xt_check_match(par, m->match_size,
381 	      e->ethproto, e->invflags & EBT_IPROTO);
382 	if (ret < 0) {
383 		module_put(match->me);
384 		return ret;
385 	}
386 
387 	(*cnt)++;
388 	return 0;
389 }
390 
391 static inline int
392 ebt_check_watcher(struct ebt_entry_watcher *w, struct xt_tgchk_param *par,
393 		  unsigned int *cnt)
394 {
395 	const struct ebt_entry *e = par->entryinfo;
396 	struct xt_target *watcher;
397 	size_t left = ((char *)e + e->target_offset) - (char *)w;
398 	int ret;
399 
400 	if (left < sizeof(struct ebt_entry_watcher) ||
401 	   left - sizeof(struct ebt_entry_watcher) < w->watcher_size)
402 		return -EINVAL;
403 
404 	watcher = xt_request_find_target(NFPROTO_BRIDGE, w->u.name, 0);
405 	if (IS_ERR(watcher))
406 		return PTR_ERR(watcher);
407 	w->u.watcher = watcher;
408 
409 	par->target   = watcher;
410 	par->targinfo = w->data;
411 	ret = xt_check_target(par, w->watcher_size,
412 	      e->ethproto, e->invflags & EBT_IPROTO);
413 	if (ret < 0) {
414 		module_put(watcher->me);
415 		return ret;
416 	}
417 
418 	(*cnt)++;
419 	return 0;
420 }
421 
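/* walk the blob copied from userspace, translate the user pointers in
   repl->hook_entry[] into kernel pointers inside newinfo->entries and make
   sure all entry and chain-header sizes stay within entries_size */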
422 static int ebt_verify_pointers(const struct ebt_replace *repl,
423 			       struct ebt_table_info *newinfo)
424 {
425 	unsigned int limit = repl->entries_size;
426 	unsigned int valid_hooks = repl->valid_hooks;
427 	unsigned int offset = 0;
428 	int i;
429 
430 	for (i = 0; i < NF_BR_NUMHOOKS; i++)
431 		newinfo->hook_entry[i] = NULL;
432 
433 	newinfo->entries_size = repl->entries_size;
434 	newinfo->nentries = repl->nentries;
435 
436 	while (offset < limit) {
437 		size_t left = limit - offset;
438 		struct ebt_entry *e = (void *)newinfo->entries + offset;
439 
440 		if (left < sizeof(unsigned int))
441 			break;
442 
443 		for (i = 0; i < NF_BR_NUMHOOKS; i++) {
444 			if ((valid_hooks & (1 << i)) == 0)
445 				continue;
446 			if ((char __user *)repl->hook_entry[i] ==
447 			     repl->entries + offset)
448 				break;
449 		}
450 
451 		if (i != NF_BR_NUMHOOKS || !(e->bitmask & EBT_ENTRY_OR_ENTRIES)) {
452 			if (e->bitmask != 0) {
453 				/* we make userspace set this right,
454 				   so there is no misunderstanding */
455 				BUGPRINT("EBT_ENTRY_OR_ENTRIES shouldn't be set "
456 					 "in distinguisher\n");
457 				return -EINVAL;
458 			}
459 			if (i != NF_BR_NUMHOOKS)
460 				newinfo->hook_entry[i] = (struct ebt_entries *)e;
461 			if (left < sizeof(struct ebt_entries))
462 				break;
463 			offset += sizeof(struct ebt_entries);
464 		} else {
465 			if (left < sizeof(struct ebt_entry))
466 				break;
467 			if (left < e->next_offset)
468 				break;
469 			if (e->next_offset < sizeof(struct ebt_entry))
470 				return -EINVAL;
471 			offset += e->next_offset;
472 		}
473 	}
474 	if (offset != limit) {
475 		BUGPRINT("entries_size too small\n");
476 		return -EINVAL;
477 	}
478 
479 	/* check if all valid hooks have a chain */
480 	for (i = 0; i < NF_BR_NUMHOOKS; i++) {
481 		if (!newinfo->hook_entry[i] &&
482 		   (valid_hooks & (1 << i))) {
483 			BUGPRINT("Valid hook without chain\n");
484 			return -EINVAL;
485 		}
486 	}
487 	return 0;
488 }
489 
490 /*
491  * this one is very careful, as it is the first function
492  * to parse the userspace data
493  */
494 static inline int
495 ebt_check_entry_size_and_hooks(const struct ebt_entry *e,
496    const struct ebt_table_info *newinfo,
497    unsigned int *n, unsigned int *cnt,
498    unsigned int *totalcnt, unsigned int *udc_cnt)
499 {
500 	int i;
501 
502 	for (i = 0; i < NF_BR_NUMHOOKS; i++) {
503 		if ((void *)e == (void *)newinfo->hook_entry[i])
504 			break;
505 	}
506 	/* beginning of a new chain
507 	   if i == NF_BR_NUMHOOKS it must be a user defined chain */
508 	if (i != NF_BR_NUMHOOKS || !e->bitmask) {
509 		/* this checks if the previous chain has as many entries
510 		   as it said it has */
511 		if (*n != *cnt) {
512 			BUGPRINT("nentries does not equal the nr of entries "
513 				 "in the chain\n");
514 			return -EINVAL;
515 		}
516 		if (((struct ebt_entries *)e)->policy != EBT_DROP &&
517 		   ((struct ebt_entries *)e)->policy != EBT_ACCEPT) {
518 			/* only RETURN from udc */
519 			if (i != NF_BR_NUMHOOKS ||
520 			   ((struct ebt_entries *)e)->policy != EBT_RETURN) {
521 				BUGPRINT("bad policy\n");
522 				return -EINVAL;
523 			}
524 		}
525 		if (i == NF_BR_NUMHOOKS) /* it's a user defined chain */
526 			(*udc_cnt)++;
527 		if (((struct ebt_entries *)e)->counter_offset != *totalcnt) {
528 			BUGPRINT("counter_offset != totalcnt");
529 			return -EINVAL;
530 		}
531 		*n = ((struct ebt_entries *)e)->nentries;
532 		*cnt = 0;
533 		return 0;
534 	}
535 	/* a plain old entry, heh */
536 	if (sizeof(struct ebt_entry) > e->watchers_offset ||
537 	   e->watchers_offset > e->target_offset ||
538 	   e->target_offset >= e->next_offset) {
539 		BUGPRINT("entry offsets not in right order\n");
540 		return -EINVAL;
541 	}
542 	/* this is not checked anywhere else */
543 	if (e->next_offset - e->target_offset < sizeof(struct ebt_entry_target)) {
544 		BUGPRINT("target size too small\n");
545 		return -EINVAL;
546 	}
547 	(*cnt)++;
548 	(*totalcnt)++;
549 	return 0;
550 }
551 
552 struct ebt_cl_stack
553 {
554 	struct ebt_chainstack cs;
555 	int from;
556 	unsigned int hookmask;
557 };
558 
559 /*
560  * we need these positions to check that the jumps to a different part of the
561  * entries is a jump to the beginning of a new chain.
562  */
563 static inline int
564 ebt_get_udc_positions(struct ebt_entry *e, struct ebt_table_info *newinfo,
565    unsigned int *n, struct ebt_cl_stack *udc)
566 {
567 	int i;
568 
569 	/* we're only interested in chain starts */
570 	if (e->bitmask)
571 		return 0;
572 	for (i = 0; i < NF_BR_NUMHOOKS; i++) {
573 		if (newinfo->hook_entry[i] == (struct ebt_entries *)e)
574 			break;
575 	}
576 	/* only care about udc */
577 	if (i != NF_BR_NUMHOOKS)
578 		return 0;
579 
580 	udc[*n].cs.chaininfo = (struct ebt_entries *)e;
581 	/* these initialisations are relied upon later in check_chainloops() */
582 	udc[*n].cs.n = 0;
583 	udc[*n].hookmask = 0;
584 
585 	(*n)++;
586 	return 0;
587 }
588 
589 static inline int
590 ebt_cleanup_match(struct ebt_entry_match *m, struct net *net, unsigned int *i)
591 {
592 	struct xt_mtdtor_param par;
593 
594 	if (i && (*i)-- == 0)
595 		return 1;
596 
597 	par.net       = net;
598 	par.match     = m->u.match;
599 	par.matchinfo = m->data;
600 	par.family    = NFPROTO_BRIDGE;
601 	if (par.match->destroy != NULL)
602 		par.match->destroy(&par);
603 	module_put(par.match->me);
604 	return 0;
605 }
606 
607 static inline int
608 ebt_cleanup_watcher(struct ebt_entry_watcher *w, struct net *net, unsigned int *i)
609 {
610 	struct xt_tgdtor_param par;
611 
612 	if (i && (*i)-- == 0)
613 		return 1;
614 
615 	par.net      = net;
616 	par.target   = w->u.watcher;
617 	par.targinfo = w->data;
618 	par.family   = NFPROTO_BRIDGE;
619 	if (par.target->destroy != NULL)
620 		par.target->destroy(&par);
621 	module_put(par.target->me);
622 	return 0;
623 }
624 
625 static inline int
626 ebt_cleanup_entry(struct ebt_entry *e, struct net *net, unsigned int *cnt)
627 {
628 	struct xt_tgdtor_param par;
629 	struct ebt_entry_target *t;
630 
631 	if (e->bitmask == 0)
632 		return 0;
633 	/* we're done */
634 	if (cnt && (*cnt)-- == 0)
635 		return 1;
636 	EBT_WATCHER_ITERATE(e, ebt_cleanup_watcher, net, NULL);
637 	EBT_MATCH_ITERATE(e, ebt_cleanup_match, net, NULL);
638 	t = (struct ebt_entry_target *)(((char *)e) + e->target_offset);
639 
640 	par.net      = net;
641 	par.target   = t->u.target;
642 	par.targinfo = t->data;
643 	par.family   = NFPROTO_BRIDGE;
644 	if (par.target->destroy != NULL)
645 		par.target->destroy(&par);
646 	module_put(par.target->me);
647 	return 0;
648 }
649 
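/* check one rule: figure out from which base chains it can be reached,
   validate its flags and offsets and run the checkentry routines of its
   matches, watchers and target */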
650 static inline int
651 ebt_check_entry(struct ebt_entry *e, struct net *net,
652    const struct ebt_table_info *newinfo,
653    const char *name, unsigned int *cnt,
654    struct ebt_cl_stack *cl_s, unsigned int udc_cnt)
655 {
656 	struct ebt_entry_target *t;
657 	struct xt_target *target;
658 	unsigned int i, j, hook = 0, hookmask = 0;
659 	size_t gap;
660 	int ret;
661 	struct xt_mtchk_param mtpar;
662 	struct xt_tgchk_param tgpar;
663 
664 	/* don't mess with the struct ebt_entries */
665 	if (e->bitmask == 0)
666 		return 0;
667 
668 	if (e->bitmask & ~EBT_F_MASK) {
669 		BUGPRINT("Unknown flag for bitmask\n");
670 		return -EINVAL;
671 	}
672 	if (e->invflags & ~EBT_INV_MASK) {
673 		BUGPRINT("Unknown flag for inv bitmask\n");
674 		return -EINVAL;
675 	}
676 	if ((e->bitmask & EBT_NOPROTO) && (e->bitmask & EBT_802_3)) {
677 		BUGPRINT("NOPROTO & 802_3 not allowed\n");
678 		return -EINVAL;
679 	}
680 	/* what hook do we belong to? */
681 	for (i = 0; i < NF_BR_NUMHOOKS; i++) {
682 		if (!newinfo->hook_entry[i])
683 			continue;
684 		if ((char *)newinfo->hook_entry[i] < (char *)e)
685 			hook = i;
686 		else
687 			break;
688 	}
689 	/* (1 << NF_BR_NUMHOOKS) tells the check functions the rule is on
690 	   a base chain */
691 	if (i < NF_BR_NUMHOOKS)
692 		hookmask = (1 << hook) | (1 << NF_BR_NUMHOOKS);
693 	else {
694 		for (i = 0; i < udc_cnt; i++)
695 			if ((char *)(cl_s[i].cs.chaininfo) > (char *)e)
696 				break;
697 		if (i == 0)
698 			hookmask = (1 << hook) | (1 << NF_BR_NUMHOOKS);
699 		else
700 			hookmask = cl_s[i - 1].hookmask;
701 	}
702 	i = 0;
703 
704 	mtpar.net	= tgpar.net       = net;
705 	mtpar.table     = tgpar.table     = name;
706 	mtpar.entryinfo = tgpar.entryinfo = e;
707 	mtpar.hook_mask = tgpar.hook_mask = hookmask;
708 	mtpar.family    = tgpar.family    = NFPROTO_BRIDGE;
709 	ret = EBT_MATCH_ITERATE(e, ebt_check_match, &mtpar, &i);
710 	if (ret != 0)
711 		goto cleanup_matches;
712 	j = 0;
713 	ret = EBT_WATCHER_ITERATE(e, ebt_check_watcher, &tgpar, &j);
714 	if (ret != 0)
715 		goto cleanup_watchers;
716 	t = (struct ebt_entry_target *)(((char *)e) + e->target_offset);
717 	gap = e->next_offset - e->target_offset;
718 
719 	target = xt_request_find_target(NFPROTO_BRIDGE, t->u.name, 0);
720 	if (IS_ERR(target)) {
721 		ret = PTR_ERR(target);
722 		goto cleanup_watchers;
723 	}
724 
725 	t->u.target = target;
726 	if (t->u.target == &ebt_standard_target) {
727 		if (gap < sizeof(struct ebt_standard_target)) {
728 			BUGPRINT("Standard target size too big\n");
729 			ret = -EFAULT;
730 			goto cleanup_watchers;
731 		}
732 		if (((struct ebt_standard_target *)t)->verdict <
733 		   -NUM_STANDARD_TARGETS) {
734 			BUGPRINT("Invalid standard target\n");
735 			ret = -EFAULT;
736 			goto cleanup_watchers;
737 		}
738 	} else if (t->target_size > gap - sizeof(struct ebt_entry_target)) {
739 		module_put(t->u.target->me);
740 		ret = -EFAULT;
741 		goto cleanup_watchers;
742 	}
743 
744 	tgpar.target   = target;
745 	tgpar.targinfo = t->data;
746 	ret = xt_check_target(&tgpar, t->target_size,
747 	      e->ethproto, e->invflags & EBT_IPROTO);
748 	if (ret < 0) {
749 		module_put(target->me);
750 		goto cleanup_watchers;
751 	}
752 	(*cnt)++;
753 	return 0;
754 cleanup_watchers:
755 	EBT_WATCHER_ITERATE(e, ebt_cleanup_watcher, net, &j);
756 cleanup_matches:
757 	EBT_MATCH_ITERATE(e, ebt_cleanup_match, net, &i);
758 	return ret;
759 }
760 
761 /*
762  * checks for loops and sets the hook mask for udc
763  * the hook mask for udc tells us from which base chains the udc can be
764  * accessed. This mask is a parameter to the check() functions of the extensions
765  */
766 static int check_chainloops(const struct ebt_entries *chain, struct ebt_cl_stack *cl_s,
767    unsigned int udc_cnt, unsigned int hooknr, char *base)
768 {
769 	int i, chain_nr = -1, pos = 0, nentries = chain->nentries, verdict;
770 	const struct ebt_entry *e = (struct ebt_entry *)chain->data;
771 	const struct ebt_entry_target *t;
772 
773 	while (pos < nentries || chain_nr != -1) {
774 		/* end of udc, go back one 'recursion' step */
775 		if (pos == nentries) {
776 			/* put back values of the time when this chain was called */
777 			e = cl_s[chain_nr].cs.e;
778 			if (cl_s[chain_nr].from != -1)
779 				nentries =
780 				cl_s[cl_s[chain_nr].from].cs.chaininfo->nentries;
781 			else
782 				nentries = chain->nentries;
783 			pos = cl_s[chain_nr].cs.n;
784 			/* make sure we won't see a loop that isn't one */
785 			cl_s[chain_nr].cs.n = 0;
786 			chain_nr = cl_s[chain_nr].from;
787 			if (pos == nentries)
788 				continue;
789 		}
790 		t = (struct ebt_entry_target *)
791 		   (((char *)e) + e->target_offset);
792 		if (strcmp(t->u.name, EBT_STANDARD_TARGET))
793 			goto letscontinue;
794 		if (e->target_offset + sizeof(struct ebt_standard_target) >
795 		   e->next_offset) {
796 			BUGPRINT("Standard target size too big\n");
797 			return -1;
798 		}
799 		verdict = ((struct ebt_standard_target *)t)->verdict;
800 		if (verdict >= 0) { /* jump to another chain */
801 			struct ebt_entries *hlp2 =
802 			   (struct ebt_entries *)(base + verdict);
803 			for (i = 0; i < udc_cnt; i++)
804 				if (hlp2 == cl_s[i].cs.chaininfo)
805 					break;
806 			/* bad destination or loop */
807 			if (i == udc_cnt) {
808 				BUGPRINT("bad destination\n");
809 				return -1;
810 			}
811 			if (cl_s[i].cs.n) {
812 				BUGPRINT("loop\n");
813 				return -1;
814 			}
815 			if (cl_s[i].hookmask & (1 << hooknr))
816 				goto letscontinue;
817 			/* this can't be 0, so the loop test is correct */
818 			cl_s[i].cs.n = pos + 1;
819 			pos = 0;
820 			cl_s[i].cs.e = ebt_next_entry(e);
821 			e = (struct ebt_entry *)(hlp2->data);
822 			nentries = hlp2->nentries;
823 			cl_s[i].from = chain_nr;
824 			chain_nr = i;
825 			/* this udc is accessible from the base chain for hooknr */
826 			cl_s[i].hookmask |= (1 << hooknr);
827 			continue;
828 		}
829 letscontinue:
830 		e = ebt_next_entry(e);
831 		pos++;
832 	}
833 	return 0;
834 }
835 
836 /* do the parsing of the table/chains/entries/matches/watchers/targets, heh */
837 static int translate_table(struct net *net, const char *name,
838 			   struct ebt_table_info *newinfo)
839 {
840 	unsigned int i, j, k, udc_cnt;
841 	int ret;
842 	struct ebt_cl_stack *cl_s = NULL; /* used in the checking for chain loops */
843 
844 	i = 0;
845 	while (i < NF_BR_NUMHOOKS && !newinfo->hook_entry[i])
846 		i++;
847 	if (i == NF_BR_NUMHOOKS) {
848 		BUGPRINT("No valid hooks specified\n");
849 		return -EINVAL;
850 	}
851 	if (newinfo->hook_entry[i] != (struct ebt_entries *)newinfo->entries) {
852 		BUGPRINT("Chains don't start at beginning\n");
853 		return -EINVAL;
854 	}
855 	/* make sure the chains appear in the same order as their
856 	   corresponding hooks */
857 	for (j = i + 1; j < NF_BR_NUMHOOKS; j++) {
858 		if (!newinfo->hook_entry[j])
859 			continue;
860 		if (newinfo->hook_entry[j] <= newinfo->hook_entry[i]) {
861 			BUGPRINT("Hook order must be followed\n");
862 			return -EINVAL;
863 		}
864 		i = j;
865 	}
866 
867 	/* do some early checkings and initialize some things */
868 	i = 0; /* holds the expected nr. of entries for the chain */
869 	j = 0; /* holds the up to now counted entries for the chain */
870 	k = 0; /* holds the total nr. of entries, should equal
871 		  newinfo->nentries afterwards */
872 	udc_cnt = 0; /* will hold the nr. of user defined chains (udc) */
873 	ret = EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size,
874 	   ebt_check_entry_size_and_hooks, newinfo,
875 	   &i, &j, &k, &udc_cnt);
876 
877 	if (ret != 0)
878 		return ret;
879 
880 	if (i != j) {
881 		BUGPRINT("nentries does not equal the nr of entries in the "
882 			 "(last) chain\n");
883 		return -EINVAL;
884 	}
885 	if (k != newinfo->nentries) {
886 		BUGPRINT("Total nentries is wrong\n");
887 		return -EINVAL;
888 	}
889 
890 	/* get the location of the udc, put them in an array
891 	   while we're at it, allocate the chainstack */
892 	if (udc_cnt) {
893 		/* this will get free'd in do_replace()/ebt_register_table()
894 		   if an error occurs */
895 		newinfo->chainstack =
896 			vmalloc(nr_cpu_ids * sizeof(*(newinfo->chainstack)));
897 		if (!newinfo->chainstack)
898 			return -ENOMEM;
899 		for_each_possible_cpu(i) {
900 			newinfo->chainstack[i] =
901 			  vmalloc(udc_cnt * sizeof(*(newinfo->chainstack[0])));
902 			if (!newinfo->chainstack[i]) {
903 				while (i)
904 					vfree(newinfo->chainstack[--i]);
905 				vfree(newinfo->chainstack);
906 				newinfo->chainstack = NULL;
907 				return -ENOMEM;
908 			}
909 		}
910 
911 		cl_s = vmalloc(udc_cnt * sizeof(*cl_s));
912 		if (!cl_s)
913 			return -ENOMEM;
914 		i = 0; /* the i'th udc */
915 		EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size,
916 		   ebt_get_udc_positions, newinfo, &i, cl_s);
917 		/* sanity check */
918 		if (i != udc_cnt) {
919 			BUGPRINT("i != udc_cnt\n");
920 			vfree(cl_s);
921 			return -EFAULT;
922 		}
923 	}
924 
925 	/* Check for loops */
926 	for (i = 0; i < NF_BR_NUMHOOKS; i++)
927 		if (newinfo->hook_entry[i])
928 			if (check_chainloops(newinfo->hook_entry[i],
929 			   cl_s, udc_cnt, i, newinfo->entries)) {
930 				vfree(cl_s);
931 				return -EINVAL;
932 			}
933 
934 	/* we now know the following (along with E=mc²):
935 	   - the nr of entries in each chain is right
936 	   - the size of the allocated space is right
937 	   - all valid hooks have a corresponding chain
938 	   - there are no loops
939 	   - wrong data can still be on the level of a single entry
940 	   - could be there are jumps to places that are not the
941 	     beginning of a chain. This can only occur in chains that
942 	     are not accessible from any base chains, so we don't care. */
943 
944 	/* used to know what we need to clean up if something goes wrong */
945 	i = 0;
946 	ret = EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size,
947 	   ebt_check_entry, net, newinfo, name, &i, cl_s, udc_cnt);
948 	if (ret != 0) {
949 		EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size,
950 				  ebt_cleanup_entry, net, &i);
951 	}
952 	vfree(cl_s);
953 	return ret;
954 }
955 
956 /* called under write_lock */
957 static void get_counters(const struct ebt_counter *oldcounters,
958    struct ebt_counter *counters, unsigned int nentries)
959 {
960 	int i, cpu;
961 	struct ebt_counter *counter_base;
962 
963 	/* counters of cpu 0 */
964 	memcpy(counters, oldcounters,
965 	       sizeof(struct ebt_counter) * nentries);
966 
967 	/* add other counters to those of cpu 0 */
968 	for_each_possible_cpu(cpu) {
969 		if (cpu == 0)
970 			continue;
971 		counter_base = COUNTER_BASE(oldcounters, nentries, cpu);
972 		for (i = 0; i < nentries; i++) {
973 			counters[i].pcnt += counter_base[i].pcnt;
974 			counters[i].bcnt += counter_base[i].bcnt;
975 		}
976 	}
977 }
978 
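/* swap in the new ruleset: verify and translate newinfo, replace
   t->private under the write lock, hand the old counters back to
   userspace if requested and free the previous table_info */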
979 static int do_replace_finish(struct net *net, struct ebt_replace *repl,
980 			      struct ebt_table_info *newinfo)
981 {
982 	int ret, i;
983 	struct ebt_counter *counterstmp = NULL;
984 	/* used to be able to unlock earlier */
985 	struct ebt_table_info *table;
986 	struct ebt_table *t;
987 
988 	/* the user wants counters back;
989 	   the check on the size is done later, when we have the lock */
990 	if (repl->num_counters) {
991 		unsigned long size = repl->num_counters * sizeof(*counterstmp);
992 		counterstmp = vmalloc(size);
993 		if (!counterstmp)
994 			return -ENOMEM;
995 	}
996 
997 	newinfo->chainstack = NULL;
998 	ret = ebt_verify_pointers(repl, newinfo);
999 	if (ret != 0)
1000 		goto free_counterstmp;
1001 
1002 	ret = translate_table(net, repl->name, newinfo);
1003 
1004 	if (ret != 0)
1005 		goto free_counterstmp;
1006 
1007 	t = find_table_lock(net, repl->name, &ret, &ebt_mutex);
1008 	if (!t) {
1009 		ret = -ENOENT;
1010 		goto free_iterate;
1011 	}
1012 
1013 	/* the table doesn't like it */
1014 	if (t->check && (ret = t->check(newinfo, repl->valid_hooks)))
1015 		goto free_unlock;
1016 
1017 	if (repl->num_counters && repl->num_counters != t->private->nentries) {
1018 		BUGPRINT("Wrong nr. of counters requested\n");
1019 		ret = -EINVAL;
1020 		goto free_unlock;
1021 	}
1022 
1023 	/* we have the mutex lock, so no danger in reading this pointer */
1024 	table = t->private;
1025 	/* make sure the table can only be rmmod'ed if it contains no rules */
1026 	if (!table->nentries && newinfo->nentries && !try_module_get(t->me)) {
1027 		ret = -ENOENT;
1028 		goto free_unlock;
1029 	} else if (table->nentries && !newinfo->nentries)
1030 		module_put(t->me);
1031 	/* we need an atomic snapshot of the counters */
1032 	write_lock_bh(&t->lock);
1033 	if (repl->num_counters)
1034 		get_counters(t->private->counters, counterstmp,
1035 		   t->private->nentries);
1036 
1037 	t->private = newinfo;
1038 	write_unlock_bh(&t->lock);
1039 	mutex_unlock(&ebt_mutex);
1040 	/* so, a user can change the chains while having messed up the counter
1041 	   allocation. The only reason this is done is that this way the lock
1042 	   needs to be held only once, while it doesn't bring the kernel into a
1043 	   dangerous state. */
1044 	if (repl->num_counters &&
1045 	   copy_to_user(repl->counters, counterstmp,
1046 	   repl->num_counters * sizeof(struct ebt_counter))) {
1047 		/* Silent error, can't fail, new table is already in place */
1048 		net_warn_ratelimited("ebtables: counters copy to user failed while replacing table\n");
1049 	}
1050 
1051 	/* decrease module count and free resources */
1052 	EBT_ENTRY_ITERATE(table->entries, table->entries_size,
1053 			  ebt_cleanup_entry, net, NULL);
1054 
1055 	vfree(table->entries);
1056 	if (table->chainstack) {
1057 		for_each_possible_cpu(i)
1058 			vfree(table->chainstack[i]);
1059 		vfree(table->chainstack);
1060 	}
1061 	vfree(table);
1062 
1063 	vfree(counterstmp);
1064 	return ret;
1065 
1066 free_unlock:
1067 	mutex_unlock(&ebt_mutex);
1068 free_iterate:
1069 	EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size,
1070 			  ebt_cleanup_entry, net, NULL);
1071 free_counterstmp:
1072 	vfree(counterstmp);
1073 	/* can be initialized in translate_table() */
1074 	if (newinfo->chainstack) {
1075 		for_each_possible_cpu(i)
1076 			vfree(newinfo->chainstack[i]);
1077 		vfree(newinfo->chainstack);
1078 	}
1079 	return ret;
1080 }
1081 
1082 /* replace the table */
1083 static int do_replace(struct net *net, const void __user *user,
1084 		      unsigned int len)
1085 {
1086 	int ret, countersize;
1087 	struct ebt_table_info *newinfo;
1088 	struct ebt_replace tmp;
1089 
1090 	if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1091 		return -EFAULT;
1092 
1093 	if (len != sizeof(tmp) + tmp.entries_size) {
1094 		BUGPRINT("Wrong len argument\n");
1095 		return -EINVAL;
1096 	}
1097 
1098 	if (tmp.entries_size == 0) {
1099 		BUGPRINT("Entries_size never zero\n");
1100 		return -EINVAL;
1101 	}
1102 	/* overflow check */
1103 	if (tmp.nentries >= ((INT_MAX - sizeof(struct ebt_table_info)) /
1104 			NR_CPUS - SMP_CACHE_BYTES) / sizeof(struct ebt_counter))
1105 		return -ENOMEM;
1106 	if (tmp.num_counters >= INT_MAX / sizeof(struct ebt_counter))
1107 		return -ENOMEM;
1108 
1109 	tmp.name[sizeof(tmp.name) - 1] = 0;
1110 
1111 	countersize = COUNTER_OFFSET(tmp.nentries) * nr_cpu_ids;
1112 	newinfo = vmalloc(sizeof(*newinfo) + countersize);
1113 	if (!newinfo)
1114 		return -ENOMEM;
1115 
1116 	if (countersize)
1117 		memset(newinfo->counters, 0, countersize);
1118 
1119 	newinfo->entries = vmalloc(tmp.entries_size);
1120 	if (!newinfo->entries) {
1121 		ret = -ENOMEM;
1122 		goto free_newinfo;
1123 	}
1124 	if (copy_from_user(
1125 	   newinfo->entries, tmp.entries, tmp.entries_size) != 0) {
1126 		BUGPRINT("Couldn't copy entries from userspace\n");
1127 		ret = -EFAULT;
1128 		goto free_entries;
1129 	}
1130 
1131 	ret = do_replace_finish(net, &tmp, newinfo);
1132 	if (ret == 0)
1133 		return ret;
1134 free_entries:
1135 	vfree(newinfo->entries);
1136 free_newinfo:
1137 	vfree(newinfo);
1138 	return ret;
1139 }
1140 
1141 struct ebt_table *
1142 ebt_register_table(struct net *net, const struct ebt_table *input_table)
1143 {
1144 	struct ebt_table_info *newinfo;
1145 	struct ebt_table *t, *table;
1146 	struct ebt_replace_kernel *repl;
1147 	int ret, i, countersize;
1148 	void *p;
1149 
1150 	if (input_table == NULL || (repl = input_table->table) == NULL ||
1151 	    repl->entries == NULL || repl->entries_size == 0 ||
1152 	    repl->counters != NULL || input_table->private != NULL) {
1153 		BUGPRINT("Bad table data for ebt_register_table!!!\n");
1154 		return ERR_PTR(-EINVAL);
1155 	}
1156 
1157 	/* Don't add one table to multiple lists. */
1158 	table = kmemdup(input_table, sizeof(struct ebt_table), GFP_KERNEL);
1159 	if (!table) {
1160 		ret = -ENOMEM;
1161 		goto out;
1162 	}
1163 
1164 	countersize = COUNTER_OFFSET(repl->nentries) * nr_cpu_ids;
1165 	newinfo = vmalloc(sizeof(*newinfo) + countersize);
1166 	ret = -ENOMEM;
1167 	if (!newinfo)
1168 		goto free_table;
1169 
1170 	p = vmalloc(repl->entries_size);
1171 	if (!p)
1172 		goto free_newinfo;
1173 
1174 	memcpy(p, repl->entries, repl->entries_size);
1175 	newinfo->entries = p;
1176 
1177 	newinfo->entries_size = repl->entries_size;
1178 	newinfo->nentries = repl->nentries;
1179 
1180 	if (countersize)
1181 		memset(newinfo->counters, 0, countersize);
1182 
1183 	/* fill in newinfo and parse the entries */
1184 	newinfo->chainstack = NULL;
1185 	for (i = 0; i < NF_BR_NUMHOOKS; i++) {
1186 		if ((repl->valid_hooks & (1 << i)) == 0)
1187 			newinfo->hook_entry[i] = NULL;
1188 		else
1189 			newinfo->hook_entry[i] = p +
1190 				((char *)repl->hook_entry[i] - repl->entries);
1191 	}
1192 	ret = translate_table(net, repl->name, newinfo);
1193 	if (ret != 0) {
1194 		BUGPRINT("Translate_table failed\n");
1195 		goto free_chainstack;
1196 	}
1197 
1198 	if (table->check && table->check(newinfo, table->valid_hooks)) {
1199 		BUGPRINT("The table doesn't like its own initial data, lol\n");
1200 		ret = -EINVAL;
1201 		goto free_chainstack;
1202 	}
1203 
1204 	table->private = newinfo;
1205 	rwlock_init(&table->lock);
1206 	ret = mutex_lock_interruptible(&ebt_mutex);
1207 	if (ret != 0)
1208 		goto free_chainstack;
1209 
1210 	list_for_each_entry(t, &net->xt.tables[NFPROTO_BRIDGE], list) {
1211 		if (strcmp(t->name, table->name) == 0) {
1212 			ret = -EEXIST;
1213 			BUGPRINT("Table name already exists\n");
1214 			goto free_unlock;
1215 		}
1216 	}
1217 
1218 	/* Hold a reference count if the chains aren't empty */
1219 	if (newinfo->nentries && !try_module_get(table->me)) {
1220 		ret = -ENOENT;
1221 		goto free_unlock;
1222 	}
1223 	list_add(&table->list, &net->xt.tables[NFPROTO_BRIDGE]);
1224 	mutex_unlock(&ebt_mutex);
1225 	return table;
1226 free_unlock:
1227 	mutex_unlock(&ebt_mutex);
1228 free_chainstack:
1229 	if (newinfo->chainstack) {
1230 		for_each_possible_cpu(i)
1231 			vfree(newinfo->chainstack[i]);
1232 		vfree(newinfo->chainstack);
1233 	}
1234 	vfree(newinfo->entries);
1235 free_newinfo:
1236 	vfree(newinfo);
1237 free_table:
1238 	kfree(table);
1239 out:
1240 	return ERR_PTR(ret);
1241 }
1242 
1243 void ebt_unregister_table(struct net *net, struct ebt_table *table)
1244 {
1245 	int i;
1246 
1247 	if (!table) {
1248 		BUGPRINT("Request to unregister NULL table!!!\n");
1249 		return;
1250 	}
1251 	mutex_lock(&ebt_mutex);
1252 	list_del(&table->list);
1253 	mutex_unlock(&ebt_mutex);
1254 	EBT_ENTRY_ITERATE(table->private->entries, table->private->entries_size,
1255 			  ebt_cleanup_entry, net, NULL);
1256 	if (table->private->nentries)
1257 		module_put(table->me);
1258 	vfree(table->private->entries);
1259 	if (table->private->chainstack) {
1260 		for_each_possible_cpu(i)
1261 			vfree(table->private->chainstack[i]);
1262 		vfree(table->private->chainstack);
1263 	}
1264 	vfree(table->private);
1265 	kfree(table);
1266 }
1267 
1268 /* userspace just supplied us with counters */
1269 static int do_update_counters(struct net *net, const char *name,
1270 				struct ebt_counter __user *counters,
1271 				unsigned int num_counters,
1272 				const void __user *user, unsigned int len)
1273 {
1274 	int i, ret;
1275 	struct ebt_counter *tmp;
1276 	struct ebt_table *t;
1277 
1278 	if (num_counters == 0)
1279 		return -EINVAL;
1280 
1281 	tmp = vmalloc(num_counters * sizeof(*tmp));
1282 	if (!tmp)
1283 		return -ENOMEM;
1284 
1285 	t = find_table_lock(net, name, &ret, &ebt_mutex);
1286 	if (!t)
1287 		goto free_tmp;
1288 
1289 	if (num_counters != t->private->nentries) {
1290 		BUGPRINT("Wrong nr of counters\n");
1291 		ret = -EINVAL;
1292 		goto unlock_mutex;
1293 	}
1294 
1295 	if (copy_from_user(tmp, counters, num_counters * sizeof(*counters))) {
1296 		ret = -EFAULT;
1297 		goto unlock_mutex;
1298 	}
1299 
1300 	/* we want an atomic add of the counters */
1301 	write_lock_bh(&t->lock);
1302 
1303 	/* we add to the counters of the first cpu */
1304 	for (i = 0; i < num_counters; i++) {
1305 		t->private->counters[i].pcnt += tmp[i].pcnt;
1306 		t->private->counters[i].bcnt += tmp[i].bcnt;
1307 	}
1308 
1309 	write_unlock_bh(&t->lock);
1310 	ret = 0;
1311 unlock_mutex:
1312 	mutex_unlock(&ebt_mutex);
1313 free_tmp:
1314 	vfree(tmp);
1315 	return ret;
1316 }
1317 
1318 static int update_counters(struct net *net, const void __user *user,
1319 			    unsigned int len)
1320 {
1321 	struct ebt_replace hlp;
1322 
1323 	if (copy_from_user(&hlp, user, sizeof(hlp)))
1324 		return -EFAULT;
1325 
1326 	if (len != sizeof(hlp) + hlp.num_counters * sizeof(struct ebt_counter))
1327 		return -EINVAL;
1328 
1329 	return do_update_counters(net, hlp.name, hlp.counters,
1330 				hlp.num_counters, user, len);
1331 }
1332 
1333 static inline int ebt_make_matchname(const struct ebt_entry_match *m,
1334     const char *base, char __user *ubase)
1335 {
1336 	char __user *hlp = ubase + ((char *)m - base);
1337 	char name[EBT_FUNCTION_MAXNAMELEN] = {};
1338 
1339 	/* ebtables expects 32-byte-long names but xt_match names are only 29
1340 	   bytes long. Copy 29 bytes and fill the remaining bytes with zeroes. */
1341 	strncpy(name, m->u.match->name, sizeof(name));
1342 	if (copy_to_user(hlp, name, EBT_FUNCTION_MAXNAMELEN))
1343 		return -EFAULT;
1344 	return 0;
1345 }
1346 
1347 static inline int ebt_make_watchername(const struct ebt_entry_watcher *w,
1348     const char *base, char __user *ubase)
1349 {
1350 	char __user *hlp = ubase + ((char *)w - base);
1351 	char name[EBT_FUNCTION_MAXNAMELEN] = {};
1352 
1353 	strncpy(name, w->u.watcher->name, sizeof(name));
1354 	if (copy_to_user(hlp, name, EBT_FUNCTION_MAXNAMELEN))
1355 		return -EFAULT;
1356 	return 0;
1357 }
1358 
1359 static inline int
1360 ebt_make_names(struct ebt_entry *e, const char *base, char __user *ubase)
1361 {
1362 	int ret;
1363 	char __user *hlp;
1364 	const struct ebt_entry_target *t;
1365 	char name[EBT_FUNCTION_MAXNAMELEN] = {};
1366 
1367 	if (e->bitmask == 0)
1368 		return 0;
1369 
1370 	hlp = ubase + (((char *)e + e->target_offset) - base);
1371 	t = (struct ebt_entry_target *)(((char *)e) + e->target_offset);
1372 
1373 	ret = EBT_MATCH_ITERATE(e, ebt_make_matchname, base, ubase);
1374 	if (ret != 0)
1375 		return ret;
1376 	ret = EBT_WATCHER_ITERATE(e, ebt_make_watchername, base, ubase);
1377 	if (ret != 0)
1378 		return ret;
1379 	strncpy(name, t->u.target->name, sizeof(name));
1380 	if (copy_to_user(hlp, name, EBT_FUNCTION_MAXNAMELEN))
1381 		return -EFAULT;
1382 	return 0;
1383 }
1384 
1385 static int copy_counters_to_user(struct ebt_table *t,
1386 				  const struct ebt_counter *oldcounters,
1387 				  void __user *user, unsigned int num_counters,
1388 				  unsigned int nentries)
1389 {
1390 	struct ebt_counter *counterstmp;
1391 	int ret = 0;
1392 
1393 	/* userspace might not need the counters */
1394 	if (num_counters == 0)
1395 		return 0;
1396 
1397 	if (num_counters != nentries) {
1398 		BUGPRINT("Num_counters wrong\n");
1399 		return -EINVAL;
1400 	}
1401 
1402 	counterstmp = vmalloc(nentries * sizeof(*counterstmp));
1403 	if (!counterstmp)
1404 		return -ENOMEM;
1405 
1406 	write_lock_bh(&t->lock);
1407 	get_counters(oldcounters, counterstmp, nentries);
1408 	write_unlock_bh(&t->lock);
1409 
1410 	if (copy_to_user(user, counterstmp,
1411 	   nentries * sizeof(struct ebt_counter)))
1412 		ret = -EFAULT;
1413 	vfree(counterstmp);
1414 	return ret;
1415 }
1416 
1417 /* called with ebt_mutex locked */
1418 static int copy_everything_to_user(struct ebt_table *t, void __user *user,
1419     const int *len, int cmd)
1420 {
1421 	struct ebt_replace tmp;
1422 	const struct ebt_counter *oldcounters;
1423 	unsigned int entries_size, nentries;
1424 	int ret;
1425 	char *entries;
1426 
1427 	if (cmd == EBT_SO_GET_ENTRIES) {
1428 		entries_size = t->private->entries_size;
1429 		nentries = t->private->nentries;
1430 		entries = t->private->entries;
1431 		oldcounters = t->private->counters;
1432 	} else {
1433 		entries_size = t->table->entries_size;
1434 		nentries = t->table->nentries;
1435 		entries = t->table->entries;
1436 		oldcounters = t->table->counters;
1437 	}
1438 
1439 	if (copy_from_user(&tmp, user, sizeof(tmp)))
1440 		return -EFAULT;
1441 
1442 	if (*len != sizeof(struct ebt_replace) + entries_size +
1443 	   (tmp.num_counters ? nentries * sizeof(struct ebt_counter) : 0))
1444 		return -EINVAL;
1445 
1446 	if (tmp.nentries != nentries) {
1447 		BUGPRINT("Nentries wrong\n");
1448 		return -EINVAL;
1449 	}
1450 
1451 	if (tmp.entries_size != entries_size) {
1452 		BUGPRINT("Wrong size\n");
1453 		return -EINVAL;
1454 	}
1455 
1456 	ret = copy_counters_to_user(t, oldcounters, tmp.counters,
1457 					tmp.num_counters, nentries);
1458 	if (ret)
1459 		return ret;
1460 
1461 	if (copy_to_user(tmp.entries, entries, entries_size)) {
1462 		BUGPRINT("Couldn't copy entries to userspace\n");
1463 		return -EFAULT;
1464 	}
1465 	/* set the match/watcher/target names right */
1466 	return EBT_ENTRY_ITERATE(entries, entries_size,
1467 	   ebt_make_names, entries, tmp.entries);
1468 }
1469 
1470 static int do_ebt_set_ctl(struct sock *sk,
1471 	int cmd, void __user *user, unsigned int len)
1472 {
1473 	int ret;
1474 
1475 	if (!capable(CAP_NET_ADMIN))
1476 		return -EPERM;
1477 
1478 	switch (cmd) {
1479 	case EBT_SO_SET_ENTRIES:
1480 		ret = do_replace(sock_net(sk), user, len);
1481 		break;
1482 	case EBT_SO_SET_COUNTERS:
1483 		ret = update_counters(sock_net(sk), user, len);
1484 		break;
1485 	default:
1486 		ret = -EINVAL;
1487 	}
1488 	return ret;
1489 }
1490 
1491 static int do_ebt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
1492 {
1493 	int ret;
1494 	struct ebt_replace tmp;
1495 	struct ebt_table *t;
1496 
1497 	if (!capable(CAP_NET_ADMIN))
1498 		return -EPERM;
1499 
1500 	if (copy_from_user(&tmp, user, sizeof(tmp)))
1501 		return -EFAULT;
1502 
1503 	t = find_table_lock(sock_net(sk), tmp.name, &ret, &ebt_mutex);
1504 	if (!t)
1505 		return ret;
1506 
1507 	switch (cmd) {
1508 	case EBT_SO_GET_INFO:
1509 	case EBT_SO_GET_INIT_INFO:
1510 		if (*len != sizeof(struct ebt_replace)) {
1511 			ret = -EINVAL;
1512 			mutex_unlock(&ebt_mutex);
1513 			break;
1514 		}
1515 		if (cmd == EBT_SO_GET_INFO) {
1516 			tmp.nentries = t->private->nentries;
1517 			tmp.entries_size = t->private->entries_size;
1518 			tmp.valid_hooks = t->valid_hooks;
1519 		} else {
1520 			tmp.nentries = t->table->nentries;
1521 			tmp.entries_size = t->table->entries_size;
1522 			tmp.valid_hooks = t->table->valid_hooks;
1523 		}
1524 		mutex_unlock(&ebt_mutex);
1525 		if (copy_to_user(user, &tmp, *len) != 0) {
1526 			BUGPRINT("c2u Didn't work\n");
1527 			ret = -EFAULT;
1528 			break;
1529 		}
1530 		ret = 0;
1531 		break;
1532 
1533 	case EBT_SO_GET_ENTRIES:
1534 	case EBT_SO_GET_INIT_ENTRIES:
1535 		ret = copy_everything_to_user(t, user, len, cmd);
1536 		mutex_unlock(&ebt_mutex);
1537 		break;
1538 
1539 	default:
1540 		mutex_unlock(&ebt_mutex);
1541 		ret = -EINVAL;
1542 	}
1543 
1544 	return ret;
1545 }
1546 
1547 #ifdef CONFIG_COMPAT
1548 /* 32 bit-userspace compatibility definitions. */
1549 struct compat_ebt_replace {
1550 	char name[EBT_TABLE_MAXNAMELEN];
1551 	compat_uint_t valid_hooks;
1552 	compat_uint_t nentries;
1553 	compat_uint_t entries_size;
1554 	/* start of the chains */
1555 	compat_uptr_t hook_entry[NF_BR_NUMHOOKS];
1556 	/* nr of counters userspace expects back */
1557 	compat_uint_t num_counters;
1558 	/* where the kernel will put the old counters. */
1559 	compat_uptr_t counters;
1560 	compat_uptr_t entries;
1561 };
1562 
1563 /* struct ebt_entry_match, _target and _watcher have same layout */
1564 struct compat_ebt_entry_mwt {
1565 	union {
1566 		char name[EBT_FUNCTION_MAXNAMELEN];
1567 		compat_uptr_t ptr;
1568 	} u;
1569 	compat_uint_t match_size;
1570 	compat_uint_t data[0];
1571 };
1572 
1573 /* account for possible padding between match_size and ->data */
1574 static int ebt_compat_entry_padsize(void)
1575 {
1576 	BUILD_BUG_ON(XT_ALIGN(sizeof(struct ebt_entry_match)) <
1577 			COMPAT_XT_ALIGN(sizeof(struct compat_ebt_entry_mwt)));
1578 	return (int) XT_ALIGN(sizeof(struct ebt_entry_match)) -
1579 			COMPAT_XT_ALIGN(sizeof(struct compat_ebt_entry_mwt));
1580 }
1581 
1582 static int ebt_compat_match_offset(const struct xt_match *match,
1583 				   unsigned int userlen)
1584 {
1585 	/*
1586 	 * ebt_among needs special handling. The kernel .matchsize is
1587 	 * set to -1 at registration time; at runtime an EBT_ALIGN()ed
1588 	 * value is expected.
1589 	 * Example: userspace sends 4500, ebt_among.c wants 4504.
1590 	 */
1591 	if (unlikely(match->matchsize == -1))
1592 		return XT_ALIGN(userlen) - COMPAT_XT_ALIGN(userlen);
1593 	return xt_compat_match_offset(match);
1594 }
1595 
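/* copy one match to the 32 bit userspace layout: write the name and the
   shrunk match_size, let the extension convert its data if it has a
   compat_to_user hook and advance *dstptr and *size accordingly */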
1596 static int compat_match_to_user(struct ebt_entry_match *m, void __user **dstptr,
1597 				unsigned int *size)
1598 {
1599 	const struct xt_match *match = m->u.match;
1600 	struct compat_ebt_entry_mwt __user *cm = *dstptr;
1601 	int off = ebt_compat_match_offset(match, m->match_size);
1602 	compat_uint_t msize = m->match_size - off;
1603 
1604 	BUG_ON(off >= m->match_size);
1605 
1606 	if (copy_to_user(cm->u.name, match->name,
1607 	    strlen(match->name) + 1) || put_user(msize, &cm->match_size))
1608 		return -EFAULT;
1609 
1610 	if (match->compat_to_user) {
1611 		if (match->compat_to_user(cm->data, m->data))
1612 			return -EFAULT;
1613 	} else if (copy_to_user(cm->data, m->data, msize))
1614 			return -EFAULT;
1615 
1616 	*size -= ebt_compat_entry_padsize() + off;
1617 	*dstptr = cm->data;
1618 	*dstptr += msize;
1619 	return 0;
1620 }
1621 
1622 static int compat_target_to_user(struct ebt_entry_target *t,
1623 				 void __user **dstptr,
1624 				 unsigned int *size)
1625 {
1626 	const struct xt_target *target = t->u.target;
1627 	struct compat_ebt_entry_mwt __user *cm = *dstptr;
1628 	int off = xt_compat_target_offset(target);
1629 	compat_uint_t tsize = t->target_size - off;
1630 
1631 	BUG_ON(off >= t->target_size);
1632 
1633 	if (copy_to_user(cm->u.name, target->name,
1634 	    strlen(target->name) + 1) || put_user(tsize, &cm->match_size))
1635 		return -EFAULT;
1636 
1637 	if (target->compat_to_user) {
1638 		if (target->compat_to_user(cm->data, t->data))
1639 			return -EFAULT;
1640 	} else if (copy_to_user(cm->data, t->data, tsize))
1641 		return -EFAULT;
1642 
1643 	*size -= ebt_compat_entry_padsize() + off;
1644 	*dstptr = cm->data;
1645 	*dstptr += tsize;
1646 	return 0;
1647 }
1648 
1649 static int compat_watcher_to_user(struct ebt_entry_watcher *w,
1650 				  void __user **dstptr,
1651 				  unsigned int *size)
1652 {
1653 	return compat_target_to_user((struct ebt_entry_target *)w,
1654 							dstptr, size);
1655 }
1656 
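/* copy a full entry (or chain header) to the 32 bit userspace layout and
   fix up the watchers/target/next offsets to account for the size the
   matches, watchers and target lost in translation */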
1657 static int compat_copy_entry_to_user(struct ebt_entry *e, void __user **dstptr,
1658 				unsigned int *size)
1659 {
1660 	struct ebt_entry_target *t;
1661 	struct ebt_entry __user *ce;
1662 	u32 watchers_offset, target_offset, next_offset;
1663 	compat_uint_t origsize;
1664 	int ret;
1665 
1666 	if (e->bitmask == 0) {
1667 		if (*size < sizeof(struct ebt_entries))
1668 			return -EINVAL;
1669 		if (copy_to_user(*dstptr, e, sizeof(struct ebt_entries)))
1670 			return -EFAULT;
1671 
1672 		*dstptr += sizeof(struct ebt_entries);
1673 		*size -= sizeof(struct ebt_entries);
1674 		return 0;
1675 	}
1676 
1677 	if (*size < sizeof(*ce))
1678 		return -EINVAL;
1679 
1680 	ce = (struct ebt_entry __user *)*dstptr;
1681 	if (copy_to_user(ce, e, sizeof(*ce)))
1682 		return -EFAULT;
1683 
1684 	origsize = *size;
1685 	*dstptr += sizeof(*ce);
1686 
1687 	ret = EBT_MATCH_ITERATE(e, compat_match_to_user, dstptr, size);
1688 	if (ret)
1689 		return ret;
1690 	watchers_offset = e->watchers_offset - (origsize - *size);
1691 
1692 	ret = EBT_WATCHER_ITERATE(e, compat_watcher_to_user, dstptr, size);
1693 	if (ret)
1694 		return ret;
1695 	target_offset = e->target_offset - (origsize - *size);
1696 
1697 	t = (struct ebt_entry_target *) ((char *) e + e->target_offset);
1698 
1699 	ret = compat_target_to_user(t, dstptr, size);
1700 	if (ret)
1701 		return ret;
1702 	next_offset = e->next_offset - (origsize - *size);
1703 
1704 	if (put_user(watchers_offset, &ce->watchers_offset) ||
1705 	    put_user(target_offset, &ce->target_offset) ||
1706 	    put_user(next_offset, &ce->next_offset))
1707 		return -EFAULT;
1708 
1709 	*size -= sizeof(*ce);
1710 	return 0;
1711 }
1712 
1713 static int compat_calc_match(struct ebt_entry_match *m, int *off)
1714 {
1715 	*off += ebt_compat_match_offset(m->u.match, m->match_size);
1716 	*off += ebt_compat_entry_padsize();
1717 	return 0;
1718 }
1719 
1720 static int compat_calc_watcher(struct ebt_entry_watcher *w, int *off)
1721 {
1722 	*off += xt_compat_target_offset(w->u.watcher);
1723 	*off += ebt_compat_entry_padsize();
1724 	return 0;
1725 }
1726 
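/* Sizing pass for one entry: add up the per-object shrinkage of its compat
 * image (compat data offset plus ebt_compat_entry_padsize() for each match,
 * watcher and the target), subtract that from newinfo->entries_size, record
 * the delta with xt_compat_add_offset() and adjust the affected compat hook
 * entry points by the same amount.
 */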
1727 static int compat_calc_entry(const struct ebt_entry *e,
1728 			     const struct ebt_table_info *info,
1729 			     const void *base,
1730 			     struct compat_ebt_replace *newinfo)
1731 {
1732 	const struct ebt_entry_target *t;
1733 	unsigned int entry_offset;
1734 	int off, ret, i;
1735 
1736 	if (e->bitmask == 0)
1737 		return 0;
1738 
1739 	off = 0;
1740 	entry_offset = (void *)e - base;
1741 
1742 	EBT_MATCH_ITERATE(e, compat_calc_match, &off);
1743 	EBT_WATCHER_ITERATE(e, compat_calc_watcher, &off);
1744 
1745 	t = (const struct ebt_entry_target *) ((char *) e + e->target_offset);
1746 
1747 	off += xt_compat_target_offset(t->u.target);
1748 	off += ebt_compat_entry_padsize();
1749 
1750 	newinfo->entries_size -= off;
1751 
1752 	ret = xt_compat_add_offset(NFPROTO_BRIDGE, entry_offset, off);
1753 	if (ret)
1754 		return ret;
1755 
1756 	for (i = 0; i < NF_BR_NUMHOOKS; i++) {
1757 		const void *hookptr = info->hook_entry[i];
1758 		if (info->hook_entry[i] &&
1759 		    (e < (struct ebt_entry *)(base - hookptr))) {
1760 			newinfo->hook_entry[i] -= off;
1761 			pr_debug("0x%08X -> 0x%08X\n",
1762 					newinfo->hook_entry[i] + off,
1763 					newinfo->hook_entry[i]);
1764 		}
1765 	}
1766 
1767 	return 0;
1768 }
1769 
1770 
1771 static int compat_table_info(const struct ebt_table_info *info,
1772 			     struct compat_ebt_replace *newinfo)
1773 {
1774 	unsigned int size = info->entries_size;
1775 	const void *entries = info->entries;
1776 
1777 	newinfo->entries_size = size;
1778 
1779 	xt_compat_init_offsets(NFPROTO_BRIDGE, info->nentries);
1780 	return EBT_ENTRY_ITERATE(entries, size, compat_calc_entry, info,
1781 							entries, newinfo);
1782 }
1783 
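/* Compat counterpart of copy_everything_to_user(): recompute entries_size
 * and the per-entry deltas via compat_table_info(), verify the length the
 * caller passed in, copy the counters if requested, then emit every entry
 * in 32-bit layout through compat_copy_entry_to_user().
 */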
1784 static int compat_copy_everything_to_user(struct ebt_table *t,
1785 					  void __user *user, int *len, int cmd)
1786 {
1787 	struct compat_ebt_replace repl, tmp;
1788 	struct ebt_counter *oldcounters;
1789 	struct ebt_table_info tinfo;
1790 	int ret;
1791 	void __user *pos;
1792 
1793 	memset(&tinfo, 0, sizeof(tinfo));
1794 
1795 	if (cmd == EBT_SO_GET_ENTRIES) {
1796 		tinfo.entries_size = t->private->entries_size;
1797 		tinfo.nentries = t->private->nentries;
1798 		tinfo.entries = t->private->entries;
1799 		oldcounters = t->private->counters;
1800 	} else {
1801 		tinfo.entries_size = t->table->entries_size;
1802 		tinfo.nentries = t->table->nentries;
1803 		tinfo.entries = t->table->entries;
1804 		oldcounters = t->table->counters;
1805 	}
1806 
1807 	if (copy_from_user(&tmp, user, sizeof(tmp)))
1808 		return -EFAULT;
1809 
1810 	if (tmp.nentries != tinfo.nentries ||
1811 	   (tmp.num_counters && tmp.num_counters != tinfo.nentries))
1812 		return -EINVAL;
1813 
1814 	memcpy(&repl, &tmp, sizeof(repl));
1815 	if (cmd == EBT_SO_GET_ENTRIES)
1816 		ret = compat_table_info(t->private, &repl);
1817 	else
1818 		ret = compat_table_info(&tinfo, &repl);
1819 	if (ret)
1820 		return ret;
1821 
1822 	if (*len != sizeof(tmp) + repl.entries_size +
1823 	   (tmp.num_counters ? tinfo.nentries * sizeof(struct ebt_counter) : 0)) {
1824 		pr_err("wrong size: *len %d, entries_size %u, replsz %d\n",
1825 				*len, tinfo.entries_size, repl.entries_size);
1826 		return -EINVAL;
1827 	}
1828 
1829 	/* userspace might not need the counters */
1830 	ret = copy_counters_to_user(t, oldcounters, compat_ptr(tmp.counters),
1831 					tmp.num_counters, tinfo.nentries);
1832 	if (ret)
1833 		return ret;
1834 
1835 	pos = compat_ptr(tmp.entries);
1836 	return EBT_ENTRY_ITERATE(tinfo.entries, tinfo.entries_size,
1837 			compat_copy_entry_to_user, &pos, &tmp.entries_size);
1838 }
1839 
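/* State shared by the two translation passes in compat_do_replace(): the
 * first pass runs with buf_kern_start == NULL and merely counts how large
 * the 64-bit image will be, the second pass copies the converted data into
 * the buffer allocated from that count.
 */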
1840 struct ebt_entries_buf_state {
1841 	char *buf_kern_start;	/* kernel buffer to copy (translated) data to */
1842 	u32 buf_kern_len;	/* total size of kernel buffer */
1843 	u32 buf_kern_offset;	/* amount of data copied so far */
1844 	u32 buf_user_offset;	/* read position in userspace buffer */
1845 };
1846 
1847 static int ebt_buf_count(struct ebt_entries_buf_state *state, unsigned int sz)
1848 {
1849 	state->buf_kern_offset += sz;
1850 	return state->buf_kern_offset >= sz ? 0 : -EINVAL;
1851 }
1852 
1853 static int ebt_buf_add(struct ebt_entries_buf_state *state,
1854 		       void *data, unsigned int sz)
1855 {
1856 	if (state->buf_kern_start == NULL)
1857 		goto count_only;
1858 
1859 	BUG_ON(state->buf_kern_offset + sz > state->buf_kern_len);
1860 
1861 	memcpy(state->buf_kern_start + state->buf_kern_offset, data, sz);
1862 
1863  count_only:
1864 	state->buf_user_offset += sz;
1865 	return ebt_buf_count(state, sz);
1866 }
1867 
1868 static int ebt_buf_add_pad(struct ebt_entries_buf_state *state, unsigned int sz)
1869 {
1870 	char *b = state->buf_kern_start;
1871 
1872 	BUG_ON(b && state->buf_kern_offset > state->buf_kern_len);
1873 
1874 	if (b != NULL && sz > 0)
1875 		memset(b + state->buf_kern_offset, 0, sz);
1876 	/* do not adjust ->buf_user_offset here, we added kernel-side padding */
1877 	return ebt_buf_count(state, sz);
1878 }
1879 
1880 enum compat_mwt {
1881 	EBT_COMPAT_MATCH,
1882 	EBT_COMPAT_WATCHER,
1883 	EBT_COMPAT_TARGET,
1884 };
1885 
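/* Convert the data of one 32-bit match, watcher or target: look the
 * extension up by name, run its compat_from_user hook (or a plain memcpy)
 * when a destination buffer is attached, zero the alignment padding and
 * account for the size change in the buffer state.  Returns the kernel-side
 * data size, i.e. the 32-bit size plus the compat offset.
 */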
1886 static int compat_mtw_from_user(struct compat_ebt_entry_mwt *mwt,
1887 				enum compat_mwt compat_mwt,
1888 				struct ebt_entries_buf_state *state,
1889 				const unsigned char *base)
1890 {
1891 	char name[EBT_FUNCTION_MAXNAMELEN];
1892 	struct xt_match *match;
1893 	struct xt_target *wt;
1894 	void *dst = NULL;
1895 	int off, pad = 0;
1896 	unsigned int size_kern, match_size = mwt->match_size;
1897 
1898 	strlcpy(name, mwt->u.name, sizeof(name));
1899 
1900 	if (state->buf_kern_start)
1901 		dst = state->buf_kern_start + state->buf_kern_offset;
1902 
1903 	switch (compat_mwt) {
1904 	case EBT_COMPAT_MATCH:
1905 		match = xt_request_find_match(NFPROTO_BRIDGE, name, 0);
1906 		if (IS_ERR(match))
1907 			return PTR_ERR(match);
1908 
1909 		off = ebt_compat_match_offset(match, match_size);
1910 		if (dst) {
1911 			if (match->compat_from_user)
1912 				match->compat_from_user(dst, mwt->data);
1913 			else
1914 				memcpy(dst, mwt->data, match_size);
1915 		}
1916 
1917 		size_kern = match->matchsize;
1918 		if (unlikely(size_kern == -1))
1919 			size_kern = match_size;
1920 		module_put(match->me);
1921 		break;
1922 	case EBT_COMPAT_WATCHER: /* fallthrough */
1923 	case EBT_COMPAT_TARGET:
1924 		wt = xt_request_find_target(NFPROTO_BRIDGE, name, 0);
1925 		if (IS_ERR(wt))
1926 			return PTR_ERR(wt);
1927 		off = xt_compat_target_offset(wt);
1928 
1929 		if (dst) {
1930 			if (wt->compat_from_user)
1931 				wt->compat_from_user(dst, mwt->data);
1932 			else
1933 				memcpy(dst, mwt->data, match_size);
1934 		}
1935 
1936 		size_kern = wt->targetsize;
1937 		module_put(wt->me);
1938 		break;
1939 
1940 	default:
1941 		return -EINVAL;
1942 	}
1943 
1944 	state->buf_kern_offset += match_size + off;
1945 	state->buf_user_offset += match_size;
1946 	pad = XT_ALIGN(size_kern) - size_kern;
1947 
1948 	if (pad > 0 && dst) {
1949 		BUG_ON(state->buf_kern_len <= pad);
1950 		BUG_ON(state->buf_kern_offset - (match_size + off) + size_kern > state->buf_kern_len - pad);
1951 		memset(dst + size_kern, 0, pad);
1952 	}
1953 	return off + match_size;
1954 }
1955 
1956 /*
1957  * return the size growth needed by the 64-bit image of all matches,
1958  * watchers or the target, including necessary alignment and padding.
1959  */
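/* Per object the growth is the compat offset reported by
 * compat_mtw_from_user() plus the ebt_compat_entry_padsize() padding placed
 * in front of its data.
 */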
1960 static int ebt_size_mwt(struct compat_ebt_entry_mwt *match32,
1961 			unsigned int size_left, enum compat_mwt type,
1962 			struct ebt_entries_buf_state *state, const void *base)
1963 {
1964 	int growth = 0;
1965 	char *buf;
1966 
1967 	if (size_left == 0)
1968 		return 0;
1969 
1970 	buf = (char *) match32;
1971 
1972 	while (size_left >= sizeof(*match32)) {
1973 		struct ebt_entry_match *match_kern;
1974 		int ret;
1975 
1976 		match_kern = (struct ebt_entry_match *) state->buf_kern_start;
1977 		if (match_kern) {
1978 			char *tmp;
1979 			tmp = state->buf_kern_start + state->buf_kern_offset;
1980 			match_kern = (struct ebt_entry_match *) tmp;
1981 		}
1982 		ret = ebt_buf_add(state, buf, sizeof(*match32));
1983 		if (ret < 0)
1984 			return ret;
1985 		size_left -= sizeof(*match32);
1986 
1987 		/* add padding before match->data (if any) */
1988 		ret = ebt_buf_add_pad(state, ebt_compat_entry_padsize());
1989 		if (ret < 0)
1990 			return ret;
1991 
1992 		if (match32->match_size > size_left)
1993 			return -EINVAL;
1994 
1995 		size_left -= match32->match_size;
1996 
1997 		ret = compat_mtw_from_user(match32, type, state, base);
1998 		if (ret < 0)
1999 			return ret;
2000 
2001 		BUG_ON(ret < match32->match_size);
2002 		growth += ret - match32->match_size;
2003 		growth += ebt_compat_entry_padsize();
2004 
2005 		buf += sizeof(*match32);
2006 		buf += match32->match_size;
2007 
2008 		if (match_kern)
2009 			match_kern->match_size = ret;
2010 
2011 		WARN_ON(type == EBT_COMPAT_TARGET && size_left);
2012 		match32 = (struct compat_ebt_entry_mwt *) buf;
2013 	}
2014 
2015 	return growth;
2016 }
2017 
2018 /* called for all ebt_entry structures. */
2019 static int size_entry_mwt(struct ebt_entry *entry, const unsigned char *base,
2020 			  unsigned int *total,
2021 			  struct ebt_entries_buf_state *state)
2022 {
2023 	unsigned int i, j, startoff, new_offset = 0;
2024 	/* offsets of matches, watchers, target and of the next struct ebt_entry: */
2025 	unsigned int offsets[4];
2026 	unsigned int *offsets_update = NULL;
2027 	int ret;
2028 	char *buf_start;
2029 
2030 	if (*total < sizeof(struct ebt_entries))
2031 		return -EINVAL;
2032 
2033 	if (!entry->bitmask) {
2034 		*total -= sizeof(struct ebt_entries);
2035 		return ebt_buf_add(state, entry, sizeof(struct ebt_entries));
2036 	}
2037 	if (*total < sizeof(*entry) || entry->next_offset < sizeof(*entry))
2038 		return -EINVAL;
2039 
2040 	startoff = state->buf_user_offset;
2041 	/* pull in most part of ebt_entry, it does not need to be changed. */
2042 	ret = ebt_buf_add(state, entry,
2043 			offsetof(struct ebt_entry, watchers_offset));
2044 	if (ret < 0)
2045 		return ret;
2046 
2047 	offsets[0] = sizeof(struct ebt_entry); /* matches come first */
2048 	memcpy(&offsets[1], &entry->watchers_offset,
2049 			sizeof(offsets) - sizeof(offsets[0]));
2050 
2051 	if (state->buf_kern_start) {
2052 		buf_start = state->buf_kern_start + state->buf_kern_offset;
2053 		offsets_update = (unsigned int *) buf_start;
2054 	}
2055 	ret = ebt_buf_add(state, &offsets[1],
2056 			sizeof(offsets) - sizeof(offsets[0]));
2057 	if (ret < 0)
2058 		return ret;
2059 	buf_start = (char *) entry;
2060 	/*
2061 	 * 0: matches offset, always follows ebt_entry.
2062 	 * 1: watchers offset, from ebt_entry structure
2063 	 * 2: target offset, from ebt_entry structure
2064 	 * 3: next ebt_entry offset, from ebt_entry structure
2065 	 *
2066 	 * offsets are relative to beginning of struct ebt_entry (i.e., 0).
2067 	 */
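	/* Each round handles the region between offsets[i] and offsets[j]:
	 * matches first, then watchers, then the target.  On the copying pass
	 * the offset fields already written to the kernel buffer are updated
	 * with the growth accumulated so far.
	 */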
2068 	for (i = 0, j = 1 ; j < 4 ; j++, i++) {
2069 		struct compat_ebt_entry_mwt *match32;
2070 		unsigned int size;
2071 		char *buf = buf_start;
2072 
2073 		buf = buf_start + offsets[i];
2074 		if (offsets[i] > offsets[j])
2075 			return -EINVAL;
2076 
2077 		match32 = (struct compat_ebt_entry_mwt *) buf;
2078 		size = offsets[j] - offsets[i];
2079 		ret = ebt_size_mwt(match32, size, i, state, base);
2080 		if (ret < 0)
2081 			return ret;
2082 		new_offset += ret;
2083 		if (offsets_update && new_offset) {
2084 			pr_debug("change offset %d to %d\n",
2085 				offsets_update[i], offsets[j] + new_offset);
2086 			offsets_update[i] = offsets[j] + new_offset;
2087 		}
2088 	}
2089 
2090 	if (state->buf_kern_start == NULL) {
2091 		unsigned int offset = buf_start - (char *) base;
2092 
2093 		ret = xt_compat_add_offset(NFPROTO_BRIDGE, offset, new_offset);
2094 		if (ret < 0)
2095 			return ret;
2096 	}
2097 
2098 	startoff = state->buf_user_offset - startoff;
2099 
2100 	BUG_ON(*total < startoff);
2101 	*total -= startoff;
2102 	return 0;
2103 }
2104 
2105 /*
2106  * repl->entries_size is the size of the ebt_entry blob in userspace.
2107  * It might need more memory when copied to a 64-bit kernel in case
2108  * userspace is 32-bit. So, first task: find out how much memory is needed.
2109  *
2110  * Called before validation is performed.
2111  */
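/* Walk every entry with size_entry_mwt().  On the sizing pass (no kernel
 * buffer attached to the state) the return value is the number of bytes the
 * translated 64-bit image will occupy; the second pass fills the buffer and
 * returns the same count.
 */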
2112 static int compat_copy_entries(unsigned char *data, unsigned int size_user,
2113 				struct ebt_entries_buf_state *state)
2114 {
2115 	unsigned int size_remaining = size_user;
2116 	int ret;
2117 
2118 	ret = EBT_ENTRY_ITERATE(data, size_user, size_entry_mwt, data,
2119 					&size_remaining, state);
2120 	if (ret < 0)
2121 		return ret;
2122 
2123 	WARN_ON(size_remaining);
2124 	return state->buf_kern_offset;
2125 }
2126 
2127 
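/* Copy in and sanity-check the 32-bit ebt_replace header.  The layout is
 * identical up to hook_entry and is taken over as one block; the compat
 * pointers for hook entries, counters and entries are widened with
 * compat_ptr().
 */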
2128 static int compat_copy_ebt_replace_from_user(struct ebt_replace *repl,
2129 					    void __user *user, unsigned int len)
2130 {
2131 	struct compat_ebt_replace tmp;
2132 	int i;
2133 
2134 	if (len < sizeof(tmp))
2135 		return -EINVAL;
2136 
2137 	if (copy_from_user(&tmp, user, sizeof(tmp)))
2138 		return -EFAULT;
2139 
2140 	if (len != sizeof(tmp) + tmp.entries_size)
2141 		return -EINVAL;
2142 
2143 	if (tmp.entries_size == 0)
2144 		return -EINVAL;
2145 
2146 	if (tmp.nentries >= ((INT_MAX - sizeof(struct ebt_table_info)) /
2147 			NR_CPUS - SMP_CACHE_BYTES) / sizeof(struct ebt_counter))
2148 		return -ENOMEM;
2149 	if (tmp.num_counters >= INT_MAX / sizeof(struct ebt_counter))
2150 		return -ENOMEM;
2151 
2152 	memcpy(repl, &tmp, offsetof(struct ebt_replace, hook_entry));
2153 
2154 	/* starting with hook_entry, 32 vs. 64 bit structures are different */
2155 	for (i = 0; i < NF_BR_NUMHOOKS; i++)
2156 		repl->hook_entry[i] = compat_ptr(tmp.hook_entry[i]);
2157 
2158 	repl->num_counters = tmp.num_counters;
2159 	repl->counters = compat_ptr(tmp.counters);
2160 	repl->entries = compat_ptr(tmp.entries);
2161 	return 0;
2162 }
2163 
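/* Compat counterpart of do_replace(): read the 32-bit ebt_replace header,
 * copy the 32-bit entry blob from userspace, size and translate it into a
 * freshly allocated 64-bit image in two passes, remap the hook entry
 * pointers using the recorded jump deltas, and hand the result to
 * do_replace_finish().
 */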
2164 static int compat_do_replace(struct net *net, void __user *user,
2165 			     unsigned int len)
2166 {
2167 	int ret, i, countersize, size64;
2168 	struct ebt_table_info *newinfo;
2169 	struct ebt_replace tmp;
2170 	struct ebt_entries_buf_state state;
2171 	void *entries_tmp;
2172 
2173 	ret = compat_copy_ebt_replace_from_user(&tmp, user, len);
2174 	if (ret) {
2175 		/* try real handler in case userland supplied needed padding */
2176 		if (ret == -EINVAL && do_replace(net, user, len) == 0)
2177 			ret = 0;
2178 		return ret;
2179 	}
2180 
2181 	countersize = COUNTER_OFFSET(tmp.nentries) * nr_cpu_ids;
2182 	newinfo = vmalloc(sizeof(*newinfo) + countersize);
2183 	if (!newinfo)
2184 		return -ENOMEM;
2185 
2186 	if (countersize)
2187 		memset(newinfo->counters, 0, countersize);
2188 
2189 	memset(&state, 0, sizeof(state));
2190 
2191 	newinfo->entries = vmalloc(tmp.entries_size);
2192 	if (!newinfo->entries) {
2193 		ret = -ENOMEM;
2194 		goto free_newinfo;
2195 	}
2196 	if (copy_from_user(
2197 	   newinfo->entries, tmp.entries, tmp.entries_size) != 0) {
2198 		ret = -EFAULT;
2199 		goto free_entries;
2200 	}
2201 
2202 	entries_tmp = newinfo->entries;
2203 
2204 	xt_compat_lock(NFPROTO_BRIDGE);
2205 
2206 	xt_compat_init_offsets(NFPROTO_BRIDGE, tmp.nentries);
2207 	ret = compat_copy_entries(entries_tmp, tmp.entries_size, &state);
2208 	if (ret < 0)
2209 		goto out_unlock;
2210 
2211 	pr_debug("tmp.entries_size %d, kern off %d, user off %d delta %d\n",
2212 		tmp.entries_size, state.buf_kern_offset, state.buf_user_offset,
2213 		xt_compat_calc_jump(NFPROTO_BRIDGE, tmp.entries_size));
2214 
2215 	size64 = ret;
2216 	newinfo->entries = vmalloc(size64);
2217 	if (!newinfo->entries) {
2218 		vfree(entries_tmp);
2219 		ret = -ENOMEM;
2220 		goto out_unlock;
2221 	}
2222 
2223 	memset(&state, 0, sizeof(state));
2224 	state.buf_kern_start = newinfo->entries;
2225 	state.buf_kern_len = size64;
2226 
2227 	ret = compat_copy_entries(entries_tmp, tmp.entries_size, &state);
2228 	BUG_ON(ret < 0);	/* parses same data again */
2229 
2230 	vfree(entries_tmp);
2231 	tmp.entries_size = size64;
2232 
2233 	for (i = 0; i < NF_BR_NUMHOOKS; i++) {
2234 		char __user *usrptr;
2235 		if (tmp.hook_entry[i]) {
2236 			unsigned int delta;
2237 			usrptr = (char __user *) tmp.hook_entry[i];
2238 			delta = usrptr - tmp.entries;
2239 			usrptr += xt_compat_calc_jump(NFPROTO_BRIDGE, delta);
2240 			tmp.hook_entry[i] = (struct ebt_entries __user *)usrptr;
2241 		}
2242 	}
2243 
2244 	xt_compat_flush_offsets(NFPROTO_BRIDGE);
2245 	xt_compat_unlock(NFPROTO_BRIDGE);
2246 
2247 	ret = do_replace_finish(net, &tmp, newinfo);
2248 	if (ret == 0)
2249 		return ret;
2250 free_entries:
2251 	vfree(newinfo->entries);
2252 free_newinfo:
2253 	vfree(newinfo);
2254 	return ret;
2255 out_unlock:
2256 	xt_compat_flush_offsets(NFPROTO_BRIDGE);
2257 	xt_compat_unlock(NFPROTO_BRIDGE);
2258 	goto free_entries;
2259 }
2260 
2261 static int compat_update_counters(struct net *net, void __user *user,
2262 				  unsigned int len)
2263 {
2264 	struct compat_ebt_replace hlp;
2265 
2266 	if (copy_from_user(&hlp, user, sizeof(hlp)))
2267 		return -EFAULT;
2268 
2269 	/* try real handler in case userland supplied needed padding */
2270 	if (len != sizeof(hlp) + hlp.num_counters * sizeof(struct ebt_counter))
2271 		return update_counters(net, user, len);
2272 
2273 	return do_update_counters(net, hlp.name, compat_ptr(hlp.counters),
2274 					hlp.num_counters, user, len);
2275 }
2276 
2277 static int compat_do_ebt_set_ctl(struct sock *sk,
2278 		int cmd, void __user *user, unsigned int len)
2279 {
2280 	int ret;
2281 
2282 	if (!capable(CAP_NET_ADMIN))
2283 		return -EPERM;
2284 
2285 	switch (cmd) {
2286 	case EBT_SO_SET_ENTRIES:
2287 		ret = compat_do_replace(sock_net(sk), user, len);
2288 		break;
2289 	case EBT_SO_SET_COUNTERS:
2290 		ret = compat_update_counters(sock_net(sk), user, len);
2291 		break;
2292 	default:
2293 		ret = -EINVAL;
2294 	}
2295 	return ret;
2296 }
2297 
2298 static int compat_do_ebt_get_ctl(struct sock *sk, int cmd,
2299 		void __user *user, int *len)
2300 {
2301 	int ret;
2302 	struct compat_ebt_replace tmp;
2303 	struct ebt_table *t;
2304 
2305 	if (!capable(CAP_NET_ADMIN))
2306 		return -EPERM;
2307 
2308 	/* try real handler in case userland supplied needed padding */
2309 	if ((cmd == EBT_SO_GET_INFO ||
2310 	     cmd == EBT_SO_GET_INIT_INFO) && *len != sizeof(tmp))
2311 			return do_ebt_get_ctl(sk, cmd, user, len);
2312 
2313 	if (copy_from_user(&tmp, user, sizeof(tmp)))
2314 		return -EFAULT;
2315 
2316 	t = find_table_lock(sock_net(sk), tmp.name, &ret, &ebt_mutex);
2317 	if (!t)
2318 		return ret;
2319 
2320 	xt_compat_lock(NFPROTO_BRIDGE);
2321 	switch (cmd) {
2322 	case EBT_SO_GET_INFO:
2323 		tmp.nentries = t->private->nentries;
2324 		ret = compat_table_info(t->private, &tmp);
2325 		if (ret)
2326 			goto out;
2327 		tmp.valid_hooks = t->valid_hooks;
2328 
2329 		if (copy_to_user(user, &tmp, *len) != 0) {
2330 			ret = -EFAULT;
2331 			break;
2332 		}
2333 		ret = 0;
2334 		break;
2335 	case EBT_SO_GET_INIT_INFO:
2336 		tmp.nentries = t->table->nentries;
2337 		tmp.entries_size = t->table->entries_size;
2338 		tmp.valid_hooks = t->table->valid_hooks;
2339 
2340 		if (copy_to_user(user, &tmp, *len) != 0) {
2341 			ret = -EFAULT;
2342 			break;
2343 		}
2344 		ret = 0;
2345 		break;
2346 	case EBT_SO_GET_ENTRIES:
2347 	case EBT_SO_GET_INIT_ENTRIES:
2348 		/*
2349 		 * try real handler first in case of userland-side padding.
2350 		 * in case we are dealing with an 'ordinary' 32-bit binary
2351 		 * without 64-bit compatibility padding, this will fail right
2352 		 * after copy_from_user when the *len argument is validated.
2353 		 *
2354 		 * the compat_ variant needs to do one pass over the kernel
2355 		 * data set to adjust for size differences before the check.
2356 		 */
2357 		if (copy_everything_to_user(t, user, len, cmd) == 0)
2358 			ret = 0;
2359 		else
2360 			ret = compat_copy_everything_to_user(t, user, len, cmd);
2361 		break;
2362 	default:
2363 		ret = -EINVAL;
2364 	}
2365  out:
2366 	xt_compat_flush_offsets(NFPROTO_BRIDGE);
2367 	xt_compat_unlock(NFPROTO_BRIDGE);
2368 	mutex_unlock(&ebt_mutex);
2369 	return ret;
2370 }
2371 #endif
2372 
2373 static struct nf_sockopt_ops ebt_sockopts =
2374 {
2375 	.pf		= PF_INET,
2376 	.set_optmin	= EBT_BASE_CTL,
2377 	.set_optmax	= EBT_SO_SET_MAX + 1,
2378 	.set		= do_ebt_set_ctl,
2379 #ifdef CONFIG_COMPAT
2380 	.compat_set	= compat_do_ebt_set_ctl,
2381 #endif
2382 	.get_optmin	= EBT_BASE_CTL,
2383 	.get_optmax	= EBT_SO_GET_MAX + 1,
2384 	.get		= do_ebt_get_ctl,
2385 #ifdef CONFIG_COMPAT
2386 	.compat_get	= compat_do_ebt_get_ctl,
2387 #endif
2388 	.owner		= THIS_MODULE,
2389 };
2390 
2391 static int __init ebtables_init(void)
2392 {
2393 	int ret;
2394 
2395 	ret = xt_register_target(&ebt_standard_target);
2396 	if (ret < 0)
2397 		return ret;
2398 	ret = nf_register_sockopt(&ebt_sockopts);
2399 	if (ret < 0) {
2400 		xt_unregister_target(&ebt_standard_target);
2401 		return ret;
2402 	}
2403 
2404 	printk(KERN_INFO "Ebtables v2.0 registered\n");
2405 	return 0;
2406 }
2407 
2408 static void __exit ebtables_fini(void)
2409 {
2410 	nf_unregister_sockopt(&ebt_sockopts);
2411 	xt_unregister_target(&ebt_standard_target);
2412 	printk(KERN_INFO "Ebtables v2.0 unregistered\n");
2413 }
2414 
2415 EXPORT_SYMBOL(ebt_register_table);
2416 EXPORT_SYMBOL(ebt_unregister_table);
2417 EXPORT_SYMBOL(ebt_do_table);
2418 module_init(ebtables_init);
2419 module_exit(ebtables_fini);
2420 MODULE_LICENSE("GPL");
2421