/*
 * x_tables core - Backend for {ip,ip6,arp}_tables
 *
 * Copyright (C) 2006-2006 Harald Welte <laforge@netfilter.org>
 *
 * Based on existing ip_tables code which is
 *   Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
 *   Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/socket.h>
#include <linux/net.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/vmalloc.h>
#include <linux/mutex.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/audit.h>
#include <net/net_namespace.h>

#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_arp.h>
#include <linux/netfilter_ipv4/ip_tables.h>
#include <linux/netfilter_ipv6/ip6_tables.h>
#include <linux/netfilter_arp/arp_tables.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
MODULE_DESCRIPTION("{ip,ip6,arp,eb}_tables backend module");

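/* Round a size up to a whole multiple of the CPU cache line size. */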
#define SMP_ALIGN(x) (((x) + SMP_CACHE_BYTES-1) & ~(SMP_CACHE_BYTES-1))

struct compat_delta {
	unsigned int offset; /* offset in kernel */
	int delta; /* delta in 32bit user land */
};

struct xt_af {
	struct mutex mutex;
	struct list_head match;
	struct list_head target;
#ifdef CONFIG_COMPAT
	struct mutex compat_mutex;
	struct compat_delta *compat_tab;
	unsigned int number; /* number of slots in compat_tab[] */
	unsigned int cur; /* number of used slots in compat_tab[] */
#endif
};

static struct xt_af *xt;

static const char *const xt_prefix[NFPROTO_NUMPROTO] = {
	[NFPROTO_UNSPEC] = "x",
	[NFPROTO_IPV4]   = "ip",
	[NFPROTO_ARP]    = "arp",
	[NFPROTO_BRIDGE] = "eb",
	[NFPROTO_IPV6]   = "ip6",
};

/* Allow this many total (re)entries. */
static const unsigned int xt_jumpstack_multiplier = 2;

/* Registration hooks for targets. */
int
xt_register_target(struct xt_target *target)
{
	u_int8_t af = target->family;
	int ret;

	ret = mutex_lock_interruptible(&xt[af].mutex);
	if (ret != 0)
		return ret;
	list_add(&target->list, &xt[af].target);
	mutex_unlock(&xt[af].mutex);
	return ret;
}
EXPORT_SYMBOL(xt_register_target);

void
xt_unregister_target(struct xt_target *target)
{
	u_int8_t af = target->family;

	mutex_lock(&xt[af].mutex);
	list_del(&target->list);
	mutex_unlock(&xt[af].mutex);
}
EXPORT_SYMBOL(xt_unregister_target);

int
xt_register_targets(struct xt_target *target, unsigned int n)
{
	unsigned int i;
	int err = 0;

	for (i = 0; i < n; i++) {
		err = xt_register_target(&target[i]);
		if (err)
			goto err;
	}
	return err;

err:
	if (i > 0)
		xt_unregister_targets(target, i);
	return err;
}
EXPORT_SYMBOL(xt_register_targets);

void
xt_unregister_targets(struct xt_target *target, unsigned int n)
{
	while (n-- > 0)
		xt_unregister_target(&target[n]);
}
EXPORT_SYMBOL(xt_unregister_targets);

int
xt_register_match(struct xt_match *match)
{
	u_int8_t af = match->family;
	int ret;

	ret = mutex_lock_interruptible(&xt[af].mutex);
	if (ret != 0)
		return ret;

	list_add(&match->list, &xt[af].match);
	mutex_unlock(&xt[af].mutex);

	return ret;
}
EXPORT_SYMBOL(xt_register_match);

void
xt_unregister_match(struct xt_match *match)
{
	u_int8_t af = match->family;

	mutex_lock(&xt[af].mutex);
	list_del(&match->list);
	mutex_unlock(&xt[af].mutex);
}
EXPORT_SYMBOL(xt_unregister_match);

int
xt_register_matches(struct xt_match *match, unsigned int n)
{
	unsigned int i;
	int err = 0;

	for (i = 0; i < n; i++) {
		err = xt_register_match(&match[i]);
		if (err)
			goto err;
	}
	return err;

err:
	if (i > 0)
		xt_unregister_matches(match, i);
	return err;
}
EXPORT_SYMBOL(xt_register_matches);

void
xt_unregister_matches(struct xt_match *match, unsigned int n)
{
	while (n-- > 0)
		xt_unregister_match(&match[n]);
}
EXPORT_SYMBOL(xt_unregister_matches);


/*
 * These are weird, but module loading must not be done with mutex
 * held (since they will register), and we have to have a single
 * function to use.
 */

/* Find match, grabs ref.  Returns ERR_PTR() on error. */
struct xt_match *xt_find_match(u8 af, const char *name, u8 revision)
{
	struct xt_match *m;
	int err = -ENOENT;

	if (mutex_lock_interruptible(&xt[af].mutex) != 0)
		return ERR_PTR(-EINTR);

	list_for_each_entry(m, &xt[af].match, list) {
		if (strcmp(m->name, name) == 0) {
			if (m->revision == revision) {
				if (try_module_get(m->me)) {
					mutex_unlock(&xt[af].mutex);
					return m;
				}
			} else
				err = -EPROTOTYPE; /* Found something. */
		}
	}
	mutex_unlock(&xt[af].mutex);

	if (af != NFPROTO_UNSPEC)
		/* Try searching again in the family-independent list */
		return xt_find_match(NFPROTO_UNSPEC, name, revision);

	return ERR_PTR(err);
}
EXPORT_SYMBOL(xt_find_match);

struct xt_match *
xt_request_find_match(uint8_t nfproto, const char *name, uint8_t revision)
{
	struct xt_match *match;

	match = xt_find_match(nfproto, name, revision);
	if (IS_ERR(match)) {
		request_module("%st_%s", xt_prefix[nfproto], name);
		match = xt_find_match(nfproto, name, revision);
	}

	return match;
}
EXPORT_SYMBOL_GPL(xt_request_find_match);

/* Find target, grabs ref.  Returns ERR_PTR() on error. */
struct xt_target *xt_find_target(u8 af, const char *name, u8 revision)
{
	struct xt_target *t;
	int err = -ENOENT;

	if (mutex_lock_interruptible(&xt[af].mutex) != 0)
		return ERR_PTR(-EINTR);

	list_for_each_entry(t, &xt[af].target, list) {
		if (strcmp(t->name, name) == 0) {
			if (t->revision == revision) {
				if (try_module_get(t->me)) {
					mutex_unlock(&xt[af].mutex);
					return t;
				}
			} else
				err = -EPROTOTYPE; /* Found something. */
		}
	}
	mutex_unlock(&xt[af].mutex);

	if (af != NFPROTO_UNSPEC)
		/* Try searching again in the family-independent list */
		return xt_find_target(NFPROTO_UNSPEC, name, revision);

	return ERR_PTR(err);
}
EXPORT_SYMBOL(xt_find_target);

struct xt_target *xt_request_find_target(u8 af, const char *name, u8 revision)
{
	struct xt_target *target;

	target = xt_find_target(af, name, revision);
	if (IS_ERR(target)) {
		request_module("%st_%s", xt_prefix[af], name);
		target = xt_find_target(af, name, revision);
	}

	return target;
}
EXPORT_SYMBOL_GPL(xt_request_find_target);

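/*
 * Revision bookkeeping for xt_find_revision(): scan the registered
 * extensions named @name, record the highest revision seen in *bestp and
 * report whether the exact @revision exists.  match_revfn() handles
 * matches, target_revfn() handles targets.
 */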
static int match_revfn(u8 af, const char *name, u8 revision, int *bestp)
{
	const struct xt_match *m;
	int have_rev = 0;

	list_for_each_entry(m, &xt[af].match, list) {
		if (strcmp(m->name, name) == 0) {
			if (m->revision > *bestp)
				*bestp = m->revision;
			if (m->revision == revision)
				have_rev = 1;
		}
	}

	if (af != NFPROTO_UNSPEC && !have_rev)
		return match_revfn(NFPROTO_UNSPEC, name, revision, bestp);

	return have_rev;
}

static int target_revfn(u8 af, const char *name, u8 revision, int *bestp)
{
	const struct xt_target *t;
	int have_rev = 0;

	list_for_each_entry(t, &xt[af].target, list) {
		if (strcmp(t->name, name) == 0) {
			if (t->revision > *bestp)
				*bestp = t->revision;
			if (t->revision == revision)
				have_rev = 1;
		}
	}

	if (af != NFPROTO_UNSPEC && !have_rev)
		return target_revfn(NFPROTO_UNSPEC, name, revision, bestp);

	return have_rev;
}

/* Returns true or false (if no such extension at all) */
int xt_find_revision(u8 af, const char *name, u8 revision, int target,
		     int *err)
{
	int have_rev, best = -1;

	if (mutex_lock_interruptible(&xt[af].mutex) != 0) {
		*err = -EINTR;
		return 1;
	}
	if (target == 1)
		have_rev = target_revfn(af, name, revision, &best);
	else
		have_rev = match_revfn(af, name, revision, &best);
	mutex_unlock(&xt[af].mutex);

	/* Nothing at all?  Return 0 to try loading module. */
	if (best == -1) {
		*err = -ENOENT;
		return 0;
	}

	*err = best;
	if (!have_rev)
		*err = -EPROTONOSUPPORT;
	return 1;
}
EXPORT_SYMBOL_GPL(xt_find_revision);

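/*
 * Render the hook bitmask as a '/'-separated list of hook names into @buf,
 * for use in the error messages printed by xt_check_match()/xt_check_target().
 */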
static char *textify_hooks(char *buf, size_t size, unsigned int mask)
{
	static const char *const names[] = {
		"PREROUTING", "INPUT", "FORWARD",
		"OUTPUT", "POSTROUTING", "BROUTING",
	};
	unsigned int i;
	char *p = buf;
	bool np = false;
	int res;

	*p = '\0';
	for (i = 0; i < ARRAY_SIZE(names); ++i) {
		if (!(mask & (1 << i)))
			continue;
		res = snprintf(p, size, "%s%s", np ? "/" : "", names[i]);
		if (res > 0) {
			size -= res;
			p += res;
		}
		np = true;
	}

	return buf;
}

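/*
 * Centralized sanity checks for a match extension: verify the user-supplied
 * payload size, the table the match may be used in, the hooks it may be
 * attached to and its protocol restriction, then run the extension's own
 * checkentry() callback.
 */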
int xt_check_match(struct xt_mtchk_param *par,
		   unsigned int size, u_int8_t proto, bool inv_proto)
{
	int ret;

	if (XT_ALIGN(par->match->matchsize) != size &&
	    par->match->matchsize != -1) {
		/*
		 * ebt_among is exempt from centralized matchsize checking
		 * because it uses a dynamic-size data set.
		 */
		pr_err("%s_tables: %s.%u match: invalid size "
		       "%u (kernel) != (user) %u\n",
		       xt_prefix[par->family], par->match->name,
		       par->match->revision,
		       XT_ALIGN(par->match->matchsize), size);
		return -EINVAL;
	}
	if (par->match->table != NULL &&
	    strcmp(par->match->table, par->table) != 0) {
		pr_err("%s_tables: %s match: only valid in %s table, not %s\n",
		       xt_prefix[par->family], par->match->name,
		       par->match->table, par->table);
		return -EINVAL;
	}
	if (par->match->hooks && (par->hook_mask & ~par->match->hooks) != 0) {
		char used[64], allow[64];

		pr_err("%s_tables: %s match: used from hooks %s, but only "
		       "valid from %s\n",
		       xt_prefix[par->family], par->match->name,
		       textify_hooks(used, sizeof(used), par->hook_mask),
		       textify_hooks(allow, sizeof(allow), par->match->hooks));
		return -EINVAL;
	}
	if (par->match->proto && (par->match->proto != proto || inv_proto)) {
		pr_err("%s_tables: %s match: only valid for protocol %u\n",
		       xt_prefix[par->family], par->match->name,
		       par->match->proto);
		return -EINVAL;
	}
	if (par->match->checkentry != NULL) {
		ret = par->match->checkentry(par);
		if (ret < 0)
			return ret;
		else if (ret > 0)
			/* Flag up potential errors. */
			return -EIO;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(xt_check_match);

#ifdef CONFIG_COMPAT
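/*
 * The compat offset table records, per rule entry, how much larger the
 * native kernel layout is than the 32-bit user layout at that offset.
 * Deltas are stored cumulatively, so a single lookup yields the total
 * adjustment up to a given offset.
 */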
int xt_compat_add_offset(u_int8_t af, unsigned int offset, int delta)
{
	struct xt_af *xp = &xt[af];

	if (!xp->compat_tab) {
		if (!xp->number)
			return -EINVAL;
		xp->compat_tab = vmalloc(sizeof(struct compat_delta) * xp->number);
		if (!xp->compat_tab)
			return -ENOMEM;
		xp->cur = 0;
	}

	if (xp->cur >= xp->number)
		return -EINVAL;

	if (xp->cur)
		delta += xp->compat_tab[xp->cur - 1].delta;
	xp->compat_tab[xp->cur].offset = offset;
	xp->compat_tab[xp->cur].delta = delta;
	xp->cur++;
	return 0;
}
EXPORT_SYMBOL_GPL(xt_compat_add_offset);

void xt_compat_flush_offsets(u_int8_t af)
{
	if (xt[af].compat_tab) {
		vfree(xt[af].compat_tab);
		xt[af].compat_tab = NULL;
		xt[af].number = 0;
		xt[af].cur = 0;
	}
}
EXPORT_SYMBOL_GPL(xt_compat_flush_offsets);

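/*
 * Binary-search the sorted, cumulative offset table and return the size
 * delta accumulated by all entries located before @offset; used to
 * translate jump targets between the compat and native rule layouts.
 */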
int xt_compat_calc_jump(u_int8_t af, unsigned int offset)
{
	struct compat_delta *tmp = xt[af].compat_tab;
	int mid, left = 0, right = xt[af].cur - 1;

	while (left <= right) {
		mid = (left + right) >> 1;
		if (offset > tmp[mid].offset)
			left = mid + 1;
		else if (offset < tmp[mid].offset)
			right = mid - 1;
		else
			return mid ? tmp[mid - 1].delta : 0;
	}
	return left ? tmp[left - 1].delta : 0;
}
EXPORT_SYMBOL_GPL(xt_compat_calc_jump);

void xt_compat_init_offsets(u_int8_t af, unsigned int number)
{
	xt[af].number = number;
	xt[af].cur = 0;
}
EXPORT_SYMBOL(xt_compat_init_offsets);

int xt_compat_match_offset(const struct xt_match *match)
{
	u_int16_t csize = match->compatsize ? : match->matchsize;
	return XT_ALIGN(match->matchsize) - COMPAT_XT_ALIGN(csize);
}
EXPORT_SYMBOL_GPL(xt_compat_match_offset);

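/*
 * Convert one match from the 32-bit compat layout at @m to the native
 * layout at *dstptr, widening the private data via compat_from_user()
 * where the extension provides it and zero-padding up to the aligned
 * matchsize.
 */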
int xt_compat_match_from_user(struct xt_entry_match *m, void **dstptr,
			      unsigned int *size)
{
	const struct xt_match *match = m->u.kernel.match;
	struct compat_xt_entry_match *cm = (struct compat_xt_entry_match *)m;
	int pad, off = xt_compat_match_offset(match);
	u_int16_t msize = cm->u.user.match_size;

	m = *dstptr;
	memcpy(m, cm, sizeof(*cm));
	if (match->compat_from_user)
		match->compat_from_user(m->data, cm->data);
	else
		memcpy(m->data, cm->data, msize - sizeof(*cm));
	pad = XT_ALIGN(match->matchsize) - match->matchsize;
	if (pad > 0)
		memset(m->data + match->matchsize, 0, pad);

	msize += off;
	m->u.user.match_size = msize;

	*size += off;
	*dstptr += msize;
	return 0;
}
EXPORT_SYMBOL_GPL(xt_compat_match_from_user);

int xt_compat_match_to_user(const struct xt_entry_match *m,
			    void __user **dstptr, unsigned int *size)
{
	const struct xt_match *match = m->u.kernel.match;
	struct compat_xt_entry_match __user *cm = *dstptr;
	int off = xt_compat_match_offset(match);
	u_int16_t msize = m->u.user.match_size - off;

	if (copy_to_user(cm, m, sizeof(*cm)) ||
	    put_user(msize, &cm->u.user.match_size) ||
	    copy_to_user(cm->u.user.name, m->u.kernel.match->name,
			 strlen(m->u.kernel.match->name) + 1))
		return -EFAULT;

	if (match->compat_to_user) {
		if (match->compat_to_user((void __user *)cm->data, m->data))
			return -EFAULT;
	} else {
		if (copy_to_user(cm->data, m->data, msize - sizeof(*cm)))
			return -EFAULT;
	}

	*size -= off;
	*dstptr += msize;
	return 0;
}
EXPORT_SYMBOL_GPL(xt_compat_match_to_user);
#endif /* CONFIG_COMPAT */

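/*
 * Same sanity checks as xt_check_match(), but for target extensions:
 * size, table, hook mask and protocol, followed by the target's own
 * checkentry() callback.
 */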
int xt_check_target(struct xt_tgchk_param *par,
		    unsigned int size, u_int8_t proto, bool inv_proto)
{
	int ret;

	if (XT_ALIGN(par->target->targetsize) != size) {
		pr_err("%s_tables: %s.%u target: invalid size "
		       "%u (kernel) != (user) %u\n",
		       xt_prefix[par->family], par->target->name,
		       par->target->revision,
		       XT_ALIGN(par->target->targetsize), size);
		return -EINVAL;
	}
	if (par->target->table != NULL &&
	    strcmp(par->target->table, par->table) != 0) {
		pr_err("%s_tables: %s target: only valid in %s table, not %s\n",
		       xt_prefix[par->family], par->target->name,
		       par->target->table, par->table);
		return -EINVAL;
	}
	if (par->target->hooks && (par->hook_mask & ~par->target->hooks) != 0) {
		char used[64], allow[64];

		pr_err("%s_tables: %s target: used from hooks %s, but only "
		       "usable from %s\n",
		       xt_prefix[par->family], par->target->name,
		       textify_hooks(used, sizeof(used), par->hook_mask),
		       textify_hooks(allow, sizeof(allow), par->target->hooks));
		return -EINVAL;
	}
	if (par->target->proto && (par->target->proto != proto || inv_proto)) {
		pr_err("%s_tables: %s target: only valid for protocol %u\n",
		       xt_prefix[par->family], par->target->name,
		       par->target->proto);
		return -EINVAL;
	}
	if (par->target->checkentry != NULL) {
		ret = par->target->checkentry(par);
		if (ret < 0)
			return ret;
		else if (ret > 0)
			/* Flag up potential errors. */
			return -EIO;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(xt_check_target);

#ifdef CONFIG_COMPAT
int xt_compat_target_offset(const struct xt_target *target)
{
	u_int16_t csize = target->compatsize ? : target->targetsize;
	return XT_ALIGN(target->targetsize) - COMPAT_XT_ALIGN(csize);
}
EXPORT_SYMBOL_GPL(xt_compat_target_offset);

void xt_compat_target_from_user(struct xt_entry_target *t, void **dstptr,
				unsigned int *size)
{
	const struct xt_target *target = t->u.kernel.target;
	struct compat_xt_entry_target *ct = (struct compat_xt_entry_target *)t;
	int pad, off = xt_compat_target_offset(target);
	u_int16_t tsize = ct->u.user.target_size;

	t = *dstptr;
	memcpy(t, ct, sizeof(*ct));
	if (target->compat_from_user)
		target->compat_from_user(t->data, ct->data);
	else
		memcpy(t->data, ct->data, tsize - sizeof(*ct));
	pad = XT_ALIGN(target->targetsize) - target->targetsize;
	if (pad > 0)
		memset(t->data + target->targetsize, 0, pad);

	tsize += off;
	t->u.user.target_size = tsize;

	*size += off;
	*dstptr += tsize;
}
EXPORT_SYMBOL_GPL(xt_compat_target_from_user);

int xt_compat_target_to_user(const struct xt_entry_target *t,
			     void __user **dstptr, unsigned int *size)
{
	const struct xt_target *target = t->u.kernel.target;
	struct compat_xt_entry_target __user *ct = *dstptr;
	int off = xt_compat_target_offset(target);
	u_int16_t tsize = t->u.user.target_size - off;

	if (copy_to_user(ct, t, sizeof(*ct)) ||
	    put_user(tsize, &ct->u.user.target_size) ||
	    copy_to_user(ct->u.user.name, t->u.kernel.target->name,
			 strlen(t->u.kernel.target->name) + 1))
		return -EFAULT;

	if (target->compat_to_user) {
		if (target->compat_to_user((void __user *)ct->data, t->data))
			return -EFAULT;
	} else {
		if (copy_to_user(ct->data, t->data, tsize - sizeof(*ct)))
			return -EFAULT;
	}

	*size -= off;
	*dstptr += tsize;
	return 0;
}
EXPORT_SYMBOL_GPL(xt_compat_target_to_user);
#endif

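/*
 * Allocate the table header plus one copy of the rule blob for every
 * possible CPU; small blobs come from kmalloc_node(), larger ones from
 * vmalloc_node() on the CPU's home node.
 */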
struct xt_table_info *xt_alloc_table_info(unsigned int size)
{
	struct xt_table_info *newinfo;
	int cpu;

	/* Pedantry: prevent them from hitting BUG() in vmalloc.c --RR */
	if ((SMP_ALIGN(size) >> PAGE_SHIFT) + 2 > totalram_pages)
		return NULL;

	newinfo = kzalloc(XT_TABLE_INFO_SZ, GFP_KERNEL);
	if (!newinfo)
		return NULL;

	newinfo->size = size;

	for_each_possible_cpu(cpu) {
		if (size <= PAGE_SIZE)
			newinfo->entries[cpu] = kmalloc_node(size,
							GFP_KERNEL,
							cpu_to_node(cpu));
		else
			newinfo->entries[cpu] = vmalloc_node(size,
							cpu_to_node(cpu));

		if (newinfo->entries[cpu] == NULL) {
			xt_free_table_info(newinfo);
			return NULL;
		}
	}

	return newinfo;
}
EXPORT_SYMBOL(xt_alloc_table_info);

void xt_free_table_info(struct xt_table_info *info)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		if (info->size <= PAGE_SIZE)
			kfree(info->entries[cpu]);
		else
			vfree(info->entries[cpu]);
	}

	if (info->jumpstack != NULL) {
		if (sizeof(void *) * info->stacksize > PAGE_SIZE) {
			for_each_possible_cpu(cpu)
				vfree(info->jumpstack[cpu]);
		} else {
			for_each_possible_cpu(cpu)
				kfree(info->jumpstack[cpu]);
		}
	}

	if (sizeof(void **) * nr_cpu_ids > PAGE_SIZE)
		vfree(info->jumpstack);
	else
		kfree(info->jumpstack);

	free_percpu(info->stackptr);

	kfree(info);
}
EXPORT_SYMBOL(xt_free_table_info);

/* Find table by name, grabs mutex & ref.  Returns ERR_PTR() on error. */
struct xt_table *xt_find_table_lock(struct net *net, u_int8_t af,
				    const char *name)
{
	struct xt_table *t;

	if (mutex_lock_interruptible(&xt[af].mutex) != 0)
		return ERR_PTR(-EINTR);

	list_for_each_entry(t, &net->xt.tables[af], list)
		if (strcmp(t->name, name) == 0 && try_module_get(t->me))
			return t;
	mutex_unlock(&xt[af].mutex);
	return NULL;
}
EXPORT_SYMBOL_GPL(xt_find_table_lock);

void xt_table_unlock(struct xt_table *table)
{
	mutex_unlock(&xt[table->af].mutex);
}
EXPORT_SYMBOL_GPL(xt_table_unlock);

#ifdef CONFIG_COMPAT
void xt_compat_lock(u_int8_t af)
{
	mutex_lock(&xt[af].compat_mutex);
}
EXPORT_SYMBOL_GPL(xt_compat_lock);

void xt_compat_unlock(u_int8_t af)
{
	mutex_unlock(&xt[af].compat_mutex);
}
EXPORT_SYMBOL_GPL(xt_compat_unlock);
#endif

DEFINE_PER_CPU(struct xt_info_lock, xt_info_locks);
EXPORT_PER_CPU_SYMBOL_GPL(xt_info_locks);

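/*
 * Allocate the per-cpu jump stacks used while traversing user-defined
 * chains; stacksize is scaled by xt_jumpstack_multiplier so the table can
 * be (re)entered more than once.
 */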
static int xt_jumpstack_alloc(struct xt_table_info *i)
{
	unsigned int size;
	int cpu;

	i->stackptr = alloc_percpu(unsigned int);
	if (i->stackptr == NULL)
		return -ENOMEM;

	size = sizeof(void **) * nr_cpu_ids;
	if (size > PAGE_SIZE)
		i->jumpstack = vmalloc(size);
	else
		i->jumpstack = kmalloc(size, GFP_KERNEL);
	if (i->jumpstack == NULL)
		return -ENOMEM;
	memset(i->jumpstack, 0, size);

	i->stacksize *= xt_jumpstack_multiplier;
	size = sizeof(void *) * i->stacksize;
	for_each_possible_cpu(cpu) {
		if (size > PAGE_SIZE)
			i->jumpstack[cpu] = vmalloc_node(size,
				cpu_to_node(cpu));
		else
			i->jumpstack[cpu] = kmalloc_node(size,
				GFP_KERNEL, cpu_to_node(cpu));
		if (i->jumpstack[cpu] == NULL)
			/*
			 * Freeing will be done later on by the callers. The
			 * chain is: xt_replace_table -> __do_replace ->
			 * do_replace -> xt_free_table_info.
			 */
			return -ENOMEM;
	}

	return 0;
}

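/*
 * Swap the new ruleset into the table under local_bh_disable() and return
 * the old xt_table_info for the caller to dispose of once it is done with
 * the old counters.
 */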
struct xt_table_info *
xt_replace_table(struct xt_table *table,
	      unsigned int num_counters,
	      struct xt_table_info *newinfo,
	      int *error)
{
	struct xt_table_info *private;
	int ret;

	ret = xt_jumpstack_alloc(newinfo);
	if (ret < 0) {
		*error = ret;
		return NULL;
	}

	/* Do the substitution. */
	local_bh_disable();
	private = table->private;

	/* Check inside lock: is the old number correct? */
	if (num_counters != private->number) {
		pr_debug("num_counters != table->private->number (%u/%u)\n",
			 num_counters, private->number);
		local_bh_enable();
		*error = -EAGAIN;
		return NULL;
	}

	table->private = newinfo;
	newinfo->initial_entries = private->initial_entries;

	/*
	 * Even though table entries have now been swapped, other CPU's
	 * may still be using the old entries. This is okay, because
	 * resynchronization happens because of the locking done
	 * during the get_counters() routine.
	 */
	local_bh_enable();

#ifdef CONFIG_AUDIT
	if (audit_enabled) {
		struct audit_buffer *ab;

		ab = audit_log_start(current->audit_context, GFP_KERNEL,
				     AUDIT_NETFILTER_CFG);
		if (ab) {
			audit_log_format(ab, "table=%s family=%u entries=%u",
					 table->name, table->af,
					 private->number);
			audit_log_end(ab);
		}
	}
#endif

	return private;
}
EXPORT_SYMBOL_GPL(xt_replace_table);

struct xt_table *xt_register_table(struct net *net,
				   const struct xt_table *input_table,
				   struct xt_table_info *bootstrap,
				   struct xt_table_info *newinfo)
{
	int ret;
	struct xt_table_info *private;
	struct xt_table *t, *table;

	/* Don't add one object to multiple lists. */
	table = kmemdup(input_table, sizeof(struct xt_table), GFP_KERNEL);
	if (!table) {
		ret = -ENOMEM;
		goto out;
	}

	ret = mutex_lock_interruptible(&xt[table->af].mutex);
	if (ret != 0)
		goto out_free;

	/* Don't autoload: we'd eat our tail... */
	list_for_each_entry(t, &net->xt.tables[table->af], list) {
		if (strcmp(t->name, table->name) == 0) {
			ret = -EEXIST;
			goto unlock;
		}
	}

	/* Simplifies replace_table code. */
	table->private = bootstrap;

	if (!xt_replace_table(table, 0, newinfo, &ret))
		goto unlock;

	private = table->private;
	pr_debug("table->private->number = %u\n", private->number);

	/* save number of initial entries */
	private->initial_entries = private->number;

	list_add(&table->list, &net->xt.tables[table->af]);
	mutex_unlock(&xt[table->af].mutex);
	return table;

 unlock:
	mutex_unlock(&xt[table->af].mutex);
out_free:
	kfree(table);
out:
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(xt_register_table);

void *xt_unregister_table(struct xt_table *table)
{
	struct xt_table_info *private;

	mutex_lock(&xt[table->af].mutex);
	private = table->private;
	list_del(&table->list);
	mutex_unlock(&xt[table->af].mutex);
	kfree(table);

	return private;
}
EXPORT_SYMBOL_GPL(xt_unregister_table);

#ifdef CONFIG_PROC_FS
struct xt_names_priv {
	struct seq_net_private p;
	u_int8_t af;
};
static void *xt_table_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct xt_names_priv *priv = seq->private;
	struct net *net = seq_file_net(seq);
	u_int8_t af = priv->af;

	mutex_lock(&xt[af].mutex);
	return seq_list_start(&net->xt.tables[af], *pos);
}

static void *xt_table_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct xt_names_priv *priv = seq->private;
	struct net *net = seq_file_net(seq);
	u_int8_t af = priv->af;

	return seq_list_next(v, &net->xt.tables[af], pos);
}

static void xt_table_seq_stop(struct seq_file *seq, void *v)
{
	struct xt_names_priv *priv = seq->private;
	u_int8_t af = priv->af;

	mutex_unlock(&xt[af].mutex);
}

static int xt_table_seq_show(struct seq_file *seq, void *v)
{
	struct xt_table *table = list_entry(v, struct xt_table, list);

	if (strlen(table->name))
		return seq_printf(seq, "%s\n", table->name);
	else
		return 0;
}

static const struct seq_operations xt_table_seq_ops = {
	.start	= xt_table_seq_start,
	.next	= xt_table_seq_next,
	.stop	= xt_table_seq_stop,
	.show	= xt_table_seq_show,
};

static int xt_table_open(struct inode *inode, struct file *file)
{
	int ret;
	struct xt_names_priv *priv;

	ret = seq_open_net(inode, file, &xt_table_seq_ops,
			   sizeof(struct xt_names_priv));
	if (!ret) {
		priv = ((struct seq_file *)file->private_data)->private;
		priv->af = (unsigned long)PDE(inode)->data;
	}
	return ret;
}

static const struct file_operations xt_table_ops = {
	.owner	 = THIS_MODULE,
	.open	 = xt_table_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release_net,
};

/*
 * Traverse state for ip{,6}_{tables,matches} for helping crossing
 * the multi-AF mutexes.
 */
struct nf_mttg_trav {
	struct list_head *head, *curr;
	uint8_t class, nfproto;
};

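/*
 * Traversal states: the seq_file walks the family-independent
 * (NFPROTO_UNSPEC) list first, then the family-specific list, then stops.
 */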
enum {
	MTTG_TRAV_INIT,
	MTTG_TRAV_NFP_UNSPEC,
	MTTG_TRAV_NFP_SPEC,
	MTTG_TRAV_DONE,
};

static void *xt_mttg_seq_next(struct seq_file *seq, void *v, loff_t *ppos,
    bool is_target)
{
	static const uint8_t next_class[] = {
		[MTTG_TRAV_NFP_UNSPEC] = MTTG_TRAV_NFP_SPEC,
		[MTTG_TRAV_NFP_SPEC]   = MTTG_TRAV_DONE,
	};
	struct nf_mttg_trav *trav = seq->private;

	switch (trav->class) {
	case MTTG_TRAV_INIT:
		trav->class = MTTG_TRAV_NFP_UNSPEC;
		mutex_lock(&xt[NFPROTO_UNSPEC].mutex);
		trav->head = trav->curr = is_target ?
			&xt[NFPROTO_UNSPEC].target : &xt[NFPROTO_UNSPEC].match;
		break;
	case MTTG_TRAV_NFP_UNSPEC:
		trav->curr = trav->curr->next;
		if (trav->curr != trav->head)
			break;
		mutex_unlock(&xt[NFPROTO_UNSPEC].mutex);
		mutex_lock(&xt[trav->nfproto].mutex);
		trav->head = trav->curr = is_target ?
			&xt[trav->nfproto].target : &xt[trav->nfproto].match;
		trav->class = next_class[trav->class];
		break;
	case MTTG_TRAV_NFP_SPEC:
		trav->curr = trav->curr->next;
		if (trav->curr != trav->head)
			break;
		/* fallthru, _stop will unlock */
	default:
		return NULL;
	}

	if (ppos != NULL)
		++*ppos;
	return trav;
}

static void *xt_mttg_seq_start(struct seq_file *seq, loff_t *pos,
    bool is_target)
{
	struct nf_mttg_trav *trav = seq->private;
	unsigned int j;

	trav->class = MTTG_TRAV_INIT;
	for (j = 0; j < *pos; ++j)
		if (xt_mttg_seq_next(seq, NULL, NULL, is_target) == NULL)
			return NULL;
	return trav;
}

static void xt_mttg_seq_stop(struct seq_file *seq, void *v)
{
	struct nf_mttg_trav *trav = seq->private;

	switch (trav->class) {
	case MTTG_TRAV_NFP_UNSPEC:
		mutex_unlock(&xt[NFPROTO_UNSPEC].mutex);
		break;
	case MTTG_TRAV_NFP_SPEC:
		mutex_unlock(&xt[trav->nfproto].mutex);
		break;
	}
}

static void *xt_match_seq_start(struct seq_file *seq, loff_t *pos)
{
	return xt_mttg_seq_start(seq, pos, false);
}

static void *xt_match_seq_next(struct seq_file *seq, void *v, loff_t *ppos)
{
	return xt_mttg_seq_next(seq, v, ppos, false);
}

static int xt_match_seq_show(struct seq_file *seq, void *v)
{
	const struct nf_mttg_trav *trav = seq->private;
	const struct xt_match *match;

	switch (trav->class) {
	case MTTG_TRAV_NFP_UNSPEC:
	case MTTG_TRAV_NFP_SPEC:
		if (trav->curr == trav->head)
			return 0;
		match = list_entry(trav->curr, struct xt_match, list);
		return (*match->name == '\0') ? 0 :
		       seq_printf(seq, "%s\n", match->name);
	}
	return 0;
}

static const struct seq_operations xt_match_seq_ops = {
	.start	= xt_match_seq_start,
	.next	= xt_match_seq_next,
	.stop	= xt_mttg_seq_stop,
	.show	= xt_match_seq_show,
};

static int xt_match_open(struct inode *inode, struct file *file)
{
	struct seq_file *seq;
	struct nf_mttg_trav *trav;
	int ret;

	trav = kmalloc(sizeof(*trav), GFP_KERNEL);
	if (trav == NULL)
		return -ENOMEM;

	ret = seq_open(file, &xt_match_seq_ops);
	if (ret < 0) {
		kfree(trav);
		return ret;
	}

	seq = file->private_data;
	seq->private = trav;
	trav->nfproto = (unsigned long)PDE(inode)->data;
	return 0;
}

static const struct file_operations xt_match_ops = {
	.owner	 = THIS_MODULE,
	.open	 = xt_match_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release_private,
};

static void *xt_target_seq_start(struct seq_file *seq, loff_t *pos)
{
	return xt_mttg_seq_start(seq, pos, true);
}

static void *xt_target_seq_next(struct seq_file *seq, void *v, loff_t *ppos)
{
	return xt_mttg_seq_next(seq, v, ppos, true);
}

static int xt_target_seq_show(struct seq_file *seq, void *v)
{
	const struct nf_mttg_trav *trav = seq->private;
	const struct xt_target *target;

	switch (trav->class) {
	case MTTG_TRAV_NFP_UNSPEC:
	case MTTG_TRAV_NFP_SPEC:
		if (trav->curr == trav->head)
			return 0;
		target = list_entry(trav->curr, struct xt_target, list);
		return (*target->name == '\0') ? 0 :
		       seq_printf(seq, "%s\n", target->name);
	}
	return 0;
}

static const struct seq_operations xt_target_seq_ops = {
	.start	= xt_target_seq_start,
	.next	= xt_target_seq_next,
	.stop	= xt_mttg_seq_stop,
	.show	= xt_target_seq_show,
};

static int xt_target_open(struct inode *inode, struct file *file)
{
	struct seq_file *seq;
	struct nf_mttg_trav *trav;
	int ret;

	trav = kmalloc(sizeof(*trav), GFP_KERNEL);
	if (trav == NULL)
		return -ENOMEM;

	ret = seq_open(file, &xt_target_seq_ops);
	if (ret < 0) {
		kfree(trav);
		return ret;
	}

	seq = file->private_data;
	seq->private = trav;
	trav->nfproto = (unsigned long)PDE(inode)->data;
	return 0;
}

static const struct file_operations xt_target_ops = {
	.owner	 = THIS_MODULE,
	.open	 = xt_target_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release_private,
};

#define FORMAT_TABLES	"_tables_names"
#define FORMAT_MATCHES	"_tables_matches"
#define FORMAT_TARGETS	"_tables_targets"

#endif /* CONFIG_PROC_FS */

/**
 * xt_hook_link - set up hooks for a new table
 * @table:	table with metadata needed to set up hooks
 * @fn:		Hook function
 *
 * This function will take care of creating and registering the necessary
 * Netfilter hooks for XT tables.
 */
struct nf_hook_ops *xt_hook_link(const struct xt_table *table, nf_hookfn *fn)
{
	unsigned int hook_mask = table->valid_hooks;
	uint8_t i, num_hooks = hweight32(hook_mask);
	uint8_t hooknum;
	struct nf_hook_ops *ops;
	int ret;

	ops = kmalloc(sizeof(*ops) * num_hooks, GFP_KERNEL);
	if (ops == NULL)
		return ERR_PTR(-ENOMEM);

	for (i = 0, hooknum = 0; i < num_hooks && hook_mask != 0;
	     hook_mask >>= 1, ++hooknum) {
		if (!(hook_mask & 1))
			continue;
		ops[i].hook     = fn;
		ops[i].owner    = table->me;
		ops[i].pf       = table->af;
		ops[i].hooknum  = hooknum;
		ops[i].priority = table->priority;
		++i;
	}

	ret = nf_register_hooks(ops, num_hooks);
	if (ret < 0) {
		kfree(ops);
		return ERR_PTR(ret);
	}

	return ops;
}
EXPORT_SYMBOL_GPL(xt_hook_link);

/**
 * xt_hook_unlink - remove hooks for a table
 * @table:	table whose valid_hooks mask was used for xt_hook_link
 * @ops:	nf_hook_ops array as returned by xt_hook_link
 */
void xt_hook_unlink(const struct xt_table *table, struct nf_hook_ops *ops)
{
	nf_unregister_hooks(ops, hweight32(table->valid_hooks));
	kfree(ops);
}
EXPORT_SYMBOL_GPL(xt_hook_unlink);

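/*
 * Create the per-family /proc/net entries ("<prefix>_tables_names",
 * "<prefix>_tables_matches", "<prefix>_tables_targets") when CONFIG_PROC_FS
 * is enabled; xt_proto_fini() removes them again.
 */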
int xt_proto_init(struct net *net, u_int8_t af)
{
#ifdef CONFIG_PROC_FS
	char buf[XT_FUNCTION_MAXNAMELEN];
	struct proc_dir_entry *proc;
#endif

	if (af >= ARRAY_SIZE(xt_prefix))
		return -EINVAL;


#ifdef CONFIG_PROC_FS
	strlcpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_TABLES, sizeof(buf));
	proc = proc_create_data(buf, 0440, net->proc_net, &xt_table_ops,
				(void *)(unsigned long)af);
	if (!proc)
		goto out;

	strlcpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_MATCHES, sizeof(buf));
	proc = proc_create_data(buf, 0440, net->proc_net, &xt_match_ops,
				(void *)(unsigned long)af);
	if (!proc)
		goto out_remove_tables;

	strlcpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_TARGETS, sizeof(buf));
	proc = proc_create_data(buf, 0440, net->proc_net, &xt_target_ops,
				(void *)(unsigned long)af);
	if (!proc)
		goto out_remove_matches;
#endif

	return 0;

#ifdef CONFIG_PROC_FS
out_remove_matches:
	strlcpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_MATCHES, sizeof(buf));
	proc_net_remove(net, buf);

out_remove_tables:
	strlcpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_TABLES, sizeof(buf));
	proc_net_remove(net, buf);
out:
	return -1;
#endif
}
EXPORT_SYMBOL_GPL(xt_proto_init);

void xt_proto_fini(struct net *net, u_int8_t af)
{
#ifdef CONFIG_PROC_FS
	char buf[XT_FUNCTION_MAXNAMELEN];

	strlcpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_TABLES, sizeof(buf));
	proc_net_remove(net, buf);

	strlcpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_TARGETS, sizeof(buf));
	proc_net_remove(net, buf);

	strlcpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_MATCHES, sizeof(buf));
	proc_net_remove(net, buf);
#endif /*CONFIG_PROC_FS*/
}
EXPORT_SYMBOL_GPL(xt_proto_fini);

static int __net_init xt_net_init(struct net *net)
{
	int i;

	for (i = 0; i < NFPROTO_NUMPROTO; i++)
		INIT_LIST_HEAD(&net->xt.tables[i]);
	return 0;
}

static struct pernet_operations xt_net_ops = {
	.init = xt_net_init,
};

static int __init xt_init(void)
{
	unsigned int i;
	int rv;

	for_each_possible_cpu(i) {
		struct xt_info_lock *lock = &per_cpu(xt_info_locks, i);

		seqlock_init(&lock->lock);
		lock->readers = 0;
	}

	xt = kmalloc(sizeof(struct xt_af) * NFPROTO_NUMPROTO, GFP_KERNEL);
	if (!xt)
		return -ENOMEM;

	for (i = 0; i < NFPROTO_NUMPROTO; i++) {
		mutex_init(&xt[i].mutex);
#ifdef CONFIG_COMPAT
		mutex_init(&xt[i].compat_mutex);
		xt[i].compat_tab = NULL;
#endif
		INIT_LIST_HEAD(&xt[i].target);
		INIT_LIST_HEAD(&xt[i].match);
	}
	rv = register_pernet_subsys(&xt_net_ops);
	if (rv < 0)
		kfree(xt);
	return rv;
}

static void __exit xt_fini(void)
{
	unregister_pernet_subsys(&xt_net_ops);
	kfree(xt);
}

module_init(xt_init);
module_exit(xt_fini);