1 /*
2  * Packet matching code.
3  *
4  * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
5  * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
6  *
7  * This program is free software; you can redistribute it and/or modify
8  * it under the terms of the GNU General Public License version 2 as
9  * published by the Free Software Foundation.
10  */
11 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
12 #include <linux/capability.h>
13 #include <linux/in.h>
14 #include <linux/skbuff.h>
15 #include <linux/kmod.h>
16 #include <linux/vmalloc.h>
17 #include <linux/netdevice.h>
18 #include <linux/module.h>
19 #include <linux/poison.h>
20 #include <linux/icmpv6.h>
21 #include <net/ipv6.h>
22 #include <net/compat.h>
23 #include <asm/uaccess.h>
24 #include <linux/mutex.h>
25 #include <linux/proc_fs.h>
26 #include <linux/err.h>
27 #include <linux/cpumask.h>
28 
29 #include <linux/netfilter_ipv6/ip6_tables.h>
30 #include <linux/netfilter/x_tables.h>
31 #include <net/netfilter/nf_log.h>
32 #include "../../netfilter/xt_repldata.h"
33 
34 MODULE_LICENSE("GPL");
35 MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
36 MODULE_DESCRIPTION("IPv6 packet filter");
37 
38 /*#define DEBUG_IP_FIREWALL*/
39 /*#define DEBUG_ALLOW_ALL*/ /* Useful for remote debugging */
40 /*#define DEBUG_IP_FIREWALL_USER*/
41 
42 #ifdef DEBUG_IP_FIREWALL
43 #define dprintf(format, args...) pr_info(format , ## args)
44 #else
45 #define dprintf(format, args...)
46 #endif
47 
48 #ifdef DEBUG_IP_FIREWALL_USER
49 #define duprintf(format, args...) pr_info(format , ## args)
50 #else
51 #define duprintf(format, args...)
52 #endif
53 
54 #ifdef CONFIG_NETFILTER_DEBUG
55 #define IP_NF_ASSERT(x)	WARN_ON(!(x))
56 #else
57 #define IP_NF_ASSERT(x)
58 #endif
59 
60 #if 0
61 /* All the better to debug you with... */
62 #define static
63 #define inline
64 #endif
65 
66 void *ip6t_alloc_initial_table(const struct xt_table *info)
67 {
68 	return xt_alloc_initial_table(ip6t, IP6T);
69 }
70 EXPORT_SYMBOL_GPL(ip6t_alloc_initial_table);
71 
72 /*
73    We keep a set of rules for each CPU, so we can avoid write-locking
74    them in the softirq when updating the counters and therefore
75    only need to read-lock in the softirq; doing a write_lock_bh() in user
76    context stops packets coming through and allows user context to read
77    the counters or update the rules.
78 
79    Hence the start of any table is given by get_table() below.  */
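/* Note on this version: the per-rule counters are protected by the per-cpu
   xt_recseq sequence counter rather than a rwlock.  Roughly, a reader takes
   a consistent snapshot the way get_counters() below does:

	do {
		start = read_seqcount_begin(s);
		bcnt  = iter->counters.bcnt;
		pcnt  = iter->counters.pcnt;
	} while (read_seqcount_retry(s, start));

   while the packet path brackets its updates with xt_write_recseq_begin()
   and xt_write_recseq_end() in ip6t_do_table(). */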
80 
81 /* Returns whether matches rule or not. */
82 /* Performance critical - called for every packet */
83 static inline bool
84 ip6_packet_match(const struct sk_buff *skb,
85 		 const char *indev,
86 		 const char *outdev,
87 		 const struct ip6t_ip6 *ip6info,
88 		 unsigned int *protoff,
89 		 int *fragoff, bool *hotdrop)
90 {
91 	unsigned long ret;
92 	const struct ipv6hdr *ipv6 = ipv6_hdr(skb);
93 
94 #define FWINV(bool, invflg) ((bool) ^ !!(ip6info->invflags & (invflg)))
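/* FWINV(cond, flag) XORs the raw comparison result with the rule's invert
   flag: e.g. FWINV(mismatch, IP6T_INV_SRCIP) is true when the source
   address differs and no "!" was given, or when it matches and the rule
   was negated -- either way the rule as a whole does not match. */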
95 
96 	if (FWINV(ipv6_masked_addr_cmp(&ipv6->saddr, &ip6info->smsk,
97 				       &ip6info->src), IP6T_INV_SRCIP) ||
98 	    FWINV(ipv6_masked_addr_cmp(&ipv6->daddr, &ip6info->dmsk,
99 				       &ip6info->dst), IP6T_INV_DSTIP)) {
100 		dprintf("Source or dest mismatch.\n");
101 /*
102 		dprintf("SRC: %u. Mask: %u. Target: %u.%s\n", ip->saddr,
103 			ipinfo->smsk.s_addr, ipinfo->src.s_addr,
104 			ipinfo->invflags & IP6T_INV_SRCIP ? " (INV)" : "");
105 		dprintf("DST: %u. Mask: %u. Target: %u.%s\n", ip->daddr,
106 			ipinfo->dmsk.s_addr, ipinfo->dst.s_addr,
107 			ipinfo->invflags & IP6T_INV_DSTIP ? " (INV)" : "");*/
108 		return false;
109 	}
110 
111 	ret = ifname_compare_aligned(indev, ip6info->iniface, ip6info->iniface_mask);
112 
113 	if (FWINV(ret != 0, IP6T_INV_VIA_IN)) {
114 		dprintf("VIA in mismatch (%s vs %s).%s\n",
115 			indev, ip6info->iniface,
116 			ip6info->invflags&IP6T_INV_VIA_IN ?" (INV)":"");
117 		return false;
118 	}
119 
120 	ret = ifname_compare_aligned(outdev, ip6info->outiface, ip6info->outiface_mask);
121 
122 	if (FWINV(ret != 0, IP6T_INV_VIA_OUT)) {
123 		dprintf("VIA out mismatch (%s vs %s).%s\n",
124 			outdev, ip6info->outiface,
125 			ip6info->invflags&IP6T_INV_VIA_OUT ?" (INV)":"");
126 		return false;
127 	}
128 
129 /* ... might want to do something with class and flowlabel here ... */
130 
131 	/* look for the desired protocol header */
132 	if((ip6info->flags & IP6T_F_PROTO)) {
133 		int protohdr;
134 		unsigned short _frag_off;
135 
136 		protohdr = ipv6_find_hdr(skb, protoff, -1, &_frag_off);
137 		if (protohdr < 0) {
138 			if (_frag_off == 0)
139 				*hotdrop = true;
140 			return false;
141 		}
142 		*fragoff = _frag_off;
143 
144 		dprintf("Packet protocol %hi ?= %s%hi.\n",
145 				protohdr,
146 				ip6info->invflags & IP6T_INV_PROTO ? "!":"",
147 				ip6info->proto);
148 
149 		if (ip6info->proto == protohdr) {
150 			if(ip6info->invflags & IP6T_INV_PROTO) {
151 				return false;
152 			}
153 			return true;
154 		}
155 
156 		/* We need match for the '-p all', too! */
157 		if ((ip6info->proto != 0) &&
158 			!(ip6info->invflags & IP6T_INV_PROTO))
159 			return false;
160 	}
161 	return true;
162 }
163 
164 /* should be ip6 safe */
165 static bool
166 ip6_checkentry(const struct ip6t_ip6 *ipv6)
167 {
168 	if (ipv6->flags & ~IP6T_F_MASK) {
169 		duprintf("Unknown flag bits set: %08X\n",
170 			 ipv6->flags & ~IP6T_F_MASK);
171 		return false;
172 	}
173 	if (ipv6->invflags & ~IP6T_INV_MASK) {
174 		duprintf("Unknown invflag bits set: %08X\n",
175 			 ipv6->invflags & ~IP6T_INV_MASK);
176 		return false;
177 	}
178 	return true;
179 }
180 
181 static unsigned int
182 ip6t_error(struct sk_buff *skb, const struct xt_action_param *par)
183 {
184 	if (net_ratelimit())
185 		pr_info("error: `%s'\n", (const char *)par->targinfo);
186 
187 	return NF_DROP;
188 }
189 
190 static inline struct ip6t_entry *
191 get_entry(const void *base, unsigned int offset)
192 {
193 	return (struct ip6t_entry *)(base + offset);
194 }
195 
196 /* All zeroes == unconditional rule. */
197 /* Mildly perf critical (only if packet tracing is on) */
198 static inline bool unconditional(const struct ip6t_ip6 *ipv6)
199 {
200 	static const struct ip6t_ip6 uncond;
201 
202 	return memcmp(ipv6, &uncond, sizeof(uncond)) == 0;
203 }
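/* Example: a rule loaded as "ip6tables -A INPUT -j DROP" with no address,
 * interface or protocol restrictions has an all-zero ip6t_ip6 part and is
 * therefore "unconditional" in the sense checked above. */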
204 
205 static inline const struct xt_entry_target *
206 ip6t_get_target_c(const struct ip6t_entry *e)
207 {
208 	return ip6t_get_target((struct ip6t_entry *)e);
209 }
210 
211 #if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
212     defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
213 /* This cries for unification! */
214 static const char *const hooknames[] = {
215 	[NF_INET_PRE_ROUTING]		= "PREROUTING",
216 	[NF_INET_LOCAL_IN]		= "INPUT",
217 	[NF_INET_FORWARD]		= "FORWARD",
218 	[NF_INET_LOCAL_OUT]		= "OUTPUT",
219 	[NF_INET_POST_ROUTING]		= "POSTROUTING",
220 };
221 
222 enum nf_ip_trace_comments {
223 	NF_IP6_TRACE_COMMENT_RULE,
224 	NF_IP6_TRACE_COMMENT_RETURN,
225 	NF_IP6_TRACE_COMMENT_POLICY,
226 };
227 
228 static const char *const comments[] = {
229 	[NF_IP6_TRACE_COMMENT_RULE]	= "rule",
230 	[NF_IP6_TRACE_COMMENT_RETURN]	= "return",
231 	[NF_IP6_TRACE_COMMENT_POLICY]	= "policy",
232 };
233 
234 static struct nf_loginfo trace_loginfo = {
235 	.type = NF_LOG_TYPE_LOG,
236 	.u = {
237 		.log = {
238 			.level = 4,
239 			.logflags = NF_LOG_MASK,
240 		},
241 	},
242 };
243 
244 /* Mildly perf critical (only if packet tracing is on) */
245 static inline int
246 get_chainname_rulenum(const struct ip6t_entry *s, const struct ip6t_entry *e,
247 		      const char *hookname, const char **chainname,
248 		      const char **comment, unsigned int *rulenum)
249 {
250 	const struct xt_standard_target *t = (void *)ip6t_get_target_c(s);
251 
252 	if (strcmp(t->target.u.kernel.target->name, XT_ERROR_TARGET) == 0) {
253 		/* Head of user chain: ERROR target with chainname */
254 		*chainname = t->target.data;
255 		(*rulenum) = 0;
256 	} else if (s == e) {
257 		(*rulenum)++;
258 
259 		if (s->target_offset == sizeof(struct ip6t_entry) &&
260 		    strcmp(t->target.u.kernel.target->name,
261 			   XT_STANDARD_TARGET) == 0 &&
262 		    t->verdict < 0 &&
263 		    unconditional(&s->ipv6)) {
264 			/* Tail of chains: STANDARD target (return/policy) */
265 			*comment = *chainname == hookname
266 				? comments[NF_IP6_TRACE_COMMENT_POLICY]
267 				: comments[NF_IP6_TRACE_COMMENT_RETURN];
268 		}
269 		return 1;
270 	} else
271 		(*rulenum)++;
272 
273 	return 0;
274 }
275 
276 static void trace_packet(const struct sk_buff *skb,
277 			 unsigned int hook,
278 			 const struct net_device *in,
279 			 const struct net_device *out,
280 			 const char *tablename,
281 			 const struct xt_table_info *private,
282 			 const struct ip6t_entry *e)
283 {
284 	const void *table_base;
285 	const struct ip6t_entry *root;
286 	const char *hookname, *chainname, *comment;
287 	const struct ip6t_entry *iter;
288 	unsigned int rulenum = 0;
289 
290 	table_base = private->entries[smp_processor_id()];
291 	root = get_entry(table_base, private->hook_entry[hook]);
292 
293 	hookname = chainname = hooknames[hook];
294 	comment = comments[NF_IP6_TRACE_COMMENT_RULE];
295 
296 	xt_entry_foreach(iter, root, private->size - private->hook_entry[hook])
297 		if (get_chainname_rulenum(iter, e, hookname,
298 		    &chainname, &comment, &rulenum) != 0)
299 			break;
300 
301 	nf_log_packet(AF_INET6, hook, skb, in, out, &trace_loginfo,
302 		      "TRACE: %s:%s:%s:%u ",
303 		      tablename, chainname, comment, rulenum);
304 }
305 #endif
306 
307 static inline __pure struct ip6t_entry *
308 ip6t_next_entry(const struct ip6t_entry *entry)
309 {
310 	return (void *)entry + entry->next_offset;
311 }
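/* Entries are laid out back to back in one blob; ->next_offset is the byte
 * distance from the start of one ip6t_entry (including its matches and
 * target) to the start of the next, so traversal is plain pointer
 * arithmetic:
 *
 *	| ip6t_entry | matches ... | target | ip6t_entry | ...
 *	|<----------- next_offset --------->|
 */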
312 
313 /* Returns one of the generic firewall policies, like NF_ACCEPT. */
314 unsigned int
315 ip6t_do_table(struct sk_buff *skb,
316 	      unsigned int hook,
317 	      const struct net_device *in,
318 	      const struct net_device *out,
319 	      struct xt_table *table)
320 {
321 	static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
322 	/* Initializing verdict to NF_DROP keeps gcc happy. */
323 	unsigned int verdict = NF_DROP;
324 	const char *indev, *outdev;
325 	const void *table_base;
326 	struct ip6t_entry *e, **jumpstack;
327 	unsigned int *stackptr, origptr, cpu;
328 	const struct xt_table_info *private;
329 	struct xt_action_param acpar;
330 	unsigned int addend;
331 
332 	/* Initialization */
333 	indev = in ? in->name : nulldevname;
334 	outdev = out ? out->name : nulldevname;
335 	/* We handle fragments by dealing with the first fragment as
336 	 * if it was a normal packet.  All other fragments are treated
337 	 * normally, except that they will NEVER match rules that ask
338 	 * things we don't know, ie. tcp syn flag or ports).  If the
339 	 * rule is also a fragment-specific rule, non-fragments won't
340 	 * match it. */
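	/* Example: a rule asking for "-p tcp" cannot be verified against a
	 * non-first fragment (the TCP header travels in the first fragment
	 * only), so ip6_packet_match() simply reports "no match" for such
	 * fragments rather than dropping them. */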
341 	acpar.hotdrop = false;
342 	acpar.in      = in;
343 	acpar.out     = out;
344 	acpar.family  = NFPROTO_IPV6;
345 	acpar.hooknum = hook;
346 
347 	IP_NF_ASSERT(table->valid_hooks & (1 << hook));
348 
349 	local_bh_disable();
350 	addend = xt_write_recseq_begin();
351 	private = table->private;
352 	cpu        = smp_processor_id();
353 	table_base = private->entries[cpu];
354 	jumpstack  = (struct ip6t_entry **)private->jumpstack[cpu];
355 	stackptr   = per_cpu_ptr(private->stackptr, cpu);
356 	origptr    = *stackptr;
357 
358 	e = get_entry(table_base, private->hook_entry[hook]);
359 
360 	do {
361 		const struct xt_entry_target *t;
362 		const struct xt_entry_match *ematch;
363 
364 		IP_NF_ASSERT(e);
365 		if (!ip6_packet_match(skb, indev, outdev, &e->ipv6,
366 		    &acpar.thoff, &acpar.fragoff, &acpar.hotdrop)) {
367  no_match:
368 			e = ip6t_next_entry(e);
369 			continue;
370 		}
371 
372 		xt_ematch_foreach(ematch, e) {
373 			acpar.match     = ematch->u.kernel.match;
374 			acpar.matchinfo = ematch->data;
375 			if (!acpar.match->match(skb, &acpar))
376 				goto no_match;
377 		}
378 
379 		ADD_COUNTER(e->counters, skb->len, 1);
380 
381 		t = ip6t_get_target_c(e);
382 		IP_NF_ASSERT(t->u.kernel.target);
383 
384 #if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
385     defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
386 		/* The packet is traced: log it */
387 		if (unlikely(skb->nf_trace))
388 			trace_packet(skb, hook, in, out,
389 				     table->name, private, e);
390 #endif
391 		/* Standard target? */
392 		if (!t->u.kernel.target->target) {
393 			int v;
394 
395 			v = ((struct xt_standard_target *)t)->verdict;
396 			if (v < 0) {
397 				/* Pop from stack? */
398 				if (v != XT_RETURN) {
399 					verdict = (unsigned)(-v) - 1;
400 					break;
401 				}
402 				if (*stackptr <= origptr)
403 					e = get_entry(table_base,
404 					    private->underflow[hook]);
405 				else
406 					e = ip6t_next_entry(jumpstack[--*stackptr]);
407 				continue;
408 			}
409 			if (table_base + v != ip6t_next_entry(e) &&
410 			    !(e->ipv6.flags & IP6T_F_GOTO)) {
411 				if (*stackptr >= private->stacksize) {
412 					verdict = NF_DROP;
413 					break;
414 				}
415 				jumpstack[(*stackptr)++] = e;
416 			}
417 
418 			e = get_entry(table_base, v);
419 			continue;
420 		}
421 
422 		acpar.target   = t->u.kernel.target;
423 		acpar.targinfo = t->data;
424 
425 		verdict = t->u.kernel.target->target(skb, &acpar);
426 		if (verdict == XT_CONTINUE)
427 			e = ip6t_next_entry(e);
428 		else
429 			/* Verdict */
430 			break;
431 	} while (!acpar.hotdrop);
432 
433 	*stackptr = origptr;
434 
435 	xt_write_recseq_end(addend);
436 	local_bh_enable();
437 
438 #ifdef DEBUG_ALLOW_ALL
439 	return NF_ACCEPT;
440 #else
441 	if (acpar.hotdrop)
442 		return NF_DROP;
443 	else return verdict;
444 #endif
445 }
446 
447 /* Figures out from what hook each rule can be called: returns 0 if
448    there are loops.  Puts hook bitmask in comefrom. */
449 static int
450 mark_source_chains(const struct xt_table_info *newinfo,
451 		   unsigned int valid_hooks, void *entry0)
452 {
453 	unsigned int hook;
454 
455 	/* No recursion; use packet counter to save back ptrs (reset
456 	   to 0 as we leave), and comefrom to save source hook bitmask */
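	/* Sketch of the walk: from each hook entry point we follow jumps
	 * depth-first, stashing the return position in e->counters.pcnt
	 * (cleared again on the way back) and OR-ing the hook bit into
	 * e->comefrom.  Re-entering an entry whose NF_INET_NUMHOOKS bit is
	 * still set means we came back to something on the current path,
	 * i.e. the chains form a loop, and 0 is returned. */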
457 	for (hook = 0; hook < NF_INET_NUMHOOKS; hook++) {
458 		unsigned int pos = newinfo->hook_entry[hook];
459 		struct ip6t_entry *e = (struct ip6t_entry *)(entry0 + pos);
460 
461 		if (!(valid_hooks & (1 << hook)))
462 			continue;
463 
464 		/* Set initial back pointer. */
465 		e->counters.pcnt = pos;
466 
467 		for (;;) {
468 			const struct xt_standard_target *t
469 				= (void *)ip6t_get_target_c(e);
470 			int visited = e->comefrom & (1 << hook);
471 
472 			if (e->comefrom & (1 << NF_INET_NUMHOOKS)) {
473 				pr_err("iptables: loop hook %u pos %u %08X.\n",
474 				       hook, pos, e->comefrom);
475 				return 0;
476 			}
477 			e->comefrom |= ((1 << hook) | (1 << NF_INET_NUMHOOKS));
478 
479 			/* Unconditional return/END. */
480 			if ((e->target_offset == sizeof(struct ip6t_entry) &&
481 			     (strcmp(t->target.u.user.name,
482 				     XT_STANDARD_TARGET) == 0) &&
483 			     t->verdict < 0 &&
484 			     unconditional(&e->ipv6)) || visited) {
485 				unsigned int oldpos, size;
486 
487 				if ((strcmp(t->target.u.user.name,
488 					    XT_STANDARD_TARGET) == 0) &&
489 				    t->verdict < -NF_MAX_VERDICT - 1) {
490 					duprintf("mark_source_chains: bad "
491 						"negative verdict (%i)\n",
492 								t->verdict);
493 					return 0;
494 				}
495 
496 				/* Return: backtrack through the last
497 				   big jump. */
498 				do {
499 					e->comefrom ^= (1<<NF_INET_NUMHOOKS);
500 #ifdef DEBUG_IP_FIREWALL_USER
501 					if (e->comefrom
502 					    & (1 << NF_INET_NUMHOOKS)) {
503 						duprintf("Back unset "
504 							 "on hook %u "
505 							 "rule %u\n",
506 							 hook, pos);
507 					}
508 #endif
509 					oldpos = pos;
510 					pos = e->counters.pcnt;
511 					e->counters.pcnt = 0;
512 
513 					/* We're at the start. */
514 					if (pos == oldpos)
515 						goto next;
516 
517 					e = (struct ip6t_entry *)
518 						(entry0 + pos);
519 				} while (oldpos == pos + e->next_offset);
520 
521 				/* Move along one */
522 				size = e->next_offset;
523 				e = (struct ip6t_entry *)
524 					(entry0 + pos + size);
525 				e->counters.pcnt = pos;
526 				pos += size;
527 			} else {
528 				int newpos = t->verdict;
529 
530 				if (strcmp(t->target.u.user.name,
531 					   XT_STANDARD_TARGET) == 0 &&
532 				    newpos >= 0) {
533 					if (newpos > newinfo->size -
534 						sizeof(struct ip6t_entry)) {
535 						duprintf("mark_source_chains: "
536 							"bad verdict (%i)\n",
537 								newpos);
538 						return 0;
539 					}
540 					/* This a jump; chase it. */
541 					duprintf("Jump rule %u -> %u\n",
542 						 pos, newpos);
543 				} else {
544 					/* ... this is a fallthru */
545 					newpos = pos + e->next_offset;
546 				}
547 				e = (struct ip6t_entry *)
548 					(entry0 + newpos);
549 				e->counters.pcnt = pos;
550 				pos = newpos;
551 			}
552 		}
553 		next:
554 		duprintf("Finished chain %u\n", hook);
555 	}
556 	return 1;
557 }
558 
559 static void cleanup_match(struct xt_entry_match *m, struct net *net)
560 {
561 	struct xt_mtdtor_param par;
562 
563 	par.net       = net;
564 	par.match     = m->u.kernel.match;
565 	par.matchinfo = m->data;
566 	par.family    = NFPROTO_IPV6;
567 	if (par.match->destroy != NULL)
568 		par.match->destroy(&par);
569 	module_put(par.match->me);
570 }
571 
572 static int
573 check_entry(const struct ip6t_entry *e, const char *name)
574 {
575 	const struct xt_entry_target *t;
576 
577 	if (!ip6_checkentry(&e->ipv6)) {
578 		duprintf("ip_tables: ip check failed %p %s.\n", e, name);
579 		return -EINVAL;
580 	}
581 
582 	if (e->target_offset + sizeof(struct xt_entry_target) >
583 	    e->next_offset)
584 		return -EINVAL;
585 
586 	t = ip6t_get_target_c(e);
587 	if (e->target_offset + t->u.target_size > e->next_offset)
588 		return -EINVAL;
589 
590 	return 0;
591 }
592 
593 static int check_match(struct xt_entry_match *m, struct xt_mtchk_param *par)
594 {
595 	const struct ip6t_ip6 *ipv6 = par->entryinfo;
596 	int ret;
597 
598 	par->match     = m->u.kernel.match;
599 	par->matchinfo = m->data;
600 
601 	ret = xt_check_match(par, m->u.match_size - sizeof(*m),
602 			     ipv6->proto, ipv6->invflags & IP6T_INV_PROTO);
603 	if (ret < 0) {
604 		duprintf("ip_tables: check failed for `%s'.\n",
605 			 par->match->name);
606 		return ret;
607 	}
608 	return 0;
609 }
610 
611 static int
612 find_check_match(struct xt_entry_match *m, struct xt_mtchk_param *par)
613 {
614 	struct xt_match *match;
615 	int ret;
616 
617 	match = xt_request_find_match(NFPROTO_IPV6, m->u.user.name,
618 				      m->u.user.revision);
619 	if (IS_ERR(match)) {
620 		duprintf("find_check_match: `%s' not found\n", m->u.user.name);
621 		return PTR_ERR(match);
622 	}
623 	m->u.kernel.match = match;
624 
625 	ret = check_match(m, par);
626 	if (ret)
627 		goto err;
628 
629 	return 0;
630 err:
631 	module_put(m->u.kernel.match->me);
632 	return ret;
633 }
634 
635 static int check_target(struct ip6t_entry *e, struct net *net, const char *name)
636 {
637 	struct xt_entry_target *t = ip6t_get_target(e);
638 	struct xt_tgchk_param par = {
639 		.net       = net,
640 		.table     = name,
641 		.entryinfo = e,
642 		.target    = t->u.kernel.target,
643 		.targinfo  = t->data,
644 		.hook_mask = e->comefrom,
645 		.family    = NFPROTO_IPV6,
646 	};
647 	int ret;
648 
649 	t = ip6t_get_target(e);
650 	ret = xt_check_target(&par, t->u.target_size - sizeof(*t),
651 	      e->ipv6.proto, e->ipv6.invflags & IP6T_INV_PROTO);
652 	if (ret < 0) {
653 		duprintf("ip_tables: check failed for `%s'.\n",
654 			 t->u.kernel.target->name);
655 		return ret;
656 	}
657 	return 0;
658 }
659 
660 static int
661 find_check_entry(struct ip6t_entry *e, struct net *net, const char *name,
662 		 unsigned int size)
663 {
664 	struct xt_entry_target *t;
665 	struct xt_target *target;
666 	int ret;
667 	unsigned int j;
668 	struct xt_mtchk_param mtpar;
669 	struct xt_entry_match *ematch;
670 
671 	ret = check_entry(e, name);
672 	if (ret)
673 		return ret;
674 
675 	j = 0;
676 	mtpar.net	= net;
677 	mtpar.table     = name;
678 	mtpar.entryinfo = &e->ipv6;
679 	mtpar.hook_mask = e->comefrom;
680 	mtpar.family    = NFPROTO_IPV6;
681 	xt_ematch_foreach(ematch, e) {
682 		ret = find_check_match(ematch, &mtpar);
683 		if (ret != 0)
684 			goto cleanup_matches;
685 		++j;
686 	}
687 
688 	t = ip6t_get_target(e);
689 	target = xt_request_find_target(NFPROTO_IPV6, t->u.user.name,
690 					t->u.user.revision);
691 	if (IS_ERR(target)) {
692 		duprintf("find_check_entry: `%s' not found\n", t->u.user.name);
693 		ret = PTR_ERR(target);
694 		goto cleanup_matches;
695 	}
696 	t->u.kernel.target = target;
697 
698 	ret = check_target(e, net, name);
699 	if (ret)
700 		goto err;
701 	return 0;
702  err:
703 	module_put(t->u.kernel.target->me);
704  cleanup_matches:
705 	xt_ematch_foreach(ematch, e) {
706 		if (j-- == 0)
707 			break;
708 		cleanup_match(ematch, net);
709 	}
710 	return ret;
711 }
712 
713 static bool check_underflow(const struct ip6t_entry *e)
714 {
715 	const struct xt_entry_target *t;
716 	unsigned int verdict;
717 
718 	if (!unconditional(&e->ipv6))
719 		return false;
720 	t = ip6t_get_target_c(e);
721 	if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0)
722 		return false;
723 	verdict = ((struct xt_standard_target *)t)->verdict;
724 	verdict = -verdict - 1;
725 	return verdict == NF_DROP || verdict == NF_ACCEPT;
726 }
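/* Userspace encodes standard verdicts as -(NF_verdict) - 1, e.g. NF_ACCEPT
 * (1) is stored as -2 and NF_DROP (0) as -1; the "-verdict - 1" above and
 * the "(unsigned)(-v) - 1" in ip6t_do_table() undo that encoding. */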
727 
728 static int
729 check_entry_size_and_hooks(struct ip6t_entry *e,
730 			   struct xt_table_info *newinfo,
731 			   const unsigned char *base,
732 			   const unsigned char *limit,
733 			   const unsigned int *hook_entries,
734 			   const unsigned int *underflows,
735 			   unsigned int valid_hooks)
736 {
737 	unsigned int h;
738 
739 	if ((unsigned long)e % __alignof__(struct ip6t_entry) != 0 ||
740 	    (unsigned char *)e + sizeof(struct ip6t_entry) >= limit) {
741 		duprintf("Bad offset %p\n", e);
742 		return -EINVAL;
743 	}
744 
745 	if (e->next_offset
746 	    < sizeof(struct ip6t_entry) + sizeof(struct xt_entry_target)) {
747 		duprintf("checking: element %p size %u\n",
748 			 e, e->next_offset);
749 		return -EINVAL;
750 	}
751 
752 	/* Check hooks & underflows */
753 	for (h = 0; h < NF_INET_NUMHOOKS; h++) {
754 		if (!(valid_hooks & (1 << h)))
755 			continue;
756 		if ((unsigned char *)e - base == hook_entries[h])
757 			newinfo->hook_entry[h] = hook_entries[h];
758 		if ((unsigned char *)e - base == underflows[h]) {
759 			if (!check_underflow(e)) {
760 				pr_err("Underflows must be unconditional and "
761 				       "use the STANDARD target with "
762 				       "ACCEPT/DROP\n");
763 				return -EINVAL;
764 			}
765 			newinfo->underflow[h] = underflows[h];
766 		}
767 	}
768 
769 	/* Clear counters and comefrom */
770 	e->counters = ((struct xt_counters) { 0, 0 });
771 	e->comefrom = 0;
772 	return 0;
773 }
774 
775 static void cleanup_entry(struct ip6t_entry *e, struct net *net)
776 {
777 	struct xt_tgdtor_param par;
778 	struct xt_entry_target *t;
779 	struct xt_entry_match *ematch;
780 
781 	/* Cleanup all matches */
782 	xt_ematch_foreach(ematch, e)
783 		cleanup_match(ematch, net);
784 	t = ip6t_get_target(e);
785 
786 	par.net      = net;
787 	par.target   = t->u.kernel.target;
788 	par.targinfo = t->data;
789 	par.family   = NFPROTO_IPV6;
790 	if (par.target->destroy != NULL)
791 		par.target->destroy(&par);
792 	module_put(par.target->me);
793 }
794 
795 /* Checks and translates the user-supplied table segment (held in
796    newinfo) */
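/* Rough call flow for a table replacement: do_replace() copies the blob
   from userspace into a fresh xt_table_info, translate_table() below
   validates every entry's size and hook offsets, rejects loops via
   mark_source_chains(), runs the per-entry checkentry handlers, and
   finally __do_replace() swaps the new table in with xt_replace_table()
   and hands the old counters back to userspace. */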
797 static int
798 translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0,
799                 const struct ip6t_replace *repl)
800 {
801 	struct ip6t_entry *iter;
802 	unsigned int i;
803 	int ret = 0;
804 
805 	newinfo->size = repl->size;
806 	newinfo->number = repl->num_entries;
807 
808 	/* Init all hooks to impossible value. */
809 	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
810 		newinfo->hook_entry[i] = 0xFFFFFFFF;
811 		newinfo->underflow[i] = 0xFFFFFFFF;
812 	}
813 
814 	duprintf("translate_table: size %u\n", newinfo->size);
815 	i = 0;
816 	/* Walk through entries, checking offsets. */
817 	xt_entry_foreach(iter, entry0, newinfo->size) {
818 		ret = check_entry_size_and_hooks(iter, newinfo, entry0,
819 						 entry0 + repl->size,
820 						 repl->hook_entry,
821 						 repl->underflow,
822 						 repl->valid_hooks);
823 		if (ret != 0)
824 			return ret;
825 		++i;
826 		if (strcmp(ip6t_get_target(iter)->u.user.name,
827 		    XT_ERROR_TARGET) == 0)
828 			++newinfo->stacksize;
829 	}
830 
831 	if (i != repl->num_entries) {
832 		duprintf("translate_table: %u not %u entries\n",
833 			 i, repl->num_entries);
834 		return -EINVAL;
835 	}
836 
837 	/* Check hooks all assigned */
838 	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
839 		/* Only hooks which are valid */
840 		if (!(repl->valid_hooks & (1 << i)))
841 			continue;
842 		if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
843 			duprintf("Invalid hook entry %u %u\n",
844 				 i, repl->hook_entry[i]);
845 			return -EINVAL;
846 		}
847 		if (newinfo->underflow[i] == 0xFFFFFFFF) {
848 			duprintf("Invalid underflow %u %u\n",
849 				 i, repl->underflow[i]);
850 			return -EINVAL;
851 		}
852 	}
853 
854 	if (!mark_source_chains(newinfo, repl->valid_hooks, entry0))
855 		return -ELOOP;
856 
857 	/* Finally, each sanity check must pass */
858 	i = 0;
859 	xt_entry_foreach(iter, entry0, newinfo->size) {
860 		ret = find_check_entry(iter, net, repl->name, repl->size);
861 		if (ret != 0)
862 			break;
863 		++i;
864 	}
865 
866 	if (ret != 0) {
867 		xt_entry_foreach(iter, entry0, newinfo->size) {
868 			if (i-- == 0)
869 				break;
870 			cleanup_entry(iter, net);
871 		}
872 		return ret;
873 	}
874 
875 	/* And one copy for every other CPU */
876 	for_each_possible_cpu(i) {
877 		if (newinfo->entries[i] && newinfo->entries[i] != entry0)
878 			memcpy(newinfo->entries[i], entry0, newinfo->size);
879 	}
880 
881 	return ret;
882 }
883 
884 static void
885 get_counters(const struct xt_table_info *t,
886 	     struct xt_counters counters[])
887 {
888 	struct ip6t_entry *iter;
889 	unsigned int cpu;
890 	unsigned int i;
891 
892 	for_each_possible_cpu(cpu) {
893 		seqcount_t *s = &per_cpu(xt_recseq, cpu);
894 
895 		i = 0;
896 		xt_entry_foreach(iter, t->entries[cpu], t->size) {
897 			u64 bcnt, pcnt;
898 			unsigned int start;
899 
900 			do {
901 				start = read_seqcount_begin(s);
902 				bcnt = iter->counters.bcnt;
903 				pcnt = iter->counters.pcnt;
904 			} while (read_seqcount_retry(s, start));
905 
906 			ADD_COUNTER(counters[i], bcnt, pcnt);
907 			++i;
908 		}
909 	}
910 }
911 
912 static struct xt_counters *alloc_counters(const struct xt_table *table)
913 {
914 	unsigned int countersize;
915 	struct xt_counters *counters;
916 	const struct xt_table_info *private = table->private;
917 
918 	/* We need atomic snapshot of counters: rest doesn't change
919 	   (other than comefrom, which userspace doesn't care
920 	   about). */
921 	countersize = sizeof(struct xt_counters) * private->number;
922 	counters = vzalloc(countersize);
923 
924 	if (counters == NULL)
925 		return ERR_PTR(-ENOMEM);
926 
927 	get_counters(private, counters);
928 
929 	return counters;
930 }
931 
932 static int
933 copy_entries_to_user(unsigned int total_size,
934 		     const struct xt_table *table,
935 		     void __user *userptr)
936 {
937 	unsigned int off, num;
938 	const struct ip6t_entry *e;
939 	struct xt_counters *counters;
940 	const struct xt_table_info *private = table->private;
941 	int ret = 0;
942 	const void *loc_cpu_entry;
943 
944 	counters = alloc_counters(table);
945 	if (IS_ERR(counters))
946 		return PTR_ERR(counters);
947 
948 	/* choose the copy that is on our node/cpu, ...
949 	 * This choice is lazy (because current thread is
950 	 * allowed to migrate to another cpu)
951 	 */
952 	loc_cpu_entry = private->entries[raw_smp_processor_id()];
953 	if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) {
954 		ret = -EFAULT;
955 		goto free_counters;
956 	}
957 
958 	/* FIXME: use iterator macros --RR */
959 	/* ... then go back and fix counters and names */
960 	for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){
961 		unsigned int i;
962 		const struct xt_entry_match *m;
963 		const struct xt_entry_target *t;
964 
965 		e = (struct ip6t_entry *)(loc_cpu_entry + off);
966 		if (copy_to_user(userptr + off
967 				 + offsetof(struct ip6t_entry, counters),
968 				 &counters[num],
969 				 sizeof(counters[num])) != 0) {
970 			ret = -EFAULT;
971 			goto free_counters;
972 		}
973 
974 		for (i = sizeof(struct ip6t_entry);
975 		     i < e->target_offset;
976 		     i += m->u.match_size) {
977 			m = (void *)e + i;
978 
979 			if (copy_to_user(userptr + off + i
980 					 + offsetof(struct xt_entry_match,
981 						    u.user.name),
982 					 m->u.kernel.match->name,
983 					 strlen(m->u.kernel.match->name)+1)
984 			    != 0) {
985 				ret = -EFAULT;
986 				goto free_counters;
987 			}
988 		}
989 
990 		t = ip6t_get_target_c(e);
991 		if (copy_to_user(userptr + off + e->target_offset
992 				 + offsetof(struct xt_entry_target,
993 					    u.user.name),
994 				 t->u.kernel.target->name,
995 				 strlen(t->u.kernel.target->name)+1) != 0) {
996 			ret = -EFAULT;
997 			goto free_counters;
998 		}
999 	}
1000 
1001  free_counters:
1002 	vfree(counters);
1003 	return ret;
1004 }
1005 
1006 #ifdef CONFIG_COMPAT
1007 static void compat_standard_from_user(void *dst, const void *src)
1008 {
1009 	int v = *(compat_int_t *)src;
1010 
1011 	if (v > 0)
1012 		v += xt_compat_calc_jump(AF_INET6, v);
1013 	memcpy(dst, &v, sizeof(v));
1014 }
1015 
1016 static int compat_standard_to_user(void __user *dst, const void *src)
1017 {
1018 	compat_int_t cv = *(int *)src;
1019 
1020 	if (cv > 0)
1021 		cv -= xt_compat_calc_jump(AF_INET6, cv);
1022 	return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
1023 }
1024 
1025 static int compat_calc_entry(const struct ip6t_entry *e,
1026 			     const struct xt_table_info *info,
1027 			     const void *base, struct xt_table_info *newinfo)
1028 {
1029 	const struct xt_entry_match *ematch;
1030 	const struct xt_entry_target *t;
1031 	unsigned int entry_offset;
1032 	int off, i, ret;
1033 
1034 	off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1035 	entry_offset = (void *)e - base;
1036 	xt_ematch_foreach(ematch, e)
1037 		off += xt_compat_match_offset(ematch->u.kernel.match);
1038 	t = ip6t_get_target_c(e);
1039 	off += xt_compat_target_offset(t->u.kernel.target);
1040 	newinfo->size -= off;
1041 	ret = xt_compat_add_offset(AF_INET6, entry_offset, off);
1042 	if (ret)
1043 		return ret;
1044 
1045 	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1046 		if (info->hook_entry[i] &&
1047 		    (e < (struct ip6t_entry *)(base + info->hook_entry[i])))
1048 			newinfo->hook_entry[i] -= off;
1049 		if (info->underflow[i] &&
1050 		    (e < (struct ip6t_entry *)(base + info->underflow[i])))
1051 			newinfo->underflow[i] -= off;
1052 	}
1053 	return 0;
1054 }
1055 
1056 static int compat_table_info(const struct xt_table_info *info,
1057 			     struct xt_table_info *newinfo)
1058 {
1059 	struct ip6t_entry *iter;
1060 	void *loc_cpu_entry;
1061 	int ret;
1062 
1063 	if (!newinfo || !info)
1064 		return -EINVAL;
1065 
1066 	/* we don't care about newinfo->entries[] */
1067 	memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
1068 	newinfo->initial_entries = 0;
1069 	loc_cpu_entry = info->entries[raw_smp_processor_id()];
1070 	xt_compat_init_offsets(AF_INET6, info->number);
1071 	xt_entry_foreach(iter, loc_cpu_entry, info->size) {
1072 		ret = compat_calc_entry(iter, info, loc_cpu_entry, newinfo);
1073 		if (ret != 0)
1074 			return ret;
1075 	}
1076 	return 0;
1077 }
1078 #endif
1079 
1080 static int get_info(struct net *net, void __user *user,
1081                     const int *len, int compat)
1082 {
1083 	char name[XT_TABLE_MAXNAMELEN];
1084 	struct xt_table *t;
1085 	int ret;
1086 
1087 	if (*len != sizeof(struct ip6t_getinfo)) {
1088 		duprintf("length %u != %zu\n", *len,
1089 			 sizeof(struct ip6t_getinfo));
1090 		return -EINVAL;
1091 	}
1092 
1093 	if (copy_from_user(name, user, sizeof(name)) != 0)
1094 		return -EFAULT;
1095 
1096 	name[XT_TABLE_MAXNAMELEN-1] = '\0';
1097 #ifdef CONFIG_COMPAT
1098 	if (compat)
1099 		xt_compat_lock(AF_INET6);
1100 #endif
1101 	t = try_then_request_module(xt_find_table_lock(net, AF_INET6, name),
1102 				    "ip6table_%s", name);
1103 	if (t && !IS_ERR(t)) {
1104 		struct ip6t_getinfo info;
1105 		const struct xt_table_info *private = t->private;
1106 #ifdef CONFIG_COMPAT
1107 		struct xt_table_info tmp;
1108 
1109 		if (compat) {
1110 			ret = compat_table_info(private, &tmp);
1111 			xt_compat_flush_offsets(AF_INET6);
1112 			private = &tmp;
1113 		}
1114 #endif
1115 		memset(&info, 0, sizeof(info));
1116 		info.valid_hooks = t->valid_hooks;
1117 		memcpy(info.hook_entry, private->hook_entry,
1118 		       sizeof(info.hook_entry));
1119 		memcpy(info.underflow, private->underflow,
1120 		       sizeof(info.underflow));
1121 		info.num_entries = private->number;
1122 		info.size = private->size;
1123 		strcpy(info.name, name);
1124 
1125 		if (copy_to_user(user, &info, *len) != 0)
1126 			ret = -EFAULT;
1127 		else
1128 			ret = 0;
1129 
1130 		xt_table_unlock(t);
1131 		module_put(t->me);
1132 	} else
1133 		ret = t ? PTR_ERR(t) : -ENOENT;
1134 #ifdef CONFIG_COMPAT
1135 	if (compat)
1136 		xt_compat_unlock(AF_INET6);
1137 #endif
1138 	return ret;
1139 }
1140 
1141 static int
1142 get_entries(struct net *net, struct ip6t_get_entries __user *uptr,
1143             const int *len)
1144 {
1145 	int ret;
1146 	struct ip6t_get_entries get;
1147 	struct xt_table *t;
1148 
1149 	if (*len < sizeof(get)) {
1150 		duprintf("get_entries: %u < %zu\n", *len, sizeof(get));
1151 		return -EINVAL;
1152 	}
1153 	if (copy_from_user(&get, uptr, sizeof(get)) != 0)
1154 		return -EFAULT;
1155 	if (*len != sizeof(struct ip6t_get_entries) + get.size) {
1156 		duprintf("get_entries: %u != %zu\n",
1157 			 *len, sizeof(get) + get.size);
1158 		return -EINVAL;
1159 	}
1160 
1161 	t = xt_find_table_lock(net, AF_INET6, get.name);
1162 	if (t && !IS_ERR(t)) {
1163 		struct xt_table_info *private = t->private;
1164 		duprintf("t->private->number = %u\n", private->number);
1165 		if (get.size == private->size)
1166 			ret = copy_entries_to_user(private->size,
1167 						   t, uptr->entrytable);
1168 		else {
1169 			duprintf("get_entries: I've got %u not %u!\n",
1170 				 private->size, get.size);
1171 			ret = -EAGAIN;
1172 		}
1173 		module_put(t->me);
1174 		xt_table_unlock(t);
1175 	} else
1176 		ret = t ? PTR_ERR(t) : -ENOENT;
1177 
1178 	return ret;
1179 }
1180 
1181 static int
1182 __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
1183 	     struct xt_table_info *newinfo, unsigned int num_counters,
1184 	     void __user *counters_ptr)
1185 {
1186 	int ret;
1187 	struct xt_table *t;
1188 	struct xt_table_info *oldinfo;
1189 	struct xt_counters *counters;
1190 	const void *loc_cpu_old_entry;
1191 	struct ip6t_entry *iter;
1192 
1193 	ret = 0;
1194 	counters = vzalloc(num_counters * sizeof(struct xt_counters));
1195 	if (!counters) {
1196 		ret = -ENOMEM;
1197 		goto out;
1198 	}
1199 
1200 	t = try_then_request_module(xt_find_table_lock(net, AF_INET6, name),
1201 				    "ip6table_%s", name);
1202 	if (!t || IS_ERR(t)) {
1203 		ret = t ? PTR_ERR(t) : -ENOENT;
1204 		goto free_newinfo_counters_untrans;
1205 	}
1206 
1207 	/* You lied! */
1208 	if (valid_hooks != t->valid_hooks) {
1209 		duprintf("Valid hook crap: %08X vs %08X\n",
1210 			 valid_hooks, t->valid_hooks);
1211 		ret = -EINVAL;
1212 		goto put_module;
1213 	}
1214 
1215 	oldinfo = xt_replace_table(t, num_counters, newinfo, &ret);
1216 	if (!oldinfo)
1217 		goto put_module;
1218 
1219 	/* Update module usage count based on number of rules */
1220 	duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n",
1221 		oldinfo->number, oldinfo->initial_entries, newinfo->number);
1222 	if ((oldinfo->number > oldinfo->initial_entries) ||
1223 	    (newinfo->number <= oldinfo->initial_entries))
1224 		module_put(t->me);
1225 	if ((oldinfo->number > oldinfo->initial_entries) &&
1226 	    (newinfo->number <= oldinfo->initial_entries))
1227 		module_put(t->me);
1228 
1229 	/* Get the old counters, and synchronize with replace */
1230 	get_counters(oldinfo, counters);
1231 
1232 	/* Decrease module usage counts and free resource */
1233 	loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()];
1234 	xt_entry_foreach(iter, loc_cpu_old_entry, oldinfo->size)
1235 		cleanup_entry(iter, net);
1236 
1237 	xt_free_table_info(oldinfo);
1238 	if (copy_to_user(counters_ptr, counters,
1239 			 sizeof(struct xt_counters) * num_counters) != 0) {
1240 		/* Silent error, can't fail, new table is already in place */
1241 		net_warn_ratelimited("ip6tables: counters copy to user failed while replacing table\n");
1242 	}
1243 	vfree(counters);
1244 	xt_table_unlock(t);
1245 	return ret;
1246 
1247  put_module:
1248 	module_put(t->me);
1249 	xt_table_unlock(t);
1250  free_newinfo_counters_untrans:
1251 	vfree(counters);
1252  out:
1253 	return ret;
1254 }
1255 
1256 static int
1257 do_replace(struct net *net, const void __user *user, unsigned int len)
1258 {
1259 	int ret;
1260 	struct ip6t_replace tmp;
1261 	struct xt_table_info *newinfo;
1262 	void *loc_cpu_entry;
1263 	struct ip6t_entry *iter;
1264 
1265 	if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1266 		return -EFAULT;
1267 
1268 	/* overflow check */
1269 	if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
1270 		return -ENOMEM;
1271 	tmp.name[sizeof(tmp.name)-1] = 0;
1272 
1273 	newinfo = xt_alloc_table_info(tmp.size);
1274 	if (!newinfo)
1275 		return -ENOMEM;
1276 
1277 	/* choose the copy that is on our node/cpu */
1278 	loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
1279 	if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1280 			   tmp.size) != 0) {
1281 		ret = -EFAULT;
1282 		goto free_newinfo;
1283 	}
1284 
1285 	ret = translate_table(net, newinfo, loc_cpu_entry, &tmp);
1286 	if (ret != 0)
1287 		goto free_newinfo;
1288 
1289 	duprintf("ip_tables: Translated table\n");
1290 
1291 	ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
1292 			   tmp.num_counters, tmp.counters);
1293 	if (ret)
1294 		goto free_newinfo_untrans;
1295 	return 0;
1296 
1297  free_newinfo_untrans:
1298 	xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
1299 		cleanup_entry(iter, net);
1300  free_newinfo:
1301 	xt_free_table_info(newinfo);
1302 	return ret;
1303 }
1304 
1305 static int
1306 do_add_counters(struct net *net, const void __user *user, unsigned int len,
1307 		int compat)
1308 {
1309 	unsigned int i, curcpu;
1310 	struct xt_counters_info tmp;
1311 	struct xt_counters *paddc;
1312 	unsigned int num_counters;
1313 	char *name;
1314 	int size;
1315 	void *ptmp;
1316 	struct xt_table *t;
1317 	const struct xt_table_info *private;
1318 	int ret = 0;
1319 	const void *loc_cpu_entry;
1320 	struct ip6t_entry *iter;
1321 	unsigned int addend;
1322 #ifdef CONFIG_COMPAT
1323 	struct compat_xt_counters_info compat_tmp;
1324 
1325 	if (compat) {
1326 		ptmp = &compat_tmp;
1327 		size = sizeof(struct compat_xt_counters_info);
1328 	} else
1329 #endif
1330 	{
1331 		ptmp = &tmp;
1332 		size = sizeof(struct xt_counters_info);
1333 	}
1334 
1335 	if (copy_from_user(ptmp, user, size) != 0)
1336 		return -EFAULT;
1337 
1338 #ifdef CONFIG_COMPAT
1339 	if (compat) {
1340 		num_counters = compat_tmp.num_counters;
1341 		name = compat_tmp.name;
1342 	} else
1343 #endif
1344 	{
1345 		num_counters = tmp.num_counters;
1346 		name = tmp.name;
1347 	}
1348 
1349 	if (len != size + num_counters * sizeof(struct xt_counters))
1350 		return -EINVAL;
1351 
1352 	paddc = vmalloc(len - size);
1353 	if (!paddc)
1354 		return -ENOMEM;
1355 
1356 	if (copy_from_user(paddc, user + size, len - size) != 0) {
1357 		ret = -EFAULT;
1358 		goto free;
1359 	}
1360 
1361 	t = xt_find_table_lock(net, AF_INET6, name);
1362 	if (!t || IS_ERR(t)) {
1363 		ret = t ? PTR_ERR(t) : -ENOENT;
1364 		goto free;
1365 	}
1366 
1367 
1368 	local_bh_disable();
1369 	private = t->private;
1370 	if (private->number != num_counters) {
1371 		ret = -EINVAL;
1372 		goto unlock_up_free;
1373 	}
1374 
1375 	i = 0;
1376 	/* Choose the copy that is on our node */
1377 	curcpu = smp_processor_id();
1378 	addend = xt_write_recseq_begin();
1379 	loc_cpu_entry = private->entries[curcpu];
1380 	xt_entry_foreach(iter, loc_cpu_entry, private->size) {
1381 		ADD_COUNTER(iter->counters, paddc[i].bcnt, paddc[i].pcnt);
1382 		++i;
1383 	}
1384 	xt_write_recseq_end(addend);
1385 
1386  unlock_up_free:
1387 	local_bh_enable();
1388 	xt_table_unlock(t);
1389 	module_put(t->me);
1390  free:
1391 	vfree(paddc);
1392 
1393 	return ret;
1394 }
1395 
1396 #ifdef CONFIG_COMPAT
1397 struct compat_ip6t_replace {
1398 	char			name[XT_TABLE_MAXNAMELEN];
1399 	u32			valid_hooks;
1400 	u32			num_entries;
1401 	u32			size;
1402 	u32			hook_entry[NF_INET_NUMHOOKS];
1403 	u32			underflow[NF_INET_NUMHOOKS];
1404 	u32			num_counters;
1405 	compat_uptr_t		counters;	/* struct xt_counters * */
1406 	struct compat_ip6t_entry entries[0];
1407 };
1408 
1409 static int
1410 compat_copy_entry_to_user(struct ip6t_entry *e, void __user **dstptr,
1411 			  unsigned int *size, struct xt_counters *counters,
1412 			  unsigned int i)
1413 {
1414 	struct xt_entry_target *t;
1415 	struct compat_ip6t_entry __user *ce;
1416 	u_int16_t target_offset, next_offset;
1417 	compat_uint_t origsize;
1418 	const struct xt_entry_match *ematch;
1419 	int ret = 0;
1420 
1421 	origsize = *size;
1422 	ce = (struct compat_ip6t_entry __user *)*dstptr;
1423 	if (copy_to_user(ce, e, sizeof(struct ip6t_entry)) != 0 ||
1424 	    copy_to_user(&ce->counters, &counters[i],
1425 	    sizeof(counters[i])) != 0)
1426 		return -EFAULT;
1427 
1428 	*dstptr += sizeof(struct compat_ip6t_entry);
1429 	*size -= sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1430 
1431 	xt_ematch_foreach(ematch, e) {
1432 		ret = xt_compat_match_to_user(ematch, dstptr, size);
1433 		if (ret != 0)
1434 			return ret;
1435 	}
1436 	target_offset = e->target_offset - (origsize - *size);
1437 	t = ip6t_get_target(e);
1438 	ret = xt_compat_target_to_user(t, dstptr, size);
1439 	if (ret)
1440 		return ret;
1441 	next_offset = e->next_offset - (origsize - *size);
1442 	if (put_user(target_offset, &ce->target_offset) != 0 ||
1443 	    put_user(next_offset, &ce->next_offset) != 0)
1444 		return -EFAULT;
1445 	return 0;
1446 }
1447 
1448 static int
1449 compat_find_calc_match(struct xt_entry_match *m,
1450 		       const char *name,
1451 		       const struct ip6t_ip6 *ipv6,
1452 		       unsigned int hookmask,
1453 		       int *size)
1454 {
1455 	struct xt_match *match;
1456 
1457 	match = xt_request_find_match(NFPROTO_IPV6, m->u.user.name,
1458 				      m->u.user.revision);
1459 	if (IS_ERR(match)) {
1460 		duprintf("compat_check_calc_match: `%s' not found\n",
1461 			 m->u.user.name);
1462 		return PTR_ERR(match);
1463 	}
1464 	m->u.kernel.match = match;
1465 	*size += xt_compat_match_offset(match);
1466 	return 0;
1467 }
1468 
1469 static void compat_release_entry(struct compat_ip6t_entry *e)
1470 {
1471 	struct xt_entry_target *t;
1472 	struct xt_entry_match *ematch;
1473 
1474 	/* Cleanup all matches */
1475 	xt_ematch_foreach(ematch, e)
1476 		module_put(ematch->u.kernel.match->me);
1477 	t = compat_ip6t_get_target(e);
1478 	module_put(t->u.kernel.target->me);
1479 }
1480 
1481 static int
1482 check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e,
1483 				  struct xt_table_info *newinfo,
1484 				  unsigned int *size,
1485 				  const unsigned char *base,
1486 				  const unsigned char *limit,
1487 				  const unsigned int *hook_entries,
1488 				  const unsigned int *underflows,
1489 				  const char *name)
1490 {
1491 	struct xt_entry_match *ematch;
1492 	struct xt_entry_target *t;
1493 	struct xt_target *target;
1494 	unsigned int entry_offset;
1495 	unsigned int j;
1496 	int ret, off, h;
1497 
1498 	duprintf("check_compat_entry_size_and_hooks %p\n", e);
1499 	if ((unsigned long)e % __alignof__(struct compat_ip6t_entry) != 0 ||
1500 	    (unsigned char *)e + sizeof(struct compat_ip6t_entry) >= limit) {
1501 		duprintf("Bad offset %p, limit = %p\n", e, limit);
1502 		return -EINVAL;
1503 	}
1504 
1505 	if (e->next_offset < sizeof(struct compat_ip6t_entry) +
1506 			     sizeof(struct compat_xt_entry_target)) {
1507 		duprintf("checking: element %p size %u\n",
1508 			 e, e->next_offset);
1509 		return -EINVAL;
1510 	}
1511 
1512 	/* For purposes of check_entry casting the compat entry is fine */
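	/* (Both entry layouts start with the same ipv6, target_offset and
	 * next_offset fields, so the offset sanity checks in check_entry()
	 * read the right values from the compat entry.) */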
1513 	ret = check_entry((struct ip6t_entry *)e, name);
1514 	if (ret)
1515 		return ret;
1516 
1517 	off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1518 	entry_offset = (void *)e - (void *)base;
1519 	j = 0;
1520 	xt_ematch_foreach(ematch, e) {
1521 		ret = compat_find_calc_match(ematch, name,
1522 					     &e->ipv6, e->comefrom, &off);
1523 		if (ret != 0)
1524 			goto release_matches;
1525 		++j;
1526 	}
1527 
1528 	t = compat_ip6t_get_target(e);
1529 	target = xt_request_find_target(NFPROTO_IPV6, t->u.user.name,
1530 					t->u.user.revision);
1531 	if (IS_ERR(target)) {
1532 		duprintf("check_compat_entry_size_and_hooks: `%s' not found\n",
1533 			 t->u.user.name);
1534 		ret = PTR_ERR(target);
1535 		goto release_matches;
1536 	}
1537 	t->u.kernel.target = target;
1538 
1539 	off += xt_compat_target_offset(target);
1540 	*size += off;
1541 	ret = xt_compat_add_offset(AF_INET6, entry_offset, off);
1542 	if (ret)
1543 		goto out;
1544 
1545 	/* Check hooks & underflows */
1546 	for (h = 0; h < NF_INET_NUMHOOKS; h++) {
1547 		if ((unsigned char *)e - base == hook_entries[h])
1548 			newinfo->hook_entry[h] = hook_entries[h];
1549 		if ((unsigned char *)e - base == underflows[h])
1550 			newinfo->underflow[h] = underflows[h];
1551 	}
1552 
1553 	/* Clear counters and comefrom */
1554 	memset(&e->counters, 0, sizeof(e->counters));
1555 	e->comefrom = 0;
1556 	return 0;
1557 
1558 out:
1559 	module_put(t->u.kernel.target->me);
1560 release_matches:
1561 	xt_ematch_foreach(ematch, e) {
1562 		if (j-- == 0)
1563 			break;
1564 		module_put(ematch->u.kernel.match->me);
1565 	}
1566 	return ret;
1567 }
1568 
1569 static int
1570 compat_copy_entry_from_user(struct compat_ip6t_entry *e, void **dstptr,
1571 			    unsigned int *size, const char *name,
1572 			    struct xt_table_info *newinfo, unsigned char *base)
1573 {
1574 	struct xt_entry_target *t;
1575 	struct ip6t_entry *de;
1576 	unsigned int origsize;
1577 	int ret, h;
1578 	struct xt_entry_match *ematch;
1579 
1580 	ret = 0;
1581 	origsize = *size;
1582 	de = (struct ip6t_entry *)*dstptr;
1583 	memcpy(de, e, sizeof(struct ip6t_entry));
1584 	memcpy(&de->counters, &e->counters, sizeof(e->counters));
1585 
1586 	*dstptr += sizeof(struct ip6t_entry);
1587 	*size += sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1588 
1589 	xt_ematch_foreach(ematch, e) {
1590 		ret = xt_compat_match_from_user(ematch, dstptr, size);
1591 		if (ret != 0)
1592 			return ret;
1593 	}
1594 	de->target_offset = e->target_offset - (origsize - *size);
1595 	t = compat_ip6t_get_target(e);
1596 	xt_compat_target_from_user(t, dstptr, size);
1597 
1598 	de->next_offset = e->next_offset - (origsize - *size);
1599 	for (h = 0; h < NF_INET_NUMHOOKS; h++) {
1600 		if ((unsigned char *)de - base < newinfo->hook_entry[h])
1601 			newinfo->hook_entry[h] -= origsize - *size;
1602 		if ((unsigned char *)de - base < newinfo->underflow[h])
1603 			newinfo->underflow[h] -= origsize - *size;
1604 	}
1605 	return ret;
1606 }
1607 
1608 static int compat_check_entry(struct ip6t_entry *e, struct net *net,
1609 			      const char *name)
1610 {
1611 	unsigned int j;
1612 	int ret = 0;
1613 	struct xt_mtchk_param mtpar;
1614 	struct xt_entry_match *ematch;
1615 
1616 	j = 0;
1617 	mtpar.net	= net;
1618 	mtpar.table     = name;
1619 	mtpar.entryinfo = &e->ipv6;
1620 	mtpar.hook_mask = e->comefrom;
1621 	mtpar.family    = NFPROTO_IPV6;
1622 	xt_ematch_foreach(ematch, e) {
1623 		ret = check_match(ematch, &mtpar);
1624 		if (ret != 0)
1625 			goto cleanup_matches;
1626 		++j;
1627 	}
1628 
1629 	ret = check_target(e, net, name);
1630 	if (ret)
1631 		goto cleanup_matches;
1632 	return 0;
1633 
1634  cleanup_matches:
1635 	xt_ematch_foreach(ematch, e) {
1636 		if (j-- == 0)
1637 			break;
1638 		cleanup_match(ematch, net);
1639 	}
1640 	return ret;
1641 }
1642 
1643 static int
1644 translate_compat_table(struct net *net,
1645 		       const char *name,
1646 		       unsigned int valid_hooks,
1647 		       struct xt_table_info **pinfo,
1648 		       void **pentry0,
1649 		       unsigned int total_size,
1650 		       unsigned int number,
1651 		       unsigned int *hook_entries,
1652 		       unsigned int *underflows)
1653 {
1654 	unsigned int i, j;
1655 	struct xt_table_info *newinfo, *info;
1656 	void *pos, *entry0, *entry1;
1657 	struct compat_ip6t_entry *iter0;
1658 	struct ip6t_entry *iter1;
1659 	unsigned int size;
1660 	int ret = 0;
1661 
1662 	info = *pinfo;
1663 	entry0 = *pentry0;
1664 	size = total_size;
1665 	info->number = number;
1666 
1667 	/* Init all hooks to impossible value. */
1668 	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1669 		info->hook_entry[i] = 0xFFFFFFFF;
1670 		info->underflow[i] = 0xFFFFFFFF;
1671 	}
1672 
1673 	duprintf("translate_compat_table: size %u\n", info->size);
1674 	j = 0;
1675 	xt_compat_lock(AF_INET6);
1676 	xt_compat_init_offsets(AF_INET6, number);
1677 	/* Walk through entries, checking offsets. */
1678 	xt_entry_foreach(iter0, entry0, total_size) {
1679 		ret = check_compat_entry_size_and_hooks(iter0, info, &size,
1680 							entry0,
1681 							entry0 + total_size,
1682 							hook_entries,
1683 							underflows,
1684 							name);
1685 		if (ret != 0)
1686 			goto out_unlock;
1687 		++j;
1688 	}
1689 
1690 	ret = -EINVAL;
1691 	if (j != number) {
1692 		duprintf("translate_compat_table: %u not %u entries\n",
1693 			 j, number);
1694 		goto out_unlock;
1695 	}
1696 
1697 	/* Check hooks all assigned */
1698 	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1699 		/* Only hooks which are valid */
1700 		if (!(valid_hooks & (1 << i)))
1701 			continue;
1702 		if (info->hook_entry[i] == 0xFFFFFFFF) {
1703 			duprintf("Invalid hook entry %u %u\n",
1704 				 i, hook_entries[i]);
1705 			goto out_unlock;
1706 		}
1707 		if (info->underflow[i] == 0xFFFFFFFF) {
1708 			duprintf("Invalid underflow %u %u\n",
1709 				 i, underflows[i]);
1710 			goto out_unlock;
1711 		}
1712 	}
1713 
1714 	ret = -ENOMEM;
1715 	newinfo = xt_alloc_table_info(size);
1716 	if (!newinfo)
1717 		goto out_unlock;
1718 
1719 	newinfo->number = number;
1720 	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1721 		newinfo->hook_entry[i] = info->hook_entry[i];
1722 		newinfo->underflow[i] = info->underflow[i];
1723 	}
1724 	entry1 = newinfo->entries[raw_smp_processor_id()];
1725 	pos = entry1;
1726 	size = total_size;
1727 	xt_entry_foreach(iter0, entry0, total_size) {
1728 		ret = compat_copy_entry_from_user(iter0, &pos, &size,
1729 						  name, newinfo, entry1);
1730 		if (ret != 0)
1731 			break;
1732 	}
1733 	xt_compat_flush_offsets(AF_INET6);
1734 	xt_compat_unlock(AF_INET6);
1735 	if (ret)
1736 		goto free_newinfo;
1737 
1738 	ret = -ELOOP;
1739 	if (!mark_source_chains(newinfo, valid_hooks, entry1))
1740 		goto free_newinfo;
1741 
1742 	i = 0;
1743 	xt_entry_foreach(iter1, entry1, newinfo->size) {
1744 		ret = compat_check_entry(iter1, net, name);
1745 		if (ret != 0)
1746 			break;
1747 		++i;
1748 		if (strcmp(ip6t_get_target(iter1)->u.user.name,
1749 		    XT_ERROR_TARGET) == 0)
1750 			++newinfo->stacksize;
1751 	}
1752 	if (ret) {
1753 		/*
1754 		 * The first i matches need cleanup_entry (calls ->destroy)
1755 		 * because they had called ->check already. The other j-i
1756 		 * entries need only release.
1757 		 */
1758 		int skip = i;
1759 		j -= i;
1760 		xt_entry_foreach(iter0, entry0, newinfo->size) {
1761 			if (skip-- > 0)
1762 				continue;
1763 			if (j-- == 0)
1764 				break;
1765 			compat_release_entry(iter0);
1766 		}
1767 		xt_entry_foreach(iter1, entry1, newinfo->size) {
1768 			if (i-- == 0)
1769 				break;
1770 			cleanup_entry(iter1, net);
1771 		}
1772 		xt_free_table_info(newinfo);
1773 		return ret;
1774 	}
1775 
1776 	/* And one copy for every other CPU */
1777 	for_each_possible_cpu(i)
1778 		if (newinfo->entries[i] && newinfo->entries[i] != entry1)
1779 			memcpy(newinfo->entries[i], entry1, newinfo->size);
1780 
1781 	*pinfo = newinfo;
1782 	*pentry0 = entry1;
1783 	xt_free_table_info(info);
1784 	return 0;
1785 
1786 free_newinfo:
1787 	xt_free_table_info(newinfo);
1788 out:
1789 	xt_entry_foreach(iter0, entry0, total_size) {
1790 		if (j-- == 0)
1791 			break;
1792 		compat_release_entry(iter0);
1793 	}
1794 	return ret;
1795 out_unlock:
1796 	xt_compat_flush_offsets(AF_INET6);
1797 	xt_compat_unlock(AF_INET6);
1798 	goto out;
1799 }
1800 
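/*
 * Compat entry point for IP6T_SO_SET_REPLACE: copy in the 32-bit replace
 * header and entry blob, translate it to the native layout with
 * translate_compat_table(), then hand the result to __do_replace().
 */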
1801 static int
1802 compat_do_replace(struct net *net, void __user *user, unsigned int len)
1803 {
1804 	int ret;
1805 	struct compat_ip6t_replace tmp;
1806 	struct xt_table_info *newinfo;
1807 	void *loc_cpu_entry;
1808 	struct ip6t_entry *iter;
1809 
1810 	if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1811 		return -EFAULT;
1812 
1813 	/* overflow check */
1814 	if (tmp.size >= INT_MAX / num_possible_cpus())
1815 		return -ENOMEM;
1816 	if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
1817 		return -ENOMEM;
1818 	tmp.name[sizeof(tmp.name)-1] = 0;
1819 
1820 	newinfo = xt_alloc_table_info(tmp.size);
1821 	if (!newinfo)
1822 		return -ENOMEM;
1823 
1824 	/* choose the copy that is on our node/cpu */
1825 	loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
1826 	if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1827 			   tmp.size) != 0) {
1828 		ret = -EFAULT;
1829 		goto free_newinfo;
1830 	}
1831 
1832 	ret = translate_compat_table(net, tmp.name, tmp.valid_hooks,
1833 				     &newinfo, &loc_cpu_entry, tmp.size,
1834 				     tmp.num_entries, tmp.hook_entry,
1835 				     tmp.underflow);
1836 	if (ret != 0)
1837 		goto free_newinfo;
1838 
1839 	duprintf("compat_do_replace: Translated table\n");
1840 
1841 	ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
1842 			   tmp.num_counters, compat_ptr(tmp.counters));
1843 	if (ret)
1844 		goto free_newinfo_untrans;
1845 	return 0;
1846 
1847  free_newinfo_untrans:
1848 	xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
1849 		cleanup_entry(iter, net);
1850  free_newinfo:
1851 	xt_free_table_info(newinfo);
1852 	return ret;
1853 }
1854 
1855 static int
1856 compat_do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user,
1857 		       unsigned int len)
1858 {
1859 	int ret;
1860 
1861 	if (!capable(CAP_NET_ADMIN))
1862 		return -EPERM;
1863 
1864 	switch (cmd) {
1865 	case IP6T_SO_SET_REPLACE:
1866 		ret = compat_do_replace(sock_net(sk), user, len);
1867 		break;
1868 
1869 	case IP6T_SO_SET_ADD_COUNTERS:
1870 		ret = do_add_counters(sock_net(sk), user, len, 1);
1871 		break;
1872 
1873 	default:
1874 		duprintf("compat_do_ip6t_set_ctl: unknown request %i\n", cmd);
1875 		ret = -EINVAL;
1876 	}
1877 
1878 	return ret;
1879 }
1880 
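/*
 * Reply layout for IP6T_SO_GET_ENTRIES as seen by 32-bit userspace:
 * the table name and total size, followed by the compat-sized entries.
 */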
1881 struct compat_ip6t_get_entries {
1882 	char name[XT_TABLE_MAXNAMELEN];
1883 	compat_uint_t size;
1884 	struct compat_ip6t_entry entrytable[0];
1885 };
1886 
1887 static int
1888 compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table,
1889 			    void __user *userptr)
1890 {
1891 	struct xt_counters *counters;
1892 	const struct xt_table_info *private = table->private;
1893 	void __user *pos;
1894 	unsigned int size;
1895 	int ret = 0;
1896 	const void *loc_cpu_entry;
1897 	unsigned int i = 0;
1898 	struct ip6t_entry *iter;
1899 
1900 	counters = alloc_counters(table);
1901 	if (IS_ERR(counters))
1902 		return PTR_ERR(counters);
1903 
1904 	/* choose the copy that is on our node/cpu, ...
1905 	 * This choice is lazy (because the current thread is
1906 	 * allowed to migrate to another cpu)
1907 	 */
1908 	loc_cpu_entry = private->entries[raw_smp_processor_id()];
1909 	pos = userptr;
1910 	size = total_size;
1911 	xt_entry_foreach(iter, loc_cpu_entry, total_size) {
1912 		ret = compat_copy_entry_to_user(iter, &pos,
1913 						&size, counters, i++);
1914 		if (ret != 0)
1915 			break;
1916 	}
1917 
1918 	vfree(counters);
1919 	return ret;
1920 }
1921 
1922 static int
1923 compat_get_entries(struct net *net, struct compat_ip6t_get_entries __user *uptr,
1924 		   int *len)
1925 {
1926 	int ret;
1927 	struct compat_ip6t_get_entries get;
1928 	struct xt_table *t;
1929 
1930 	if (*len < sizeof(get)) {
1931 		duprintf("compat_get_entries: %u < %zu\n", *len, sizeof(get));
1932 		return -EINVAL;
1933 	}
1934 
1935 	if (copy_from_user(&get, uptr, sizeof(get)) != 0)
1936 		return -EFAULT;
1937 
1938 	if (*len != sizeof(struct compat_ip6t_get_entries) + get.size) {
1939 		duprintf("compat_get_entries: %u != %zu\n",
1940 			 *len, sizeof(get) + get.size);
1941 		return -EINVAL;
1942 	}
1943 
1944 	xt_compat_lock(AF_INET6);
1945 	t = xt_find_table_lock(net, AF_INET6, get.name);
1946 	if (t && !IS_ERR(t)) {
1947 		const struct xt_table_info *private = t->private;
1948 		struct xt_table_info info;
1949 		duprintf("t->private->number = %u\n", private->number);
1950 		ret = compat_table_info(private, &info);
1951 		if (!ret && get.size == info.size) {
1952 			ret = compat_copy_entries_to_user(private->size,
1953 							  t, uptr->entrytable);
1954 		} else if (!ret) {
1955 			duprintf("compat_get_entries: I've got %u not %u!\n",
1956 				 private->size, get.size);
1957 			ret = -EAGAIN;
1958 		}
1959 		xt_compat_flush_offsets(AF_INET6);
1960 		module_put(t->me);
1961 		xt_table_unlock(t);
1962 	} else
1963 		ret = t ? PTR_ERR(t) : -ENOENT;
1964 
1965 	xt_compat_unlock(AF_INET6);
1966 	return ret;
1967 }
1968 
1969 static int do_ip6t_get_ctl(struct sock *, int, void __user *, int *);
1970 
1971 static int
1972 compat_do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
1973 {
1974 	int ret;
1975 
1976 	if (!capable(CAP_NET_ADMIN))
1977 		return -EPERM;
1978 
1979 	switch (cmd) {
1980 	case IP6T_SO_GET_INFO:
1981 		ret = get_info(sock_net(sk), user, len, 1);
1982 		break;
1983 	case IP6T_SO_GET_ENTRIES:
1984 		ret = compat_get_entries(sock_net(sk), user, len);
1985 		break;
1986 	default:
1987 		ret = do_ip6t_get_ctl(sk, cmd, user, len);
1988 	}
1989 	return ret;
1990 }
1991 #endif
1992 
1993 static int
1994 do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
1995 {
1996 	int ret;
1997 
1998 	if (!capable(CAP_NET_ADMIN))
1999 		return -EPERM;
2000 
2001 	switch (cmd) {
2002 	case IP6T_SO_SET_REPLACE:
2003 		ret = do_replace(sock_net(sk), user, len);
2004 		break;
2005 
2006 	case IP6T_SO_SET_ADD_COUNTERS:
2007 		ret = do_add_counters(sock_net(sk), user, len, 0);
2008 		break;
2009 
2010 	default:
2011 		duprintf("do_ip6t_set_ctl: unknown request %i\n", cmd);
2012 		ret = -EINVAL;
2013 	}
2014 
2015 	return ret;
2016 }
2017 
2018 static int
2019 do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
2020 {
2021 	int ret;
2022 
2023 	if (!capable(CAP_NET_ADMIN))
2024 		return -EPERM;
2025 
2026 	switch (cmd) {
2027 	case IP6T_SO_GET_INFO:
2028 		ret = get_info(sock_net(sk), user, len, 0);
2029 		break;
2030 
2031 	case IP6T_SO_GET_ENTRIES:
2032 		ret = get_entries(sock_net(sk), user, len);
2033 		break;
2034 
2035 	case IP6T_SO_GET_REVISION_MATCH:
2036 	case IP6T_SO_GET_REVISION_TARGET: {
2037 		struct xt_get_revision rev;
2038 		int target;
2039 
2040 		if (*len != sizeof(rev)) {
2041 			ret = -EINVAL;
2042 			break;
2043 		}
2044 		if (copy_from_user(&rev, user, sizeof(rev)) != 0) {
2045 			ret = -EFAULT;
2046 			break;
2047 		}
2048 		rev.name[sizeof(rev.name)-1] = 0;
2049 
2050 		if (cmd == IP6T_SO_GET_REVISION_TARGET)
2051 			target = 1;
2052 		else
2053 			target = 0;
2054 
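		/* Look up the requested match/target revision; if nothing is
		 * registered yet, try to auto-load a matching "ip6t_<name>"
		 * module and look again.
		 */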
2055 		try_then_request_module(xt_find_revision(AF_INET6, rev.name,
2056 							 rev.revision,
2057 							 target, &ret),
2058 					"ip6t_%s", rev.name);
2059 		break;
2060 	}
2061 
2062 	default:
2063 		duprintf("do_ip6t_get_ctl: unknown request %i\n", cmd);
2064 		ret = -EINVAL;
2065 	}
2066 
2067 	return ret;
2068 }
2069 
2070 struct xt_table *ip6t_register_table(struct net *net,
2071 				     const struct xt_table *table,
2072 				     const struct ip6t_replace *repl)
2073 {
2074 	int ret;
2075 	struct xt_table_info *newinfo;
2076 	struct xt_table_info bootstrap = {0};
2077 	void *loc_cpu_entry;
2078 	struct xt_table *new_table;
2079 
2080 	newinfo = xt_alloc_table_info(repl->size);
2081 	if (!newinfo) {
2082 		ret = -ENOMEM;
2083 		goto out;
2084 	}
2085 
2086 	/* choose the copy on our node/cpu, but don't care about preemption */
2087 	loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
2088 	memcpy(loc_cpu_entry, repl->entries, repl->size);
2089 
2090 	ret = translate_table(net, newinfo, loc_cpu_entry, repl);
2091 	if (ret != 0)
2092 		goto out_free;
2093 
2094 	new_table = xt_register_table(net, table, &bootstrap, newinfo);
2095 	if (IS_ERR(new_table)) {
2096 		ret = PTR_ERR(new_table);
2097 		goto out_free;
2098 	}
2099 	return new_table;
2100 
2101 out_free:
2102 	xt_free_table_info(newinfo);
2103 out:
2104 	return ERR_PTR(ret);
2105 }
2106 
2107 void ip6t_unregister_table(struct net *net, struct xt_table *table)
2108 {
2109 	struct xt_table_info *private;
2110 	void *loc_cpu_entry;
2111 	struct module *table_owner = table->me;
2112 	struct ip6t_entry *iter;
2113 
2114 	private = xt_unregister_table(table);
2115 
2116 	/* Decrease module usage counts and free resources */
2117 	loc_cpu_entry = private->entries[raw_smp_processor_id()];
2118 	xt_entry_foreach(iter, loc_cpu_entry, private->size)
2119 		cleanup_entry(iter, net);
2120 	if (private->number > private->initial_entries)
2121 		module_put(table_owner);
2122 	xt_free_table_info(private);
2123 }
2124 
2125 /* Returns true if the type and code are matched by the range, false otherwise */
2126 static inline bool
2127 icmp6_type_code_match(u_int8_t test_type, u_int8_t min_code, u_int8_t max_code,
2128 		     u_int8_t type, u_int8_t code,
2129 		     bool invert)
2130 {
2131 	return (type == test_type && code >= min_code && code <= max_code)
2132 		^ invert;
2133 }
2134 
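/*
 * Example: a rule specifying ICMPv6 type 128 (Echo Request) with code range
 * 0..0 matches an incoming Echo Request; with IP6T_ICMP_INV set, the same
 * packet would not match.
 */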
2135 static bool
2136 icmp6_match(const struct sk_buff *skb, struct xt_action_param *par)
2137 {
2138 	const struct icmp6hdr *ic;
2139 	struct icmp6hdr _icmph;
2140 	const struct ip6t_icmp *icmpinfo = par->matchinfo;
2141 
2142 	/* Must not be a fragment. */
2143 	if (par->fragoff != 0)
2144 		return false;
2145 
2146 	ic = skb_header_pointer(skb, par->thoff, sizeof(_icmph), &_icmph);
2147 	if (ic == NULL) {
2148 		/* We've been asked to examine this packet, and we
2149 		 * can't.  Hence, no choice but to drop.
2150 		 */
2151 		duprintf("Dropping evil ICMP tinygram.\n");
2152 		par->hotdrop = true;
2153 		return false;
2154 	}
2155 
2156 	return icmp6_type_code_match(icmpinfo->type,
2157 				     icmpinfo->code[0],
2158 				     icmpinfo->code[1],
2159 				     ic->icmp6_type, ic->icmp6_code,
2160 				     !!(icmpinfo->invflags&IP6T_ICMP_INV));
2161 }
2162 
2163 /* Called when user tries to insert an entry of this type. */
2164 static int icmp6_checkentry(const struct xt_mtchk_param *par)
2165 {
2166 	const struct ip6t_icmp *icmpinfo = par->matchinfo;
2167 
2168 	/* Must specify no unknown invflags */
2169 	return (icmpinfo->invflags & ~IP6T_ICMP_INV) ? -EINVAL : 0;
2170 }
2171 
2172 /* The built-in targets: standard (NULL) and error. */
2173 static struct xt_target ip6t_builtin_tg[] __read_mostly = {
2174 	{
2175 		.name             = XT_STANDARD_TARGET,
2176 		.targetsize       = sizeof(int),
2177 		.family           = NFPROTO_IPV6,
2178 #ifdef CONFIG_COMPAT
2179 		.compatsize       = sizeof(compat_int_t),
2180 		.compat_from_user = compat_standard_from_user,
2181 		.compat_to_user   = compat_standard_to_user,
2182 #endif
2183 	},
2184 	{
2185 		.name             = XT_ERROR_TARGET,
2186 		.target           = ip6t_error,
2187 		.targetsize       = XT_FUNCTION_MAXNAMELEN,
2188 		.family           = NFPROTO_IPV6,
2189 	},
2190 };
2191 
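/*
 * setsockopt/getsockopt interface used by the ip6tables userspace tools.
 * Commands in the IP6T_BASE_CTL..IP6T_SO_SET_MAX / IP6T_SO_GET_MAX range
 * are dispatched here by the netfilter sockopt core; the compat handlers
 * serve 32-bit userspace on 64-bit kernels.
 */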
2192 static struct nf_sockopt_ops ip6t_sockopts = {
2193 	.pf		= PF_INET6,
2194 	.set_optmin	= IP6T_BASE_CTL,
2195 	.set_optmax	= IP6T_SO_SET_MAX+1,
2196 	.set		= do_ip6t_set_ctl,
2197 #ifdef CONFIG_COMPAT
2198 	.compat_set	= compat_do_ip6t_set_ctl,
2199 #endif
2200 	.get_optmin	= IP6T_BASE_CTL,
2201 	.get_optmax	= IP6T_SO_GET_MAX+1,
2202 	.get		= do_ip6t_get_ctl,
2203 #ifdef CONFIG_COMPAT
2204 	.compat_get	= compat_do_ip6t_get_ctl,
2205 #endif
2206 	.owner		= THIS_MODULE,
2207 };
2208 
2209 static struct xt_match ip6t_builtin_mt[] __read_mostly = {
2210 	{
2211 		.name       = "icmp6",
2212 		.match      = icmp6_match,
2213 		.matchsize  = sizeof(struct ip6t_icmp),
2214 		.checkentry = icmp6_checkentry,
2215 		.proto      = IPPROTO_ICMPV6,
2216 		.family     = NFPROTO_IPV6,
2217 	},
2218 };
2219 
2220 static int __net_init ip6_tables_net_init(struct net *net)
2221 {
2222 	return xt_proto_init(net, NFPROTO_IPV6);
2223 }
2224 
2225 static void __net_exit ip6_tables_net_exit(struct net *net)
2226 {
2227 	xt_proto_fini(net, NFPROTO_IPV6);
2228 }
2229 
2230 static struct pernet_operations ip6_tables_net_ops = {
2231 	.init = ip6_tables_net_init,
2232 	.exit = ip6_tables_net_exit,
2233 };
2234 
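/*
 * Module init order: per-net state first, then the built-in targets and
 * matches, and finally the sockopt interface that exposes the tables to
 * userspace.  ip6_tables_fini() tears everything down in reverse order.
 */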
2235 static int __init ip6_tables_init(void)
2236 {
2237 	int ret;
2238 
2239 	ret = register_pernet_subsys(&ip6_tables_net_ops);
2240 	if (ret < 0)
2241 		goto err1;
2242 
2243 	/* No one else will be downing sem now, so we won't sleep */
2244 	ret = xt_register_targets(ip6t_builtin_tg, ARRAY_SIZE(ip6t_builtin_tg));
2245 	if (ret < 0)
2246 		goto err2;
2247 	ret = xt_register_matches(ip6t_builtin_mt, ARRAY_SIZE(ip6t_builtin_mt));
2248 	if (ret < 0)
2249 		goto err4;
2250 
2251 	/* Register setsockopt */
2252 	ret = nf_register_sockopt(&ip6t_sockopts);
2253 	if (ret < 0)
2254 		goto err5;
2255 
2256 	pr_info("(C) 2000-2006 Netfilter Core Team\n");
2257 	return 0;
2258 
2259 err5:
2260 	xt_unregister_matches(ip6t_builtin_mt, ARRAY_SIZE(ip6t_builtin_mt));
2261 err4:
2262 	xt_unregister_targets(ip6t_builtin_tg, ARRAY_SIZE(ip6t_builtin_tg));
2263 err2:
2264 	unregister_pernet_subsys(&ip6_tables_net_ops);
2265 err1:
2266 	return ret;
2267 }
2268 
2269 static void __exit ip6_tables_fini(void)
2270 {
2271 	nf_unregister_sockopt(&ip6t_sockopts);
2272 
2273 	xt_unregister_matches(ip6t_builtin_mt, ARRAY_SIZE(ip6t_builtin_mt));
2274 	xt_unregister_targets(ip6t_builtin_tg, ARRAY_SIZE(ip6t_builtin_tg));
2275 	unregister_pernet_subsys(&ip6_tables_net_ops);
2276 }
2277 
2278 /*
2279  * Find the offset of the specified header, or the protocol number of the
2280  * last header if target < 0.  The "last header" is the transport protocol
2281  * header, ESP, or "No next header".
2282  *
2283  * If the target header is found, its offset is stored in *offset and its
2284  * protocol number is returned.  Otherwise a negative errno is returned
2285  * (-ENOENT if it is not found, -EBADMSG if the packet is malformed).
2286  *
2287  * If the first fragment doesn't contain the final protocol header or
2288  * NEXTHDR_NONE it is considered invalid.
2289  *
2290  * Note that a non-first fragment is a special case: "the protocol number
2291  * of the last header" is the "next header" field of the Fragment header.
2292  * In that case *offset is meaningless, and the fragment offset is stored
2293  * in *fragoff if fragoff isn't NULL.
2294  */
2295 int ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset,
2296 		  int target, unsigned short *fragoff)
2297 {
2298 	unsigned int start = skb_network_offset(skb) + sizeof(struct ipv6hdr);
2299 	u8 nexthdr = ipv6_hdr(skb)->nexthdr;
2300 	unsigned int len = skb->len - start;
2301 
2302 	if (fragoff)
2303 		*fragoff = 0;
2304 
2305 	while (nexthdr != target) {
2306 		struct ipv6_opt_hdr _hdr, *hp;
2307 		unsigned int hdrlen;
2308 
2309 		if ((!ipv6_ext_hdr(nexthdr)) || nexthdr == NEXTHDR_NONE) {
2310 			if (target < 0)
2311 				break;
2312 			return -ENOENT;
2313 		}
2314 
2315 		hp = skb_header_pointer(skb, start, sizeof(_hdr), &_hdr);
2316 		if (hp == NULL)
2317 			return -EBADMSG;
2318 		if (nexthdr == NEXTHDR_FRAGMENT) {
2319 			unsigned short _frag_off;
2320 			__be16 *fp;
2321 			fp = skb_header_pointer(skb,
2322 						start+offsetof(struct frag_hdr,
2323 							       frag_off),
2324 						sizeof(_frag_off),
2325 						&_frag_off);
2326 			if (fp == NULL)
2327 				return -EBADMSG;
2328 
2329 			_frag_off = ntohs(*fp) & ~0x7;
2330 			if (_frag_off) {
2331 				if (target < 0 &&
2332 				    ((!ipv6_ext_hdr(hp->nexthdr)) ||
2333 				     hp->nexthdr == NEXTHDR_NONE)) {
2334 					if (fragoff)
2335 						*fragoff = _frag_off;
2336 					return hp->nexthdr;
2337 				}
2338 				return -ENOENT;
2339 			}
2340 			hdrlen = 8;
2341 		} else if (nexthdr == NEXTHDR_AUTH)
2342 			hdrlen = (hp->hdrlen + 2) << 2;
2343 		else
2344 			hdrlen = ipv6_optlen(hp);
2345 
2346 		nexthdr = hp->nexthdr;
2347 		len -= hdrlen;
2348 		start += hdrlen;
2349 	}
2350 
2351 	*offset = start;
2352 	return nexthdr;
2353 }
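/*
 * Illustrative usage (a sketch, not code from this file): a caller wanting
 * the offset of the TCP header could do
 *
 *	unsigned int thoff;
 *	int err = ipv6_find_hdr(skb, &thoff, IPPROTO_TCP, NULL);
 *
 * On success err is IPPROTO_TCP and thoff is the TCP header's offset from
 * skb->data; otherwise err is a negative errno (-ENOENT, -EBADMSG).
 */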
2354 
2355 EXPORT_SYMBOL(ip6t_register_table);
2356 EXPORT_SYMBOL(ip6t_unregister_table);
2357 EXPORT_SYMBOL(ip6t_do_table);
2358 EXPORT_SYMBOL(ipv6_find_hdr);
2359 
2360 module_init(ip6_tables_init);
2361 module_exit(ip6_tables_fini);
2362